Consistent formatting for Dawn/Tint.

This CL updates the clang-format files to have a single shared format
between Dawn and Tint. The major changes are: indentation is 4 spaces,
lines are limited to 100 columns, and namespace contents are not
indented.
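
For example, a namespace body that the old style indented is now flush
with the enclosing namespace under the shared format:

    // Old format (indented namespace contents):
    namespace dawn {
        class Adapter;
    }  // namespace dawn

    // New shared format (4-space indent, 100 columns, flat namespaces):
    namespace dawn {
    class Adapter;
    }  // namespace dawn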

Bug: dawn:1339
Change-Id: I4208742c95643998d9fd14e77a9cc558071ded39
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/87603
Commit-Queue: Dan Sinclair <dsinclair@chromium.org>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Kokoro: Kokoro <noreply+kokoro@google.com>
diff --git a/.clang-format b/.clang-format
index ff58eea..30216bc 100644
--- a/.clang-format
+++ b/.clang-format
@@ -1,8 +1,5 @@
 # http://clang.llvm.org/docs/ClangFormatStyleOptions.html
 BasedOnStyle: Chromium
-Standard: Cpp11
-
-AllowShortFunctionsOnASingleLine: false
 
 ColumnLimit: 100
 
@@ -11,10 +8,3 @@
 ObjCBlockIndentWidth: 4
 AccessModifierOffset: -2
 
-CompactNamespaces: true
-
-# This should result in only one indentation level with compacted namespaces
-NamespaceIndentation: All
-
-# Use this option once clang-format 6 is out.
-IndentPPDirectives: AfterHash
diff --git a/PRESUBMIT.py b/PRESUBMIT.py
index 7872f7b..7cfb3ae 100644
--- a/PRESUBMIT.py
+++ b/PRESUBMIT.py
@@ -121,7 +121,7 @@
         "third_party/khronos/KHR/khrplatform.h",  # Third party file
         "tools/roll-all",  # Branch name
         "tools/src/container/key.go",  # External URL
-        "tools/src/go.sum",  # External URL
+        "go.sum",  # External URL
     ]
     return file.LocalPath() not in filter_list
 
diff --git a/include/dawn/CPPLINT.cfg b/include/dawn/CPPLINT.cfg
deleted file mode 100644
index f5c9c6d..0000000
--- a/include/dawn/CPPLINT.cfg
+++ /dev/null
@@ -1 +0,0 @@
-filter=-runtime/indentation_namespace
diff --git a/include/dawn/EnumClassBitmasks.h b/include/dawn/EnumClassBitmasks.h
index 7bfe4ec..0dbe090 100644
--- a/include/dawn/EnumClassBitmasks.h
+++ b/include/dawn/EnumClassBitmasks.h
@@ -31,126 +31,117 @@
 
 namespace dawn {
 
-    template <typename T>
-    struct IsDawnBitmask {
-        static constexpr bool enable = false;
-    };
+template <typename T>
+struct IsDawnBitmask {
+    static constexpr bool enable = false;
+};
 
-    template <typename T, typename Enable = void>
-    struct LowerBitmask {
-        static constexpr bool enable = false;
-    };
+template <typename T, typename Enable = void>
+struct LowerBitmask {
+    static constexpr bool enable = false;
+};
 
-    template <typename T>
-    struct LowerBitmask<T, typename std::enable_if<IsDawnBitmask<T>::enable>::type> {
-        static constexpr bool enable = true;
-        using type = T;
-        constexpr static T Lower(T t) {
-            return t;
-        }
-    };
+template <typename T>
+struct LowerBitmask<T, typename std::enable_if<IsDawnBitmask<T>::enable>::type> {
+    static constexpr bool enable = true;
+    using type = T;
+    constexpr static T Lower(T t) { return t; }
+};
 
-    template <typename T>
-    struct BoolConvertible {
-        using Integral = typename std::underlying_type<T>::type;
+template <typename T>
+struct BoolConvertible {
+    using Integral = typename std::underlying_type<T>::type;
 
-        // NOLINTNEXTLINE(runtime/explicit)
-        constexpr BoolConvertible(Integral value) : value(value) {
-        }
-        constexpr operator bool() const {
-            return value != 0;
-        }
-        constexpr operator T() const {
-            return static_cast<T>(value);
-        }
+    // NOLINTNEXTLINE(runtime/explicit)
+    constexpr BoolConvertible(Integral value) : value(value) {}
+    constexpr operator bool() const { return value != 0; }
+    constexpr operator T() const { return static_cast<T>(value); }
 
-        Integral value;
-    };
+    Integral value;
+};
 
-    template <typename T>
-    struct LowerBitmask<BoolConvertible<T>> {
-        static constexpr bool enable = true;
-        using type = T;
-        static constexpr type Lower(BoolConvertible<T> t) {
-            return t;
-        }
-    };
+template <typename T>
+struct LowerBitmask<BoolConvertible<T>> {
+    static constexpr bool enable = true;
+    using type = T;
+    static constexpr type Lower(BoolConvertible<T> t) { return t; }
+};
 
-    template <typename T1,
-              typename T2,
-              typename = typename std::enable_if<LowerBitmask<T1>::enable &&
-                                                 LowerBitmask<T2>::enable>::type>
-    constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator|(T1 left, T2 right) {
-        using T = typename LowerBitmask<T1>::type;
-        using Integral = typename std::underlying_type<T>::type;
-        return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) |
-               static_cast<Integral>(LowerBitmask<T2>::Lower(right));
-    }
+template <
+    typename T1,
+    typename T2,
+    typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
+constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator|(T1 left, T2 right) {
+    using T = typename LowerBitmask<T1>::type;
+    using Integral = typename std::underlying_type<T>::type;
+    return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) |
+           static_cast<Integral>(LowerBitmask<T2>::Lower(right));
+}
 
-    template <typename T1,
-              typename T2,
-              typename = typename std::enable_if<LowerBitmask<T1>::enable &&
-                                                 LowerBitmask<T2>::enable>::type>
-    constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator&(T1 left, T2 right) {
-        using T = typename LowerBitmask<T1>::type;
-        using Integral = typename std::underlying_type<T>::type;
-        return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) &
-               static_cast<Integral>(LowerBitmask<T2>::Lower(right));
-    }
+template <
+    typename T1,
+    typename T2,
+    typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
+constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator&(T1 left, T2 right) {
+    using T = typename LowerBitmask<T1>::type;
+    using Integral = typename std::underlying_type<T>::type;
+    return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) &
+           static_cast<Integral>(LowerBitmask<T2>::Lower(right));
+}
 
-    template <typename T1,
-              typename T2,
-              typename = typename std::enable_if<LowerBitmask<T1>::enable &&
-                                                 LowerBitmask<T2>::enable>::type>
-    constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator^(T1 left, T2 right) {
-        using T = typename LowerBitmask<T1>::type;
-        using Integral = typename std::underlying_type<T>::type;
-        return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) ^
-               static_cast<Integral>(LowerBitmask<T2>::Lower(right));
-    }
+template <
+    typename T1,
+    typename T2,
+    typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
+constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator^(T1 left, T2 right) {
+    using T = typename LowerBitmask<T1>::type;
+    using Integral = typename std::underlying_type<T>::type;
+    return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) ^
+           static_cast<Integral>(LowerBitmask<T2>::Lower(right));
+}
 
-    template <typename T1>
-    constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator~(T1 t) {
-        using T = typename LowerBitmask<T1>::type;
-        using Integral = typename std::underlying_type<T>::type;
-        return ~static_cast<Integral>(LowerBitmask<T1>::Lower(t));
-    }
+template <typename T1>
+constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator~(T1 t) {
+    using T = typename LowerBitmask<T1>::type;
+    using Integral = typename std::underlying_type<T>::type;
+    return ~static_cast<Integral>(LowerBitmask<T1>::Lower(t));
+}
 
-    template <typename T,
-              typename T2,
-              typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
-                                                 LowerBitmask<T2>::enable>::type>
-    constexpr T& operator&=(T& l, T2 right) {
-        T r = LowerBitmask<T2>::Lower(right);
-        l = l & r;
-        return l;
-    }
+template <
+    typename T,
+    typename T2,
+    typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
+constexpr T& operator&=(T& l, T2 right) {
+    T r = LowerBitmask<T2>::Lower(right);
+    l = l & r;
+    return l;
+}
 
-    template <typename T,
-              typename T2,
-              typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
-                                                 LowerBitmask<T2>::enable>::type>
-    constexpr T& operator|=(T& l, T2 right) {
-        T r = LowerBitmask<T2>::Lower(right);
-        l = l | r;
-        return l;
-    }
+template <
+    typename T,
+    typename T2,
+    typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
+constexpr T& operator|=(T& l, T2 right) {
+    T r = LowerBitmask<T2>::Lower(right);
+    l = l | r;
+    return l;
+}
 
-    template <typename T,
-              typename T2,
-              typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
-                                                 LowerBitmask<T2>::enable>::type>
-    constexpr T& operator^=(T& l, T2 right) {
-        T r = LowerBitmask<T2>::Lower(right);
-        l = l ^ r;
-        return l;
-    }
+template <
+    typename T,
+    typename T2,
+    typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
+constexpr T& operator^=(T& l, T2 right) {
+    T r = LowerBitmask<T2>::Lower(right);
+    l = l ^ r;
+    return l;
+}
 
-    template <typename T>
-    constexpr bool HasZeroOrOneBits(T value) {
-        using Integral = typename std::underlying_type<T>::type;
-        return (static_cast<Integral>(value) & (static_cast<Integral>(value) - 1)) == 0;
-    }
+template <typename T>
+constexpr bool HasZeroOrOneBits(T value) {
+    using Integral = typename std::underlying_type<T>::type;
+    return (static_cast<Integral>(value) & (static_cast<Integral>(value) - 1)) == 0;
+}
 
 }  // namespace dawn
 
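A minimal usage sketch of the header above: an enum class opts into the gated
operators by specializing IsDawnBitmask. The enum here is hypothetical; real
flag enums opt in the same way.

    #include <cstdint>

    #include "dawn/EnumClassBitmasks.h"

    // Hypothetical flag enum, for illustration only.
    enum class ExampleUsage : uint32_t {
        None = 0,
        Read = 1,
        Write = 2,
    };

    namespace dawn {
    template <>
    struct IsDawnBitmask<ExampleUsage> {
        static constexpr bool enable = true;
    };
    }  // namespace dawn

    // The operators defined above now apply to ExampleUsage.
    constexpr ExampleUsage kBoth = ExampleUsage::Read | ExampleUsage::Write;
    static_assert(kBoth & ExampleUsage::Read, "Read bit must be set");
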
diff --git a/include/dawn/dawn_wsi.h b/include/dawn/dawn_wsi.h
index 8e937b0..aecb252 100644
--- a/include/dawn/dawn_wsi.h
+++ b/include/dawn/dawn_wsi.h
@@ -65,7 +65,7 @@
 #endif
 
 #if defined(DAWN_ENABLE_BACKEND_METAL) && defined(__OBJC__)
-#    import <Metal/Metal.h>
+#import <Metal/Metal.h>
 
 struct DawnWSIContextMetal {
     id<MTLDevice> device = nil;
diff --git a/include/dawn/native/D3D12Backend.h b/include/dawn/native/D3D12Backend.h
index 58a9f21..2cff0f2 100644
--- a/include/dawn/native/D3D12Backend.h
+++ b/include/dawn/native/D3D12Backend.h
@@ -30,81 +30,81 @@
 
 namespace dawn::native::d3d12 {
 
-    class D3D11on12ResourceCache;
+class D3D11on12ResourceCache;
 
-    DAWN_NATIVE_EXPORT Microsoft::WRL::ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device);
-    DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
-                                                                             HWND window);
-    DAWN_NATIVE_EXPORT WGPUTextureFormat
-    GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
+DAWN_NATIVE_EXPORT Microsoft::WRL::ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device);
+DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
+                                                                         HWND window);
+DAWN_NATIVE_EXPORT WGPUTextureFormat
+GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
 
-    enum MemorySegment {
-        Local,
-        NonLocal,
-    };
+enum MemorySegment {
+    Local,
+    NonLocal,
+};
 
-    DAWN_NATIVE_EXPORT uint64_t SetExternalMemoryReservation(WGPUDevice device,
-                                                             uint64_t requestedReservationSize,
-                                                             MemorySegment memorySegment);
+DAWN_NATIVE_EXPORT uint64_t SetExternalMemoryReservation(WGPUDevice device,
+                                                         uint64_t requestedReservationSize,
+                                                         MemorySegment memorySegment);
 
-    struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDXGISharedHandle : ExternalImageDescriptor {
-      public:
-        ExternalImageDescriptorDXGISharedHandle();
+struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDXGISharedHandle : ExternalImageDescriptor {
+  public:
+    ExternalImageDescriptorDXGISharedHandle();
 
-        // Note: SharedHandle must be a handle to a texture object.
-        HANDLE sharedHandle;
-    };
+    // Note: SharedHandle must be a handle to a texture object.
+    HANDLE sharedHandle;
+};
 
-    // Keyed mutex acquire/release uses a fixed key of 0 to match Chromium behavior.
-    constexpr UINT64 kDXGIKeyedMutexAcquireReleaseKey = 0;
+// Keyed mutex acquire/release uses a fixed key of 0 to match Chromium behavior.
+constexpr UINT64 kDXGIKeyedMutexAcquireReleaseKey = 0;
 
-    struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptorDXGIKeyedMutex
-        : ExternalImageAccessDescriptor {
-      public:
-        // TODO(chromium:1241533): Remove deprecated keyed mutex params after removing associated
-        // code from Chromium - we use a fixed key of 0 for acquire and release everywhere now.
-        uint64_t acquireMutexKey;
-        uint64_t releaseMutexKey;
-        bool isSwapChainTexture = false;
-    };
+struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptorDXGIKeyedMutex
+    : ExternalImageAccessDescriptor {
+  public:
+    // TODO(chromium:1241533): Remove deprecated keyed mutex params after removing associated
+    // code from Chromium - we use a fixed key of 0 for acquire and release everywhere now.
+    uint64_t acquireMutexKey;
+    uint64_t releaseMutexKey;
+    bool isSwapChainTexture = false;
+};
 
-    class DAWN_NATIVE_EXPORT ExternalImageDXGI {
-      public:
-        ~ExternalImageDXGI();
+class DAWN_NATIVE_EXPORT ExternalImageDXGI {
+  public:
+    ~ExternalImageDXGI();
 
-        // Note: SharedHandle must be a handle to a texture object.
-        static std::unique_ptr<ExternalImageDXGI> Create(
-            WGPUDevice device,
-            const ExternalImageDescriptorDXGISharedHandle* descriptor);
+    // Note: SharedHandle must be a handle to a texture object.
+    static std::unique_ptr<ExternalImageDXGI> Create(
+        WGPUDevice device,
+        const ExternalImageDescriptorDXGISharedHandle* descriptor);
 
-        WGPUTexture ProduceTexture(WGPUDevice device,
-                                   const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor);
+    WGPUTexture ProduceTexture(WGPUDevice device,
+                               const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor);
 
-      private:
-        ExternalImageDXGI(Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource,
-                          const WGPUTextureDescriptor* descriptor);
+  private:
+    ExternalImageDXGI(Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource,
+                      const WGPUTextureDescriptor* descriptor);
 
-        Microsoft::WRL::ComPtr<ID3D12Resource> mD3D12Resource;
+    Microsoft::WRL::ComPtr<ID3D12Resource> mD3D12Resource;
 
-        // Contents of WGPUTextureDescriptor are stored individually since the descriptor
-        // could outlive this image.
-        WGPUTextureUsageFlags mUsage;
-        WGPUTextureUsageFlags mUsageInternal = WGPUTextureUsage_None;
-        WGPUTextureDimension mDimension;
-        WGPUExtent3D mSize;
-        WGPUTextureFormat mFormat;
-        uint32_t mMipLevelCount;
-        uint32_t mSampleCount;
+    // Contents of WGPUTextureDescriptor are stored individually since the descriptor
+    // could outlive this image.
+    WGPUTextureUsageFlags mUsage;
+    WGPUTextureUsageFlags mUsageInternal = WGPUTextureUsage_None;
+    WGPUTextureDimension mDimension;
+    WGPUExtent3D mSize;
+    WGPUTextureFormat mFormat;
+    uint32_t mMipLevelCount;
+    uint32_t mSampleCount;
 
-        std::unique_ptr<D3D11on12ResourceCache> mD3D11on12ResourceCache;
-    };
+    std::unique_ptr<D3D11on12ResourceCache> mD3D11on12ResourceCache;
+};
 
-    struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
-        AdapterDiscoveryOptions();
-        explicit AdapterDiscoveryOptions(Microsoft::WRL::ComPtr<IDXGIAdapter> adapter);
+struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
+    AdapterDiscoveryOptions();
+    explicit AdapterDiscoveryOptions(Microsoft::WRL::ComPtr<IDXGIAdapter> adapter);
 
-        Microsoft::WRL::ComPtr<IDXGIAdapter> dxgiAdapter;
-    };
+    Microsoft::WRL::ComPtr<IDXGIAdapter> dxgiAdapter;
+};
 
 }  // namespace dawn::native::d3d12
 
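A minimal usage sketch of the interop API above, inside namespace
dawn::native::d3d12; |device|, |sharedHandle| and |textureDesc| are assumed to
be provided by the caller:

    ExternalImageDescriptorDXGISharedHandle imageDesc;
    imageDesc.cTextureDescriptor = &textureDesc;  // assumed: matches image creation params
    imageDesc.sharedHandle = sharedHandle;        // assumed: a handle to a texture object

    std::unique_ptr<ExternalImageDXGI> image = ExternalImageDXGI::Create(device, &imageDesc);
    if (image != nullptr) {
        ExternalImageAccessDescriptorDXGIKeyedMutex accessDesc;
        accessDesc.isInitialized = true;  // texture contents are preserved on import
        accessDesc.usage = WGPUTextureUsage_TextureBinding;
        WGPUTexture texture = image->ProduceTexture(device, &accessDesc);
    }
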
diff --git a/include/dawn/native/DawnNative.h b/include/dawn/native/DawnNative.h
index a208fcf..3541083 100644
--- a/include/dawn/native/DawnNative.h
+++ b/include/dawn/native/DawnNative.h
@@ -23,237 +23,237 @@
 #include "dawn/webgpu.h"
 
 namespace dawn::platform {
-    class Platform;
+class Platform;
 }  // namespace dawn::platform
 
 namespace wgpu {
-    struct AdapterProperties;
-    struct DeviceDescriptor;
+struct AdapterProperties;
+struct DeviceDescriptor;
 }  // namespace wgpu
 
 namespace dawn::native {
 
-    class InstanceBase;
-    class AdapterBase;
+class InstanceBase;
+class AdapterBase;
 
-    // An optional parameter of Adapter::CreateDevice() to send additional information when creating
-    // a Device. For example, we can use it to enable a workaround, optimization or feature.
-    struct DAWN_NATIVE_EXPORT DawnDeviceDescriptor {
-        std::vector<const char*> requiredFeatures;
-        std::vector<const char*> forceEnabledToggles;
-        std::vector<const char*> forceDisabledToggles;
+// An optional parameter of Adapter::CreateDevice() to send additional information when creating
+// a Device. For example, we can use it to enable a workaround, optimization or feature.
+struct DAWN_NATIVE_EXPORT DawnDeviceDescriptor {
+    std::vector<const char*> requiredFeatures;
+    std::vector<const char*> forceEnabledToggles;
+    std::vector<const char*> forceDisabledToggles;
 
-        const WGPURequiredLimits* requiredLimits = nullptr;
-    };
+    const WGPURequiredLimits* requiredLimits = nullptr;
+};
 
-    // A struct to record the information of a toggle. A toggle is a code path in Dawn device that
-    // can be manually configured to run or not outside Dawn, including workarounds, special
-    // features and optimizations.
-    struct ToggleInfo {
-        const char* name;
-        const char* description;
-        const char* url;
-    };
+// A struct to record the information of a toggle. A toggle is a code path in a Dawn device that
+// can be manually configured from outside Dawn to run or not, including workarounds, special
+// features and optimizations.
+struct ToggleInfo {
+    const char* name;
+    const char* description;
+    const char* url;
+};
 
-    // A struct to record the information of a feature. A feature is a GPU feature that is not
-    // required to be supported by all Dawn backends and can only be used when it is enabled on the
-    // creation of device.
-    using FeatureInfo = ToggleInfo;
+// A struct to record the information of a feature. A feature is a GPU feature that is not
+// required to be supported by all Dawn backends and can only be used when it is enabled at
+// device creation.
+using FeatureInfo = ToggleInfo;
 
-    // An adapter is an object that represent on possibility of creating devices in the system.
-    // Most of the time it will represent a combination of a physical GPU and an API. Not that the
-    // same GPU can be represented by multiple adapters but on different APIs.
-    //
-    // The underlying Dawn adapter is owned by the Dawn instance so this class is not RAII but just
-    // a reference to an underlying adapter.
-    class DAWN_NATIVE_EXPORT Adapter {
-      public:
-        Adapter();
-        // NOLINTNEXTLINE(runtime/explicit)
-        Adapter(AdapterBase* impl);
-        ~Adapter();
+// An adapter is an object that represents one possibility of creating devices in the system.
+// Most of the time it will represent a combination of a physical GPU and an API. Note that the
+// same GPU can be represented by multiple adapters, but on different APIs.
+//
+// The underlying Dawn adapter is owned by the Dawn instance so this class is not RAII but just
+// a reference to an underlying adapter.
+class DAWN_NATIVE_EXPORT Adapter {
+  public:
+    Adapter();
+    // NOLINTNEXTLINE(runtime/explicit)
+    Adapter(AdapterBase* impl);
+    ~Adapter();
 
-        Adapter(const Adapter& other);
-        Adapter& operator=(const Adapter& other);
+    Adapter(const Adapter& other);
+    Adapter& operator=(const Adapter& other);
 
-        // Essentially webgpu.h's wgpuAdapterGetProperties while we don't have WGPUAdapter in
-        // dawn.json
-        void GetProperties(wgpu::AdapterProperties* properties) const;
-        void GetProperties(WGPUAdapterProperties* properties) const;
+    // Essentially webgpu.h's wgpuAdapterGetProperties while we don't have WGPUAdapter in
+    // dawn.json
+    void GetProperties(wgpu::AdapterProperties* properties) const;
+    void GetProperties(WGPUAdapterProperties* properties) const;
 
-        std::vector<const char*> GetSupportedExtensions() const;
-        std::vector<const char*> GetSupportedFeatures() const;
-        WGPUDeviceProperties GetAdapterProperties() const;
-        bool GetLimits(WGPUSupportedLimits* limits) const;
+    std::vector<const char*> GetSupportedExtensions() const;
+    std::vector<const char*> GetSupportedFeatures() const;
+    WGPUDeviceProperties GetAdapterProperties() const;
+    bool GetLimits(WGPUSupportedLimits* limits) const;
 
-        void SetUseTieredLimits(bool useTieredLimits);
+    void SetUseTieredLimits(bool useTieredLimits);
 
-        // Check that the Adapter is able to support importing external images. This is necessary
-        // to implement the swapchain and interop APIs in Chromium.
-        bool SupportsExternalImages() const;
+    // Check that the Adapter is able to support importing external images. This is necessary
+    // to implement the swapchain and interop APIs in Chromium.
+    bool SupportsExternalImages() const;
 
-        explicit operator bool() const;
+    explicit operator bool() const;
 
-        // Create a device on this adapter. On an error, nullptr is returned.
-        WGPUDevice CreateDevice(const DawnDeviceDescriptor* deviceDescriptor);
-        WGPUDevice CreateDevice(const wgpu::DeviceDescriptor* deviceDescriptor);
-        WGPUDevice CreateDevice(const WGPUDeviceDescriptor* deviceDescriptor = nullptr);
+    // Create a device on this adapter. On an error, nullptr is returned.
+    WGPUDevice CreateDevice(const DawnDeviceDescriptor* deviceDescriptor);
+    WGPUDevice CreateDevice(const wgpu::DeviceDescriptor* deviceDescriptor);
+    WGPUDevice CreateDevice(const WGPUDeviceDescriptor* deviceDescriptor = nullptr);
 
-        void RequestDevice(const DawnDeviceDescriptor* descriptor,
-                           WGPURequestDeviceCallback callback,
-                           void* userdata);
-        void RequestDevice(const wgpu::DeviceDescriptor* descriptor,
-                           WGPURequestDeviceCallback callback,
-                           void* userdata);
-        void RequestDevice(const WGPUDeviceDescriptor* descriptor,
-                           WGPURequestDeviceCallback callback,
-                           void* userdata);
+    void RequestDevice(const DawnDeviceDescriptor* descriptor,
+                       WGPURequestDeviceCallback callback,
+                       void* userdata);
+    void RequestDevice(const wgpu::DeviceDescriptor* descriptor,
+                       WGPURequestDeviceCallback callback,
+                       void* userdata);
+    void RequestDevice(const WGPUDeviceDescriptor* descriptor,
+                       WGPURequestDeviceCallback callback,
+                       void* userdata);
 
-        // Returns the underlying WGPUAdapter object.
-        WGPUAdapter Get() const;
+    // Returns the underlying WGPUAdapter object.
+    WGPUAdapter Get() const;
 
-        // Reset the backend device object for testing purposes.
-        void ResetInternalDeviceForTesting();
+    // Reset the backend device object for testing purposes.
+    void ResetInternalDeviceForTesting();
 
-      private:
-        AdapterBase* mImpl = nullptr;
-    };
+  private:
+    AdapterBase* mImpl = nullptr;
+};
 
-    // Base class for options passed to Instance::DiscoverAdapters.
-    struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptionsBase {
-      public:
-        const WGPUBackendType backendType;
+// Base class for options passed to Instance::DiscoverAdapters.
+struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptionsBase {
+  public:
+    const WGPUBackendType backendType;
 
-      protected:
-        explicit AdapterDiscoveryOptionsBase(WGPUBackendType type);
-    };
+  protected:
+    explicit AdapterDiscoveryOptionsBase(WGPUBackendType type);
+};
 
-    enum BackendValidationLevel { Full, Partial, Disabled };
+enum BackendValidationLevel { Full, Partial, Disabled };
 
-    // Represents a connection to dawn_native and is used for dependency injection, discovering
-    // system adapters and injecting custom adapters (like a Swiftshader Vulkan adapter).
-    //
-    // This is an RAII class for Dawn instances and also controls the lifetime of all adapters
-    // for this instance.
-    class DAWN_NATIVE_EXPORT Instance {
-      public:
-        explicit Instance(const WGPUInstanceDescriptor* desc = nullptr);
-        ~Instance();
+// Represents a connection to dawn_native and is used for dependency injection, discovering
+// system adapters and injecting custom adapters (like a Swiftshader Vulkan adapter).
+//
+// This is an RAII class for Dawn instances and also controls the lifetime of all adapters
+// for this instance.
+class DAWN_NATIVE_EXPORT Instance {
+  public:
+    explicit Instance(const WGPUInstanceDescriptor* desc = nullptr);
+    ~Instance();
 
-        Instance(const Instance& other) = delete;
-        Instance& operator=(const Instance& other) = delete;
+    Instance(const Instance& other) = delete;
+    Instance& operator=(const Instance& other) = delete;
 
-        // Gather all adapters in the system that can be accessed with no special options. These
-        // adapters will later be returned by GetAdapters.
-        void DiscoverDefaultAdapters();
+    // Gather all adapters in the system that can be accessed with no special options. These
+    // adapters will later be returned by GetAdapters.
+    void DiscoverDefaultAdapters();
 
-        // Adds adapters that can be discovered with the options provided (like a getProcAddress).
-        // The backend is chosen based on the type of the options used. Returns true on success.
-        bool DiscoverAdapters(const AdapterDiscoveryOptionsBase* options);
+    // Adds adapters that can be discovered with the options provided (like a getProcAddress).
+    // The backend is chosen based on the type of the options used. Returns true on success.
+    bool DiscoverAdapters(const AdapterDiscoveryOptionsBase* options);
 
-        // Returns all the adapters that the instance knows about.
-        std::vector<Adapter> GetAdapters() const;
+    // Returns all the adapters that the instance knows about.
+    std::vector<Adapter> GetAdapters() const;
 
-        const ToggleInfo* GetToggleInfo(const char* toggleName);
-        const FeatureInfo* GetFeatureInfo(WGPUFeatureName feature);
+    const ToggleInfo* GetToggleInfo(const char* toggleName);
+    const FeatureInfo* GetFeatureInfo(WGPUFeatureName feature);
 
-        // Enables backend validation layers
-        void EnableBackendValidation(bool enableBackendValidation);
-        void SetBackendValidationLevel(BackendValidationLevel validationLevel);
+    // Enables backend validation layers
+    void EnableBackendValidation(bool enableBackendValidation);
+    void SetBackendValidationLevel(BackendValidationLevel validationLevel);
 
-        // Enable debug capture on Dawn startup
-        void EnableBeginCaptureOnStartup(bool beginCaptureOnStartup);
+    // Enable debug capture on Dawn startup
+    void EnableBeginCaptureOnStartup(bool beginCaptureOnStartup);
 
-        // TODO(dawn:1374) Deprecate this once it is passed via the descriptor.
-        void SetPlatform(dawn::platform::Platform* platform);
+    // TODO(dawn:1374) Deprecate this once it is passed via the descriptor.
+    void SetPlatform(dawn::platform::Platform* platform);
 
-        // Returns the underlying WGPUInstance object.
-        WGPUInstance Get() const;
+    // Returns the underlying WGPUInstance object.
+    WGPUInstance Get() const;
 
-      private:
-        InstanceBase* mImpl = nullptr;
-    };
+  private:
+    InstanceBase* mImpl = nullptr;
+};
 
-    // Backend-agnostic API for dawn_native
-    DAWN_NATIVE_EXPORT const DawnProcTable& GetProcs();
+// Backend-agnostic API for dawn_native
+DAWN_NATIVE_EXPORT const DawnProcTable& GetProcs();
 
-    // Query the names of all the toggles that are enabled in device
-    DAWN_NATIVE_EXPORT std::vector<const char*> GetTogglesUsed(WGPUDevice device);
+// Query the names of all the toggles that are enabled on the device
+DAWN_NATIVE_EXPORT std::vector<const char*> GetTogglesUsed(WGPUDevice device);
 
-    // Backdoor to get the number of lazy clears for testing
-    DAWN_NATIVE_EXPORT size_t GetLazyClearCountForTesting(WGPUDevice device);
+// Backdoor to get the number of lazy clears for testing
+DAWN_NATIVE_EXPORT size_t GetLazyClearCountForTesting(WGPUDevice device);
 
-    // Backdoor to get the number of deprecation warnings for testing
-    DAWN_NATIVE_EXPORT size_t GetDeprecationWarningCountForTesting(WGPUDevice device);
+// Backdoor to get the number of deprecation warnings for testing
+DAWN_NATIVE_EXPORT size_t GetDeprecationWarningCountForTesting(WGPUDevice device);
 
-    //  Query if texture has been initialized
-    DAWN_NATIVE_EXPORT bool IsTextureSubresourceInitialized(
-        WGPUTexture texture,
-        uint32_t baseMipLevel,
-        uint32_t levelCount,
-        uint32_t baseArrayLayer,
-        uint32_t layerCount,
-        WGPUTextureAspect aspect = WGPUTextureAspect_All);
+// Query if texture has been initialized
+DAWN_NATIVE_EXPORT bool IsTextureSubresourceInitialized(
+    WGPUTexture texture,
+    uint32_t baseMipLevel,
+    uint32_t levelCount,
+    uint32_t baseArrayLayer,
+    uint32_t layerCount,
+    WGPUTextureAspect aspect = WGPUTextureAspect_All);
 
-    // Backdoor to get the order of the ProcMap for testing
-    DAWN_NATIVE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
+// Backdoor to get the order of the ProcMap for testing
+DAWN_NATIVE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
 
-    DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device);
+DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device);
 
-    // ErrorInjector functions used for testing only. Defined in dawn_native/ErrorInjector.cpp
-    DAWN_NATIVE_EXPORT void EnableErrorInjector();
-    DAWN_NATIVE_EXPORT void DisableErrorInjector();
-    DAWN_NATIVE_EXPORT void ClearErrorInjector();
-    DAWN_NATIVE_EXPORT uint64_t AcquireErrorInjectorCallCount();
-    DAWN_NATIVE_EXPORT void InjectErrorAt(uint64_t index);
+// ErrorInjector functions used for testing only. Defined in dawn_native/ErrorInjector.cpp
+DAWN_NATIVE_EXPORT void EnableErrorInjector();
+DAWN_NATIVE_EXPORT void DisableErrorInjector();
+DAWN_NATIVE_EXPORT void ClearErrorInjector();
+DAWN_NATIVE_EXPORT uint64_t AcquireErrorInjectorCallCount();
+DAWN_NATIVE_EXPORT void InjectErrorAt(uint64_t index);
 
-    // The different types of external images
-    enum ExternalImageType {
-        OpaqueFD,
-        DmaBuf,
-        IOSurface,
-        DXGISharedHandle,
-        EGLImage,
-    };
+// The different types of external images
+enum ExternalImageType {
+    OpaqueFD,
+    DmaBuf,
+    IOSurface,
+    DXGISharedHandle,
+    EGLImage,
+};
 
-    // Common properties of external images
-    struct DAWN_NATIVE_EXPORT ExternalImageDescriptor {
-      public:
-        const WGPUTextureDescriptor* cTextureDescriptor;  // Must match image creation params
-        bool isInitialized;  // Whether the texture is initialized on import
-        ExternalImageType GetType() const;
+// Common properties of external images
+struct DAWN_NATIVE_EXPORT ExternalImageDescriptor {
+  public:
+    const WGPUTextureDescriptor* cTextureDescriptor;  // Must match image creation params
+    bool isInitialized;  // Whether the texture is initialized on import
+    ExternalImageType GetType() const;
 
-      protected:
-        explicit ExternalImageDescriptor(ExternalImageType type);
+  protected:
+    explicit ExternalImageDescriptor(ExternalImageType type);
 
-      private:
-        ExternalImageType mType;
-    };
+  private:
+    ExternalImageType mType;
+};
 
-    struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptor {
-      public:
-        bool isInitialized;  // Whether the texture is initialized on import
-        WGPUTextureUsageFlags usage;
-    };
+struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptor {
+  public:
+    bool isInitialized;  // Whether the texture is initialized on import
+    WGPUTextureUsageFlags usage;
+};
 
-    struct DAWN_NATIVE_EXPORT ExternalImageExportInfo {
-      public:
-        bool isInitialized;  // Whether the texture is initialized after export
-        ExternalImageType GetType() const;
+struct DAWN_NATIVE_EXPORT ExternalImageExportInfo {
+  public:
+    bool isInitialized;  // Whether the texture is initialized after export
+    ExternalImageType GetType() const;
 
-      protected:
-        explicit ExternalImageExportInfo(ExternalImageType type);
+  protected:
+    explicit ExternalImageExportInfo(ExternalImageType type);
 
-      private:
-        ExternalImageType mType;
-    };
+  private:
+    ExternalImageType mType;
+};
 
-    DAWN_NATIVE_EXPORT const char* GetObjectLabelForTesting(void* objectHandle);
+DAWN_NATIVE_EXPORT const char* GetObjectLabelForTesting(void* objectHandle);
 
-    DAWN_NATIVE_EXPORT uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer);
+DAWN_NATIVE_EXPORT uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer);
 
-    DAWN_NATIVE_EXPORT bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a,
-                                                                   WGPUBindGroupLayout b);
+DAWN_NATIVE_EXPORT bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a,
+                                                               WGPUBindGroupLayout b);
 
 }  // namespace dawn::native
 
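A short sketch of the discovery flow declared above (error handling elided):

    dawn::native::Instance instance;
    instance.DiscoverDefaultAdapters();

    std::vector<dawn::native::Adapter> adapters = instance.GetAdapters();
    WGPUDevice device = nullptr;
    for (dawn::native::Adapter& adapter : adapters) {
        device = adapter.CreateDevice();  // returns nullptr on error
        if (device != nullptr) {
            break;
        }
    }
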
diff --git a/include/dawn/native/MetalBackend.h b/include/dawn/native/MetalBackend.h
index 239e6d2..20c8048 100644
--- a/include/dawn/native/MetalBackend.h
+++ b/include/dawn/native/MetalBackend.h
@@ -29,41 +29,41 @@
 typedef __IOSurface* IOSurfaceRef;
 
 #ifdef __OBJC__
-#    import <Metal/Metal.h>
+#import <Metal/Metal.h>
 #endif  // __OBJC__
 
 namespace dawn::native::metal {
 
-    struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
-        AdapterDiscoveryOptions();
-    };
+struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
+    AdapterDiscoveryOptions();
+};
 
-    struct DAWN_NATIVE_EXPORT ExternalImageDescriptorIOSurface : ExternalImageDescriptor {
-      public:
-        ExternalImageDescriptorIOSurface();
+struct DAWN_NATIVE_EXPORT ExternalImageDescriptorIOSurface : ExternalImageDescriptor {
+  public:
+    ExternalImageDescriptorIOSurface();
 
-        IOSurfaceRef ioSurface;
+    IOSurfaceRef ioSurface;
 
-        // This has been deprecated.
-        uint32_t plane;
-    };
+    // This has been deprecated.
+    uint32_t plane;
+};
 
-    DAWN_NATIVE_EXPORT WGPUTexture
-    WrapIOSurface(WGPUDevice device, const ExternalImageDescriptorIOSurface* descriptor);
+DAWN_NATIVE_EXPORT WGPUTexture WrapIOSurface(WGPUDevice device,
+                                             const ExternalImageDescriptorIOSurface* descriptor);
 
-    // When making Metal interop with other APIs, we need to be careful that QueueSubmit doesn't
-    // mean that the operations will be visible to other APIs/Metal devices right away. macOS
-    // does have a global queue of graphics operations, but the command buffers are inserted there
-    // when they are "scheduled". Submitting other operations before the command buffer is
-    // scheduled could lead to races in who gets scheduled first and incorrect rendering.
-    DAWN_NATIVE_EXPORT void WaitForCommandsToBeScheduled(WGPUDevice device);
+// When making Metal interop with other APIs, we need to be careful that QueueSubmit doesn't
+// mean that the operations will be visible to other APIs/Metal devices right away. macOS
+// does have a global queue of graphics operations, but the command buffers are inserted there
+// when they are "scheduled". Submitting other operations before the command buffer is
+// scheduled could lead to races in who gets scheduled first and incorrect rendering.
+DAWN_NATIVE_EXPORT void WaitForCommandsToBeScheduled(WGPUDevice device);
 
 }  // namespace dawn::native::metal
 
 #ifdef __OBJC__
 namespace dawn::native::metal {
 
-    DAWN_NATIVE_EXPORT id<MTLDevice> GetMetalDevice(WGPUDevice device);
+DAWN_NATIVE_EXPORT id<MTLDevice> GetMetalDevice(WGPUDevice device);
 
 }  // namespace dawn::native::metal
 #endif  // __OBJC__
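
A sketch of the interop pattern the scheduling comment above describes;
|device|, |ioSurface| and |textureDesc| are assumed to come from the caller:

    dawn::native::metal::ExternalImageDescriptorIOSurface imageDesc;
    imageDesc.ioSurface = ioSurface;              // assumed: a valid IOSurfaceRef
    imageDesc.cTextureDescriptor = &textureDesc;  // assumed: matches the IOSurface

    WGPUTexture texture = dawn::native::metal::WrapIOSurface(device, &imageDesc);

    // ... record and submit work that uses |texture| ...

    // Ensure the submitted command buffers are scheduled before another API
    // touches the IOSurface.
    dawn::native::metal::WaitForCommandsToBeScheduled(device);
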
diff --git a/include/dawn/native/NullBackend.h b/include/dawn/native/NullBackend.h
index e94cf2a..bfa8a63 100644
--- a/include/dawn/native/NullBackend.h
+++ b/include/dawn/native/NullBackend.h
@@ -19,7 +19,7 @@
 #include "dawn/native/DawnNative.h"
 
 namespace dawn::native::null {
-    DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl();
+DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl();
 }  // namespace dawn::native::null
 
 #endif  // INCLUDE_DAWN_NATIVE_NULLBACKEND_H_
diff --git a/include/dawn/native/OpenGLBackend.h b/include/dawn/native/OpenGLBackend.h
index 43b8768..bee9dae 100644
--- a/include/dawn/native/OpenGLBackend.h
+++ b/include/dawn/native/OpenGLBackend.h
@@ -22,33 +22,34 @@
 
 namespace dawn::native::opengl {
 
-    struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
-        AdapterDiscoveryOptions();
+struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
+    AdapterDiscoveryOptions();
 
-        void* (*getProc)(const char*);
-    };
+    void* (*getProc)(const char*);
+};
 
-    struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptionsES : public AdapterDiscoveryOptionsBase {
-        AdapterDiscoveryOptionsES();
+struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptionsES : public AdapterDiscoveryOptionsBase {
+    AdapterDiscoveryOptionsES();
 
-        void* (*getProc)(const char*);
-    };
+    void* (*getProc)(const char*);
+};
 
-    using PresentCallback = void (*)(void*);
-    DAWN_NATIVE_EXPORT DawnSwapChainImplementation
-    CreateNativeSwapChainImpl(WGPUDevice device, PresentCallback present, void* presentUserdata);
-    DAWN_NATIVE_EXPORT WGPUTextureFormat
-    GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
+using PresentCallback = void (*)(void*);
+DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
+                                                                         PresentCallback present,
+                                                                         void* presentUserdata);
+DAWN_NATIVE_EXPORT WGPUTextureFormat
+GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
 
-    struct DAWN_NATIVE_EXPORT ExternalImageDescriptorEGLImage : ExternalImageDescriptor {
-      public:
-        ExternalImageDescriptorEGLImage();
+struct DAWN_NATIVE_EXPORT ExternalImageDescriptorEGLImage : ExternalImageDescriptor {
+  public:
+    ExternalImageDescriptorEGLImage();
 
-        ::EGLImage image;
-    };
+    ::EGLImage image;
+};
 
-    DAWN_NATIVE_EXPORT WGPUTexture
-    WrapExternalEGLImage(WGPUDevice device, const ExternalImageDescriptorEGLImage* descriptor);
+DAWN_NATIVE_EXPORT WGPUTexture
+WrapExternalEGLImage(WGPUDevice device, const ExternalImageDescriptorEGLImage* descriptor);
 
 }  // namespace dawn::native::opengl
 
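A hedged sketch of the EGLImage import path above; |device|, |eglImage| and
|textureDesc| are placeholders provided by the caller:

    dawn::native::opengl::ExternalImageDescriptorEGLImage imageDesc;
    imageDesc.image = eglImage;                   // assumed: a valid ::EGLImage
    imageDesc.cTextureDescriptor = &textureDesc;  // assumed: matches image creation params

    WGPUTexture texture = dawn::native::opengl::WrapExternalEGLImage(device, &imageDesc);
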
diff --git a/include/dawn/native/VulkanBackend.h b/include/dawn/native/VulkanBackend.h
index 8ac9f8f..3885dad 100644
--- a/include/dawn/native/VulkanBackend.h
+++ b/include/dawn/native/VulkanBackend.h
@@ -24,116 +24,116 @@
 
 namespace dawn::native::vulkan {
 
-    DAWN_NATIVE_EXPORT VkInstance GetInstance(WGPUDevice device);
+DAWN_NATIVE_EXPORT VkInstance GetInstance(WGPUDevice device);
 
-    DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device, const char* pName);
+DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device, const char* pName);
 
-    DAWN_NATIVE_EXPORT DawnSwapChainImplementation
-    CreateNativeSwapChainImpl(WGPUDevice device, ::VkSurfaceKHR surface);
-    DAWN_NATIVE_EXPORT WGPUTextureFormat
-    GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
+DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
+                                                                         ::VkSurfaceKHR surface);
+DAWN_NATIVE_EXPORT WGPUTextureFormat
+GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
 
-    struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
-        AdapterDiscoveryOptions();
+struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
+    AdapterDiscoveryOptions();
 
-        bool forceSwiftShader = false;
-    };
+    bool forceSwiftShader = false;
+};
 
-    struct DAWN_NATIVE_EXPORT ExternalImageDescriptorVk : ExternalImageDescriptor {
-      public:
-        // The following members may be ignored if |ExternalImageDescriptor::isInitialized| is false
-        // since the import does not need to preserve texture contents.
+struct DAWN_NATIVE_EXPORT ExternalImageDescriptorVk : ExternalImageDescriptor {
+  public:
+    // The following members may be ignored if |ExternalImageDescriptor::isInitialized| is false
+    // since the import does not need to preserve texture contents.
 
-        // See https://www.khronos.org/registry/vulkan/specs/1.1/html/chap7.html. The acquire
-        // operation old/new layouts must match exactly the layouts in the release operation. So
-        // we may need to issue two barriers releasedOldLayout -> releasedNewLayout ->
-        // cTextureDescriptor.usage if the new layout is not compatible with the desired usage.
-        // The first barrier is the queue transfer, the second is the layout transition to our
-        // desired usage.
-        VkImageLayout releasedOldLayout = VK_IMAGE_LAYOUT_GENERAL;
-        VkImageLayout releasedNewLayout = VK_IMAGE_LAYOUT_GENERAL;
+    // See https://www.khronos.org/registry/vulkan/specs/1.1/html/chap7.html. The acquire
+    // operation's old/new layouts must exactly match the layouts in the release operation. So
+    // we may need to issue two barriers releasedOldLayout -> releasedNewLayout ->
+    // cTextureDescriptor.usage if the new layout is not compatible with the desired usage.
+    // The first barrier is the queue transfer, the second is the layout transition to our
+    // desired usage.
+    VkImageLayout releasedOldLayout = VK_IMAGE_LAYOUT_GENERAL;
+    VkImageLayout releasedNewLayout = VK_IMAGE_LAYOUT_GENERAL;
 
-      protected:
-        using ExternalImageDescriptor::ExternalImageDescriptor;
-    };
+  protected:
+    using ExternalImageDescriptor::ExternalImageDescriptor;
+};
 
-    struct ExternalImageExportInfoVk : ExternalImageExportInfo {
-      public:
-        // See comments in |ExternalImageDescriptorVk|
-        // Contains the old/new layouts used in the queue release operation.
-        VkImageLayout releasedOldLayout;
-        VkImageLayout releasedNewLayout;
+struct ExternalImageExportInfoVk : ExternalImageExportInfo {
+  public:
+    // See comments in |ExternalImageDescriptorVk|
+    // Contains the old/new layouts used in the queue release operation.
+    VkImageLayout releasedOldLayout;
+    VkImageLayout releasedNewLayout;
 
-      protected:
-        using ExternalImageExportInfo::ExternalImageExportInfo;
-    };
+  protected:
+    using ExternalImageExportInfo::ExternalImageExportInfo;
+};
 
 // Can't use DAWN_PLATFORM_LINUX since header included in both Dawn and Chrome
 #ifdef __linux__
 
-    // Common properties of external images represented by FDs. On successful import the file
-    // descriptor's ownership is transferred to the Dawn implementation and they shouldn't be
-    // used outside of Dawn again. TODO(enga): Also transfer ownership in the error case so the
-    // caller can assume the FD is always consumed.
-    struct DAWN_NATIVE_EXPORT ExternalImageDescriptorFD : ExternalImageDescriptorVk {
-      public:
-        int memoryFD;              // A file descriptor from an export of the memory of the image
-        std::vector<int> waitFDs;  // File descriptors of semaphores which will be waited on
+// Common properties of external images represented by FDs. On successful import, ownership of
+// the file descriptors is transferred to the Dawn implementation and they shouldn't be
+// used outside of Dawn again. TODO(enga): Also transfer ownership in the error case so the
+// caller can assume the FD is always consumed.
+struct DAWN_NATIVE_EXPORT ExternalImageDescriptorFD : ExternalImageDescriptorVk {
+  public:
+    int memoryFD;              // A file descriptor from an export of the memory of the image
+    std::vector<int> waitFDs;  // File descriptors of semaphores which will be waited on
 
-      protected:
-        using ExternalImageDescriptorVk::ExternalImageDescriptorVk;
-    };
+  protected:
+    using ExternalImageDescriptorVk::ExternalImageDescriptorVk;
+};
 
-    // Descriptor for opaque file descriptor image import
-    struct DAWN_NATIVE_EXPORT ExternalImageDescriptorOpaqueFD : ExternalImageDescriptorFD {
-        ExternalImageDescriptorOpaqueFD();
+// Descriptor for opaque file descriptor image import
+struct DAWN_NATIVE_EXPORT ExternalImageDescriptorOpaqueFD : ExternalImageDescriptorFD {
+    ExternalImageDescriptorOpaqueFD();
 
-        VkDeviceSize allocationSize;  // Must match VkMemoryAllocateInfo from image creation
-        uint32_t memoryTypeIndex;     // Must match VkMemoryAllocateInfo from image creation
-    };
+    VkDeviceSize allocationSize;  // Must match VkMemoryAllocateInfo from image creation
+    uint32_t memoryTypeIndex;     // Must match VkMemoryAllocateInfo from image creation
+};
 
-    // Descriptor for dma-buf file descriptor image import
-    struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDmaBuf : ExternalImageDescriptorFD {
-        ExternalImageDescriptorDmaBuf();
+// Descriptor for dma-buf file descriptor image import
+struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDmaBuf : ExternalImageDescriptorFD {
+    ExternalImageDescriptorDmaBuf();
 
-        uint32_t stride;       // Stride of the buffer in bytes
-        uint64_t drmModifier;  // DRM modifier of the buffer
-    };
+    uint32_t stride;       // Stride of the buffer in bytes
+    uint64_t drmModifier;  // DRM modifier of the buffer
+};
 
-    // Info struct that is written to in |ExportVulkanImage|.
-    struct DAWN_NATIVE_EXPORT ExternalImageExportInfoFD : ExternalImageExportInfoVk {
-      public:
-        // Contains the exported semaphore handles.
-        std::vector<int> semaphoreHandles;
+// Info struct that is written to in |ExportVulkanImage|.
+struct DAWN_NATIVE_EXPORT ExternalImageExportInfoFD : ExternalImageExportInfoVk {
+  public:
+    // Contains the exported semaphore handles.
+    std::vector<int> semaphoreHandles;
 
-      protected:
-        using ExternalImageExportInfoVk::ExternalImageExportInfoVk;
-    };
+  protected:
+    using ExternalImageExportInfoVk::ExternalImageExportInfoVk;
+};
 
-    struct DAWN_NATIVE_EXPORT ExternalImageExportInfoOpaqueFD : ExternalImageExportInfoFD {
-        ExternalImageExportInfoOpaqueFD();
-    };
+struct DAWN_NATIVE_EXPORT ExternalImageExportInfoOpaqueFD : ExternalImageExportInfoFD {
+    ExternalImageExportInfoOpaqueFD();
+};
 
-    struct DAWN_NATIVE_EXPORT ExternalImageExportInfoDmaBuf : ExternalImageExportInfoFD {
-        ExternalImageExportInfoDmaBuf();
-    };
+struct DAWN_NATIVE_EXPORT ExternalImageExportInfoDmaBuf : ExternalImageExportInfoFD {
+    ExternalImageExportInfoDmaBuf();
+};
 
 #endif  // __linux__
 
-    // Imports external memory into a Vulkan image. Internally, this uses external memory /
-    // semaphore extensions to import the image and wait on the provided synchronizaton
-    // primitives before the texture can be used.
-    // On failure, returns a nullptr.
-    DAWN_NATIVE_EXPORT WGPUTexture WrapVulkanImage(WGPUDevice device,
-                                                   const ExternalImageDescriptorVk* descriptor);
+// Imports external memory into a Vulkan image. Internally, this uses external memory /
+// semaphore extensions to import the image and wait on the provided synchronization
+// primitives before the texture can be used.
+// On failure, returns nullptr.
+DAWN_NATIVE_EXPORT WGPUTexture WrapVulkanImage(WGPUDevice device,
+                                               const ExternalImageDescriptorVk* descriptor);
 
-    // Exports external memory from a Vulkan image. This must be called on wrapped textures
-    // before they are destroyed. It writes the semaphore to wait on and the old/new image
-    // layouts to |info|. Pass VK_IMAGE_LAYOUT_UNDEFINED as |desiredLayout| if you don't want to
-    // perform a layout transition.
-    DAWN_NATIVE_EXPORT bool ExportVulkanImage(WGPUTexture texture,
-                                              VkImageLayout desiredLayout,
-                                              ExternalImageExportInfoVk* info);
+// Exports external memory from a Vulkan image. This must be called on wrapped textures
+// before they are destroyed. It writes the semaphore to wait on and the old/new image
+// layouts to |info|. Pass VK_IMAGE_LAYOUT_UNDEFINED as |desiredLayout| if you don't want to
+// perform a layout transition.
+DAWN_NATIVE_EXPORT bool ExportVulkanImage(WGPUTexture texture,
+                                          VkImageLayout desiredLayout,
+                                          ExternalImageExportInfoVk* info);
 
 }  // namespace dawn::native::vulkan
 
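A sketch of the import/export pair above for the opaque-FD path on Linux;
|device|, |memoryFD|, |allocationSize|, |memoryTypeIndex| and |textureDesc|
are assumed to come from the caller:

    dawn::native::vulkan::ExternalImageDescriptorOpaqueFD imageDesc;
    imageDesc.cTextureDescriptor = &textureDesc;  // assumed: matches image creation params
    imageDesc.memoryFD = memoryFD;                // ownership transfers to Dawn on success
    imageDesc.allocationSize = allocationSize;    // must match VkMemoryAllocateInfo
    imageDesc.memoryTypeIndex = memoryTypeIndex;  // must match VkMemoryAllocateInfo

    WGPUTexture texture = dawn::native::vulkan::WrapVulkanImage(device, &imageDesc);

    // Before destroying the wrapped texture, export it back; pass
    // VK_IMAGE_LAYOUT_UNDEFINED to skip the layout transition.
    dawn::native::vulkan::ExternalImageExportInfoOpaqueFD exportInfo;
    bool exported =
        dawn::native::vulkan::ExportVulkanImage(texture, VK_IMAGE_LAYOUT_UNDEFINED, &exportInfo);
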
diff --git a/include/dawn/native/dawn_native_export.h b/include/dawn/native/dawn_native_export.h
index 329b1a1..c237720 100644
--- a/include/dawn/native/dawn_native_export.h
+++ b/include/dawn/native/dawn_native_export.h
@@ -16,21 +16,21 @@
 #define INCLUDE_DAWN_NATIVE_DAWN_NATIVE_EXPORT_H_
 
 #if defined(DAWN_NATIVE_SHARED_LIBRARY)
-#    if defined(_WIN32)
-#        if defined(DAWN_NATIVE_IMPLEMENTATION)
-#            define DAWN_NATIVE_EXPORT __declspec(dllexport)
-#        else
-#            define DAWN_NATIVE_EXPORT __declspec(dllimport)
-#        endif
-#    else  // defined(_WIN32)
-#        if defined(DAWN_NATIVE_IMPLEMENTATION)
-#            define DAWN_NATIVE_EXPORT __attribute__((visibility("default")))
-#        else
-#            define DAWN_NATIVE_EXPORT
-#        endif
-#    endif  // defined(_WIN32)
-#else       // defined(DAWN_NATIVE_SHARED_LIBRARY)
-#    define DAWN_NATIVE_EXPORT
+#if defined(_WIN32)
+#if defined(DAWN_NATIVE_IMPLEMENTATION)
+#define DAWN_NATIVE_EXPORT __declspec(dllexport)
+#else
+#define DAWN_NATIVE_EXPORT __declspec(dllimport)
+#endif
+#else  // defined(_WIN32)
+#if defined(DAWN_NATIVE_IMPLEMENTATION)
+#define DAWN_NATIVE_EXPORT __attribute__((visibility("default")))
+#else
+#define DAWN_NATIVE_EXPORT
+#endif
+#endif  // defined(_WIN32)
+#else   // defined(DAWN_NATIVE_SHARED_LIBRARY)
+#define DAWN_NATIVE_EXPORT
 #endif  // defined(DAWN_NATIVE_SHARED_LIBRARY)
 
 #endif  // INCLUDE_DAWN_NATIVE_DAWN_NATIVE_EXPORT_H_
diff --git a/include/dawn/platform/DawnPlatform.h b/include/dawn/platform/DawnPlatform.h
index d3cb0be..610cc05 100644
--- a/include/dawn/platform/DawnPlatform.h
+++ b/include/dawn/platform/DawnPlatform.h
@@ -24,91 +24,90 @@
 
 namespace dawn::platform {
 
-    enum class TraceCategory {
-        General,     // General trace events
-        Validation,  // Dawn validation
-        Recording,   // Native command recording
-        GPUWork,     // Actual GPU work
-    };
+enum class TraceCategory {
+    General,     // General trace events
+    Validation,  // Dawn validation
+    Recording,   // Native command recording
+    GPUWork,     // Actual GPU work
+};
 
-    class DAWN_PLATFORM_EXPORT CachingInterface {
-      public:
-        CachingInterface();
-        virtual ~CachingInterface();
+class DAWN_PLATFORM_EXPORT CachingInterface {
+  public:
+    CachingInterface();
+    virtual ~CachingInterface();
 
-        // LoadData has two modes. The first mode is used to get a value which
-        // corresponds to the |key|. The |valueOut| is a caller provided buffer
-        // allocated to the size |valueSize| which is loaded with data of the
-        // size returned. The second mode is used to query for the existence of
-        // the |key| where |valueOut| is nullptr and |valueSize| must be 0.
-        // The return size is non-zero if the |key| exists.
-        virtual size_t LoadData(const WGPUDevice device,
-                                const void* key,
-                                size_t keySize,
-                                void* valueOut,
-                                size_t valueSize) = 0;
+    // LoadData has two modes. The first mode is used to get a value which
+    // corresponds to the |key|. The |valueOut| is a caller-provided buffer
+    // allocated to the size |valueSize| which is loaded with data of the
+    // size returned. The second mode is used to query for the existence of
+    // the |key| where |valueOut| is nullptr and |valueSize| must be 0.
+    // The return size is non-zero if the |key| exists.
+    virtual size_t LoadData(const WGPUDevice device,
+                            const void* key,
+                            size_t keySize,
+                            void* valueOut,
+                            size_t valueSize) = 0;
 
-        // StoreData puts a |value| in the cache which corresponds to the |key|.
-        virtual void StoreData(const WGPUDevice device,
-                               const void* key,
-                               size_t keySize,
-                               const void* value,
-                               size_t valueSize) = 0;
+    // StoreData puts a |value| in the cache which corresponds to the |key|.
+    virtual void StoreData(const WGPUDevice device,
+                           const void* key,
+                           size_t keySize,
+                           const void* value,
+                           size_t valueSize) = 0;
 
-      private:
-        CachingInterface(const CachingInterface&) = delete;
-        CachingInterface& operator=(const CachingInterface&) = delete;
-    };
+  private:
+    CachingInterface(const CachingInterface&) = delete;
+    CachingInterface& operator=(const CachingInterface&) = delete;
+};
 
-    class DAWN_PLATFORM_EXPORT WaitableEvent {
-      public:
-        WaitableEvent() = default;
-        virtual ~WaitableEvent() = default;
-        virtual void Wait() = 0;        // Wait for completion
-        virtual bool IsComplete() = 0;  // Non-blocking check if the event is complete
-    };
+class DAWN_PLATFORM_EXPORT WaitableEvent {
+  public:
+    WaitableEvent() = default;
+    virtual ~WaitableEvent() = default;
+    virtual void Wait() = 0;        // Wait for completion
+    virtual bool IsComplete() = 0;  // Non-blocking check if the event is complete
+};
 
-    using PostWorkerTaskCallback = void (*)(void* userdata);
+using PostWorkerTaskCallback = void (*)(void* userdata);
 
-    class DAWN_PLATFORM_EXPORT WorkerTaskPool {
-      public:
-        WorkerTaskPool() = default;
-        virtual ~WorkerTaskPool() = default;
-        virtual std::unique_ptr<WaitableEvent> PostWorkerTask(PostWorkerTaskCallback,
-                                                              void* userdata) = 0;
-    };
+class DAWN_PLATFORM_EXPORT WorkerTaskPool {
+  public:
+    WorkerTaskPool() = default;
+    virtual ~WorkerTaskPool() = default;
+    virtual std::unique_ptr<WaitableEvent> PostWorkerTask(PostWorkerTaskCallback,
+                                                          void* userdata) = 0;
+};
 
-    class DAWN_PLATFORM_EXPORT Platform {
-      public:
-        Platform();
-        virtual ~Platform();
+class DAWN_PLATFORM_EXPORT Platform {
+  public:
+    Platform();
+    virtual ~Platform();
 
-        virtual const unsigned char* GetTraceCategoryEnabledFlag(TraceCategory category);
+    virtual const unsigned char* GetTraceCategoryEnabledFlag(TraceCategory category);
 
-        virtual double MonotonicallyIncreasingTime();
+    virtual double MonotonicallyIncreasingTime();
 
-        virtual uint64_t AddTraceEvent(char phase,
-                                       const unsigned char* categoryGroupEnabled,
-                                       const char* name,
-                                       uint64_t id,
-                                       double timestamp,
-                                       int numArgs,
-                                       const char** argNames,
-                                       const unsigned char* argTypes,
-                                       const uint64_t* argValues,
-                                       unsigned char flags);
+    virtual uint64_t AddTraceEvent(char phase,
+                                   const unsigned char* categoryGroupEnabled,
+                                   const char* name,
+                                   uint64_t id,
+                                   double timestamp,
+                                   int numArgs,
+                                   const char** argNames,
+                                   const unsigned char* argTypes,
+                                   const uint64_t* argValues,
+                                   unsigned char flags);
 
-        // The |fingerprint| is provided by Dawn to inform the client to discard the Dawn caches
-        // when the fingerprint changes. The returned CachingInterface is expected to outlive the
-        // device which uses it to persistently cache objects.
-        virtual CachingInterface* GetCachingInterface(const void* fingerprint,
-                                                      size_t fingerprintSize);
-        virtual std::unique_ptr<WorkerTaskPool> CreateWorkerTaskPool();
+    // The |fingerprint| is provided by Dawn to inform the client to discard the Dawn caches
+    // when the fingerprint changes. The returned CachingInterface is expected to outlive the
+    // device which uses it to persistently cache objects.
+    virtual CachingInterface* GetCachingInterface(const void* fingerprint, size_t fingerprintSize);
+    virtual std::unique_ptr<WorkerTaskPool> CreateWorkerTaskPool();
 
-      private:
-        Platform(const Platform&) = delete;
-        Platform& operator=(const Platform&) = delete;
-    };
+  private:
+    Platform(const Platform&) = delete;
+    Platform& operator=(const Platform&) = delete;
+};
 
 }  // namespace dawn::platform
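
For illustration, the two LoadData modes documented above can be exercised
as follows. This is a minimal caller-side sketch, not part of this CL; the
|cache|, |device|, |key|, and |keySize| values are assumed, as is the header
location dawn/platform/DawnPlatform.h:

    #include <vector>
    #include "dawn/platform/DawnPlatform.h"  // Assumed header for CachingInterface.

    // Sketch only: exercises both LoadData modes.
    std::vector<char> LoadCachedValue(dawn::platform::CachingInterface* cache,
                                      WGPUDevice device,
                                      const void* key,
                                      size_t keySize) {
        // Query mode: valueOut == nullptr, valueSize == 0. A non-zero return
        // means |key| exists and gives the stored size.
        size_t valueSize = cache->LoadData(device, key, keySize, nullptr, 0);
        std::vector<char> value(valueSize);
        if (valueSize > 0) {
            // Load mode: fill a caller-provided buffer of the queried size.
            cache->LoadData(device, key, keySize, value.data(), value.size());
        }
        return value;
    }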
 
diff --git a/include/dawn/platform/dawn_platform_export.h b/include/dawn/platform/dawn_platform_export.h
index e8d22e3..fbdb33c 100644
--- a/include/dawn/platform/dawn_platform_export.h
+++ b/include/dawn/platform/dawn_platform_export.h
@@ -16,21 +16,21 @@
 #define INCLUDE_DAWN_PLATFORM_DAWN_PLATFORM_EXPORT_H_
 
 #if defined(DAWN_PLATFORM_SHARED_LIBRARY)
-#    if defined(_WIN32)
-#        if defined(DAWN_PLATFORM_IMPLEMENTATION)
-#            define DAWN_PLATFORM_EXPORT __declspec(dllexport)
-#        else
-#            define DAWN_PLATFORM_EXPORT __declspec(dllimport)
-#        endif
-#    else  // defined(_WIN32)
-#        if defined(DAWN_PLATFORM_IMPLEMENTATION)
-#            define DAWN_PLATFORM_EXPORT __attribute__((visibility("default")))
-#        else
-#            define DAWN_PLATFORM_EXPORT
-#        endif
-#    endif  // defined(_WIN32)
-#else       // defined(DAWN_PLATFORM_SHARED_LIBRARY)
-#    define DAWN_PLATFORM_EXPORT
+#if defined(_WIN32)
+#if defined(DAWN_PLATFORM_IMPLEMENTATION)
+#define DAWN_PLATFORM_EXPORT __declspec(dllexport)
+#else
+#define DAWN_PLATFORM_EXPORT __declspec(dllimport)
+#endif
+#else  // defined(_WIN32)
+#if defined(DAWN_PLATFORM_IMPLEMENTATION)
+#define DAWN_PLATFORM_EXPORT __attribute__((visibility("default")))
+#else
+#define DAWN_PLATFORM_EXPORT
+#endif
+#endif  // defined(_WIN32)
+#else   // defined(DAWN_PLATFORM_SHARED_LIBRARY)
+#define DAWN_PLATFORM_EXPORT
 #endif  // defined(DAWN_PLATFORM_SHARED_LIBRARY)
 
 #endif  // INCLUDE_DAWN_PLATFORM_DAWN_PLATFORM_EXPORT_H_
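
Spelled out, the flattened #if nesting above resolves the same way as the
indented version it replaces:

    // DAWN_PLATFORM_SHARED_LIBRARY + DAWN_PLATFORM_IMPLEMENTATION:
    //   Windows: __declspec(dllexport); elsewhere: __attribute__((visibility("default")))
    // DAWN_PLATFORM_SHARED_LIBRARY without IMPLEMENTATION (a consumer of the DLL):
    //   Windows: __declspec(dllimport); elsewhere: empty
    // Static builds (no DAWN_PLATFORM_SHARED_LIBRARY): DAWN_PLATFORM_EXPORT is empty.
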
diff --git a/include/dawn/wire/Wire.h b/include/dawn/wire/Wire.h
index 10028e0..1f8e96b 100644
--- a/include/dawn/wire/Wire.h
+++ b/include/dawn/wire/Wire.h
@@ -23,53 +23,52 @@
 
 namespace dawn::wire {
 
-    class DAWN_WIRE_EXPORT CommandSerializer {
-      public:
-        CommandSerializer();
-        virtual ~CommandSerializer();
-        CommandSerializer(const CommandSerializer& rhs) = delete;
-        CommandSerializer& operator=(const CommandSerializer& rhs) = delete;
+class DAWN_WIRE_EXPORT CommandSerializer {
+  public:
+    CommandSerializer();
+    virtual ~CommandSerializer();
+    CommandSerializer(const CommandSerializer& rhs) = delete;
+    CommandSerializer& operator=(const CommandSerializer& rhs) = delete;
 
-        // Get space for serializing commands.
-        // GetCmdSpace will never be called with a value larger than
-        // what GetMaximumAllocationSize returns. Return nullptr to indicate
-        // a fatal error.
-        virtual void* GetCmdSpace(size_t size) = 0;
-        virtual bool Flush() = 0;
-        virtual size_t GetMaximumAllocationSize() const = 0;
-        virtual void OnSerializeError();
-    };
+    // Get space for serializing commands.
+    // GetCmdSpace will never be called with a value larger than
+    // what GetMaximumAllocationSize returns. Return nullptr to indicate
+    // a fatal error.
+    virtual void* GetCmdSpace(size_t size) = 0;
+    virtual bool Flush() = 0;
+    virtual size_t GetMaximumAllocationSize() const = 0;
+    virtual void OnSerializeError();
+};
 
-    class DAWN_WIRE_EXPORT CommandHandler {
-      public:
-        CommandHandler();
-        virtual ~CommandHandler();
-        CommandHandler(const CommandHandler& rhs) = delete;
-        CommandHandler& operator=(const CommandHandler& rhs) = delete;
+class DAWN_WIRE_EXPORT CommandHandler {
+  public:
+    CommandHandler();
+    virtual ~CommandHandler();
+    CommandHandler(const CommandHandler& rhs) = delete;
+    CommandHandler& operator=(const CommandHandler& rhs) = delete;
 
-        virtual const volatile char* HandleCommands(const volatile char* commands, size_t size) = 0;
-    };
+    virtual const volatile char* HandleCommands(const volatile char* commands, size_t size) = 0;
+};
 
-    DAWN_WIRE_EXPORT size_t
-    SerializedWGPUDevicePropertiesSize(const WGPUDeviceProperties* deviceProperties);
+DAWN_WIRE_EXPORT size_t
+SerializedWGPUDevicePropertiesSize(const WGPUDeviceProperties* deviceProperties);
 
-    DAWN_WIRE_EXPORT void SerializeWGPUDeviceProperties(
-        const WGPUDeviceProperties* deviceProperties,
-        char* serializeBuffer);
+DAWN_WIRE_EXPORT void SerializeWGPUDeviceProperties(const WGPUDeviceProperties* deviceProperties,
+                                                    char* serializeBuffer);
 
-    DAWN_WIRE_EXPORT bool DeserializeWGPUDeviceProperties(WGPUDeviceProperties* deviceProperties,
-                                                          const volatile char* deserializeBuffer,
-                                                          size_t deserializeBufferSize);
+DAWN_WIRE_EXPORT bool DeserializeWGPUDeviceProperties(WGPUDeviceProperties* deviceProperties,
+                                                      const volatile char* deserializeBuffer,
+                                                      size_t deserializeBufferSize);
 
-    DAWN_WIRE_EXPORT size_t
-    SerializedWGPUSupportedLimitsSize(const WGPUSupportedLimits* supportedLimits);
+DAWN_WIRE_EXPORT size_t
+SerializedWGPUSupportedLimitsSize(const WGPUSupportedLimits* supportedLimits);
 
-    DAWN_WIRE_EXPORT void SerializeWGPUSupportedLimits(const WGPUSupportedLimits* supportedLimits,
-                                                       char* serializeBuffer);
+DAWN_WIRE_EXPORT void SerializeWGPUSupportedLimits(const WGPUSupportedLimits* supportedLimits,
+                                                   char* serializeBuffer);
 
-    DAWN_WIRE_EXPORT bool DeserializeWGPUSupportedLimits(WGPUSupportedLimits* supportedLimits,
-                                                         const volatile char* deserializeBuffer,
-                                                         size_t deserializeBufferSize);
+DAWN_WIRE_EXPORT bool DeserializeWGPUSupportedLimits(WGPUSupportedLimits* supportedLimits,
+                                                     const volatile char* deserializeBuffer,
+                                                     size_t deserializeBufferSize);
 
 }  // namespace dawn::wire
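
To make the GetCmdSpace()/GetMaximumAllocationSize() contract above concrete,
a fixed-buffer serializer could look like the sketch below. This is
illustrative only, not code from this CL; the transport hand-off is elided:

    #include <cstddef>
    #include <vector>
    #include "dawn/wire/Wire.h"

    class BufferedSerializer : public dawn::wire::CommandSerializer {
      public:
        void* GetCmdSpace(size_t size) override {
            // The wire never requests more than GetMaximumAllocationSize().
            if (mOffset + size > mBuffer.size() && !Flush()) {
                return nullptr;  // Fatal error.
            }
            void* ptr = mBuffer.data() + mOffset;
            mOffset += size;
            return ptr;
        }
        bool Flush() override {
            // A real implementation hands mBuffer[0, mOffset) to the transport.
            mOffset = 0;
            return true;
        }
        size_t GetMaximumAllocationSize() const override { return mBuffer.size(); }

      private:
        std::vector<char> mBuffer = std::vector<char>(1 << 16);
        size_t mOffset = 0;
    };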
 
diff --git a/include/dawn/wire/WireClient.h b/include/dawn/wire/WireClient.h
index d8b50a3..26e1f1d 100644
--- a/include/dawn/wire/WireClient.h
+++ b/include/dawn/wire/WireClient.h
@@ -23,160 +23,158 @@
 
 namespace dawn::wire {
 
-    namespace client {
-        class Client;
-        class MemoryTransferService;
+namespace client {
+class Client;
+class MemoryTransferService;
 
-        DAWN_WIRE_EXPORT const DawnProcTable& GetProcs();
-    }  // namespace client
+DAWN_WIRE_EXPORT const DawnProcTable& GetProcs();
+}  // namespace client
 
-    struct ReservedTexture {
-        WGPUTexture texture;
-        uint32_t id;
-        uint32_t generation;
-        uint32_t deviceId;
-        uint32_t deviceGeneration;
-    };
+struct ReservedTexture {
+    WGPUTexture texture;
+    uint32_t id;
+    uint32_t generation;
+    uint32_t deviceId;
+    uint32_t deviceGeneration;
+};
 
-    struct ReservedSwapChain {
-        WGPUSwapChain swapchain;
-        uint32_t id;
-        uint32_t generation;
-        uint32_t deviceId;
-        uint32_t deviceGeneration;
-    };
+struct ReservedSwapChain {
+    WGPUSwapChain swapchain;
+    uint32_t id;
+    uint32_t generation;
+    uint32_t deviceId;
+    uint32_t deviceGeneration;
+};
 
-    struct ReservedDevice {
-        WGPUDevice device;
-        uint32_t id;
-        uint32_t generation;
-    };
+struct ReservedDevice {
+    WGPUDevice device;
+    uint32_t id;
+    uint32_t generation;
+};
 
-    struct ReservedInstance {
-        WGPUInstance instance;
-        uint32_t id;
-        uint32_t generation;
-    };
+struct ReservedInstance {
+    WGPUInstance instance;
+    uint32_t id;
+    uint32_t generation;
+};
 
-    struct DAWN_WIRE_EXPORT WireClientDescriptor {
-        CommandSerializer* serializer;
-        client::MemoryTransferService* memoryTransferService = nullptr;
-    };
+struct DAWN_WIRE_EXPORT WireClientDescriptor {
+    CommandSerializer* serializer;
+    client::MemoryTransferService* memoryTransferService = nullptr;
+};
 
-    class DAWN_WIRE_EXPORT WireClient : public CommandHandler {
+class DAWN_WIRE_EXPORT WireClient : public CommandHandler {
+  public:
+    explicit WireClient(const WireClientDescriptor& descriptor);
+    ~WireClient() override;
+
+    const volatile char* HandleCommands(const volatile char* commands, size_t size) final;
+
+    ReservedTexture ReserveTexture(WGPUDevice device);
+    ReservedSwapChain ReserveSwapChain(WGPUDevice device);
+    ReservedDevice ReserveDevice();
+    ReservedInstance ReserveInstance();
+
+    void ReclaimTextureReservation(const ReservedTexture& reservation);
+    void ReclaimSwapChainReservation(const ReservedSwapChain& reservation);
+    void ReclaimDeviceReservation(const ReservedDevice& reservation);
+    void ReclaimInstanceReservation(const ReservedInstance& reservation);
+
+    // Disconnects the client.
+    // Commands allocated after this point will not be sent.
+    void Disconnect();
+
+  private:
+    std::unique_ptr<client::Client> mImpl;
+};
+
+namespace client {
+class DAWN_WIRE_EXPORT MemoryTransferService {
+  public:
+    MemoryTransferService();
+    virtual ~MemoryTransferService();
+
+    class ReadHandle;
+    class WriteHandle;
+
+    // Create a handle for reading server data.
+    // This may fail and return nullptr.
+    virtual ReadHandle* CreateReadHandle(size_t) = 0;
+
+    // Create a handle for writing server data.
+    // This may fail and return nullptr.
+    virtual WriteHandle* CreateWriteHandle(size_t) = 0;
+
+    class DAWN_WIRE_EXPORT ReadHandle {
       public:
-        explicit WireClient(const WireClientDescriptor& descriptor);
-        ~WireClient() override;
+        ReadHandle();
+        virtual ~ReadHandle();
 
-        const volatile char* HandleCommands(const volatile char* commands, size_t size) final;
+        // Get the required serialization size for SerializeCreate
+        virtual size_t SerializeCreateSize() = 0;
 
-        ReservedTexture ReserveTexture(WGPUDevice device);
-        ReservedSwapChain ReserveSwapChain(WGPUDevice device);
-        ReservedDevice ReserveDevice();
-        ReservedInstance ReserveInstance();
+        // Serialize the handle into |serializePointer| so it can be received by the server.
+        virtual void SerializeCreate(void* serializePointer) = 0;
 
-        void ReclaimTextureReservation(const ReservedTexture& reservation);
-        void ReclaimSwapChainReservation(const ReservedSwapChain& reservation);
-        void ReclaimDeviceReservation(const ReservedDevice& reservation);
-        void ReclaimInstanceReservation(const ReservedInstance& reservation);
+        // Simply return the base address of the allocation (without applying any offset).
+        // Returns nullptr if the allocation failed.
+        // The data must live at least until the ReadHandle is destructed.
+        virtual const void* GetData() = 0;
 
-        // Disconnects the client.
-        // Commands allocated after this point will not be sent.
-        void Disconnect();
+        // Gets called when a MapReadCallback resolves. Deserializes the data update
+        // and applies it to the range (offset, offset + size) of the allocation.
+        // There may be nothing to deserialize (e.g. when using shared memory).
+        // Needs to check for potential offset/size OOB and overflow.
+        virtual bool DeserializeDataUpdate(const void* deserializePointer,
+                                           size_t deserializeSize,
+                                           size_t offset,
+                                           size_t size) = 0;
 
       private:
-        std::unique_ptr<client::Client> mImpl;
+        ReadHandle(const ReadHandle&) = delete;
+        ReadHandle& operator=(const ReadHandle&) = delete;
     };
 
-    namespace client {
-        class DAWN_WIRE_EXPORT MemoryTransferService {
-          public:
-            MemoryTransferService();
-            virtual ~MemoryTransferService();
+    class DAWN_WIRE_EXPORT WriteHandle {
+      public:
+        WriteHandle();
+        virtual ~WriteHandle();
 
-            class ReadHandle;
-            class WriteHandle;
+        // Get the required serialization size for SerializeCreate
+        virtual size_t SerializeCreateSize() = 0;
 
-            // Create a handle for reading server data.
-            // This may fail and return nullptr.
-            virtual ReadHandle* CreateReadHandle(size_t) = 0;
+        // Serialize the handle into |serializePointer| so it can be received by the server.
+        virtual void SerializeCreate(void* serializePointer) = 0;
 
-            // Create a handle for writing server data.
-            // This may fail and return nullptr.
-            virtual WriteHandle* CreateWriteHandle(size_t) = 0;
+        // Simply return the base address of the allocation (without applying any offset).
+        // The data returned should be zero-initialized.
+        // The data returned must live at least until the WriteHandle is destructed.
+        // On failure, the pointer returned should be null.
+        virtual void* GetData() = 0;
 
-            class DAWN_WIRE_EXPORT ReadHandle {
-              public:
-                ReadHandle();
-                virtual ~ReadHandle();
+        // Get the required serialization size for SerializeDataUpdate
+        virtual size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) = 0;
 
-                // Get the required serialization size for SerializeCreate
-                virtual size_t SerializeCreateSize() = 0;
+        // Serialize a command to send the modified contents of
+        // the subrange (offset, offset + size) of the allocation at buffer unmap.
+        // This subrange is always the whole mapped region for now.
+        // There may be nothing to serialize (e.g. when using shared memory).
+        virtual void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) = 0;
 
-                // Serialize the handle into |serializePointer| so it can be received by the server.
-                virtual void SerializeCreate(void* serializePointer) = 0;
+      private:
+        WriteHandle(const WriteHandle&) = delete;
+        WriteHandle& operator=(const WriteHandle&) = delete;
+    };
 
-                // Simply return the base address of the allocation (without applying any offset)
-                // Returns nullptr if the allocation failed.
-                // The data must live at least until the ReadHandle is destructued
-                virtual const void* GetData() = 0;
+  private:
+    MemoryTransferService(const MemoryTransferService&) = delete;
+    MemoryTransferService& operator=(const MemoryTransferService&) = delete;
+};
 
-                // Gets called when a MapReadCallback resolves.
-                // deserialize the data update and apply
-                // it to the range (offset, offset + size) of allocation
-                // There could be nothing to be deserialized (if using shared memory)
-                // Needs to check potential offset/size OOB and overflow
-                virtual bool DeserializeDataUpdate(const void* deserializePointer,
-                                                   size_t deserializeSize,
-                                                   size_t offset,
-                                                   size_t size) = 0;
-
-              private:
-                ReadHandle(const ReadHandle&) = delete;
-                ReadHandle& operator=(const ReadHandle&) = delete;
-            };
-
-            class DAWN_WIRE_EXPORT WriteHandle {
-              public:
-                WriteHandle();
-                virtual ~WriteHandle();
-
-                // Get the required serialization size for SerializeCreate
-                virtual size_t SerializeCreateSize() = 0;
-
-                // Serialize the handle into |serializePointer| so it can be received by the server.
-                virtual void SerializeCreate(void* serializePointer) = 0;
-
-                // Simply return the base address of the allocation (without applying any offset)
-                // The data returned should be zero-initialized.
-                // The data returned must live at least until the WriteHandle is destructed.
-                // On failure, the pointer returned should be null.
-                virtual void* GetData() = 0;
-
-                // Get the required serialization size for SerializeDataUpdate
-                virtual size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) = 0;
-
-                // Serialize a command to send the modified contents of
-                // the subrange (offset, offset + size) of the allocation at buffer unmap
-                // This subrange is always the whole mapped region for now
-                // There could be nothing to be serialized (if using shared memory)
-                virtual void SerializeDataUpdate(void* serializePointer,
-                                                 size_t offset,
-                                                 size_t size) = 0;
-
-              private:
-                WriteHandle(const WriteHandle&) = delete;
-                WriteHandle& operator=(const WriteHandle&) = delete;
-            };
-
-          private:
-            MemoryTransferService(const MemoryTransferService&) = delete;
-            MemoryTransferService& operator=(const MemoryTransferService&) = delete;
-        };
-
-        // Backdoor to get the order of the ProcMap for testing
-        DAWN_WIRE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
-    }  // namespace client
+// Backdoor to get the order of the ProcMap for testing
+DAWN_WIRE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
+}  // namespace client
 }  // namespace dawn::wire
 
 #endif  // INCLUDE_DAWN_WIRE_WIRECLIENT_H_
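
Typical client-side setup for the API above, sketched with an assumed
|serializer| (any CommandSerializer implementation, such as the buffered one
sketched earlier):

    #include <memory>
    #include "dawn/wire/WireClient.h"

    std::unique_ptr<dawn::wire::WireClient> CreateClient(
        dawn::wire::CommandSerializer* serializer) {
        dawn::wire::WireClientDescriptor desc = {};
        desc.serializer = serializer;  // Must outlive the client.
        // memoryTransferService stays nullptr to use the built-in service.
        auto wireClient = std::make_unique<dawn::wire::WireClient>(desc);

        dawn::wire::ReservedDevice reservation = wireClient->ReserveDevice();
        // reservation.device is usable immediately on this side. The embedder
        // sends reservation.id / reservation.generation to the server, which
        // calls InjectDevice() to back the reservation with a real device.
        return wireClient;
    }
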
diff --git a/include/dawn/wire/WireServer.h b/include/dawn/wire/WireServer.h
index 1957de0..9905608 100644
--- a/include/dawn/wire/WireServer.h
+++ b/include/dawn/wire/WireServer.h
@@ -23,126 +23,126 @@
 
 namespace dawn::wire {
 
-    namespace server {
-        class Server;
-        class MemoryTransferService;
-    }  // namespace server
+namespace server {
+class Server;
+class MemoryTransferService;
+}  // namespace server
 
-    struct DAWN_WIRE_EXPORT WireServerDescriptor {
-        const DawnProcTable* procs;
-        CommandSerializer* serializer;
-        server::MemoryTransferService* memoryTransferService = nullptr;
-    };
+struct DAWN_WIRE_EXPORT WireServerDescriptor {
+    const DawnProcTable* procs;
+    CommandSerializer* serializer;
+    server::MemoryTransferService* memoryTransferService = nullptr;
+};
 
-    class DAWN_WIRE_EXPORT WireServer : public CommandHandler {
+class DAWN_WIRE_EXPORT WireServer : public CommandHandler {
+  public:
+    explicit WireServer(const WireServerDescriptor& descriptor);
+    ~WireServer() override;
+
+    const volatile char* HandleCommands(const volatile char* commands, size_t size) final;
+
+    bool InjectTexture(WGPUTexture texture,
+                       uint32_t id,
+                       uint32_t generation,
+                       uint32_t deviceId,
+                       uint32_t deviceGeneration);
+    bool InjectSwapChain(WGPUSwapChain swapchain,
+                         uint32_t id,
+                         uint32_t generation,
+                         uint32_t deviceId,
+                         uint32_t deviceGeneration);
+
+    bool InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation);
+
+    bool InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation);
+
+    // Look up a device by (id, generation) pair. Returns nullptr if the generation
+    // has expired or the id is not found.
+    // The Wire does not have destroy hooks to allow an embedder to observe when an object
+    // has been destroyed, but in Chrome, we need to know the list of live devices so we
+    // can call device.Tick() on all of them periodically to ensure forward progress is
+    // made on asynchronous work. Getting this list can be done by tracking the
+    // (id, generation) of previously injected devices, and observing whether
+    // GetDevice(id, generation) returns non-null.
+    WGPUDevice GetDevice(uint32_t id, uint32_t generation);
+
+  private:
+    std::unique_ptr<server::Server> mImpl;
+};
+
+namespace server {
+class DAWN_WIRE_EXPORT MemoryTransferService {
+  public:
+    MemoryTransferService();
+    virtual ~MemoryTransferService();
+
+    class ReadHandle;
+    class WriteHandle;
+
+    // Deserialize data to create Read/Write handles. These handles are for the client
+    // to Read/Write data.
+    virtual bool DeserializeReadHandle(const void* deserializePointer,
+                                       size_t deserializeSize,
+                                       ReadHandle** readHandle) = 0;
+    virtual bool DeserializeWriteHandle(const void* deserializePointer,
+                                        size_t deserializeSize,
+                                        WriteHandle** writeHandle) = 0;
+
+    class DAWN_WIRE_EXPORT ReadHandle {
       public:
-        explicit WireServer(const WireServerDescriptor& descriptor);
-        ~WireServer() override;
+        ReadHandle();
+        virtual ~ReadHandle();
 
-        const volatile char* HandleCommands(const volatile char* commands, size_t size) final;
+        // Return the size of the command serialized if
+        // SerializeDataUpdate is called with the same offset/size args
+        virtual size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) = 0;
 
-        bool InjectTexture(WGPUTexture texture,
-                           uint32_t id,
-                           uint32_t generation,
-                           uint32_t deviceId,
-                           uint32_t deviceGeneration);
-        bool InjectSwapChain(WGPUSwapChain swapchain,
-                             uint32_t id,
-                             uint32_t generation,
-                             uint32_t deviceId,
-                             uint32_t deviceGeneration);
-
-        bool InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation);
-
-        bool InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation);
-
-        // Look up a device by (id, generation) pair. Returns nullptr if the generation
-        // has expired or the id is not found.
-        // The Wire does not have destroy hooks to allow an embedder to observe when an object
-        // has been destroyed, but in Chrome, we need to know the list of live devices so we
-        // can call device.Tick() on all of them periodically to ensure progress on asynchronous
-        // work is made. Getting this list can be done by tracking the (id, generation) of
-        // previously injected devices, and observing if GetDevice(id, generation) returns non-null.
-        WGPUDevice GetDevice(uint32_t id, uint32_t generation);
+        // Gets called when a MapReadCallback resolves.
+        // Serializes the data update for the range (offset, offset + size) into
+        // |serializePointer| for the client. There may be nothing to serialize
+        // (e.g. when using shared memory).
+        virtual void SerializeDataUpdate(const void* data,
+                                         size_t offset,
+                                         size_t size,
+                                         void* serializePointer) = 0;
 
       private:
-        std::unique_ptr<server::Server> mImpl;
+        ReadHandle(const ReadHandle&) = delete;
+        ReadHandle& operator=(const ReadHandle&) = delete;
     };
 
-    namespace server {
-        class DAWN_WIRE_EXPORT MemoryTransferService {
-          public:
-            MemoryTransferService();
-            virtual ~MemoryTransferService();
+    class DAWN_WIRE_EXPORT WriteHandle {
+      public:
+        WriteHandle();
+        virtual ~WriteHandle();
 
-            class ReadHandle;
-            class WriteHandle;
+        // Set the target for writes from the client. DeserializeFlush should copy data
+        // into the target.
+        void SetTarget(void* data);
+        // Set the staging data length for OOB checks.
+        void SetDataLength(size_t dataLength);
 
-            // Deserialize data to create Read/Write handles. These handles are for the client
-            // to Read/Write data.
-            virtual bool DeserializeReadHandle(const void* deserializePointer,
-                                               size_t deserializeSize,
-                                               ReadHandle** readHandle) = 0;
-            virtual bool DeserializeWriteHandle(const void* deserializePointer,
-                                                size_t deserializeSize,
-                                                WriteHandle** writeHandle) = 0;
+        // This function takes in the serialized result of
+        // client::MemoryTransferService::WriteHandle::SerializeDataUpdate.
+        // Needs to check for potential offset/size OOB and overflow.
+        virtual bool DeserializeDataUpdate(const void* deserializePointer,
+                                           size_t deserializeSize,
+                                           size_t offset,
+                                           size_t size) = 0;
 
-            class DAWN_WIRE_EXPORT ReadHandle {
-              public:
-                ReadHandle();
-                virtual ~ReadHandle();
+      protected:
+        void* mTargetData = nullptr;
+        size_t mDataLength = 0;
 
-                // Return the size of the command serialized if
-                // SerializeDataUpdate is called with the same offset/size args
-                virtual size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) = 0;
+      private:
+        WriteHandle(const WriteHandle&) = delete;
+        WriteHandle& operator=(const WriteHandle&) = delete;
+    };
 
-                // Gets called when a MapReadCallback resolves.
-                // Serialize the data update for the range (offset, offset + size) into
-                // |serializePointer| to the client There could be nothing to be serialized (if
-                // using shared memory)
-                virtual void SerializeDataUpdate(const void* data,
-                                                 size_t offset,
-                                                 size_t size,
-                                                 void* serializePointer) = 0;
-
-              private:
-                ReadHandle(const ReadHandle&) = delete;
-                ReadHandle& operator=(const ReadHandle&) = delete;
-            };
-
-            class DAWN_WIRE_EXPORT WriteHandle {
-              public:
-                WriteHandle();
-                virtual ~WriteHandle();
-
-                // Set the target for writes from the client. DeserializeFlush should copy data
-                // into the target.
-                void SetTarget(void* data);
-                // Set Staging data length for OOB check
-                void SetDataLength(size_t dataLength);
-
-                // This function takes in the serialized result of
-                // client::MemoryTransferService::WriteHandle::SerializeDataUpdate.
-                // Needs to check potential offset/size OOB and overflow
-                virtual bool DeserializeDataUpdate(const void* deserializePointer,
-                                                   size_t deserializeSize,
-                                                   size_t offset,
-                                                   size_t size) = 0;
-
-              protected:
-                void* mTargetData = nullptr;
-                size_t mDataLength = 0;
-
-              private:
-                WriteHandle(const WriteHandle&) = delete;
-                WriteHandle& operator=(const WriteHandle&) = delete;
-            };
-
-          private:
-            MemoryTransferService(const MemoryTransferService&) = delete;
-            MemoryTransferService& operator=(const MemoryTransferService&) = delete;
-        };
-    }  // namespace server
+  private:
+    MemoryTransferService(const MemoryTransferService&) = delete;
+    MemoryTransferService& operator=(const MemoryTransferService&) = delete;
+};
+}  // namespace server
 
 }  // namespace dawn::wire
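
The GetDevice() comment above describes the pattern of tracking
(id, generation) pairs and ticking only the devices that are still alive. A
sketch of that loop follows; calling Tick through a DawnProcTable member named
deviceTick is an assumption, since the proc table is not part of this diff:

    #include <cstdint>
    #include <utility>
    #include <vector>
    #include "dawn/dawn_proc_table.h"  // Generated proc table (assumed location).
    #include "dawn/wire/WireServer.h"

    void TickLiveDevices(dawn::wire::WireServer* server,
                         const DawnProcTable& procs,
                         std::vector<std::pair<uint32_t, uint32_t>>* known) {
        for (auto it = known->begin(); it != known->end();) {
            WGPUDevice device = server->GetDevice(it->first, it->second);
            if (device == nullptr) {
                it = known->erase(it);  // Generation expired or id not found.
            } else {
                procs.deviceTick(device);  // Assumed proc-table entry.
                ++it;
            }
        }
    }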
 
diff --git a/include/dawn/wire/dawn_wire_export.h b/include/dawn/wire/dawn_wire_export.h
index 285d5db..e5b2113 100644
--- a/include/dawn/wire/dawn_wire_export.h
+++ b/include/dawn/wire/dawn_wire_export.h
@@ -16,21 +16,21 @@
 #define INCLUDE_DAWN_WIRE_DAWN_WIRE_EXPORT_H_
 
 #if defined(DAWN_WIRE_SHARED_LIBRARY)
-#    if defined(_WIN32)
-#        if defined(DAWN_WIRE_IMPLEMENTATION)
-#            define DAWN_WIRE_EXPORT __declspec(dllexport)
-#        else
-#            define DAWN_WIRE_EXPORT __declspec(dllimport)
-#        endif
-#    else  // defined(_WIN32)
-#        if defined(DAWN_WIRE_IMPLEMENTATION)
-#            define DAWN_WIRE_EXPORT __attribute__((visibility("default")))
-#        else
-#            define DAWN_WIRE_EXPORT
-#        endif
-#    endif  // defined(_WIN32)
-#else       // defined(DAWN_WIRE_SHARED_LIBRARY)
-#    define DAWN_WIRE_EXPORT
+#if defined(_WIN32)
+#if defined(DAWN_WIRE_IMPLEMENTATION)
+#define DAWN_WIRE_EXPORT __declspec(dllexport)
+#else
+#define DAWN_WIRE_EXPORT __declspec(dllimport)
+#endif
+#else  // defined(_WIN32)
+#if defined(DAWN_WIRE_IMPLEMENTATION)
+#define DAWN_WIRE_EXPORT __attribute__((visibility("default")))
+#else
+#define DAWN_WIRE_EXPORT
+#endif
+#endif  // defined(_WIN32)
+#else   // defined(DAWN_WIRE_SHARED_LIBRARY)
+#define DAWN_WIRE_EXPORT
 #endif  // defined(DAWN_WIRE_SHARED_LIBRARY)
 
 #endif  // INCLUDE_DAWN_WIRE_DAWN_WIRE_EXPORT_H_
diff --git a/include/tint/.clang-format b/include/tint/.clang-format
deleted file mode 100644
index 2fb833a..0000000
--- a/include/tint/.clang-format
+++ /dev/null
@@ -1,2 +0,0 @@
-# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
-BasedOnStyle: Chromium
diff --git a/src/dawn/CPPLINT.cfg b/src/dawn/CPPLINT.cfg
deleted file mode 100644
index f5c9c6d..0000000
--- a/src/dawn/CPPLINT.cfg
+++ /dev/null
@@ -1 +0,0 @@
-filter=-runtime/indentation_namespace
diff --git a/src/dawn/common/Assert.h b/src/dawn/common/Assert.h
index ee9eeb4..ba4a429 100644
--- a/src/dawn/common/Assert.h
+++ b/src/dawn/common/Assert.h
@@ -32,32 +32,32 @@
 // MSVC triggers a warning in /W4 for do {} while(0). SDL worked around this by using (0,0) and
 // points out that it looks like an owl face.
 #if defined(DAWN_COMPILER_MSVC)
-#    define DAWN_ASSERT_LOOP_CONDITION (0, 0)
+#define DAWN_ASSERT_LOOP_CONDITION (0, 0)
 #else
-#    define DAWN_ASSERT_LOOP_CONDITION (0)
+#define DAWN_ASSERT_LOOP_CONDITION (0)
 #endif
 
 // DAWN_ASSERT_CALLSITE_HELPER generates the actual assert code. In Debug it does what you would
 // expect of an assert and in release it tries to give hints to make the compiler generate better
 // code.
 #if defined(DAWN_ENABLE_ASSERTS)
-#    define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition)  \
-        do {                                                          \
-            if (!(condition)) {                                       \
-                HandleAssertionFailure(file, func, line, #condition); \
-            }                                                         \
-        } while (DAWN_ASSERT_LOOP_CONDITION)
+#define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition)  \
+    do {                                                          \
+        if (!(condition)) {                                       \
+            HandleAssertionFailure(file, func, line, #condition); \
+        }                                                         \
+    } while (DAWN_ASSERT_LOOP_CONDITION)
 #else
-#    if defined(DAWN_COMPILER_MSVC)
-#        define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __assume(condition)
-#    elif defined(DAWN_COMPILER_CLANG) && defined(__builtin_assume)
-#        define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __builtin_assume(condition)
-#    else
-#        define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \
-            do {                                                         \
-                DAWN_UNUSED(sizeof(condition));                          \
-            } while (DAWN_ASSERT_LOOP_CONDITION)
-#    endif
+#if defined(DAWN_COMPILER_MSVC)
+#define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __assume(condition)
+#elif defined(DAWN_COMPILER_CLANG) && defined(__builtin_assume)
+#define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __builtin_assume(condition)
+#else
+#define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \
+    do {                                                         \
+        DAWN_UNUSED(sizeof(condition));                          \
+    } while (DAWN_ASSERT_LOOP_CONDITION)
+#endif
 #endif
 
 #define DAWN_ASSERT(condition) DAWN_ASSERT_CALLSITE_HELPER(__FILE__, __func__, __LINE__, condition)
@@ -68,8 +68,8 @@
     } while (DAWN_ASSERT_LOOP_CONDITION)
 
 #if !defined(DAWN_SKIP_ASSERT_SHORTHANDS)
-#    define ASSERT DAWN_ASSERT
-#    define UNREACHABLE DAWN_UNREACHABLE
+#define ASSERT DAWN_ASSERT
+#define UNREACHABLE DAWN_UNREACHABLE
 #endif
 
 void HandleAssertionFailure(const char* file,
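
For illustration, a failed DAWN_ASSERT in a Debug build (DAWN_ENABLE_ASSERTS)
reports file/function/line via HandleAssertionFailure; in Release the
condition becomes an __assume/__builtin_assume optimizer hint or is discarded:

    #include "dawn/common/Assert.h"

    int SafeDivide(int num, int denom) {
        DAWN_ASSERT(denom != 0);  // Debug: abort with context; Release: hint.
        return num / denom;
    }
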
diff --git a/src/dawn/common/BitSetIterator.h b/src/dawn/common/BitSetIterator.h
index 0f1997c..a011249 100644
--- a/src/dawn/common/BitSetIterator.h
+++ b/src/dawn/common/BitSetIterator.h
@@ -62,24 +62,18 @@
         uint32_t mOffset;
     };
 
-    Iterator begin() const {
-        return Iterator(mBits);
-    }
-    Iterator end() const {
-        return Iterator(std::bitset<N>(0));
-    }
+    Iterator begin() const { return Iterator(mBits); }
+    Iterator end() const { return Iterator(std::bitset<N>(0)); }
 
   private:
     const std::bitset<N> mBits;
 };
 
 template <size_t N, typename T>
-BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset) : mBits(bitset) {
-}
+BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset) : mBits(bitset) {}
 
 template <size_t N, typename T>
-BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other) : mBits(other.mBits) {
-}
+BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other) : mBits(other.mBits) {}
 
 template <size_t N, typename T>
 BitSetIterator<N, T>& BitSetIterator<N, T>::operator=(const BitSetIterator& other) {
diff --git a/src/dawn/common/Compiler.h b/src/dawn/common/Compiler.h
index db75933..fc29c40 100644
--- a/src/dawn/common/Compiler.h
+++ b/src/dawn/common/Compiler.h
@@ -29,50 +29,50 @@
 
 // Clang and GCC, check for __clang__ too to catch clang-cl masquerading as MSVC
 #if defined(__GNUC__) || defined(__clang__)
-#    if defined(__clang__)
-#        define DAWN_COMPILER_CLANG
-#    else
-#        define DAWN_COMPILER_GCC
-#    endif
+#if defined(__clang__)
+#define DAWN_COMPILER_CLANG
+#else
+#define DAWN_COMPILER_GCC
+#endif
 
-#    if defined(__i386__) || defined(__x86_64__)
-#        define DAWN_BREAKPOINT() __asm__ __volatile__("int $3\n\t")
-#    else
+#if defined(__i386__) || defined(__x86_64__)
+#define DAWN_BREAKPOINT() __asm__ __volatile__("int $3\n\t")
+#else
 // TODO(cwallez@chromium.org): Implement breakpoint on all supported architectures
-#        define DAWN_BREAKPOINT()
-#    endif
+#define DAWN_BREAKPOINT()
+#endif
 
-#    define DAWN_BUILTIN_UNREACHABLE() __builtin_unreachable()
-#    define DAWN_LIKELY(x) __builtin_expect(!!(x), 1)
-#    define DAWN_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#define DAWN_BUILTIN_UNREACHABLE() __builtin_unreachable()
+#define DAWN_LIKELY(x) __builtin_expect(!!(x), 1)
+#define DAWN_UNLIKELY(x) __builtin_expect(!!(x), 0)
 
-#    if !defined(__has_cpp_attribute)
-#        define __has_cpp_attribute(name) 0
-#    endif
+#if !defined(__has_cpp_attribute)
+#define __has_cpp_attribute(name) 0
+#endif
 
-#    define DAWN_DECLARE_UNUSED __attribute__((unused))
-#    if defined(NDEBUG)
-#        define DAWN_FORCE_INLINE inline __attribute__((always_inline))
-#    endif
-#    define DAWN_NOINLINE __attribute__((noinline))
+#define DAWN_DECLARE_UNUSED __attribute__((unused))
+#if defined(NDEBUG)
+#define DAWN_FORCE_INLINE inline __attribute__((always_inline))
+#endif
+#define DAWN_NOINLINE __attribute__((noinline))
 
 // MSVC
 #elif defined(_MSC_VER)
-#    define DAWN_COMPILER_MSVC
+#define DAWN_COMPILER_MSVC
 
 extern void __cdecl __debugbreak(void);
-#    define DAWN_BREAKPOINT() __debugbreak()
+#define DAWN_BREAKPOINT() __debugbreak()
 
-#    define DAWN_BUILTIN_UNREACHABLE() __assume(false)
+#define DAWN_BUILTIN_UNREACHABLE() __assume(false)
 
-#    define DAWN_DECLARE_UNUSED
-#    if defined(NDEBUG)
-#        define DAWN_FORCE_INLINE __forceinline
-#    endif
-#    define DAWN_NOINLINE __declspec(noinline)
+#define DAWN_DECLARE_UNUSED
+#if defined(NDEBUG)
+#define DAWN_FORCE_INLINE __forceinline
+#endif
+#define DAWN_NOINLINE __declspec(noinline)
 
 #else
-#    error "Unsupported compiler"
+#error "Unsupported compiler"
 #endif
 
 // It seems that (void) EXPR works on all compilers to silence the unused variable warning.
@@ -82,16 +82,16 @@
 
 // Add noop replacements for macros for features that aren't supported by the compiler.
 #if !defined(DAWN_LIKELY)
-#    define DAWN_LIKELY(X) X
+#define DAWN_LIKELY(X) X
 #endif
 #if !defined(DAWN_UNLIKELY)
-#    define DAWN_UNLIKELY(X) X
+#define DAWN_UNLIKELY(X) X
 #endif
 #if !defined(DAWN_FORCE_INLINE)
-#    define DAWN_FORCE_INLINE inline
+#define DAWN_FORCE_INLINE inline
 #endif
 #if !defined(DAWN_NOINLINE)
-#    define DAWN_NOINLINE
+#define DAWN_NOINLINE
 #endif
 
 #endif  // SRC_DAWN_COMMON_COMPILER_H_
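
A small usage sketch for the macros above; the error path is marked unlikely
so compilers that honor __builtin_expect lay out the hot path first:

    #include <cstddef>
    #include "dawn/common/Compiler.h"

    DAWN_FORCE_INLINE bool ValidateSize(size_t size, size_t limit) {
        if (DAWN_UNLIKELY(size > limit)) {
            return false;  // Cold path.
        }
        return true;
    }
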
diff --git a/src/dawn/common/CoreFoundationRef.h b/src/dawn/common/CoreFoundationRef.h
index d790c4d..3a4724e 100644
--- a/src/dawn/common/CoreFoundationRef.h
+++ b/src/dawn/common/CoreFoundationRef.h
@@ -22,12 +22,8 @@
 template <typename T>
 struct CoreFoundationRefTraits {
     static constexpr T kNullValue = nullptr;
-    static void Reference(T value) {
-        CFRetain(value);
-    }
-    static void Release(T value) {
-        CFRelease(value);
-    }
+    static void Reference(T value) { CFRetain(value); }
+    static void Release(T value) { CFRelease(value); }
 };
 
 template <typename T>
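
These traits (like IOKitRefTraits below) plug into a generic RAII ref
wrapper. A simplified stand-in for Dawn's actual wrapper, whose definition is
not part of this diff, shows the shape:

    // Simplified sketch, not Dawn's real ref class.
    template <typename T, typename Traits>
    class ScopedRef {
      public:
        explicit ScopedRef(T value = Traits::kNullValue) : mValue(value) {}
        ScopedRef(const ScopedRef& other) : mValue(other.mValue) {
            if (mValue != Traits::kNullValue) {
                Traits::Reference(mValue);
            }
        }
        ~ScopedRef() {
            if (mValue != Traits::kNullValue) {
                Traits::Release(mValue);
            }
        }
        T Get() const { return mValue; }

      private:
        T mValue;
    };
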
diff --git a/src/dawn/common/DynamicLib.cpp b/src/dawn/common/DynamicLib.cpp
index 182673e..8767ec4 100644
--- a/src/dawn/common/DynamicLib.cpp
+++ b/src/dawn/common/DynamicLib.cpp
@@ -19,14 +19,14 @@
 #include "dawn/common/Platform.h"
 
 #if DAWN_PLATFORM_WINDOWS
-#    include "dawn/common/windows_with_undefs.h"
-#    if DAWN_PLATFORM_WINUWP
-#        include "dawn/common/WindowsUtils.h"
-#    endif
+#include "dawn/common/windows_with_undefs.h"
+#if DAWN_PLATFORM_WINUWP
+#include "dawn/common/WindowsUtils.h"
+#endif
 #elif DAWN_PLATFORM_POSIX
-#    include <dlfcn.h>
+#include <dlfcn.h>
 #else
-#    error "Unsupported platform for DynamicLib"
+#error "Unsupported platform for DynamicLib"
 #endif
 
 DynamicLib::~DynamicLib() {
@@ -48,11 +48,11 @@
 
 bool DynamicLib::Open(const std::string& filename, std::string* error) {
 #if DAWN_PLATFORM_WINDOWS
-#    if DAWN_PLATFORM_WINUWP
+#if DAWN_PLATFORM_WINUWP
     mHandle = LoadPackagedLibrary(UTF8ToWStr(filename.c_str()).c_str(), 0);
-#    else
+#else
     mHandle = LoadLibraryA(filename.c_str());
-#    endif
+#endif
     if (mHandle == nullptr && error != nullptr) {
         *error = "Windows Error: " + std::to_string(GetLastError());
     }
@@ -63,7 +63,7 @@
         *error = dlerror();
     }
 #else
-#    error "Unsupported platform for DynamicLib"
+#error "Unsupported platform for DynamicLib"
 #endif
 
     return mHandle != nullptr;
@@ -79,7 +79,7 @@
 #elif DAWN_PLATFORM_POSIX
     dlclose(mHandle);
 #else
-#    error "Unsupported platform for DynamicLib"
+#error "Unsupported platform for DynamicLib"
 #endif
 
     mHandle = nullptr;
@@ -101,7 +101,7 @@
         *error = dlerror();
     }
 #else
-#    error "Unsupported platform for DynamicLib"
+#error "Unsupported platform for DynamicLib"
 #endif
 
     return proc;
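
Hypothetical usage of DynamicLib; the library and symbol names are
illustrative, and GetProc is assumed to take the same (name, error-out)
arguments as Open:

    #include <string>
    #include "dawn/common/DynamicLib.h"  // Assumed header location.

    DynamicLib lib;
    std::string error;
    if (!lib.Open("libvulkan.so.1", &error)) {
        // |error| holds the platform message (GetLastError() / dlerror()).
    }
    void* proc = lib.GetProc("vkGetInstanceProcAddr", &error);
    // The handle is closed, invalidating |proc|, when |lib| is destroyed.
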
diff --git a/src/dawn/common/GPUInfo.cpp b/src/dawn/common/GPUInfo.cpp
index 5b73517..014297d 100644
--- a/src/dawn/common/GPUInfo.cpp
+++ b/src/dawn/common/GPUInfo.cpp
@@ -20,89 +20,89 @@
 #include "dawn/common/Assert.h"
 
 namespace gpu_info {
-    namespace {
-        // Intel
-        // Referenced from the following Mesa source code:
-        // https://github.com/mesa3d/mesa/blob/master/include/pci_ids/i965_pci_ids.h
-        // gen9
-        const std::array<uint32_t, 25> Skylake = {
-            {0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913, 0x1915, 0x1916,
-             0x1917, 0x191A, 0x191B, 0x191D, 0x191E, 0x1921, 0x1923, 0x1926, 0x1927,
-             0x192A, 0x192B, 0x192D, 0x1932, 0x193A, 0x193B, 0x193D}};
-        // gen9p5
-        const std::array<uint32_t, 20> Kabylake = {
-            {0x5916, 0x5913, 0x5906, 0x5926, 0x5921, 0x5915, 0x590E, 0x591E, 0x5912, 0x5917,
-             0x5902, 0x591B, 0x593B, 0x590B, 0x591A, 0x590A, 0x591D, 0x5908, 0x5923, 0x5927}};
-        const std::array<uint32_t, 17> Coffeelake = {
-            {0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91, 0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B,
-             0x3E94, 0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8}};
-        const std::array<uint32_t, 5> Whiskylake = {{0x3EA1, 0x3EA4, 0x3EA0, 0x3EA3, 0x3EA2}};
-        const std::array<uint32_t, 21> Cometlake = {
-            {0x9B21, 0x9BA0, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA, 0x9BAB, 0x9BAC, 0x9B41, 0x9BC0,
-             0x9BC2, 0x9BC4, 0x9BC5, 0x9BC6, 0x9BC8, 0x9BCA, 0x9BCB, 0x9BCC, 0x9BE6, 0x9BF6}};
+namespace {
+// Intel
+// Referenced from the following Mesa source code:
+// https://github.com/mesa3d/mesa/blob/master/include/pci_ids/i965_pci_ids.h
+// gen9
+const std::array<uint32_t, 25> Skylake = {{0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913,
+                                           0x1915, 0x1916, 0x1917, 0x191A, 0x191B, 0x191D, 0x191E,
+                                           0x1921, 0x1923, 0x1926, 0x1927, 0x192A, 0x192B, 0x192D,
+                                           0x1932, 0x193A, 0x193B, 0x193D}};
+// gen9p5
+const std::array<uint32_t, 20> Kabylake = {{0x5916, 0x5913, 0x5906, 0x5926, 0x5921, 0x5915, 0x590E,
+                                            0x591E, 0x5912, 0x5917, 0x5902, 0x591B, 0x593B, 0x590B,
+                                            0x591A, 0x590A, 0x591D, 0x5908, 0x5923, 0x5927}};
+const std::array<uint32_t, 17> Coffeelake = {{0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91,
+                                              0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B, 0x3E94,
+                                              0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8}};
+const std::array<uint32_t, 5> Whiskylake = {{0x3EA1, 0x3EA4, 0x3EA0, 0x3EA3, 0x3EA2}};
+const std::array<uint32_t, 21> Cometlake = {
+    {0x9B21, 0x9BA0, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA, 0x9BAB, 0x9BAC, 0x9B41, 0x9BC0,
+     0x9BC2, 0x9BC4, 0x9BC5, 0x9BC6, 0x9BC8, 0x9BCA, 0x9BCB, 0x9BCC, 0x9BE6, 0x9BF6}};
 
-        // According to Intel graphics driver version schema, build number is generated from the
-        // last two fields.
-        // See https://www.intel.com/content/www/us/en/support/articles/000005654/graphics.html for
-        // more details.
-        uint32_t GetIntelD3DDriverBuildNumber(const D3DDriverVersion& driverVersion) {
-            return driverVersion[2] * 10000 + driverVersion[3];
-        }
+// According to the Intel graphics driver version schema, the build number is generated
+// from the last two fields.
+// See https://www.intel.com/content/www/us/en/support/articles/000005654/graphics.html for
+// more details.
+uint32_t GetIntelD3DDriverBuildNumber(const D3DDriverVersion& driverVersion) {
+    return driverVersion[2] * 10000 + driverVersion[3];
+}
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    bool IsAMD(PCIVendorID vendorId) {
-        return vendorId == kVendorID_AMD;
-    }
-    bool IsARM(PCIVendorID vendorId) {
-        return vendorId == kVendorID_ARM;
-    }
-    bool IsImgTec(PCIVendorID vendorId) {
-        return vendorId == kVendorID_ImgTec;
-    }
-    bool IsIntel(PCIVendorID vendorId) {
-        return vendorId == kVendorID_Intel;
-    }
-    bool IsMesa(PCIVendorID vendorId) {
-        return vendorId == kVendorID_Mesa;
-    }
-    bool IsNvidia(PCIVendorID vendorId) {
-        return vendorId == kVendorID_Nvidia;
-    }
-    bool IsQualcomm(PCIVendorID vendorId) {
-        return vendorId == kVendorID_Qualcomm;
-    }
-    bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId) {
-        return vendorId == kVendorID_Google && deviceId == kDeviceID_Swiftshader;
-    }
-    bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId) {
-        return vendorId == kVendorID_Microsoft && deviceId == kDeviceID_WARP;
+bool IsAMD(PCIVendorID vendorId) {
+    return vendorId == kVendorID_AMD;
+}
+bool IsARM(PCIVendorID vendorId) {
+    return vendorId == kVendorID_ARM;
+}
+bool IsImgTec(PCIVendorID vendorId) {
+    return vendorId == kVendorID_ImgTec;
+}
+bool IsIntel(PCIVendorID vendorId) {
+    return vendorId == kVendorID_Intel;
+}
+bool IsMesa(PCIVendorID vendorId) {
+    return vendorId == kVendorID_Mesa;
+}
+bool IsNvidia(PCIVendorID vendorId) {
+    return vendorId == kVendorID_Nvidia;
+}
+bool IsQualcomm(PCIVendorID vendorId) {
+    return vendorId == kVendorID_Qualcomm;
+}
+bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId) {
+    return vendorId == kVendorID_Google && deviceId == kDeviceID_Swiftshader;
+}
+bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId) {
+    return vendorId == kVendorID_Microsoft && deviceId == kDeviceID_WARP;
+}
+
+int CompareD3DDriverVersion(PCIVendorID vendorId,
+                            const D3DDriverVersion& version1,
+                            const D3DDriverVersion& version2) {
+    if (IsIntel(vendorId)) {
+        uint32_t buildNumber1 = GetIntelD3DDriverBuildNumber(version1);
+        uint32_t buildNumber2 = GetIntelD3DDriverBuildNumber(version2);
+        return buildNumber1 < buildNumber2 ? -1 : (buildNumber1 == buildNumber2 ? 0 : 1);
     }
 
-    int CompareD3DDriverVersion(PCIVendorID vendorId,
-                                const D3DDriverVersion& version1,
-                                const D3DDriverVersion& version2) {
-        if (IsIntel(vendorId)) {
-            uint32_t buildNumber1 = GetIntelD3DDriverBuildNumber(version1);
-            uint32_t buildNumber2 = GetIntelD3DDriverBuildNumber(version2);
-            return buildNumber1 < buildNumber2 ? -1 : (buildNumber1 == buildNumber2 ? 0 : 1);
-        }
+    // TODO(crbug.com/dawn/823): support other GPU vendors
+    UNREACHABLE();
+    return 0;
+}
 
-        // TODO(crbug.com/dawn/823): support other GPU vendors
-        UNREACHABLE();
-        return 0;
-    }
-
-    // Intel GPUs
-    bool IsSkylake(PCIDeviceID deviceId) {
-        return std::find(Skylake.cbegin(), Skylake.cend(), deviceId) != Skylake.cend();
-    }
-    bool IsKabylake(PCIDeviceID deviceId) {
-        return std::find(Kabylake.cbegin(), Kabylake.cend(), deviceId) != Kabylake.cend();
-    }
-    bool IsCoffeelake(PCIDeviceID deviceId) {
-        return (std::find(Coffeelake.cbegin(), Coffeelake.cend(), deviceId) != Coffeelake.cend()) ||
-               (std::find(Whiskylake.cbegin(), Whiskylake.cend(), deviceId) != Whiskylake.cend()) ||
-               (std::find(Cometlake.cbegin(), Cometlake.cend(), deviceId) != Cometlake.cend());
-    }
+// Intel GPUs
+bool IsSkylake(PCIDeviceID deviceId) {
+    return std::find(Skylake.cbegin(), Skylake.cend(), deviceId) != Skylake.cend();
+}
+bool IsKabylake(PCIDeviceID deviceId) {
+    return std::find(Kabylake.cbegin(), Kabylake.cend(), deviceId) != Kabylake.cend();
+}
+bool IsCoffeelake(PCIDeviceID deviceId) {
+    return (std::find(Coffeelake.cbegin(), Coffeelake.cend(), deviceId) != Coffeelake.cend()) ||
+           (std::find(Whiskylake.cbegin(), Whiskylake.cend(), deviceId) != Whiskylake.cend()) ||
+           (std::find(Cometlake.cbegin(), Cometlake.cend(), deviceId) != Cometlake.cend());
+}
 }  // namespace gpu_info
diff --git a/src/dawn/common/GPUInfo.h b/src/dawn/common/GPUInfo.h
index 9a03677..9b7f4c0 100644
--- a/src/dawn/common/GPUInfo.h
+++ b/src/dawn/common/GPUInfo.h
@@ -23,44 +23,44 @@
 
 namespace gpu_info {
 
-    static constexpr PCIVendorID kVendorID_AMD = 0x1002;
-    static constexpr PCIVendorID kVendorID_ARM = 0x13B5;
-    static constexpr PCIVendorID kVendorID_ImgTec = 0x1010;
-    static constexpr PCIVendorID kVendorID_Intel = 0x8086;
-    static constexpr PCIVendorID kVendorID_Mesa = 0x10005;
-    static constexpr PCIVendorID kVendorID_Nvidia = 0x10DE;
-    static constexpr PCIVendorID kVendorID_Qualcomm = 0x5143;
-    static constexpr PCIVendorID kVendorID_Google = 0x1AE0;
-    static constexpr PCIVendorID kVendorID_Microsoft = 0x1414;
+static constexpr PCIVendorID kVendorID_AMD = 0x1002;
+static constexpr PCIVendorID kVendorID_ARM = 0x13B5;
+static constexpr PCIVendorID kVendorID_ImgTec = 0x1010;
+static constexpr PCIVendorID kVendorID_Intel = 0x8086;
+static constexpr PCIVendorID kVendorID_Mesa = 0x10005;
+static constexpr PCIVendorID kVendorID_Nvidia = 0x10DE;
+static constexpr PCIVendorID kVendorID_Qualcomm = 0x5143;
+static constexpr PCIVendorID kVendorID_Google = 0x1AE0;
+static constexpr PCIVendorID kVendorID_Microsoft = 0x1414;
 
-    static constexpr PCIDeviceID kDeviceID_Swiftshader = 0xC0DE;
-    static constexpr PCIDeviceID kDeviceID_WARP = 0x8c;
+static constexpr PCIDeviceID kDeviceID_Swiftshader = 0xC0DE;
+static constexpr PCIDeviceID kDeviceID_WARP = 0x8c;
 
-    bool IsAMD(PCIVendorID vendorId);
-    bool IsARM(PCIVendorID vendorId);
-    bool IsImgTec(PCIVendorID vendorId);
-    bool IsIntel(PCIVendorID vendorId);
-    bool IsMesa(PCIVendorID vendorId);
-    bool IsNvidia(PCIVendorID vendorId);
-    bool IsQualcomm(PCIVendorID vendorId);
-    bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId);
-    bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId);
+bool IsAMD(PCIVendorID vendorId);
+bool IsARM(PCIVendorID vendorId);
+bool IsImgTec(PCIVendorID vendorId);
+bool IsIntel(PCIVendorID vendorId);
+bool IsMesa(PCIVendorID vendorId);
+bool IsNvidia(PCIVendorID vendorId);
+bool IsQualcomm(PCIVendorID vendorId);
+bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId);
+bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId);
 
-    using D3DDriverVersion = std::array<uint16_t, 4>;
+using D3DDriverVersion = std::array<uint16_t, 4>;
 
-    // Do comparison between two driver versions. Currently we only support the comparison between
-    // Intel D3D driver versions.
-    // - Return -1 if build number of version1 is smaller
-    // - Return 1 if build number of version1 is bigger
-    // - Return 0 if version1 and version2 represent same driver version
-    int CompareD3DDriverVersion(PCIVendorID vendorId,
-                                const D3DDriverVersion& version1,
-                                const D3DDriverVersion& version2);
+// Compare two driver versions. Currently only the comparison between Intel D3D
+// driver versions is supported.
+// - Return -1 if the build number of version1 is smaller
+// - Return 1 if the build number of version1 is bigger
+// - Return 0 if version1 and version2 represent the same driver version
+int CompareD3DDriverVersion(PCIVendorID vendorId,
+                            const D3DDriverVersion& version1,
+                            const D3DDriverVersion& version2);
 
-    // Intel architectures
-    bool IsSkylake(PCIDeviceID deviceId);
-    bool IsKabylake(PCIDeviceID deviceId);
-    bool IsCoffeelake(PCIDeviceID deviceId);
+// Intel architectures
+bool IsSkylake(PCIDeviceID deviceId);
+bool IsKabylake(PCIDeviceID deviceId);
+bool IsCoffeelake(PCIDeviceID deviceId);
 
 }  // namespace gpu_info
 #endif  // SRC_DAWN_COMMON_GPUINFO_H_
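
Per the GPUInfo.cpp hunk above, Intel D3D driver versions compare by build
number, i.e. version[2] * 10000 + version[3]; the first two fields are
ignored. The version values here are illustrative:

    #include "dawn/common/GPUInfo.h"

    const gpu_info::D3DDriverVersion older = {26, 20, 100, 7870};  // build 1007870
    const gpu_info::D3DDriverVersion newer = {30, 0, 101, 1340};   // build 1011340
    const int result = gpu_info::CompareD3DDriverVersion(
        gpu_info::kVendorID_Intel, older, newer);  // result == -1
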
diff --git a/src/dawn/common/HashUtils.h b/src/dawn/common/HashUtils.h
index 1fa421c..342c9b6 100644
--- a/src/dawn/common/HashUtils.h
+++ b/src/dawn/common/HashUtils.h
@@ -50,7 +50,7 @@
 #elif defined(DAWN_PLATFORM_32_BIT)
     const size_t offset = 0x9e3779b9;
 #else
-#    error "Unsupported platform"
+#error "Unsupported platform"
 #endif
     *hash ^= Hash(value) + offset + (*hash << 6) + (*hash >> 2);
 }
@@ -89,13 +89,13 @@
 #endif
 
 namespace std {
-    template <typename Index, size_t N>
-    struct hash<ityp::bitset<Index, N>> {
-      public:
-        size_t operator()(const ityp::bitset<Index, N>& value) const {
-            return Hash(static_cast<const std::bitset<N>&>(value));
-        }
-    };
+template <typename Index, size_t N>
+struct hash<ityp::bitset<Index, N>> {
+  public:
+    size_t operator()(const ityp::bitset<Index, N>& value) const {
+        return Hash(static_cast<const std::bitset<N>&>(value));
+    }
+};
 }  // namespace std
 
 #endif  // SRC_DAWN_COMMON_HASHUTILS_H_
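
The first HashUtils hunk above shows the body of Dawn's boost-style hash
combiner; assuming the enclosing function is HashCombine(size_t*, const T&)
(its name falls outside the hunk), typical use folds several fields into one
seed:

    #include <cstddef>
    #include <cstdint>
    #include "dawn/common/HashUtils.h"

    size_t HashExtent3D(uint32_t width, uint32_t height, uint32_t depth) {
        size_t hash = 0;
        HashCombine(&hash, width);
        HashCombine(&hash, height);
        HashCombine(&hash, depth);
        return hash;
    }
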
diff --git a/src/dawn/common/IOKitRef.h b/src/dawn/common/IOKitRef.h
index d8fe960..33367b1 100644
--- a/src/dawn/common/IOKitRef.h
+++ b/src/dawn/common/IOKitRef.h
@@ -22,12 +22,8 @@
 template <typename T>
 struct IOKitRefTraits {
     static constexpr T kNullValue = IO_OBJECT_NULL;
-    static void Reference(T value) {
-        IOObjectRetain(value);
-    }
-    static void Release(T value) {
-        IOObjectRelease(value);
-    }
+    static void Reference(T value) { IOObjectRetain(value); }
+    static void Release(T value) { IOObjectRelease(value); }
 };
 
 template <typename T>
diff --git a/src/dawn/common/LinkedList.h b/src/dawn/common/LinkedList.h
index 5227041..b9503ee 100644
--- a/src/dawn/common/LinkedList.h
+++ b/src/dawn/common/LinkedList.h
@@ -99,10 +99,8 @@
 template <typename T>
 class LinkNode {
   public:
-    LinkNode() : previous_(nullptr), next_(nullptr) {
-    }
-    LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {
-    }
+    LinkNode() : previous_(nullptr), next_(nullptr) {}
+    LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {}
 
     LinkNode(LinkNode<T>&& rhs) {
         next_ = rhs.next_;
@@ -154,22 +152,14 @@
         return true;
     }
 
-    LinkNode<T>* previous() const {
-        return previous_;
-    }
+    LinkNode<T>* previous() const { return previous_; }
 
-    LinkNode<T>* next() const {
-        return next_;
-    }
+    LinkNode<T>* next() const { return next_; }
 
     // Cast from the node-type to the value type.
-    const T* value() const {
-        return static_cast<const T*>(this);
-    }
+    const T* value() const { return static_cast<const T*>(this); }
 
-    T* value() {
-        return static_cast<T*>(this);
-    }
+    T* value() { return static_cast<T*>(this); }
 
   private:
     friend class LinkedList<T>;
@@ -183,8 +173,7 @@
     // The "root" node is self-referential, and forms the basis of a circular
     // list (root_.next() will point back to the start of the list,
     // and root_->previous() wraps around to the end of the list).
-    LinkedList() : root_(&root_, &root_) {
-    }
+    LinkedList() : root_(&root_, &root_) {}
 
     ~LinkedList() {
         // If any LinkNodes still exist in the LinkedList, there will be outstanding references to
@@ -194,9 +183,7 @@
     }
 
     // Appends |e| to the end of the linked list.
-    void Append(LinkNode<T>* e) {
-        e->InsertBefore(&root_);
-    }
+    void Append(LinkNode<T>* e) { e->InsertBefore(&root_); }
 
     // Moves all elements (in order) of the list and appends them into |l| leaving the list empty.
     void MoveInto(LinkedList<T>* l) {
@@ -212,21 +199,13 @@
         root_.previous_ = &root_;
     }
 
-    LinkNode<T>* head() const {
-        return root_.next();
-    }
+    LinkNode<T>* head() const { return root_.next(); }
 
-    LinkNode<T>* tail() const {
-        return root_.previous();
-    }
+    LinkNode<T>* tail() const { return root_.previous(); }
 
-    const LinkNode<T>* end() const {
-        return &root_;
-    }
+    const LinkNode<T>* end() const { return &root_; }
 
-    bool empty() const {
-        return head() == end();
-    }
+    bool empty() const { return head() == end(); }
 
   private:
     LinkNode<T> root_;
@@ -235,8 +214,7 @@
 template <typename T>
 class LinkedListIterator {
   public:
-    explicit LinkedListIterator(LinkNode<T>* node) : current_(node), next_(node->next()) {
-    }
+    explicit LinkedListIterator(LinkNode<T>* node) : current_(node), next_(node->next()) {}
 
     // We keep an early reference to the next node in the list so that even if the current element
     // is modified or removed from the list, we have a valid next node.
@@ -246,13 +224,9 @@
         return *this;
     }
 
-    bool operator!=(const LinkedListIterator<T>& other) const {
-        return current_ != other.current_;
-    }
+    bool operator!=(const LinkedListIterator<T>& other) const { return current_ != other.current_; }
 
-    LinkNode<T>* operator*() const {
-        return current_;
-    }
+    LinkNode<T>* operator*() const { return current_; }
 
   private:
     LinkNode<T>* current_;
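
For reference, a minimal sketch of the intrusive usage pattern this header
supports (Task is illustrative; node removal and lifetime handling are omitted):

    // The element type derives from LinkNode<T>; the list stores no copies.
    struct Task : public LinkNode<Task> {
        int id = 0;
    };

    void WalkSketch(LinkedList<Task>* list, Task* a, Task* b) {
        list->Append(a);
        list->Append(b);
        // head()/end() walk the circular list; value() casts node -> Task*.
        for (LinkNode<Task>* node = list->head(); node != list->end();
             node = node->next()) {
            node->value()->id += 1;
        }
    }
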
diff --git a/src/dawn/common/Log.cpp b/src/dawn/common/Log.cpp
index 18d4af2..ab1eb07 100644
--- a/src/dawn/common/Log.cpp
+++ b/src/dawn/common/Log.cpp
@@ -21,97 +21,96 @@
 #include "dawn/common/Platform.h"
 
 #if defined(DAWN_PLATFORM_ANDROID)
-#    include <android/log.h>
+#include <android/log.h>
 #endif
 
 namespace dawn {
 
-    namespace {
+namespace {
 
-        const char* SeverityName(LogSeverity severity) {
-            switch (severity) {
-                case LogSeverity::Debug:
-                    return "Debug";
-                case LogSeverity::Info:
-                    return "Info";
-                case LogSeverity::Warning:
-                    return "Warning";
-                case LogSeverity::Error:
-                    return "Error";
-                default:
-                    UNREACHABLE();
-                    return "";
-            }
-        }
+const char* SeverityName(LogSeverity severity) {
+    switch (severity) {
+        case LogSeverity::Debug:
+            return "Debug";
+        case LogSeverity::Info:
+            return "Info";
+        case LogSeverity::Warning:
+            return "Warning";
+        case LogSeverity::Error:
+            return "Error";
+        default:
+            UNREACHABLE();
+            return "";
+    }
+}
 
 #if defined(DAWN_PLATFORM_ANDROID)
-        android_LogPriority AndroidLogPriority(LogSeverity severity) {
-            switch (severity) {
-                case LogSeverity::Debug:
-                    return ANDROID_LOG_INFO;
-                case LogSeverity::Info:
-                    return ANDROID_LOG_INFO;
-                case LogSeverity::Warning:
-                    return ANDROID_LOG_WARN;
-                case LogSeverity::Error:
-                    return ANDROID_LOG_ERROR;
-                default:
-                    UNREACHABLE();
-                    return ANDROID_LOG_ERROR;
-            }
-        }
+android_LogPriority AndroidLogPriority(LogSeverity severity) {
+    switch (severity) {
+        case LogSeverity::Debug:
+            return ANDROID_LOG_INFO;
+        case LogSeverity::Info:
+            return ANDROID_LOG_INFO;
+        case LogSeverity::Warning:
+            return ANDROID_LOG_WARN;
+        case LogSeverity::Error:
+            return ANDROID_LOG_ERROR;
+        default:
+            UNREACHABLE();
+            return ANDROID_LOG_ERROR;
+    }
+}
 #endif  // defined(DAWN_PLATFORM_ANDROID)
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) {
+LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) {}
+
+LogMessage::~LogMessage() {
+    std::string fullMessage = mStream.str();
+
+    // If this message has been moved, its stream is empty.
+    if (fullMessage.empty()) {
+        return;
     }
 
-    LogMessage::~LogMessage() {
-        std::string fullMessage = mStream.str();
-
-        // If this message has been moved, its stream is empty.
-        if (fullMessage.empty()) {
-            return;
-        }
-
-        const char* severityName = SeverityName(mSeverity);
+    const char* severityName = SeverityName(mSeverity);
 
 #if defined(DAWN_PLATFORM_ANDROID)
-        android_LogPriority androidPriority = AndroidLogPriority(mSeverity);
-        __android_log_print(androidPriority, "Dawn", "%s: %s\n", severityName, fullMessage.c_str());
+    android_LogPriority androidPriority = AndroidLogPriority(mSeverity);
+    __android_log_print(androidPriority, "Dawn", "%s: %s\n", severityName, fullMessage.c_str());
 #else   // defined(DAWN_PLATFORM_ANDROID)
-        FILE* outputStream = stdout;
-        if (mSeverity == LogSeverity::Warning || mSeverity == LogSeverity::Error) {
-            outputStream = stderr;
-        }
+    FILE* outputStream = stdout;
+    if (mSeverity == LogSeverity::Warning || mSeverity == LogSeverity::Error) {
+        outputStream = stderr;
+    }
 
-        // Note: we use fprintf because <iostream> includes static initializers.
-        fprintf(outputStream, "%s: %s\n", severityName, fullMessage.c_str());
-        fflush(outputStream);
+    // Note: we use fprintf because <iostream> includes static initializers.
+    fprintf(outputStream, "%s: %s\n", severityName, fullMessage.c_str());
+    fflush(outputStream);
 #endif  // defined(DAWN_PLATFORM_ANDROID)
-    }
+}
 
-    LogMessage DebugLog() {
-        return LogMessage(LogSeverity::Debug);
-    }
+LogMessage DebugLog() {
+    return LogMessage(LogSeverity::Debug);
+}
 
-    LogMessage InfoLog() {
-        return LogMessage(LogSeverity::Info);
-    }
+LogMessage InfoLog() {
+    return LogMessage(LogSeverity::Info);
+}
 
-    LogMessage WarningLog() {
-        return LogMessage(LogSeverity::Warning);
-    }
+LogMessage WarningLog() {
+    return LogMessage(LogSeverity::Warning);
+}
 
-    LogMessage ErrorLog() {
-        return LogMessage(LogSeverity::Error);
-    }
+LogMessage ErrorLog() {
+    return LogMessage(LogSeverity::Error);
+}
 
-    LogMessage DebugLog(const char* file, const char* function, int line) {
-        LogMessage message = DebugLog();
-        message << file << ":" << line << "(" << function << ")";
-        return message;
-    }
+LogMessage DebugLog(const char* file, const char* function, int line) {
+    LogMessage message = DebugLog();
+    message << file << ":" << line << "(" << function << ")";
+    return message;
+}
 
 }  // namespace dawn
diff --git a/src/dawn/common/Log.h b/src/dawn/common/Log.h
index fc7481a..3b338b3 100644
--- a/src/dawn/common/Log.h
+++ b/src/dawn/common/Log.h
@@ -47,47 +47,47 @@
 
 namespace dawn {
 
-    // Log levels mostly used to signal intent where the log message is produced and used to route
-    // the message to the correct output.
-    enum class LogSeverity {
-        Debug,
-        Info,
-        Warning,
-        Error,
-    };
+// Log levels, used both to signal intent where the log message is produced and to route
+// the message to the correct output.
+enum class LogSeverity {
+    Debug,
+    Info,
+    Warning,
+    Error,
+};
 
-    // Essentially an ostringstream that will print itself in its destructor.
-    class LogMessage {
-      public:
-        explicit LogMessage(LogSeverity severity);
-        ~LogMessage();
+// Essentially an ostringstream that will print itself in its destructor.
+class LogMessage {
+  public:
+    explicit LogMessage(LogSeverity severity);
+    ~LogMessage();
 
-        LogMessage(LogMessage&& other) = default;
-        LogMessage& operator=(LogMessage&& other) = default;
+    LogMessage(LogMessage&& other) = default;
+    LogMessage& operator=(LogMessage&& other) = default;
 
-        template <typename T>
-        LogMessage& operator<<(T&& value) {
-            mStream << value;
-            return *this;
-        }
+    template <typename T>
+    LogMessage& operator<<(T&& value) {
+        mStream << value;
+        return *this;
+    }
 
-      private:
-        LogMessage(const LogMessage& other) = delete;
-        LogMessage& operator=(const LogMessage& other) = delete;
+  private:
+    LogMessage(const LogMessage& other) = delete;
+    LogMessage& operator=(const LogMessage& other) = delete;
 
-        LogSeverity mSeverity;
-        std::ostringstream mStream;
-    };
+    LogSeverity mSeverity;
+    std::ostringstream mStream;
+};
 
-    // Short-hands to create a LogMessage with the respective severity.
-    LogMessage DebugLog();
-    LogMessage InfoLog();
-    LogMessage WarningLog();
-    LogMessage ErrorLog();
+// Short-hands to create a LogMessage with the respective severity.
+LogMessage DebugLog();
+LogMessage InfoLog();
+LogMessage WarningLog();
+LogMessage ErrorLog();
 
-    // DAWN_DEBUG is a helper macro that creates a DebugLog and outputs file/line/function
-    // information
-    LogMessage DebugLog(const char* file, const char* function, int line);
+// DAWN_DEBUG is a helper macro that creates a DebugLog and outputs file/line/function
+// information.
+LogMessage DebugLog(const char* file, const char* function, int line);
 #define DAWN_DEBUG() ::dawn::DebugLog(__FILE__, __func__, __LINE__)
 
 }  // namespace dawn
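
A short usage sketch (values hypothetical): each helper returns a temporary
LogMessage, which flushes in its destructor at the end of the statement:

    void LogSketch() {
        int adapterIndex = 0;  // hypothetical value
        dawn::WarningLog() << "Adapter " << adapterIndex << " fell back to software";
        DAWN_DEBUG() << " extra context";  // prefixed with file:line(function)
    }
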
diff --git a/src/dawn/common/Math.cpp b/src/dawn/common/Math.cpp
index f0dd0a1..3ee1ba3 100644
--- a/src/dawn/common/Math.cpp
+++ b/src/dawn/common/Math.cpp
@@ -22,7 +22,7 @@
 #include "dawn/common/Platform.h"
 
 #if defined(DAWN_COMPILER_MSVC)
-#    include <intrin.h>
+#include <intrin.h>
 #endif
 
 uint32_t ScanForward(uint32_t bits) {
@@ -54,13 +54,13 @@
 uint32_t Log2(uint64_t value) {
     ASSERT(value != 0);
 #if defined(DAWN_COMPILER_MSVC)
-#    if defined(DAWN_PLATFORM_64_BIT)
+#if defined(DAWN_PLATFORM_64_BIT)
     // NOLINTNEXTLINE(runtime/int)
     unsigned long firstBitIndex = 0ul;
     unsigned char ret = _BitScanReverse64(&firstBitIndex, value);
     ASSERT(ret != 0);
     return firstBitIndex;
-#    else   // defined(DAWN_PLATFORM_64_BIT)
+#else   // defined(DAWN_PLATFORM_64_BIT)
     // NOLINTNEXTLINE(runtime/int)
     unsigned long firstBitIndex = 0ul;
     if (_BitScanReverse(&firstBitIndex, value >> 32)) {
@@ -69,10 +69,10 @@
     unsigned char ret = _BitScanReverse(&firstBitIndex, value & 0xFFFFFFFF);
     ASSERT(ret != 0);
     return firstBitIndex;
-#    endif  // defined(DAWN_PLATFORM_64_BIT)
-#else       // defined(DAWN_COMPILER_MSVC)
+#endif  // defined(DAWN_PLATFORM_64_BIT)
+#else   // defined(DAWN_COMPILER_MSVC)
     return 63 - static_cast<uint32_t>(__builtin_clzll(value));
-#endif      // defined(DAWN_COMPILER_MSVC)
+#endif  // defined(DAWN_COMPILER_MSVC)
 }
 
 uint64_t NextPowerOfTwo(uint64_t n) {
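
For reference, Log2 above computes floor(log2(value)), i.e. the index of the
most significant set bit:

    // Log2(uint64_t(1))       == 0
    // Log2(uint64_t(12))      == 3    (12 == 0b1100)
    // Log2(uint64_t(1) << 40) == 40
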
diff --git a/src/dawn/common/NSRef.h b/src/dawn/common/NSRef.h
index ddec95e..4afb5e0 100644
--- a/src/dawn/common/NSRef.h
+++ b/src/dawn/common/NSRef.h
@@ -20,7 +20,7 @@
 #import <Foundation/NSObject.h>
 
 #if !defined(__OBJC__)
-#    error "NSRef can only be used in Objective C/C++ code."
+#error "NSRef can only be used in Objective C/C++ code."
 #endif
 
 // This file contains smart pointers that automatically reference and release Objective C objects
@@ -67,12 +67,8 @@
 template <typename T>
 struct NSRefTraits {
     static constexpr T kNullValue = nullptr;
-    static void Reference(T value) {
-        [value retain];
-    }
-    static void Release(T value) {
-        [value release];
-    }
+    static void Reference(T value) { [value retain]; }
+    static void Release(T value) { [value release]; }
 };
 
 template <typename T>
@@ -80,13 +76,9 @@
   public:
     using RefBase<T*, NSRefTraits<T*>>::RefBase;
 
-    const T* operator*() const {
-        return this->Get();
-    }
+    const T* operator*() const { return this->Get(); }
 
-    T* operator*() {
-        return this->Get();
-    }
+    T* operator*() { return this->Get(); }
 };
 
 template <typename T>
@@ -104,13 +96,9 @@
   public:
     using RefBase<T, NSRefTraits<T>>::RefBase;
 
-    const T operator*() const {
-        return this->Get();
-    }
+    const T operator*() const { return this->Get(); }
 
-    T operator*() {
-        return this->Get();
-    }
+    T operator*() { return this->Get(); }
 };
 
 template <typename T>
diff --git a/src/dawn/common/Numeric.h b/src/dawn/common/Numeric.h
index a9a4521..50f6d40 100644
--- a/src/dawn/common/Numeric.h
+++ b/src/dawn/common/Numeric.h
@@ -22,17 +22,17 @@
 
 namespace detail {
 
-    template <typename T>
-    inline constexpr uint32_t u32_sizeof() {
-        static_assert(sizeof(T) <= std::numeric_limits<uint32_t>::max());
-        return uint32_t(sizeof(T));
-    }
+template <typename T>
+inline constexpr uint32_t u32_sizeof() {
+    static_assert(sizeof(T) <= std::numeric_limits<uint32_t>::max());
+    return uint32_t(sizeof(T));
+}
 
-    template <typename T>
-    inline constexpr uint32_t u32_alignof() {
-        static_assert(alignof(T) <= std::numeric_limits<uint32_t>::max());
-        return uint32_t(alignof(T));
-    }
+template <typename T>
+inline constexpr uint32_t u32_alignof() {
+    static_assert(alignof(T) <= std::numeric_limits<uint32_t>::max());
+    return uint32_t(alignof(T));
+}
 
 }  // namespace detail
 
diff --git a/src/dawn/common/Platform.h b/src/dawn/common/Platform.h
index 5e4f9d7..8e81b48 100644
--- a/src/dawn/common/Platform.h
+++ b/src/dawn/common/Platform.h
@@ -16,67 +16,67 @@
 #define SRC_DAWN_COMMON_PLATFORM_H_
 
 #if defined(_WIN32) || defined(_WIN64)
-#    include <winapifamily.h>
-#    define DAWN_PLATFORM_WINDOWS 1
-#    if WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP
-#        define DAWN_PLATFORM_WIN32 1
-#    elif WINAPI_FAMILY == WINAPI_FAMILY_PC_APP
-#        define DAWN_PLATFORM_WINUWP 1
-#    else
-#        error "Unsupported Windows platform."
-#    endif
+#include <winapifamily.h>
+#define DAWN_PLATFORM_WINDOWS 1
+#if WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP
+#define DAWN_PLATFORM_WIN32 1
+#elif WINAPI_FAMILY == WINAPI_FAMILY_PC_APP
+#define DAWN_PLATFORM_WINUWP 1
+#else
+#error "Unsupported Windows platform."
+#endif
 
 #elif defined(__linux__)
-#    define DAWN_PLATFORM_LINUX 1
-#    define DAWN_PLATFORM_POSIX 1
-#    if defined(__ANDROID__)
-#        define DAWN_PLATFORM_ANDROID 1
-#    endif
+#define DAWN_PLATFORM_LINUX 1
+#define DAWN_PLATFORM_POSIX 1
+#if defined(__ANDROID__)
+#define DAWN_PLATFORM_ANDROID 1
+#endif
 
 #elif defined(__APPLE__)
-#    define DAWN_PLATFORM_APPLE 1
-#    define DAWN_PLATFORM_POSIX 1
-#    include <TargetConditionals.h>
-#    if TARGET_OS_IPHONE
-#        define DAWN_PLATFORM_IOS
-#    elif TARGET_OS_MAC
-#        define DAWN_PLATFORM_MACOS
-#    else
-#        error "Unsupported Apple platform."
-#    endif
+#define DAWN_PLATFORM_APPLE 1
+#define DAWN_PLATFORM_POSIX 1
+#include <TargetConditionals.h>
+#if TARGET_OS_IPHONE
+#define DAWN_PLATFORM_IOS
+#elif TARGET_OS_MAC
+#define DAWN_PLATFORM_MACOS
+#else
+#error "Unsupported Apple platform."
+#endif
 
 #elif defined(__Fuchsia__)
-#    define DAWN_PLATFORM_FUCHSIA 1
-#    define DAWN_PLATFORM_POSIX 1
+#define DAWN_PLATFORM_FUCHSIA 1
+#define DAWN_PLATFORM_POSIX 1
 
 #elif defined(__EMSCRIPTEN__)
-#    define DAWN_PLATFORM_EMSCRIPTEN 1
-#    define DAWN_PLATFORM_POSIX 1
+#define DAWN_PLATFORM_EMSCRIPTEN 1
+#define DAWN_PLATFORM_POSIX 1
 
 #else
-#    error "Unsupported platform."
+#error "Unsupported platform."
 #endif
 
 // Distinguish mips32.
 #if defined(__mips__) && (_MIPS_SIM == _ABIO32) && !defined(__mips32__)
-#    define __mips32__
+#define __mips32__
 #endif
 
 // Distinguish mips64.
 #if defined(__mips__) && (_MIPS_SIM == _ABI64) && !defined(__mips64__)
-#    define __mips64__
+#define __mips64__
 #endif
 
 #if defined(_WIN64) || defined(__aarch64__) || defined(__x86_64__) || defined(__mips64__) || \
     defined(__s390x__) || defined(__PPC64__)
-#    define DAWN_PLATFORM_64_BIT 1
+#define DAWN_PLATFORM_64_BIT 1
 static_assert(sizeof(sizeof(char)) == 8, "Expect sizeof(size_t) == 8");
 #elif defined(_WIN32) || defined(__arm__) || defined(__i386__) || defined(__mips32__) || \
     defined(__s390__) || defined(__EMSCRIPTEN__)
-#    define DAWN_PLATFORM_32_BIT 1
+#define DAWN_PLATFORM_32_BIT 1
 static_assert(sizeof(sizeof(char)) == 4, "Expect sizeof(size_t) == 4");
 #else
-#    error "Unsupported platform"
+#error "Unsupported platform"
 #endif
 
 #endif  // SRC_DAWN_COMMON_PLATFORM_H_
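
A note on the static_asserts above: sizeof(...) itself yields a std::size_t, so
sizeof(sizeof(char)) equals sizeof(size_t) without Platform.h having to include
<cstddef>. Spelled out directly, the 64-bit branch amounts to:

    #include <cstddef>  // needed only in this standalone restatement
    static_assert(sizeof(std::size_t) == 8, "Expect sizeof(size_t) == 8");
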
diff --git a/src/dawn/common/RefBase.h b/src/dawn/common/RefBase.h
index 5613916..8f06f19 100644
--- a/src/dawn/common/RefBase.h
+++ b/src/dawn/common/RefBase.h
@@ -36,17 +36,13 @@
 class RefBase {
   public:
     // Default constructor and destructor.
-    RefBase() : mValue(Traits::kNullValue) {
-    }
+    RefBase() : mValue(Traits::kNullValue) {}
 
-    ~RefBase() {
-        Release(mValue);
-    }
+    ~RefBase() { Release(mValue); }
 
     // Constructors from nullptr.
     // NOLINTNEXTLINE(runtime/explicit)
-    constexpr RefBase(std::nullptr_t) : RefBase() {
-    }
+    constexpr RefBase(std::nullptr_t) : RefBase() {}
 
     RefBase<T, Traits>& operator=(std::nullptr_t) {
         Set(Traits::kNullValue);
@@ -55,9 +51,7 @@
 
     // Constructors from a value T.
     // NOLINTNEXTLINE(runtime/explicit)
-    RefBase(T value) : mValue(value) {
-        Reference(value);
-    }
+    RefBase(T value) : mValue(value) { Reference(value); }
 
     RefBase<T, Traits>& operator=(const T& value) {
         Set(value);
@@ -65,18 +59,14 @@
     }
 
     // Constructors from a RefBase<T>
-    RefBase(const RefBase<T, Traits>& other) : mValue(other.mValue) {
-        Reference(other.mValue);
-    }
+    RefBase(const RefBase<T, Traits>& other) : mValue(other.mValue) { Reference(other.mValue); }
 
     RefBase<T, Traits>& operator=(const RefBase<T, Traits>& other) {
         Set(other.mValue);
         return *this;
     }
 
-    RefBase(RefBase<T, Traits>&& other) {
-        mValue = other.Detach();
-    }
+    RefBase(RefBase<T, Traits>&& other) { mValue = other.Detach(); }
 
     RefBase<T, Traits>& operator=(RefBase<T, Traits>&& other) {
         if (&other != this) {
@@ -113,28 +103,16 @@
     }
 
     // Comparison operators.
-    bool operator==(const T& other) const {
-        return mValue == other;
-    }
+    bool operator==(const T& other) const { return mValue == other; }
 
-    bool operator!=(const T& other) const {
-        return mValue != other;
-    }
+    bool operator!=(const T& other) const { return mValue != other; }
 
-    const T operator->() const {
-        return mValue;
-    }
-    T operator->() {
-        return mValue;
-    }
+    const T operator->() const { return mValue; }
+    T operator->() { return mValue; }
 
     // Smart pointer methods.
-    const T& Get() const {
-        return mValue;
-    }
-    T& Get() {
-        return mValue;
-    }
+    const T& Get() const { return mValue; }
+    T& Get() { return mValue; }
 
     [[nodiscard]] T Detach() {
         T value{std::move(mValue)};
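
The Traits parameter is RefBase's customization point, as IOKitRefTraits and
NSRefTraits elsewhere in this CL show. A minimal hypothetical traits type:

    // FakeHandle and FakeHandleTraits are illustrative only.
    using FakeHandle = int;
    struct FakeHandleTraits {
        static constexpr FakeHandle kNullValue = 0;
        static void Reference(FakeHandle) {}  // would retain a real handle
        static void Release(FakeHandle) {}    // would release a real handle
    };
    using FakeHandleRef = RefBase<FakeHandle, FakeHandleTraits>;
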
diff --git a/src/dawn/common/RefCounted.h b/src/dawn/common/RefCounted.h
index 0593544..1ee413d 100644
--- a/src/dawn/common/RefCounted.h
+++ b/src/dawn/common/RefCounted.h
@@ -45,12 +45,8 @@
 template <typename T>
 struct RefCountedTraits {
     static constexpr T* kNullValue = nullptr;
-    static void Reference(T* value) {
-        value->Reference();
-    }
-    static void Release(T* value) {
-        value->Release();
-    }
+    static void Reference(T* value) { value->Reference(); }
+    static void Release(T* value) { value->Release(); }
 };
 
 template <typename T>
diff --git a/src/dawn/common/Result.cpp b/src/dawn/common/Result.cpp
index 2101e47..c009df6 100644
--- a/src/dawn/common/Result.cpp
+++ b/src/dawn/common/Result.cpp
@@ -17,14 +17,14 @@
 // Implementation details of the tagged pointer Results
 namespace detail {
 
-    intptr_t MakePayload(const void* pointer, PayloadType type) {
-        intptr_t payload = reinterpret_cast<intptr_t>(pointer);
-        ASSERT((payload & 3) == 0);
-        return payload | type;
-    }
+intptr_t MakePayload(const void* pointer, PayloadType type) {
+    intptr_t payload = reinterpret_cast<intptr_t>(pointer);
+    ASSERT((payload & 3) == 0);
+    return payload | type;
+}
 
-    PayloadType GetPayloadType(intptr_t payload) {
-        return static_cast<PayloadType>(payload & 3);
-    }
+PayloadType GetPayloadType(intptr_t payload) {
+    return static_cast<PayloadType>(payload & 3);
+}
 
 }  // namespace detail
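
The two low bits are free for the tag because payload pointers are required to
be at least 4-byte aligned (Result.h static_asserts this). A worked round trip
with an illustrative address:

    // MakePayload(p, Error) with a 4-byte-aligned p == 0x1000:
    //   0x1000 | 1   -> 0x1001   (tag stored in the low bits)
    // GetPayloadType(0x1001):
    //   0x1001 & 3   -> 1        (Error)
    // GetErrorFromPayload(0x1001):
    //   0x1001 ^ 1   -> 0x1000   (original pointer recovered)
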
diff --git a/src/dawn/common/Result.h b/src/dawn/common/Result.h
index 82ac894..849cd30 100644
--- a/src/dawn/common/Result.h
+++ b/src/dawn/common/Result.h
@@ -63,7 +63,7 @@
     Result();
     Result(std::unique_ptr<E> error);
 
-    Result(Result<void, E> && other);
+    Result(Result<void, E>&& other);
     Result<void, E>& operator=(Result<void, E>&& other);
 
     ~Result();
@@ -89,23 +89,23 @@
 // tagged pointer. The tag for Success is 0 so that returning the value is fastest.
 
 namespace detail {
-    // Utility functions to manipulate the tagged pointer. Some of them don't need to be templated
-    // but we really want them inlined so we keep them in the headers
-    enum PayloadType {
-        Success = 0,
-        Error = 1,
-        Empty = 2,
-    };
+// Utility functions to manipulate the tagged pointer. Some of them don't need to be templated,
+// but we really want them inlined, so we keep them in the headers.
+enum PayloadType {
+    Success = 0,
+    Error = 1,
+    Empty = 2,
+};
 
-    intptr_t MakePayload(const void* pointer, PayloadType type);
-    PayloadType GetPayloadType(intptr_t payload);
+intptr_t MakePayload(const void* pointer, PayloadType type);
+PayloadType GetPayloadType(intptr_t payload);
 
-    template <typename T>
-    static T* GetSuccessFromPayload(intptr_t payload);
-    template <typename E>
-    static E* GetErrorFromPayload(intptr_t payload);
+template <typename T>
+static T* GetSuccessFromPayload(intptr_t payload);
+template <typename E>
+static E* GetErrorFromPayload(intptr_t payload);
 
-    constexpr static intptr_t kEmptyPayload = Empty;
+constexpr static intptr_t kEmptyPayload = Empty;
 }  // namespace detail
 
 template <typename T, typename E>
@@ -116,12 +116,12 @@
     static_assert(alignof_if_defined_else_default<E, 4> >= 4,
                   "Result<T*, E*> reserves two bits for tagging pointers");
 
-    Result(T * success);
+    Result(T* success);
     Result(std::unique_ptr<E> error);
 
     // Support returning a Result<T*, E*> from a Result<TChild*, E*>
     template <typename TChild>
-    Result(Result<TChild*, E> && other);
+    Result(Result<TChild*, E>&& other);
     template <typename TChild>
     Result<T*, E>& operator=(Result<TChild*, E>&& other);
 
@@ -151,7 +151,7 @@
     Result(const T* success);
     Result(std::unique_ptr<E> error);
 
-    Result(Result<const T*, E> && other);
+    Result(Result<const T*, E>&& other);
     Result<const T*, E>& operator=(Result<const T*, E>&& other);
 
     ~Result();
@@ -178,13 +178,13 @@
                   "Result<Ref<T>, E> reserves two bits for tagging pointers");
 
     template <typename U>
-    Result(Ref<U> && success);
+    Result(Ref<U>&& success);
     template <typename U>
     Result(const Ref<U>& success);
     Result(std::unique_ptr<E> error);
 
     template <typename U>
-    Result(Result<Ref<U>, E> && other);
+    Result(Result<Ref<U>, E>&& other);
     template <typename U>
     Result<Ref<U>, E>& operator=(Result<Ref<U>, E>&& other);
 
@@ -209,10 +209,10 @@
 template <typename T, typename E>
 class [[nodiscard]] Result {
   public:
-    Result(T && success);
+    Result(T&& success);
     Result(std::unique_ptr<E> error);
 
-    Result(Result<T, E> && other);
+    Result(Result<T, E>&& other);
     Result<T, E>& operator=(Result<T, E>&& other);
 
     ~Result();
@@ -237,16 +237,13 @@
 
 // Implementation of Result<void, E>
 template <typename E>
-Result<void, E>::Result() {
-}
+Result<void, E>::Result() {}
 
 template <typename E>
-Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) {
-}
+Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) {}
 
 template <typename E>
-Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) {
-}
+Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) {}
 
 template <typename E>
 Result<void, E>& Result<void, E>::operator=(Result<void, E>&& other) {
@@ -271,8 +268,7 @@
 }
 
 template <typename E>
-void Result<void, E>::AcquireSuccess() {
-}
+void Result<void, E>::AcquireSuccess() {}
 
 template <typename E>
 std::unique_ptr<E> Result<void, E>::AcquireError() {
@@ -282,29 +278,27 @@
 // Implementation details of the tagged pointer Results
 namespace detail {
 
-    template <typename T>
-    T* GetSuccessFromPayload(intptr_t payload) {
-        ASSERT(GetPayloadType(payload) == Success);
-        return reinterpret_cast<T*>(payload);
-    }
+template <typename T>
+T* GetSuccessFromPayload(intptr_t payload) {
+    ASSERT(GetPayloadType(payload) == Success);
+    return reinterpret_cast<T*>(payload);
+}
 
-    template <typename E>
-    E* GetErrorFromPayload(intptr_t payload) {
-        ASSERT(GetPayloadType(payload) == Error);
-        return reinterpret_cast<E*>(payload ^ 1);
-    }
+template <typename E>
+E* GetErrorFromPayload(intptr_t payload) {
+    ASSERT(GetPayloadType(payload) == Error);
+    return reinterpret_cast<E*>(payload ^ 1);
+}
 
 }  // namespace detail
 
 // Implementation of Result<T*, E>
 template <typename T, typename E>
-Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {
-}
+Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {}
 
 template <typename T, typename E>
 Result<T*, E>::Result(std::unique_ptr<E> error)
-    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {}
 
 template <typename T, typename E>
 template <typename TChild>
@@ -355,13 +349,11 @@
 // Implementation of Result<const T*, E*>
 template <typename T, typename E>
 Result<const T*, E>::Result(const T* success)
-    : mPayload(detail::MakePayload(success, detail::Success)) {
-}
+    : mPayload(detail::MakePayload(success, detail::Success)) {}
 
 template <typename T, typename E>
 Result<const T*, E>::Result(std::unique_ptr<E> error)
-    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {}
 
 template <typename T, typename E>
 Result<const T*, E>::Result(Result<const T*, E>&& other) : mPayload(other.mPayload) {
@@ -415,13 +407,11 @@
 
 template <typename T, typename E>
 template <typename U>
-Result<Ref<T>, E>::Result(const Ref<U>& success) : Result(Ref<U>(success)) {
-}
+Result<Ref<T>, E>::Result(const Ref<U>& success) : Result(Ref<U>(success)) {}
 
 template <typename T, typename E>
 Result<Ref<T>, E>::Result(std::unique_ptr<E> error)
-    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {}
 
 template <typename T, typename E>
 template <typename U>
@@ -473,12 +463,10 @@
 
 // Implementation of Result<T, E>
 template <typename T, typename E>
-Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success)) {
-}
+Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success)) {}
 
 template <typename T, typename E>
-Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) {
-}
+Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) {}
 
 template <typename T, typename E>
 Result<T, E>::~Result() {
diff --git a/src/dawn/common/SerialStorage.h b/src/dawn/common/SerialStorage.h
index 2eae0ad..0d4c8b5 100644
--- a/src/dawn/common/SerialStorage.h
+++ b/src/dawn/common/SerialStorage.h
@@ -193,8 +193,7 @@
 template <typename Derived>
 SerialStorage<Derived>::BeginEnd::BeginEnd(typename SerialStorage<Derived>::StorageIterator start,
                                            typename SerialStorage<Derived>::StorageIterator end)
-    : mStartIt(start), mEndIt(end) {
-}
+    : mStartIt(start), mEndIt(end) {}
 
 template <typename Derived>
 typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::begin() const {
@@ -210,8 +209,7 @@
 
 template <typename Derived>
 SerialStorage<Derived>::Iterator::Iterator(typename SerialStorage<Derived>::StorageIterator start)
-    : mStorageIterator(start), mSerialIterator(nullptr) {
-}
+    : mStorageIterator(start), mSerialIterator(nullptr) {}
 
 template <typename Derived>
 typename SerialStorage<Derived>::Iterator& SerialStorage<Derived>::Iterator::operator++() {
@@ -257,8 +255,7 @@
 SerialStorage<Derived>::ConstBeginEnd::ConstBeginEnd(
     typename SerialStorage<Derived>::ConstStorageIterator start,
     typename SerialStorage<Derived>::ConstStorageIterator end)
-    : mStartIt(start), mEndIt(end) {
-}
+    : mStartIt(start), mEndIt(end) {}
 
 template <typename Derived>
 typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBeginEnd::begin()
@@ -276,8 +273,7 @@
 template <typename Derived>
 SerialStorage<Derived>::ConstIterator::ConstIterator(
     typename SerialStorage<Derived>::ConstStorageIterator start)
-    : mStorageIterator(start), mSerialIterator(nullptr) {
-}
+    : mStorageIterator(start), mSerialIterator(nullptr) {}
 
 template <typename Derived>
 typename SerialStorage<Derived>::ConstIterator&
diff --git a/src/dawn/common/SlabAllocator.cpp b/src/dawn/common/SlabAllocator.cpp
index 23540f5..b4d1827 100644
--- a/src/dawn/common/SlabAllocator.cpp
+++ b/src/dawn/common/SlabAllocator.cpp
@@ -25,19 +25,16 @@
 // IndexLinkNode
 
 SlabAllocatorImpl::IndexLinkNode::IndexLinkNode(Index index, Index nextIndex)
-    : index(index), nextIndex(nextIndex) {
-}
+    : index(index), nextIndex(nextIndex) {}
 
 // Slab
 
 SlabAllocatorImpl::Slab::Slab(char allocation[], IndexLinkNode* head)
-    : allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) {
-}
+    : allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) {}
 
 SlabAllocatorImpl::Slab::Slab(Slab&& rhs) = default;
 
-SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {
-}
+SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {}
 
 SlabAllocatorImpl::SentinelSlab::SentinelSlab(SentinelSlab&& rhs) = default;
 
@@ -83,8 +80,7 @@
       mTotalAllocationSize(rhs.mTotalAllocationSize),
       mAvailableSlabs(std::move(rhs.mAvailableSlabs)),
       mFullSlabs(std::move(rhs.mFullSlabs)),
-      mRecycledSlabs(std::move(rhs.mRecycledSlabs)) {
-}
+      mRecycledSlabs(std::move(rhs.mRecycledSlabs)) {}
 
 SlabAllocatorImpl::~SlabAllocatorImpl() = default;
 
diff --git a/src/dawn/common/SlabAllocator.h b/src/dawn/common/SlabAllocator.h
index c94bc25..e828dea 100644
--- a/src/dawn/common/SlabAllocator.h
+++ b/src/dawn/common/SlabAllocator.h
@@ -168,8 +168,7 @@
     SlabAllocator(size_t totalObjectBytes,
                   uint32_t objectSize = u32_sizeof<T>,
                   uint32_t objectAlignment = u32_alignof<T>)
-        : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {
-    }
+        : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {}
 
     template <typename... Args>
     T* Allocate(Args&&... args) {
@@ -177,9 +176,7 @@
         return new (ptr) T(std::forward<Args>(args)...);
     }
 
-    void Deallocate(T* object) {
-        SlabAllocatorImpl::Deallocate(object);
-    }
+    void Deallocate(T* object) { SlabAllocatorImpl::Deallocate(object); }
 };
 
 #endif  // SRC_DAWN_COMMON_SLABALLOCATOR_H_
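
Usage, in sketch form (Widget is illustrative): Allocate() placement-news a T
out of the current slab and Deallocate() returns it to the slab's free list:

    struct Widget {
        explicit Widget(int id) : id(id) {}
        int id;
    };

    void SlabSketch() {
        SlabAllocator<Widget> allocator(4096);  // ~4KB of Widgets per slab
        Widget* w = allocator.Allocate(42);     // forwards to Widget(42)
        allocator.Deallocate(w);
    }
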
diff --git a/src/dawn/common/StackContainer.h b/src/dawn/common/StackContainer.h
index f531261..ba3bfae 100644
--- a/src/dawn/common/StackContainer.h
+++ b/src/dawn/common/StackContainer.h
@@ -41,16 +41,11 @@
     // maintaining this for as long as any containers using this allocator are
     // live.
     struct Source {
-        Source() : used_stack_buffer_(false) {
-        }
+        Source() : used_stack_buffer_(false) {}
 
         // Casts the buffer in its right type.
-        T* stack_buffer() {
-            return reinterpret_cast<T*>(stack_buffer_);
-        }
-        const T* stack_buffer() const {
-            return reinterpret_cast<const T*>(&stack_buffer_);
-        }
+        T* stack_buffer() { return reinterpret_cast<T*>(stack_buffer_); }
+        const T* stack_buffer() const { return reinterpret_cast<const T*>(&stack_buffer_); }
 
         // The buffer itself. It is not of type T because we don't want the
         // constructors and destructors to be automatically called. Define a POD
@@ -73,8 +68,7 @@
 
     // For the straight up copy c-tor, we can share storage.
     StackAllocator(const StackAllocator<T, stack_capacity>& rhs)
-        : std::allocator<T>(), source_(rhs.source_) {
-    }
+        : std::allocator<T>(), source_(rhs.source_) {}
 
     // ISO C++ requires the following constructor to be defined,
     // and std::vector in VC++2008SP1 Release fails with an error
@@ -84,18 +78,15 @@
     // no guarantee that the Source buffer of Ts is large enough
     // for Us.
     template <typename U, size_t other_capacity>
-    StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {
-    }
+    StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {}
 
     // This constructor must exist. It creates a default allocator that doesn't
     // actually have a stack buffer. glibc's std::string() will compare the
     // current allocator against the default-constructed allocator, so this
     // should be fast.
-    StackAllocator() : source_(nullptr) {
-    }
+    StackAllocator() : source_(nullptr) {}
 
-    explicit StackAllocator(Source* source) : source_(source) {
-    }
+    explicit StackAllocator(Source* source) : source_(source) {}
 
     // Actually do the allocation. Use the stack buffer if nobody has used it yet
     // and the size requested fits. Otherwise, fall through to the standard
@@ -154,28 +145,18 @@
     // shorter lifetimes than the source. The copy will share the same allocator
     // and therefore the same stack buffer as the original. Use std::copy to
     // copy into a "real" container for longer-lived objects.
-    ContainerType& container() {
-        return container_;
-    }
-    const ContainerType& container() const {
-        return container_;
-    }
+    ContainerType& container() { return container_; }
+    const ContainerType& container() const { return container_; }
 
     // Support operator-> to get to the container. This allows nicer syntax like:
     //   StackContainer<...> foo;
     //   std::sort(foo->begin(), foo->end());
-    ContainerType* operator->() {
-        return &container_;
-    }
-    const ContainerType* operator->() const {
-        return &container_;
-    }
+    ContainerType* operator->() { return &container_; }
+    const ContainerType* operator->() const { return &container_; }
 
     // Retrieves the stack source so that unit tests can verify that the
     // buffer is being used properly.
-    const typename Allocator::Source& stack_data() const {
-        return stack_data_;
-    }
+    const typename Allocator::Source& stack_data() const { return stack_data_; }
 
   protected:
     typename Allocator::Source stack_data_;
@@ -225,8 +206,7 @@
     : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity> {
   public:
     StackVector()
-        : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {
-    }
+        : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {}
 
     // We need to put this in STL containers sometimes, which requires a copy
     // constructor. We can't call the regular copy constructor because that will
@@ -244,12 +224,8 @@
 
     // Vectors are commonly indexed, which isn't very convenient even with
     // operator-> (using "->at()" does exception stuff we don't want).
-    T& operator[](size_t i) {
-        return this->container().operator[](i);
-    }
-    const T& operator[](size_t i) const {
-        return this->container().operator[](i);
-    }
+    T& operator[](size_t i) { return this->container().operator[](i); }
+    const T& operator[](size_t i) const { return this->container().operator[](i); }
 
   private:
     // StackVector(const StackVector& rhs) = delete;
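
In sketch form (names illustrative): operator-> reaches the wrapped std::vector,
and the first stack_capacity elements live in the stack buffer:

    #include <algorithm>

    void StackVectorSketch() {
        StackVector<int, 8> values;  // first 8 ints avoid the heap
        values->push_back(2);
        values->push_back(1);
        std::sort(values->begin(), values->end());
        int smallest = values[0];  // operator[] indexes the wrapped vector
        (void)smallest;
    }
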
diff --git a/src/dawn/common/SystemUtils.cpp b/src/dawn/common/SystemUtils.cpp
index c8df8af..2d47fd1 100644
--- a/src/dawn/common/SystemUtils.cpp
+++ b/src/dawn/common/SystemUtils.cpp
@@ -18,17 +18,17 @@
 #include "dawn/common/Log.h"
 
 #if defined(DAWN_PLATFORM_WINDOWS)
-#    include <Windows.h>
-#    include <vector>
+#include <Windows.h>
+#include <vector>
 #elif defined(DAWN_PLATFORM_LINUX)
-#    include <dlfcn.h>
-#    include <limits.h>
-#    include <unistd.h>
-#    include <cstdlib>
+#include <dlfcn.h>
+#include <limits.h>
+#include <unistd.h>
+#include <cstdlib>
 #elif defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
-#    include <dlfcn.h>
-#    include <mach-o/dyld.h>
-#    include <vector>
+#include <dlfcn.h>
+#include <mach-o/dyld.h>
+#include <vector>
 #endif
 
 #include <array>
@@ -84,7 +84,7 @@
     return setenv(variableName, value, 1) == 0;
 }
 #else
-#    error "Implement Get/SetEnvironmentVar for your platform."
+#error "Implement Get/SetEnvironmentVar for your platform."
 #endif
 
 #if defined(DAWN_PLATFORM_WINDOWS)
@@ -134,7 +134,7 @@
     return {};
 }
 #else
-#    error "Implement GetExecutablePath for your platform."
+#error "Implement GetExecutablePath for your platform."
 #endif
 
 std::optional<std::string> GetExecutableDirectory() {
@@ -168,15 +168,15 @@
     static int placeholderSymbol = 0;
     HMODULE module = nullptr;
 // GetModuleHandleEx is unavailable on UWP
-#    if defined(DAWN_IS_WINUWP)
+#if defined(DAWN_IS_WINUWP)
     return {};
-#    else
+#else
     if (!GetModuleHandleExA(
             GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
             reinterpret_cast<LPCSTR>(&placeholderSymbol), &module)) {
         return {};
     }
-#    endif
+#endif
     return GetHModulePath(module);
 }
 #elif defined(DAWN_PLATFORM_FUCHSIA)
@@ -188,7 +188,7 @@
     return {};
 }
 #else
-#    error "Implement GetModulePath for your platform."
+#error "Implement GetModulePath for your platform."
 #endif
 
 std::optional<std::string> GetModuleDirectory() {
@@ -208,8 +208,7 @@
 ScopedEnvironmentVar::ScopedEnvironmentVar(const char* variableName, const char* value)
     : mName(variableName),
       mOriginalValue(GetEnvironmentVar(variableName)),
-      mIsSet(SetEnvironmentVar(variableName, value)) {
-}
+      mIsSet(SetEnvironmentVar(variableName, value)) {}
 
 ScopedEnvironmentVar::~ScopedEnvironmentVar() {
     if (mIsSet) {
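
ScopedEnvironmentVar above is an RAII guard; a hypothetical sketch (the
variable name is illustrative):

    void EnvSketch() {
        ScopedEnvironmentVar var("MY_TEST_VAR", "1");
        // ... code under test reads MY_TEST_VAR here ...
    }  // destructor restores the previous state
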
diff --git a/src/dawn/common/TypedInteger.h b/src/dawn/common/TypedInteger.h
index 4844419..b9d43a4 100644
--- a/src/dawn/common/TypedInteger.h
+++ b/src/dawn/common/TypedInteger.h
@@ -50,8 +50,8 @@
 //     uint32_t aValue = static_cast<uint32_t>(a);
 //
 namespace detail {
-    template <typename Tag, typename T>
-    class TypedIntegerImpl;
+template <typename Tag, typename T>
+class TypedIntegerImpl;
 }  // namespace detail
 
 template <typename Tag, typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
@@ -62,200 +62,198 @@
 #endif
 
 namespace detail {
-    template <typename Tag, typename T>
-    class alignas(T) TypedIntegerImpl {
-        static_assert(std::is_integral<T>::value, "TypedInteger must be integral");
-        T mValue;
+template <typename Tag, typename T>
+class alignas(T) TypedIntegerImpl {
+    static_assert(std::is_integral<T>::value, "TypedInteger must be integral");
+    T mValue;
 
-      public:
-        constexpr TypedIntegerImpl() : mValue(0) {
-            static_assert(alignof(TypedIntegerImpl) == alignof(T));
-            static_assert(sizeof(TypedIntegerImpl) == sizeof(T));
-        }
+  public:
+    constexpr TypedIntegerImpl() : mValue(0) {
+        static_assert(alignof(TypedIntegerImpl) == alignof(T));
+        static_assert(sizeof(TypedIntegerImpl) == sizeof(T));
+    }
 
-        // Construction from non-narrowing integral types.
-        template <typename I,
-                  typename = std::enable_if_t<
-                      std::is_integral<I>::value &&
-                      std::numeric_limits<I>::max() <= std::numeric_limits<T>::max() &&
-                      std::numeric_limits<I>::min() >= std::numeric_limits<T>::min()>>
-        explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) {
-        }
+    // Construction from non-narrowing integral types.
+    template <typename I,
+              typename =
+                  std::enable_if_t<std::is_integral<I>::value &&
+                                   std::numeric_limits<I>::max() <= std::numeric_limits<T>::max() &&
+                                   std::numeric_limits<I>::min() >= std::numeric_limits<T>::min()>>
+    explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) {}
 
-        // Allow explicit casts only to the underlying type. If you're casting out of an
-        // TypedInteger, you should know what what you're doing, and exactly what type you
-        // expect.
-        explicit constexpr operator T() const {
-            return static_cast<T>(this->mValue);
-        }
+    // Allow explicit casts only to the underlying type. If you're casting out of a
+    // TypedInteger, you should know what you're doing, and exactly what type you
+    // expect.
+    explicit constexpr operator T() const { return static_cast<T>(this->mValue); }
 
 // Same-tag TypedInteger comparison operators
-#define TYPED_COMPARISON(op)                                        \
-    constexpr bool operator op(const TypedIntegerImpl& rhs) const { \
-        return mValue op rhs.mValue;                                \
-    }
-        TYPED_COMPARISON(<)
-        TYPED_COMPARISON(<=)
-        TYPED_COMPARISON(>)
-        TYPED_COMPARISON(>=)
-        TYPED_COMPARISON(==)
-        TYPED_COMPARISON(!=)
+#define TYPED_COMPARISON(op) \
+    constexpr bool operator op(const TypedIntegerImpl& rhs) const { return mValue op rhs.mValue; }
+    TYPED_COMPARISON(<)
+    TYPED_COMPARISON(<=)
+    TYPED_COMPARISON(>)
+    TYPED_COMPARISON(>=)
+    TYPED_COMPARISON(==)
+    TYPED_COMPARISON(!=)
 #undef TYPED_COMPARISON
 
-        // Increment / decrement operators for for-loop iteration
-        constexpr TypedIntegerImpl& operator++() {
-            ASSERT(this->mValue < std::numeric_limits<T>::max());
-            ++this->mValue;
-            return *this;
+    // Increment / decrement operators for for-loop iteration
+    constexpr TypedIntegerImpl& operator++() {
+        ASSERT(this->mValue < std::numeric_limits<T>::max());
+        ++this->mValue;
+        return *this;
+    }
+
+    constexpr TypedIntegerImpl operator++(int) {
+        TypedIntegerImpl ret = *this;
+
+        ASSERT(this->mValue < std::numeric_limits<T>::max());
+        ++this->mValue;
+        return ret;
+    }
+
+    constexpr TypedIntegerImpl& operator--() {
+        ASSERT(this->mValue > std::numeric_limits<T>::min());
+        --this->mValue;
+        return *this;
+    }
+
+    constexpr TypedIntegerImpl operator--(int) {
+        TypedIntegerImpl ret = *this;
+
+        ASSERT(this->mValue > std::numeric_limits<T>::min());
+        --this->mValue;
+        return ret;
+    }
+
+    template <typename T2 = T>
+    static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))> AddImpl(
+        TypedIntegerImpl<Tag, T> lhs,
+        TypedIntegerImpl<Tag, T2> rhs) {
+        static_assert(std::is_same<T, T2>::value);
+
+        // Overflow would wrap around
+        ASSERT(lhs.mValue + rhs.mValue >= lhs.mValue);
+        return lhs.mValue + rhs.mValue;
+    }
+
+    template <typename T2 = T>
+    static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))> AddImpl(
+        TypedIntegerImpl<Tag, T> lhs,
+        TypedIntegerImpl<Tag, T2> rhs) {
+        static_assert(std::is_same<T, T2>::value);
+
+        if (lhs.mValue > 0) {
+            // rhs is positive: |rhs| is at most the distance between max and |lhs|.
+            // rhs is negative: (positive + negative) won't overflow
+            ASSERT(rhs.mValue <= std::numeric_limits<T>::max() - lhs.mValue);
+        } else {
+            // rhs is positive: (negative + positive) won't underflow
+            // rhs is negative: |rhs| isn't less than the (negative) distance between min
+            // and |lhs|
+            ASSERT(rhs.mValue >= std::numeric_limits<T>::min() - lhs.mValue);
         }
+        return lhs.mValue + rhs.mValue;
+    }
 
-        constexpr TypedIntegerImpl operator++(int) {
-            TypedIntegerImpl ret = *this;
+    template <typename T2 = T>
+    static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))> SubImpl(
+        TypedIntegerImpl<Tag, T> lhs,
+        TypedIntegerImpl<Tag, T2> rhs) {
+        static_assert(std::is_same<T, T2>::value);
 
-            ASSERT(this->mValue < std::numeric_limits<T>::max());
-            ++this->mValue;
-            return ret;
+        // Overflow would wrap around
+        ASSERT(lhs.mValue - rhs.mValue <= lhs.mValue);
+        return lhs.mValue - rhs.mValue;
+    }
+
+    template <typename T2 = T>
+    static constexpr std::enable_if_t<std::is_signed<T>::value, decltype(T(0) - T2(0))> SubImpl(
+        TypedIntegerImpl<Tag, T> lhs,
+        TypedIntegerImpl<Tag, T2> rhs) {
+        static_assert(std::is_same<T, T2>::value);
+
+        if (lhs.mValue > 0) {
+            // rhs is positive: positive minus positive won't overflow
+            // rhs is negative: |rhs| isn't less than the (negative) distance between |lhs|
+            // and max.
+            ASSERT(rhs.mValue >= lhs.mValue - std::numeric_limits<T>::max());
+        } else {
+            // rhs is positive: |rhs| is at most the distance between min and |lhs|
+            // rhs is negative: negative minus negative won't overflow
+            ASSERT(rhs.mValue <= lhs.mValue - std::numeric_limits<T>::min());
         }
+        return lhs.mValue - rhs.mValue;
+    }
 
-        constexpr TypedIntegerImpl& operator--() {
-            ASSERT(this->mValue > std::numeric_limits<T>::min());
-            --this->mValue;
-            return *this;
-        }
+    template <typename T2 = T>
+    constexpr std::enable_if_t<std::is_signed<T2>::value, TypedIntegerImpl> operator-() const {
+        static_assert(std::is_same<T, T2>::value);
+        // The negation of the most negative value cannot be represented.
+        ASSERT(this->mValue != std::numeric_limits<T>::min());
+        return TypedIntegerImpl(-this->mValue);
+    }
 
-        constexpr TypedIntegerImpl operator--(int) {
-            TypedIntegerImpl ret = *this;
+    constexpr TypedIntegerImpl operator+(TypedIntegerImpl rhs) const {
+        auto result = AddImpl(*this, rhs);
+        static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Add instead.");
+        return TypedIntegerImpl(result);
+    }
 
-            ASSERT(this->mValue > std::numeric_limits<T>::min());
-            --this->mValue;
-            return ret;
-        }
-
-        template <typename T2 = T>
-        static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))>
-        AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
-            static_assert(std::is_same<T, T2>::value);
-
-            // Overflow would wrap around
-            ASSERT(lhs.mValue + rhs.mValue >= lhs.mValue);
-            return lhs.mValue + rhs.mValue;
-        }
-
-        template <typename T2 = T>
-        static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))>
-        AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
-            static_assert(std::is_same<T, T2>::value);
-
-            if (lhs.mValue > 0) {
-                // rhs is positive: |rhs| is at most the distance between max and |lhs|.
-                // rhs is negative: (positive + negative) won't overflow
-                ASSERT(rhs.mValue <= std::numeric_limits<T>::max() - lhs.mValue);
-            } else {
-                // rhs is postive: (negative + positive) won't underflow
-                // rhs is negative: |rhs| isn't less than the (negative) distance between min
-                // and |lhs|
-                ASSERT(rhs.mValue >= std::numeric_limits<T>::min() - lhs.mValue);
-            }
-            return lhs.mValue + rhs.mValue;
-        }
-
-        template <typename T2 = T>
-        static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))>
-        SubImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
-            static_assert(std::is_same<T, T2>::value);
-
-            // Overflow would wrap around
-            ASSERT(lhs.mValue - rhs.mValue <= lhs.mValue);
-            return lhs.mValue - rhs.mValue;
-        }
-
-        template <typename T2 = T>
-        static constexpr std::enable_if_t<std::is_signed<T>::value, decltype(T(0) - T2(0))> SubImpl(
-            TypedIntegerImpl<Tag, T> lhs,
-            TypedIntegerImpl<Tag, T2> rhs) {
-            static_assert(std::is_same<T, T2>::value);
-
-            if (lhs.mValue > 0) {
-                // rhs is positive: positive minus positive won't overflow
-                // rhs is negative: |rhs| isn't less than the (negative) distance between |lhs|
-                // and max.
-                ASSERT(rhs.mValue >= lhs.mValue - std::numeric_limits<T>::max());
-            } else {
-                // rhs is positive: |rhs| is at most the distance between min and |lhs|
-                // rhs is negative: negative minus negative won't overflow
-                ASSERT(rhs.mValue <= lhs.mValue - std::numeric_limits<T>::min());
-            }
-            return lhs.mValue - rhs.mValue;
-        }
-
-        template <typename T2 = T>
-        constexpr std::enable_if_t<std::is_signed<T2>::value, TypedIntegerImpl> operator-() const {
-            static_assert(std::is_same<T, T2>::value);
-            // The negation of the most negative value cannot be represented.
-            ASSERT(this->mValue != std::numeric_limits<T>::min());
-            return TypedIntegerImpl(-this->mValue);
-        }
-
-        constexpr TypedIntegerImpl operator+(TypedIntegerImpl rhs) const {
-            auto result = AddImpl(*this, rhs);
-            static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Add instead.");
-            return TypedIntegerImpl(result);
-        }
-
-        constexpr TypedIntegerImpl operator-(TypedIntegerImpl rhs) const {
-            auto result = SubImpl(*this, rhs);
-            static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Sub instead.");
-            return TypedIntegerImpl(result);
-        }
-    };
+    constexpr TypedIntegerImpl operator-(TypedIntegerImpl rhs) const {
+        auto result = SubImpl(*this, rhs);
+        static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Sub instead.");
+        return TypedIntegerImpl(result);
+    }
+};
 
 }  // namespace detail
 
 namespace std {
 
-    template <typename Tag, typename T>
-    class numeric_limits<detail::TypedIntegerImpl<Tag, T>> : public numeric_limits<T> {
-      public:
-        static detail::TypedIntegerImpl<Tag, T> max() noexcept {
-            return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::max());
-        }
-        static detail::TypedIntegerImpl<Tag, T> min() noexcept {
-            return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::min());
-        }
-    };
+template <typename Tag, typename T>
+class numeric_limits<detail::TypedIntegerImpl<Tag, T>> : public numeric_limits<T> {
+  public:
+    static detail::TypedIntegerImpl<Tag, T> max() noexcept {
+        return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::max());
+    }
+    static detail::TypedIntegerImpl<Tag, T> min() noexcept {
+        return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::min());
+    }
+};
 
 }  // namespace std
 
 namespace ityp {
 
-    // These helpers below are provided since the default arithmetic operators for small integer
-    // types like uint8_t and uint16_t return integers, not their same type. To avoid lots of
-    // casting or conditional code between Release/Debug. Callsites should use ityp::Add(a, b) and
-    // ityp::Sub(a, b) instead.
+// These helpers below are provided since the default arithmetic operators for small integer
+// types like uint8_t and uint16_t return integers, not their same type. To avoid lots of
+// casting or conditional code between Release/Debug, callsites should use ityp::Add(a, b) and
+// ityp::Sub(a, b) instead.
 
-    template <typename Tag, typename T>
-    constexpr ::detail::TypedIntegerImpl<Tag, T> Add(::detail::TypedIntegerImpl<Tag, T> lhs,
-                                                     ::detail::TypedIntegerImpl<Tag, T> rhs) {
-        return ::detail::TypedIntegerImpl<Tag, T>(
-            static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::AddImpl(lhs, rhs)));
-    }
+template <typename Tag, typename T>
+constexpr ::detail::TypedIntegerImpl<Tag, T> Add(::detail::TypedIntegerImpl<Tag, T> lhs,
+                                                 ::detail::TypedIntegerImpl<Tag, T> rhs) {
+    return ::detail::TypedIntegerImpl<Tag, T>(
+        static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::AddImpl(lhs, rhs)));
+}
 
-    template <typename Tag, typename T>
-    constexpr ::detail::TypedIntegerImpl<Tag, T> Sub(::detail::TypedIntegerImpl<Tag, T> lhs,
-                                                     ::detail::TypedIntegerImpl<Tag, T> rhs) {
-        return ::detail::TypedIntegerImpl<Tag, T>(
-            static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::SubImpl(lhs, rhs)));
-    }
+template <typename Tag, typename T>
+constexpr ::detail::TypedIntegerImpl<Tag, T> Sub(::detail::TypedIntegerImpl<Tag, T> lhs,
+                                                 ::detail::TypedIntegerImpl<Tag, T> rhs) {
+    return ::detail::TypedIntegerImpl<Tag, T>(
+        static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::SubImpl(lhs, rhs)));
+}
 
-    template <typename T>
-    constexpr std::enable_if_t<std::is_integral<T>::value, T> Add(T lhs, T rhs) {
-        return static_cast<T>(lhs + rhs);
-    }
+template <typename T>
+constexpr std::enable_if_t<std::is_integral<T>::value, T> Add(T lhs, T rhs) {
+    return static_cast<T>(lhs + rhs);
+}
 
-    template <typename T>
-    constexpr std::enable_if_t<std::is_integral<T>::value, T> Sub(T lhs, T rhs) {
-        return static_cast<T>(lhs - rhs);
-    }
+template <typename T>
+constexpr std::enable_if_t<std::is_integral<T>::value, T> Sub(T lhs, T rhs) {
+    return static_cast<T>(lhs - rhs);
+}
 
 }  // namespace ityp
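
For context, a minimal standalone sketch (not part of this CL) of the integral-promotion
behavior these helpers work around:

    #include <cstdint>
    #include <type_traits>

    int main() {
        uint8_t a = 200;
        uint8_t b = 100;
        auto sum = a + b;  // integral promotion: decltype(sum) is int, holding 300
        static_assert(std::is_same<decltype(sum), int>::value, "small types promote to int");
        uint8_t wrapped = static_cast<uint8_t>(sum);  // the cast ityp::Add hides; wraps to 44
        return wrapped;
    }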
 
diff --git a/src/dawn/common/UnderlyingType.h b/src/dawn/common/UnderlyingType.h
index 5b499da..b4ff8ea 100644
--- a/src/dawn/common/UnderlyingType.h
+++ b/src/dawn/common/UnderlyingType.h
@@ -22,27 +22,27 @@
 // template parameter. It includes a specialization for detail::TypedIntegerImpl which yields
 // the wrapped integer type.
 namespace detail {
-    template <typename T, typename Enable = void>
-    struct UnderlyingTypeImpl;
+template <typename T, typename Enable = void>
+struct UnderlyingTypeImpl;
 
-    template <typename I>
-    struct UnderlyingTypeImpl<I, typename std::enable_if_t<std::is_integral<I>::value>> {
-        using type = I;
-    };
+template <typename I>
+struct UnderlyingTypeImpl<I, typename std::enable_if_t<std::is_integral<I>::value>> {
+    using type = I;
+};
 
-    template <typename E>
-    struct UnderlyingTypeImpl<E, typename std::enable_if_t<std::is_enum<E>::value>> {
-        using type = std::underlying_type_t<E>;
-    };
+template <typename E>
+struct UnderlyingTypeImpl<E, typename std::enable_if_t<std::is_enum<E>::value>> {
+    using type = std::underlying_type_t<E>;
+};
 
-    // Forward declare the TypedInteger impl.
-    template <typename Tag, typename T>
-    class TypedIntegerImpl;
+// Forward declare the TypedInteger impl.
+template <typename Tag, typename T>
+class TypedIntegerImpl;
 
-    template <typename Tag, typename I>
-    struct UnderlyingTypeImpl<TypedIntegerImpl<Tag, I>> {
-        using type = typename UnderlyingTypeImpl<I>::type;
-    };
+template <typename Tag, typename I>
+struct UnderlyingTypeImpl<TypedIntegerImpl<Tag, I>> {
+    using type = typename UnderlyingTypeImpl<I>::type;
+};
 }  // namespace detail
 
 template <typename T>
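
As a sketch of what this trait yields (assuming the UnderlyingType alias that the header
declares in terms of detail::UnderlyingTypeImpl; the hunk above is truncated mid-declaration):

    // assumes #include "dawn/common/UnderlyingType.h"
    #include <cstdint>
    #include <type_traits>

    enum class Color : uint8_t { kRed };
    static_assert(std::is_same_v<UnderlyingType<Color>, uint8_t>);      // enums yield their underlying type
    static_assert(std::is_same_v<UnderlyingType<uint32_t>, uint32_t>);  // integral types pass through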
diff --git a/src/dawn/common/ityp_array.h b/src/dawn/common/ityp_array.h
index d84db7c..a410302 100644
--- a/src/dawn/common/ityp_array.h
+++ b/src/dawn/common/ityp_array.h
@@ -26,75 +26,64 @@
 
 namespace ityp {
 
-    // ityp::array is a helper class that wraps std::array with the restriction that
-    // indices must be a particular type |Index|. Dawn uses multiple flat maps of
-    // index-->data, and this class helps ensure an indices cannot be passed interchangably
-    // to a flat map of a different type.
-    template <typename Index, typename Value, size_t Size>
-    class array : private std::array<Value, Size> {
-        using I = UnderlyingType<Index>;
-        using Base = std::array<Value, Size>;
+// ityp::array is a helper class that wraps std::array with the restriction that
+// indices must be a particular type |Index|. Dawn uses multiple flat maps of
+// index-->data, and this class helps ensure that indices cannot be passed interchangeably
+// to a flat map of a different type.
+template <typename Index, typename Value, size_t Size>
+class array : private std::array<Value, Size> {
+    using I = UnderlyingType<Index>;
+    using Base = std::array<Value, Size>;
 
-        static_assert(Size <= std::numeric_limits<I>::max());
+    static_assert(Size <= std::numeric_limits<I>::max());
 
-      public:
-        constexpr array() = default;
+  public:
+    constexpr array() = default;
 
-        template <typename... Values>
-        // NOLINTNEXTLINE(runtime/explicit)
-        constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} {
-        }
+    template <typename... Values>
+    // NOLINTNEXTLINE(runtime/explicit)
+    constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} {}
 
-        Value& operator[](Index i) {
-            I index = static_cast<I>(i);
-            ASSERT(index >= 0 && index < I(Size));
-            return Base::operator[](index);
-        }
+    Value& operator[](Index i) {
+        I index = static_cast<I>(i);
+        ASSERT(index >= 0 && index < I(Size));
+        return Base::operator[](index);
+    }
 
-        constexpr const Value& operator[](Index i) const {
-            I index = static_cast<I>(i);
-            ASSERT(index >= 0 && index < I(Size));
-            return Base::operator[](index);
-        }
+    constexpr const Value& operator[](Index i) const {
+        I index = static_cast<I>(i);
+        ASSERT(index >= 0 && index < I(Size));
+        return Base::operator[](index);
+    }
 
-        Value& at(Index i) {
-            I index = static_cast<I>(i);
-            ASSERT(index >= 0 && index < I(Size));
-            return Base::at(index);
-        }
+    Value& at(Index i) {
+        I index = static_cast<I>(i);
+        ASSERT(index >= 0 && index < I(Size));
+        return Base::at(index);
+    }
 
-        constexpr const Value& at(Index i) const {
-            I index = static_cast<I>(i);
-            ASSERT(index >= 0 && index < I(Size));
-            return Base::at(index);
-        }
+    constexpr const Value& at(Index i) const {
+        I index = static_cast<I>(i);
+        ASSERT(index >= 0 && index < I(Size));
+        return Base::at(index);
+    }
 
-        typename Base::iterator begin() noexcept {
-            return Base::begin();
-        }
+    typename Base::iterator begin() noexcept { return Base::begin(); }
 
-        typename Base::const_iterator begin() const noexcept {
-            return Base::begin();
-        }
+    typename Base::const_iterator begin() const noexcept { return Base::begin(); }
 
-        typename Base::iterator end() noexcept {
-            return Base::end();
-        }
+    typename Base::iterator end() noexcept { return Base::end(); }
 
-        typename Base::const_iterator end() const noexcept {
-            return Base::end();
-        }
+    typename Base::const_iterator end() const noexcept { return Base::end(); }
 
-        constexpr Index size() const {
-            return Index(I(Size));
-        }
+    constexpr Index size() const { return Index(I(Size)); }
 
-        using Base::back;
-        using Base::data;
-        using Base::empty;
-        using Base::fill;
-        using Base::front;
-    };
+    using Base::back;
+    using Base::data;
+    using Base::empty;
+    using Base::fill;
+    using Base::front;
+};
 
 }  // namespace ityp
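
Illustrative usage, assuming Dawn's TypedInteger alias from TypedInteger.h; AttachmentIndex
is a made-up tag:

    #include "dawn/common/TypedInteger.h"
    #include "dawn/common/ityp_array.h"

    using AttachmentIndex = TypedInteger<struct AttachmentIndexTag, uint8_t>;

    ityp::array<AttachmentIndex, float, 4> clearValues = {0.f, 0.f, 0.f, 1.f};

    // clearValues[3] does not compile in builds where TypedInteger is the wrapper type;
    // the index must be an AttachmentIndex.
    float GetAlpha() {
        return clearValues[AttachmentIndex(3)];
    }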
 
diff --git a/src/dawn/common/ityp_bitset.h b/src/dawn/common/ityp_bitset.h
index 43ca81a..e9cfa05 100644
--- a/src/dawn/common/ityp_bitset.h
+++ b/src/dawn/common/ityp_bitset.h
@@ -21,116 +21,95 @@
 
 namespace ityp {
 
-    // ityp::bitset is a helper class that wraps std::bitset with the restriction that
-    // indices must be a particular type |Index|.
-    template <typename Index, size_t N>
-    class bitset : private std::bitset<N> {
-        using I = UnderlyingType<Index>;
-        using Base = std::bitset<N>;
+// ityp::bitset is a helper class that wraps std::bitset with the restriction that
+// indices must be a particular type |Index|.
+template <typename Index, size_t N>
+class bitset : private std::bitset<N> {
+    using I = UnderlyingType<Index>;
+    using Base = std::bitset<N>;
 
-        static_assert(sizeof(I) <= sizeof(size_t));
+    static_assert(sizeof(I) <= sizeof(size_t));
 
-        explicit constexpr bitset(const Base& rhs) : Base(rhs) {
-        }
+    explicit constexpr bitset(const Base& rhs) : Base(rhs) {}
 
-      public:
-        using reference = typename Base::reference;
+  public:
+    using reference = typename Base::reference;
 
-        constexpr bitset() noexcept : Base() {
-        }
+    constexpr bitset() noexcept : Base() {}
 
-        // NOLINTNEXTLINE(runtime/explicit)
-        constexpr bitset(uint64_t value) noexcept : Base(value) {
-        }
+    // NOLINTNEXTLINE(runtime/explicit)
+    constexpr bitset(uint64_t value) noexcept : Base(value) {}
 
-        constexpr bool operator[](Index i) const {
-            return Base::operator[](static_cast<I>(i));
-        }
+    constexpr bool operator[](Index i) const { return Base::operator[](static_cast<I>(i)); }
 
-        typename Base::reference operator[](Index i) {
-            return Base::operator[](static_cast<I>(i));
-        }
+    typename Base::reference operator[](Index i) { return Base::operator[](static_cast<I>(i)); }
 
-        bool test(Index i) const {
-            return Base::test(static_cast<I>(i));
-        }
+    bool test(Index i) const { return Base::test(static_cast<I>(i)); }
 
-        using Base::all;
-        using Base::any;
-        using Base::count;
-        using Base::none;
-        using Base::size;
+    using Base::all;
+    using Base::any;
+    using Base::count;
+    using Base::none;
+    using Base::size;
 
-        bool operator==(const bitset& other) const noexcept {
-            return Base::operator==(static_cast<const Base&>(other));
-        }
+    bool operator==(const bitset& other) const noexcept {
+        return Base::operator==(static_cast<const Base&>(other));
+    }
 
-        bool operator!=(const bitset& other) const noexcept {
-            return Base::operator!=(static_cast<const Base&>(other));
-        }
+    bool operator!=(const bitset& other) const noexcept {
+        return Base::operator!=(static_cast<const Base&>(other));
+    }
 
-        bitset& operator&=(const bitset& other) noexcept {
-            return static_cast<bitset&>(Base::operator&=(static_cast<const Base&>(other)));
-        }
+    bitset& operator&=(const bitset& other) noexcept {
+        return static_cast<bitset&>(Base::operator&=(static_cast<const Base&>(other)));
+    }
 
-        bitset& operator|=(const bitset& other) noexcept {
-            return static_cast<bitset&>(Base::operator|=(static_cast<const Base&>(other)));
-        }
+    bitset& operator|=(const bitset& other) noexcept {
+        return static_cast<bitset&>(Base::operator|=(static_cast<const Base&>(other)));
+    }
 
-        bitset& operator^=(const bitset& other) noexcept {
-            return static_cast<bitset&>(Base::operator^=(static_cast<const Base&>(other)));
-        }
+    bitset& operator^=(const bitset& other) noexcept {
+        return static_cast<bitset&>(Base::operator^=(static_cast<const Base&>(other)));
+    }
 
-        bitset operator~() const noexcept {
-            return bitset(*this).flip();
-        }
+    bitset operator~() const noexcept { return bitset(*this).flip(); }
 
-        bitset& set() noexcept {
-            return static_cast<bitset&>(Base::set());
-        }
+    bitset& set() noexcept { return static_cast<bitset&>(Base::set()); }
 
-        bitset& set(Index i, bool value = true) {
-            return static_cast<bitset&>(Base::set(static_cast<I>(i), value));
-        }
+    bitset& set(Index i, bool value = true) {
+        return static_cast<bitset&>(Base::set(static_cast<I>(i), value));
+    }
 
-        bitset& reset() noexcept {
-            return static_cast<bitset&>(Base::reset());
-        }
+    bitset& reset() noexcept { return static_cast<bitset&>(Base::reset()); }
 
-        bitset& reset(Index i) {
-            return static_cast<bitset&>(Base::reset(static_cast<I>(i)));
-        }
+    bitset& reset(Index i) { return static_cast<bitset&>(Base::reset(static_cast<I>(i))); }
 
-        bitset& flip() noexcept {
-            return static_cast<bitset&>(Base::flip());
-        }
+    bitset& flip() noexcept { return static_cast<bitset&>(Base::flip()); }
 
-        bitset& flip(Index i) {
-            return static_cast<bitset&>(Base::flip(static_cast<I>(i)));
-        }
+    bitset& flip(Index i) { return static_cast<bitset&>(Base::flip(static_cast<I>(i))); }
 
-        using Base::to_string;
-        using Base::to_ullong;
-        using Base::to_ulong;
+    using Base::to_string;
+    using Base::to_ullong;
+    using Base::to_ulong;
 
-        friend bitset operator&(const bitset& lhs, const bitset& rhs) noexcept {
-            return bitset(static_cast<const Base&>(lhs) & static_cast<const Base&>(rhs));
-        }
+    friend bitset operator&(const bitset& lhs, const bitset& rhs) noexcept {
+        return bitset(static_cast<const Base&>(lhs) & static_cast<const Base&>(rhs));
+    }
 
-        friend bitset operator|(const bitset& lhs, const bitset& rhs) noexcept {
-            return bitset(static_cast<const Base&>(lhs) | static_cast<const Base&>(rhs));
-        }
+    friend bitset operator|(const bitset& lhs, const bitset& rhs) noexcept {
+        return bitset(static_cast<const Base&>(lhs) | static_cast<const Base&>(rhs));
+    }
 
-        friend bitset operator^(const bitset& lhs, const bitset& rhs) noexcept {
-            return bitset(static_cast<const Base&>(lhs) ^ static_cast<const Base&>(rhs));
-        }
+    friend bitset operator^(const bitset& lhs, const bitset& rhs) noexcept {
+        return bitset(static_cast<const Base&>(lhs) ^ static_cast<const Base&>(rhs));
+    }
 
-        friend BitSetIterator<N, Index> IterateBitSet(const bitset& bitset) {
-            return BitSetIterator<N, Index>(static_cast<const Base&>(bitset));
-        }
+    friend BitSetIterator<N, Index> IterateBitSet(const bitset& bitset) {
+        return BitSetIterator<N, Index>(static_cast<const Base&>(bitset));
+    }
 
-        friend struct std::hash<bitset>;
-    };
+    friend struct std::hash<bitset>;
+};
 
 }  // namespace ityp
 
@@ -147,7 +126,7 @@
     using I = UnderlyingType<Index>;
 #if defined(DAWN_COMPILER_MSVC)
     if constexpr (N > 32) {
-#    if defined(DAWN_PLATFORM_64_BIT)
+#if defined(DAWN_PLATFORM_64_BIT)
         // NOLINTNEXTLINE(runtime/int)
         unsigned long firstBitIndex = 0ul;
         unsigned char ret = _BitScanReverse64(&firstBitIndex, bitset.to_ullong());
@@ -155,7 +134,7 @@
             return Index(static_cast<I>(0));
         }
         return Index(static_cast<I>(firstBitIndex + 1));
-#    else   // defined(DAWN_PLATFORM_64_BIT)
+#else   // defined(DAWN_PLATFORM_64_BIT)
         if (bitset.none()) {
             return Index(static_cast<I>(0));
         }
@@ -165,7 +144,7 @@
             }
         }
         UNREACHABLE();
-#    endif  // defined(DAWN_PLATFORM_64_BIT)
+#endif  // defined(DAWN_PLATFORM_64_BIT)
     } else {
         // NOLINTNEXTLINE(runtime/int)
         unsigned long firstBitIndex = 0ul;
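
Illustrative usage of the typed bitset, under the same assumptions as the array sketch above:

    using BindingIndex = TypedInteger<struct BindingIndexTag, uint32_t>;

    ityp::bitset<BindingIndex, 16> usedBindings;

    void MarkUsed(BindingIndex i) {
        usedBindings.set(i);  // set/test/reset/flip all take the typed index
    }

    bool IsUsed(BindingIndex i) {
        return usedBindings.test(i);
    }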
diff --git a/src/dawn/common/ityp_span.h b/src/dawn/common/ityp_span.h
index 7b0bb2b..4f76b57 100644
--- a/src/dawn/common/ityp_span.h
+++ b/src/dawn/common/ityp_span.h
@@ -22,81 +22,65 @@
 
 namespace ityp {
 
-    // ityp::span is a helper class that wraps an unowned packed array of type |Value|.
-    // It stores the size and pointer to first element. It has the restriction that
-    // indices must be a particular type |Index|. This provides a type-safe way to index
-    // raw pointers.
-    template <typename Index, typename Value>
-    class span {
-        using I = UnderlyingType<Index>;
+// ityp::span is a helper class that wraps an unowned packed array of type |Value|.
+// It stores the size and a pointer to the first element. It has the restriction that
+// indices must be a particular type |Index|. This provides a type-safe way to index
+// raw pointers.
+template <typename Index, typename Value>
+class span {
+    using I = UnderlyingType<Index>;
 
-      public:
-        constexpr span() : mData(nullptr), mSize(0) {
-        }
-        constexpr span(Value* data, Index size) : mData(data), mSize(size) {
-        }
+  public:
+    constexpr span() : mData(nullptr), mSize(0) {}
+    constexpr span(Value* data, Index size) : mData(data), mSize(size) {}
 
-        constexpr Value& operator[](Index i) const {
-            ASSERT(i < mSize);
-            return mData[static_cast<I>(i)];
-        }
+    constexpr Value& operator[](Index i) const {
+        ASSERT(i < mSize);
+        return mData[static_cast<I>(i)];
+    }
 
-        Value* data() noexcept {
-            return mData;
-        }
+    Value* data() noexcept { return mData; }
 
-        const Value* data() const noexcept {
-            return mData;
-        }
+    const Value* data() const noexcept { return mData; }
 
-        Value* begin() noexcept {
-            return mData;
-        }
+    Value* begin() noexcept { return mData; }
 
-        const Value* begin() const noexcept {
-            return mData;
-        }
+    const Value* begin() const noexcept { return mData; }
 
-        Value* end() noexcept {
-            return mData + static_cast<I>(mSize);
-        }
+    Value* end() noexcept { return mData + static_cast<I>(mSize); }
 
-        const Value* end() const noexcept {
-            return mData + static_cast<I>(mSize);
-        }
+    const Value* end() const noexcept { return mData + static_cast<I>(mSize); }
 
-        Value& front() {
-            ASSERT(mData != nullptr);
-            ASSERT(static_cast<I>(mSize) >= 0);
-            return *mData;
-        }
+    Value& front() {
+        ASSERT(mData != nullptr);
+        ASSERT(static_cast<I>(mSize) >= 0);
+        return *mData;
+    }
 
-        const Value& front() const {
-            ASSERT(mData != nullptr);
-            ASSERT(static_cast<I>(mSize) >= 0);
-            return *mData;
-        }
+    const Value& front() const {
+        ASSERT(mData != nullptr);
+        ASSERT(static_cast<I>(mSize) >= 0);
+        return *mData;
+    }
 
-        Value& back() {
-            ASSERT(mData != nullptr);
-            ASSERT(static_cast<I>(mSize) >= 0);
-            return *(mData + static_cast<I>(mSize) - 1);
-        }
+    Value& back() {
+        ASSERT(mData != nullptr);
+        ASSERT(static_cast<I>(mSize) >= 0);
+        return *(mData + static_cast<I>(mSize) - 1);
+    }
 
-        const Value& back() const {
-            ASSERT(mData != nullptr);
-            ASSERT(static_cast<I>(mSize) >= 0);
-            return *(mData + static_cast<I>(mSize) - 1);
-        }
+    const Value& back() const {
+        ASSERT(mData != nullptr);
+        ASSERT(static_cast<I>(mSize) >= 0);
+        return *(mData + static_cast<I>(mSize) - 1);
+    }
 
-        Index size() const {
-            return mSize;
-        }
+    Index size() const { return mSize; }
 
-      private:
-        Value* mData;
-        Index mSize;
-    };
+  private:
+    Value* mData;
+    Index mSize;
+};
 
 }  // namespace ityp
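
A short sketch of the typed view over raw storage (SamplerIndex is a hypothetical tag;
assumes storage is non-empty):

    #include <vector>

    using SamplerIndex = TypedInteger<struct SamplerIndexTag, uint32_t>;

    void FillFirst(std::vector<int>& storage) {
        ityp::span<SamplerIndex, int> view(storage.data(),
                                           SamplerIndex(static_cast<uint32_t>(storage.size())));
        view[SamplerIndex(0)] = 42;  // a raw integer index is rejected at compile time
    }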
 
diff --git a/src/dawn/common/ityp_stack_vec.h b/src/dawn/common/ityp_stack_vec.h
index fb3fcf7..d35adf6 100644
--- a/src/dawn/common/ityp_stack_vec.h
+++ b/src/dawn/common/ityp_stack_vec.h
@@ -24,82 +24,53 @@
 
 namespace ityp {
 
-    template <typename Index, typename Value, size_t StaticCapacity>
-    class stack_vec : private StackVector<Value, StaticCapacity> {
-        using I = UnderlyingType<Index>;
-        using Base = StackVector<Value, StaticCapacity>;
-        using VectorBase = std::vector<Value, StackAllocator<Value, StaticCapacity>>;
-        static_assert(StaticCapacity <= std::numeric_limits<I>::max());
+template <typename Index, typename Value, size_t StaticCapacity>
+class stack_vec : private StackVector<Value, StaticCapacity> {
+    using I = UnderlyingType<Index>;
+    using Base = StackVector<Value, StaticCapacity>;
+    using VectorBase = std::vector<Value, StackAllocator<Value, StaticCapacity>>;
+    static_assert(StaticCapacity <= std::numeric_limits<I>::max());
 
-      public:
-        stack_vec() : Base() {
-        }
-        explicit stack_vec(Index size) : Base() {
-            this->container().resize(static_cast<I>(size));
-        }
+  public:
+    stack_vec() : Base() {}
+    explicit stack_vec(Index size) : Base() { this->container().resize(static_cast<I>(size)); }
 
-        Value& operator[](Index i) {
-            ASSERT(i < size());
-            return Base::operator[](static_cast<I>(i));
-        }
+    Value& operator[](Index i) {
+        ASSERT(i < size());
+        return Base::operator[](static_cast<I>(i));
+    }
 
-        constexpr const Value& operator[](Index i) const {
-            ASSERT(i < size());
-            return Base::operator[](static_cast<I>(i));
-        }
+    constexpr const Value& operator[](Index i) const {
+        ASSERT(i < size());
+        return Base::operator[](static_cast<I>(i));
+    }
 
-        void resize(Index size) {
-            this->container().resize(static_cast<I>(size));
-        }
+    void resize(Index size) { this->container().resize(static_cast<I>(size)); }
 
-        void reserve(Index size) {
-            this->container().reserve(static_cast<I>(size));
-        }
+    void reserve(Index size) { this->container().reserve(static_cast<I>(size)); }
 
-        Value* data() {
-            return this->container().data();
-        }
+    Value* data() { return this->container().data(); }
 
-        const Value* data() const {
-            return this->container().data();
-        }
+    const Value* data() const { return this->container().data(); }
 
-        typename VectorBase::iterator begin() noexcept {
-            return this->container().begin();
-        }
+    typename VectorBase::iterator begin() noexcept { return this->container().begin(); }
 
-        typename VectorBase::const_iterator begin() const noexcept {
-            return this->container().begin();
-        }
+    typename VectorBase::const_iterator begin() const noexcept { return this->container().begin(); }
 
-        typename VectorBase::iterator end() noexcept {
-            return this->container().end();
-        }
+    typename VectorBase::iterator end() noexcept { return this->container().end(); }
 
-        typename VectorBase::const_iterator end() const noexcept {
-            return this->container().end();
-        }
+    typename VectorBase::const_iterator end() const noexcept { return this->container().end(); }
 
-        typename VectorBase::reference front() {
-            return this->container().front();
-        }
+    typename VectorBase::reference front() { return this->container().front(); }
 
-        typename VectorBase::const_reference front() const {
-            return this->container().front();
-        }
+    typename VectorBase::const_reference front() const { return this->container().front(); }
 
-        typename VectorBase::reference back() {
-            return this->container().back();
-        }
+    typename VectorBase::reference back() { return this->container().back(); }
 
-        typename VectorBase::const_reference back() const {
-            return this->container().back();
-        }
+    typename VectorBase::const_reference back() const { return this->container().back(); }
 
-        Index size() const {
-            return Index(static_cast<I>(this->container().size()));
-        }
-    };
+    Index size() const { return Index(static_cast<I>(this->container().size())); }
+};
 
 }  // namespace ityp
 
diff --git a/src/dawn/common/ityp_vector.h b/src/dawn/common/ityp_vector.h
index 2088c4f..3d402cf 100644
--- a/src/dawn/common/ityp_vector.h
+++ b/src/dawn/common/ityp_vector.h
@@ -24,85 +24,75 @@
 
 namespace ityp {
 
-    // ityp::vector is a helper class that wraps std::vector with the restriction that
-    // indices must be a particular type |Index|.
-    template <typename Index, typename Value>
-    class vector : public std::vector<Value> {
-        using I = UnderlyingType<Index>;
-        using Base = std::vector<Value>;
+// ityp::vector is a helper class that wraps std::vector with the restriction that
+// indices must be a particular type |Index|.
+template <typename Index, typename Value>
+class vector : public std::vector<Value> {
+    using I = UnderlyingType<Index>;
+    using Base = std::vector<Value>;
 
-      private:
-        // Disallow access to base constructors and untyped index/size-related operators.
-        using Base::Base;
-        using Base::operator=;
-        using Base::operator[];
-        using Base::at;
-        using Base::reserve;
-        using Base::resize;
-        using Base::size;
+  private:
+    // Disallow access to base constructors and untyped index/size-related operators.
+    using Base::Base;
+    using Base::operator=;
+    using Base::operator[];
+    using Base::at;
+    using Base::reserve;
+    using Base::resize;
+    using Base::size;
 
-      public:
-        vector() : Base() {
-        }
+  public:
+    vector() : Base() {}
 
-        explicit vector(Index size) : Base(static_cast<I>(size)) {
-        }
+    explicit vector(Index size) : Base(static_cast<I>(size)) {}
 
-        vector(Index size, const Value& init) : Base(static_cast<I>(size), init) {
-        }
+    vector(Index size, const Value& init) : Base(static_cast<I>(size), init) {}
 
-        vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) {
-        }
+    vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) {}
 
-        vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) {
-        }
+    vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) {}
 
-        vector(std::initializer_list<Value> init) : Base(init) {
-        }
+    vector(std::initializer_list<Value> init) : Base(init) {}
 
-        vector& operator=(const vector& rhs) {
-            Base::operator=(static_cast<const Base&>(rhs));
-            return *this;
-        }
+    vector& operator=(const vector& rhs) {
+        Base::operator=(static_cast<const Base&>(rhs));
+        return *this;
+    }
 
-        vector& operator=(vector&& rhs) noexcept {
-            Base::operator=(static_cast<Base&&>(rhs));
-            return *this;
-        }
+    vector& operator=(vector&& rhs) noexcept {
+        Base::operator=(static_cast<Base&&>(rhs));
+        return *this;
+    }
 
-        Value& operator[](Index i) {
-            ASSERT(i >= Index(0) && i < size());
-            return Base::operator[](static_cast<I>(i));
-        }
+    Value& operator[](Index i) {
+        ASSERT(i >= Index(0) && i < size());
+        return Base::operator[](static_cast<I>(i));
+    }
 
-        constexpr const Value& operator[](Index i) const {
-            ASSERT(i >= Index(0) && i < size());
-            return Base::operator[](static_cast<I>(i));
-        }
+    constexpr const Value& operator[](Index i) const {
+        ASSERT(i >= Index(0) && i < size());
+        return Base::operator[](static_cast<I>(i));
+    }
 
-        Value& at(Index i) {
-            ASSERT(i >= Index(0) && i < size());
-            return Base::at(static_cast<I>(i));
-        }
+    Value& at(Index i) {
+        ASSERT(i >= Index(0) && i < size());
+        return Base::at(static_cast<I>(i));
+    }
 
-        constexpr const Value& at(Index i) const {
-            ASSERT(i >= Index(0) && i < size());
-            return Base::at(static_cast<I>(i));
-        }
+    constexpr const Value& at(Index i) const {
+        ASSERT(i >= Index(0) && i < size());
+        return Base::at(static_cast<I>(i));
+    }
 
-        constexpr Index size() const {
-            ASSERT(std::numeric_limits<I>::max() >= Base::size());
-            return Index(static_cast<I>(Base::size()));
-        }
+    constexpr Index size() const {
+        ASSERT(std::numeric_limits<I>::max() >= Base::size());
+        return Index(static_cast<I>(Base::size()));
+    }
 
-        void resize(Index size) {
-            Base::resize(static_cast<I>(size));
-        }
+    void resize(Index size) { Base::resize(static_cast<I>(size)); }
 
-        void reserve(Index size) {
-            Base::reserve(static_cast<I>(size));
-        }
-    };
+    void reserve(Index size) { Base::reserve(static_cast<I>(size)); }
+};
 
 }  // namespace ityp
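
The typed vector mirrors the array sketch above; size-related calls take the typed index too:

    ityp::vector<AttachmentIndex, uint32_t> attachments;

    void Grow() {
        attachments.resize(AttachmentIndex(2));  // resize/reserve/size all use AttachmentIndex
    }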
 
diff --git a/src/dawn/common/vulkan_platform.h b/src/dawn/common/vulkan_platform.h
index 17d275f..ef90910 100644
--- a/src/dawn/common/vulkan_platform.h
+++ b/src/dawn/common/vulkan_platform.h
@@ -16,10 +16,10 @@
 #define SRC_DAWN_COMMON_VULKAN_PLATFORM_H_
 
 #if !defined(DAWN_ENABLE_BACKEND_VULKAN)
-#    error "vulkan_platform.h included without the Vulkan backend enabled"
+#error "vulkan_platform.h included without the Vulkan backend enabled"
 #endif
 #if defined(VULKAN_CORE_H_)
-#    error "vulkan.h included before vulkan_platform.h"
+#error "vulkan.h included before vulkan_platform.h"
 #endif
 
 #include <cstddef>
@@ -36,7 +36,7 @@
 // (like vulkan.h on 64 bit) but makes sure the types are different on 32 bit architectures.
 
 #if defined(DAWN_PLATFORM_64_BIT)
-#    define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = struct object##_T*;
+#define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = struct object##_T*;
 // This function is needed because MSVC doesn't accept reinterpret_cast from uint64_t to uint64_t
 // TODO(cwallez@chromium.org): Remove this once we rework vulkan_platform.h
 template <typename T>
@@ -44,13 +44,13 @@
     return reinterpret_cast<T>(u64);
 }
 #elif defined(DAWN_PLATFORM_32_BIT)
-#    define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = uint64_t;
+#define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = uint64_t;
 template <typename T>
 T NativeNonDispatachableHandleFromU64(uint64_t u64) {
     return u64;
 }
 #else
-#    error "Unsupported platform"
+#error "Unsupported platform"
 #endif
 
 // Define a placeholder Vulkan handle for use before we include vulkan.h
@@ -67,89 +67,73 @@
 
 namespace dawn::native::vulkan {
 
-    namespace detail {
-        template <typename T>
-        struct WrapperStruct {
-            T member;
-        };
+namespace detail {
+template <typename T>
+struct WrapperStruct {
+    T member;
+};
 
-        template <typename T>
-        static constexpr size_t AlignOfInStruct = alignof(WrapperStruct<T>);
+template <typename T>
+static constexpr size_t AlignOfInStruct = alignof(WrapperStruct<T>);
 
-        static constexpr size_t kNativeVkHandleAlignment = AlignOfInStruct<VkSomeHandle>;
-        static constexpr size_t kUint64Alignment = AlignOfInStruct<uint64_t>;
+static constexpr size_t kNativeVkHandleAlignment = AlignOfInStruct<VkSomeHandle>;
+static constexpr size_t kUint64Alignment = AlignOfInStruct<uint64_t>;
 
-        // Simple handle types that supports "nullptr_t" as a 0 value.
-        template <typename Tag, typename HandleType>
-        class alignas(detail::kNativeVkHandleAlignment) VkHandle {
-          public:
-            // Default constructor and assigning of VK_NULL_HANDLE
-            VkHandle() = default;
-            VkHandle(std::nullptr_t) {
-            }
+// Simple handle type that supports "nullptr_t" as a 0 value.
+template <typename Tag, typename HandleType>
+class alignas(detail::kNativeVkHandleAlignment) VkHandle {
+  public:
+    // Default constructor and assigning of VK_NULL_HANDLE
+    VkHandle() = default;
+    VkHandle(std::nullptr_t) {}
 
-            // Use default copy constructor/assignment
-            VkHandle(const VkHandle<Tag, HandleType>& other) = default;
-            VkHandle& operator=(const VkHandle<Tag, HandleType>&) = default;
+    // Use default copy constructor/assignment
+    VkHandle(const VkHandle<Tag, HandleType>& other) = default;
+    VkHandle& operator=(const VkHandle<Tag, HandleType>&) = default;
 
-            // Comparisons between handles
-            bool operator==(VkHandle<Tag, HandleType> other) const {
-                return mHandle == other.mHandle;
-            }
-            bool operator!=(VkHandle<Tag, HandleType> other) const {
-                return mHandle != other.mHandle;
-            }
+    // Comparisons between handles
+    bool operator==(VkHandle<Tag, HandleType> other) const { return mHandle == other.mHandle; }
+    bool operator!=(VkHandle<Tag, HandleType> other) const { return mHandle != other.mHandle; }
 
-            // Comparisons between handles and VK_NULL_HANDLE
-            bool operator==(std::nullptr_t) const {
-                return mHandle == 0;
-            }
-            bool operator!=(std::nullptr_t) const {
-                return mHandle != 0;
-            }
+    // Comparisons between handles and VK_NULL_HANDLE
+    bool operator==(std::nullptr_t) const { return mHandle == 0; }
+    bool operator!=(std::nullptr_t) const { return mHandle != 0; }
 
-            // Implicit conversion to real Vulkan types.
-            operator HandleType() const {
-                return GetHandle();
-            }
+    // Implicit conversion to real Vulkan types.
+    operator HandleType() const { return GetHandle(); }
 
-            HandleType GetHandle() const {
-                return mHandle;
-            }
+    HandleType GetHandle() const { return mHandle; }
 
-            HandleType& operator*() {
-                return mHandle;
-            }
+    HandleType& operator*() { return mHandle; }
 
-            static VkHandle<Tag, HandleType> CreateFromHandle(HandleType handle) {
-                return VkHandle{handle};
-            }
-
-          private:
-            explicit VkHandle(HandleType handle) : mHandle(handle) {
-            }
-
-            HandleType mHandle = 0;
-        };
-    }  // namespace detail
-
-    static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
-
-    template <typename Tag, typename HandleType>
-    HandleType* AsVkArray(detail::VkHandle<Tag, HandleType>* handle) {
-        return reinterpret_cast<HandleType*>(handle);
+    static VkHandle<Tag, HandleType> CreateFromHandle(HandleType handle) {
+        return VkHandle{handle};
     }
 
+  private:
+    explicit VkHandle(HandleType handle) : mHandle(handle) {}
+
+    HandleType mHandle = 0;
+};
+}  // namespace detail
+
+static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
+
+template <typename Tag, typename HandleType>
+HandleType* AsVkArray(detail::VkHandle<Tag, HandleType>* handle) {
+    return reinterpret_cast<HandleType*>(handle);
+}
+
 }  // namespace dawn::native::vulkan
 
-#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object)                           \
-    DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object)                      \
-    namespace dawn::native::vulkan {                                        \
-        using object = detail::VkHandle<struct VkTag##object, ::object>;    \
-        static_assert(sizeof(object) == sizeof(uint64_t));                  \
-        static_assert(alignof(object) == detail::kUint64Alignment);         \
-        static_assert(sizeof(object) == sizeof(::object));                  \
-        static_assert(alignof(object) == detail::kNativeVkHandleAlignment); \
+#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object)                       \
+    DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object)                  \
+    namespace dawn::native::vulkan {                                    \
+    using object = detail::VkHandle<struct VkTag##object, ::object>;    \
+    static_assert(sizeof(object) == sizeof(uint64_t));                  \
+    static_assert(alignof(object) == detail::kUint64Alignment);         \
+    static_assert(sizeof(object) == sizeof(::object));                  \
+    static_assert(alignof(object) == detail::kNativeVkHandleAlignment); \
     }  // namespace dawn::native::vulkan
 
 // Import additional parts of Vulkan that are supported on our architecture and preemptively include
@@ -157,36 +141,36 @@
 // defines are already defined in the Vulkan-Header BUILD.gn, but are needed when building with
 // CMake, hence they cannot be removed at the moment.
 #if defined(DAWN_PLATFORM_WINDOWS)
-#    ifndef VK_USE_PLATFORM_WIN32_KHR
-#        define VK_USE_PLATFORM_WIN32_KHR
-#    endif
-#    include "dawn/common/windows_with_undefs.h"
+#ifndef VK_USE_PLATFORM_WIN32_KHR
+#define VK_USE_PLATFORM_WIN32_KHR
+#endif
+#include "dawn/common/windows_with_undefs.h"
 #endif  // DAWN_PLATFORM_WINDOWS
 
 #if defined(DAWN_USE_X11)
-#    define VK_USE_PLATFORM_XLIB_KHR
-#    ifndef VK_USE_PLATFORM_XCB_KHR
-#        define VK_USE_PLATFORM_XCB_KHR
-#    endif
-#    include "dawn/common/xlib_with_undefs.h"
+#define VK_USE_PLATFORM_XLIB_KHR
+#ifndef VK_USE_PLATFORM_XCB_KHR
+#define VK_USE_PLATFORM_XCB_KHR
+#endif
+#include "dawn/common/xlib_with_undefs.h"
 #endif  // defined(DAWN_USE_X11)
 
 #if defined(DAWN_ENABLE_BACKEND_METAL)
-#    ifndef VK_USE_PLATFORM_METAL_EXT
-#        define VK_USE_PLATFORM_METAL_EXT
-#    endif
+#ifndef VK_USE_PLATFORM_METAL_EXT
+#define VK_USE_PLATFORM_METAL_EXT
+#endif
 #endif  // defined(DAWN_ENABLE_BACKEND_METAL)
 
 #if defined(DAWN_PLATFORM_ANDROID)
-#    ifndef VK_USE_PLATFORM_ANDROID_KHR
-#        define VK_USE_PLATFORM_ANDROID_KHR
-#    endif
+#ifndef VK_USE_PLATFORM_ANDROID_KHR
+#define VK_USE_PLATFORM_ANDROID_KHR
+#endif
 #endif  // defined(DAWN_PLATFORM_ANDROID)
 
 #if defined(DAWN_PLATFORM_FUCHSIA)
-#    ifndef VK_USE_PLATFORM_FUCHSIA
-#        define VK_USE_PLATFORM_FUCHSIA
-#    endif
+#ifndef VK_USE_PLATFORM_FUCHSIA
+#define VK_USE_PLATFORM_FUCHSIA
+#endif
 #endif  // defined(DAWN_PLATFORM_FUCHSIA)
 
 // The actual inclusion of vulkan.h!
@@ -200,7 +184,7 @@
 #elif defined(DAWN_PLATFORM_32_BIT)
 static constexpr uint64_t VK_NULL_HANDLE = 0;
 #else
-#    error "Unsupported platform"
+#error "Unsupported platform"
 #endif
 
 #endif  // SRC_DAWN_COMMON_VULKAN_PLATFORM_H_
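
A sketch of the guarantees the wrapper gives, assuming vulkan.h has been pulled in through
this header so the wrapped VkFence type exists:

    namespace dawn::native::vulkan {

    void HandleSketch(VkFence fence) {
        bool isNull = (fence == VK_NULL_HANDLE);           // dawn's VK_NULL_HANDLE is nullptr
        ::VkFence raw = fence;                             // implicit conversion to the native handle
        static_assert(sizeof(fence) == sizeof(uint64_t));  // enforced by the static_asserts above
        (void)isNull;
        (void)raw;
    }

    }  // namespace dawn::native::vulkan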
diff --git a/src/dawn/common/windows_with_undefs.h b/src/dawn/common/windows_with_undefs.h
index 337ed60..63c27db 100644
--- a/src/dawn/common/windows_with_undefs.h
+++ b/src/dawn/common/windows_with_undefs.h
@@ -18,7 +18,7 @@
 #include "dawn/common/Platform.h"
 
 #if !defined(DAWN_PLATFORM_WINDOWS)
-#    error "windows_with_undefs.h included on non-Windows"
+#error "windows_with_undefs.h included on non-Windows"
 #endif
 
 // This header includes <windows.h> but removes all the extra defines that conflict with identifiers
diff --git a/src/dawn/common/xlib_with_undefs.h b/src/dawn/common/xlib_with_undefs.h
index 8073aa2..f9db481 100644
--- a/src/dawn/common/xlib_with_undefs.h
+++ b/src/dawn/common/xlib_with_undefs.h
@@ -18,7 +18,7 @@
 #include "dawn/common/Platform.h"
 
 #if !defined(DAWN_PLATFORM_LINUX)
-#    error "xlib_with_undefs.h included on non-Linux"
+#error "xlib_with_undefs.h included on non-Linux"
 #endif
 
 // This header includes <X11/Xlib.h> but removes all the extra defines that conflict with
diff --git a/src/dawn/fuzzers/DawnWireServerFuzzer.cpp b/src/dawn/fuzzers/DawnWireServerFuzzer.cpp
index 5250d69..1d325d4 100644
--- a/src/dawn/fuzzers/DawnWireServerFuzzer.cpp
+++ b/src/dawn/fuzzers/DawnWireServerFuzzer.cpp
@@ -29,39 +29,37 @@
 
 namespace {
 
-    class DevNull : public dawn::wire::CommandSerializer {
-      public:
-        size_t GetMaximumAllocationSize() const override {
-            // Some fuzzer bots have a 2GB allocation limit. Pick a value reasonably below that.
-            return 1024 * 1024 * 1024;
-        }
-        void* GetCmdSpace(size_t size) override {
-            if (size > buf.size()) {
-                buf.resize(size);
-            }
-            return buf.data();
-        }
-        bool Flush() override {
-            return true;
-        }
-
-      private:
-        std::vector<char> buf;
-    };
-
-    std::unique_ptr<dawn::native::Instance> sInstance;
-    WGPUProcDeviceCreateSwapChain sOriginalDeviceCreateSwapChain = nullptr;
-
-    bool sCommandsComplete = false;
-
-    WGPUSwapChain ErrorDeviceCreateSwapChain(WGPUDevice device,
-                                             WGPUSurface surface,
-                                             const WGPUSwapChainDescriptor*) {
-        WGPUSwapChainDescriptor desc = {};
-        // A 0 implementation will trigger a swapchain creation error.
-        desc.implementation = 0;
-        return sOriginalDeviceCreateSwapChain(device, surface, &desc);
+class DevNull : public dawn::wire::CommandSerializer {
+  public:
+    size_t GetMaximumAllocationSize() const override {
+        // Some fuzzer bots have a 2GB allocation limit. Pick a value reasonably below that.
+        return 1024 * 1024 * 1024;
     }
+    void* GetCmdSpace(size_t size) override {
+        if (size > buf.size()) {
+            buf.resize(size);
+        }
+        return buf.data();
+    }
+    bool Flush() override { return true; }
+
+  private:
+    std::vector<char> buf;
+};
+
+std::unique_ptr<dawn::native::Instance> sInstance;
+WGPUProcDeviceCreateSwapChain sOriginalDeviceCreateSwapChain = nullptr;
+
+bool sCommandsComplete = false;
+
+WGPUSwapChain ErrorDeviceCreateSwapChain(WGPUDevice device,
+                                         WGPUSurface surface,
+                                         const WGPUSwapChainDescriptor*) {
+    WGPUSwapChainDescriptor desc = {};
+    // A 0 implementation will trigger a swapchain creation error.
+    desc.implementation = 0;
+    return sOriginalDeviceCreateSwapChain(device, surface, &desc);
+}
 
 }  // namespace
 
diff --git a/src/dawn/fuzzers/DawnWireServerFuzzer.h b/src/dawn/fuzzers/DawnWireServerFuzzer.h
index d6349ce..5ebc6b3 100644
--- a/src/dawn/fuzzers/DawnWireServerFuzzer.h
+++ b/src/dawn/fuzzers/DawnWireServerFuzzer.h
@@ -22,17 +22,17 @@
 
 namespace dawn::native {
 
-    class Instance;
+class Instance;
 
 }  // namespace dawn::native
 
 namespace DawnWireServerFuzzer {
 
-    using MakeDeviceFn = std::function<wgpu::Device(dawn::native::Instance*)>;
+using MakeDeviceFn = std::function<wgpu::Device(dawn::native::Instance*)>;
 
-    int Initialize(int* argc, char*** argv);
+int Initialize(int* argc, char*** argv);
 
-    int Run(const uint8_t* data, size_t size, MakeDeviceFn MakeDevice, bool supportsErrorInjection);
+int Run(const uint8_t* data, size_t size, MakeDeviceFn MakeDevice, bool supportsErrorInjection);
 
 }  // namespace DawnWireServerFuzzer
 
diff --git a/src/dawn/native/Adapter.cpp b/src/dawn/native/Adapter.cpp
index c6b1039..b24e920 100644
--- a/src/dawn/native/Adapter.cpp
+++ b/src/dawn/native/Adapter.cpp
@@ -24,207 +24,206 @@
 
 namespace dawn::native {
 
-    AdapterBase::AdapterBase(InstanceBase* instance, wgpu::BackendType backend)
-        : mInstance(instance), mBackend(backend) {
-        mSupportedFeatures.EnableFeature(Feature::DawnNative);
-        mSupportedFeatures.EnableFeature(Feature::DawnInternalUsages);
+AdapterBase::AdapterBase(InstanceBase* instance, wgpu::BackendType backend)
+    : mInstance(instance), mBackend(backend) {
+    mSupportedFeatures.EnableFeature(Feature::DawnNative);
+    mSupportedFeatures.EnableFeature(Feature::DawnInternalUsages);
+}
+
+MaybeError AdapterBase::Initialize() {
+    DAWN_TRY_CONTEXT(InitializeImpl(), "initializing adapter (backend=%s)", mBackend);
+    DAWN_TRY_CONTEXT(
+        InitializeSupportedFeaturesImpl(),
+        "gathering supported features for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
+        "backend=%s type=%s)",
+        mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
+    DAWN_TRY_CONTEXT(
+        InitializeSupportedLimitsImpl(&mLimits),
+        "gathering supported limits for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
+        "backend=%s type=%s)",
+        mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
+
+    // Enforce internal Dawn constants.
+    mLimits.v1.maxVertexBufferArrayStride =
+        std::min(mLimits.v1.maxVertexBufferArrayStride, kMaxVertexBufferArrayStride);
+    mLimits.v1.maxBindGroups = std::min(mLimits.v1.maxBindGroups, kMaxBindGroups);
+    mLimits.v1.maxVertexAttributes =
+        std::min(mLimits.v1.maxVertexAttributes, uint32_t(kMaxVertexAttributes));
+    mLimits.v1.maxVertexBuffers =
+        std::min(mLimits.v1.maxVertexBuffers, uint32_t(kMaxVertexBuffers));
+    mLimits.v1.maxInterStageShaderComponents =
+        std::min(mLimits.v1.maxInterStageShaderComponents, kMaxInterStageShaderComponents);
+    mLimits.v1.maxSampledTexturesPerShaderStage =
+        std::min(mLimits.v1.maxSampledTexturesPerShaderStage, kMaxSampledTexturesPerShaderStage);
+    mLimits.v1.maxSamplersPerShaderStage =
+        std::min(mLimits.v1.maxSamplersPerShaderStage, kMaxSamplersPerShaderStage);
+    mLimits.v1.maxStorageBuffersPerShaderStage =
+        std::min(mLimits.v1.maxStorageBuffersPerShaderStage, kMaxStorageBuffersPerShaderStage);
+    mLimits.v1.maxStorageTexturesPerShaderStage =
+        std::min(mLimits.v1.maxStorageTexturesPerShaderStage, kMaxStorageTexturesPerShaderStage);
+    mLimits.v1.maxUniformBuffersPerShaderStage =
+        std::min(mLimits.v1.maxUniformBuffersPerShaderStage, kMaxUniformBuffersPerShaderStage);
+    mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout =
+        std::min(mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout,
+                 kMaxDynamicUniformBuffersPerPipelineLayout);
+    mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout =
+        std::min(mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout,
+                 kMaxDynamicStorageBuffersPerPipelineLayout);
+
+    return {};
+}
+
+bool AdapterBase::APIGetLimits(SupportedLimits* limits) const {
+    return GetLimits(limits);
+}
+
+void AdapterBase::APIGetProperties(AdapterProperties* properties) const {
+    properties->vendorID = mVendorId;
+    properties->deviceID = mDeviceId;
+    properties->name = mName.c_str();
+    properties->driverDescription = mDriverDescription.c_str();
+    properties->adapterType = mAdapterType;
+    properties->backendType = mBackend;
+}
+
+bool AdapterBase::APIHasFeature(wgpu::FeatureName feature) const {
+    return mSupportedFeatures.IsEnabled(feature);
+}
+
+size_t AdapterBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
+    return mSupportedFeatures.EnumerateFeatures(features);
+}
+
+DeviceBase* AdapterBase::APICreateDevice(const DeviceDescriptor* descriptor) {
+    DeviceDescriptor defaultDesc = {};
+    if (descriptor == nullptr) {
+        descriptor = &defaultDesc;
     }
-
-    MaybeError AdapterBase::Initialize() {
-        DAWN_TRY_CONTEXT(InitializeImpl(), "initializing adapter (backend=%s)", mBackend);
-        DAWN_TRY_CONTEXT(
-            InitializeSupportedFeaturesImpl(),
-            "gathering supported features for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
-            "backend=%s type=%s)",
-            mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
-        DAWN_TRY_CONTEXT(
-            InitializeSupportedLimitsImpl(&mLimits),
-            "gathering supported limits for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
-            "backend=%s type=%s)",
-            mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
-
-        // Enforce internal Dawn constants.
-        mLimits.v1.maxVertexBufferArrayStride =
-            std::min(mLimits.v1.maxVertexBufferArrayStride, kMaxVertexBufferArrayStride);
-        mLimits.v1.maxBindGroups = std::min(mLimits.v1.maxBindGroups, kMaxBindGroups);
-        mLimits.v1.maxVertexAttributes =
-            std::min(mLimits.v1.maxVertexAttributes, uint32_t(kMaxVertexAttributes));
-        mLimits.v1.maxVertexBuffers =
-            std::min(mLimits.v1.maxVertexBuffers, uint32_t(kMaxVertexBuffers));
-        mLimits.v1.maxInterStageShaderComponents =
-            std::min(mLimits.v1.maxInterStageShaderComponents, kMaxInterStageShaderComponents);
-        mLimits.v1.maxSampledTexturesPerShaderStage = std::min(
-            mLimits.v1.maxSampledTexturesPerShaderStage, kMaxSampledTexturesPerShaderStage);
-        mLimits.v1.maxSamplersPerShaderStage =
-            std::min(mLimits.v1.maxSamplersPerShaderStage, kMaxSamplersPerShaderStage);
-        mLimits.v1.maxStorageBuffersPerShaderStage =
-            std::min(mLimits.v1.maxStorageBuffersPerShaderStage, kMaxStorageBuffersPerShaderStage);
-        mLimits.v1.maxStorageTexturesPerShaderStage = std::min(
-            mLimits.v1.maxStorageTexturesPerShaderStage, kMaxStorageTexturesPerShaderStage);
-        mLimits.v1.maxUniformBuffersPerShaderStage =
-            std::min(mLimits.v1.maxUniformBuffersPerShaderStage, kMaxUniformBuffersPerShaderStage);
-        mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout =
-            std::min(mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout,
-                     kMaxDynamicUniformBuffersPerPipelineLayout);
-        mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout =
-            std::min(mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout,
-                     kMaxDynamicStorageBuffersPerPipelineLayout);
-
-        return {};
+    auto result = CreateDeviceInternal(descriptor);
+    if (result.IsError()) {
+        mInstance->ConsumedError(result.AcquireError());
+        return nullptr;
     }
+    return result.AcquireSuccess().Detach();
+}
 
-    bool AdapterBase::APIGetLimits(SupportedLimits* limits) const {
-        return GetLimits(limits);
+void AdapterBase::APIRequestDevice(const DeviceDescriptor* descriptor,
+                                   WGPURequestDeviceCallback callback,
+                                   void* userdata) {
+    static constexpr DeviceDescriptor kDefaultDescriptor = {};
+    if (descriptor == nullptr) {
+        descriptor = &kDefaultDescriptor;
     }
+    auto result = CreateDeviceInternal(descriptor);
 
-    void AdapterBase::APIGetProperties(AdapterProperties* properties) const {
-        properties->vendorID = mVendorId;
-        properties->deviceID = mDeviceId;
-        properties->name = mName.c_str();
-        properties->driverDescription = mDriverDescription.c_str();
-        properties->adapterType = mAdapterType;
-        properties->backendType = mBackend;
-    }
-
-    bool AdapterBase::APIHasFeature(wgpu::FeatureName feature) const {
-        return mSupportedFeatures.IsEnabled(feature);
-    }
-
-    size_t AdapterBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
-        return mSupportedFeatures.EnumerateFeatures(features);
-    }
-
-    DeviceBase* AdapterBase::APICreateDevice(const DeviceDescriptor* descriptor) {
-        DeviceDescriptor defaultDesc = {};
-        if (descriptor == nullptr) {
-            descriptor = &defaultDesc;
-        }
-        auto result = CreateDeviceInternal(descriptor);
-        if (result.IsError()) {
-            mInstance->ConsumedError(result.AcquireError());
-            return nullptr;
-        }
-        return result.AcquireSuccess().Detach();
-    }
-
-    void AdapterBase::APIRequestDevice(const DeviceDescriptor* descriptor,
-                                       WGPURequestDeviceCallback callback,
-                                       void* userdata) {
-        static constexpr DeviceDescriptor kDefaultDescriptor = {};
-        if (descriptor == nullptr) {
-            descriptor = &kDefaultDescriptor;
-        }
-        auto result = CreateDeviceInternal(descriptor);
-
-        if (result.IsError()) {
-            std::unique_ptr<ErrorData> errorData = result.AcquireError();
-            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
-            callback(WGPURequestDeviceStatus_Error, nullptr,
-                     errorData->GetFormattedMessage().c_str(), userdata);
-            return;
-        }
-
-        Ref<DeviceBase> device = result.AcquireSuccess();
-
-        WGPURequestDeviceStatus status =
-            device == nullptr ? WGPURequestDeviceStatus_Unknown : WGPURequestDeviceStatus_Success;
+    if (result.IsError()) {
+        std::unique_ptr<ErrorData> errorData = result.AcquireError();
         // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
-        callback(status, ToAPI(device.Detach()), nullptr, userdata);
+        callback(WGPURequestDeviceStatus_Error, nullptr, errorData->GetFormattedMessage().c_str(),
+                 userdata);
+        return;
     }
 
-    uint32_t AdapterBase::GetVendorId() const {
-        return mVendorId;
-    }
+    Ref<DeviceBase> device = result.AcquireSuccess();
 
-    uint32_t AdapterBase::GetDeviceId() const {
-        return mDeviceId;
-    }
+    WGPURequestDeviceStatus status =
+        device == nullptr ? WGPURequestDeviceStatus_Unknown : WGPURequestDeviceStatus_Success;
+    // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+    callback(status, ToAPI(device.Detach()), nullptr, userdata);
+}
 
-    wgpu::BackendType AdapterBase::GetBackendType() const {
-        return mBackend;
-    }
+uint32_t AdapterBase::GetVendorId() const {
+    return mVendorId;
+}
 
-    InstanceBase* AdapterBase::GetInstance() const {
-        return mInstance;
-    }
+uint32_t AdapterBase::GetDeviceId() const {
+    return mDeviceId;
+}
 
-    FeaturesSet AdapterBase::GetSupportedFeatures() const {
-        return mSupportedFeatures;
-    }
+wgpu::BackendType AdapterBase::GetBackendType() const {
+    return mBackend;
+}
 
-    bool AdapterBase::SupportsAllRequiredFeatures(
-        const ityp::span<size_t, const wgpu::FeatureName>& features) const {
-        for (wgpu::FeatureName f : features) {
-            if (!mSupportedFeatures.IsEnabled(f)) {
-                return false;
-            }
-        }
-        return true;
-    }
+InstanceBase* AdapterBase::GetInstance() const {
+    return mInstance;
+}
 
-    WGPUDeviceProperties AdapterBase::GetAdapterProperties() const {
-        WGPUDeviceProperties adapterProperties = {};
-        adapterProperties.deviceID = mDeviceId;
-        adapterProperties.vendorID = mVendorId;
-        adapterProperties.adapterType = static_cast<WGPUAdapterType>(mAdapterType);
+FeaturesSet AdapterBase::GetSupportedFeatures() const {
+    return mSupportedFeatures;
+}
 
-        mSupportedFeatures.InitializeDeviceProperties(&adapterProperties);
-        // This is OK for now because there are no limit feature structs.
-        // If we add additional structs, the caller will need to provide memory
-        // to store them (ex. by calling GetLimits directly instead). Currently,
-        // we keep this function as it's only used internally in Chromium to
-        // send the adapter properties across the wire.
-        GetLimits(FromAPI(&adapterProperties.limits));
-        return adapterProperties;
-    }
-
-    bool AdapterBase::GetLimits(SupportedLimits* limits) const {
-        ASSERT(limits != nullptr);
-        if (limits->nextInChain != nullptr) {
+bool AdapterBase::SupportsAllRequiredFeatures(
+    const ityp::span<size_t, const wgpu::FeatureName>& features) const {
+    for (wgpu::FeatureName f : features) {
+        if (!mSupportedFeatures.IsEnabled(f)) {
             return false;
         }
-        if (mUseTieredLimits) {
-            limits->limits = ApplyLimitTiers(mLimits.v1);
-        } else {
-            limits->limits = mLimits.v1;
-        }
-        return true;
+    }
+    return true;
+}
+
+WGPUDeviceProperties AdapterBase::GetAdapterProperties() const {
+    WGPUDeviceProperties adapterProperties = {};
+    adapterProperties.deviceID = mDeviceId;
+    adapterProperties.vendorID = mVendorId;
+    adapterProperties.adapterType = static_cast<WGPUAdapterType>(mAdapterType);
+
+    mSupportedFeatures.InitializeDeviceProperties(&adapterProperties);
+    // This is OK for now because there are no limit feature structs.
+    // If we add additional structs, the caller will need to provide memory
+    // to store them (ex. by calling GetLimits directly instead). Currently,
+    // we keep this function as it's only used internally in Chromium to
+    // send the adapter properties across the wire.
+    GetLimits(FromAPI(&adapterProperties.limits));
+    return adapterProperties;
+}
+
+bool AdapterBase::GetLimits(SupportedLimits* limits) const {
+    ASSERT(limits != nullptr);
+    if (limits->nextInChain != nullptr) {
+        return false;
+    }
+    if (mUseTieredLimits) {
+        limits->limits = ApplyLimitTiers(mLimits.v1);
+    } else {
+        limits->limits = mLimits.v1;
+    }
+    return true;
+}
+
+ResultOrError<Ref<DeviceBase>> AdapterBase::CreateDeviceInternal(
+    const DeviceDescriptor* descriptor) {
+    ASSERT(descriptor != nullptr);
+
+    for (uint32_t i = 0; i < descriptor->requiredFeaturesCount; ++i) {
+        wgpu::FeatureName f = descriptor->requiredFeatures[i];
+        DAWN_TRY(ValidateFeatureName(f));
+        DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(f), "Requested feature %s is not supported.",
+                        f);
     }
 
-    ResultOrError<Ref<DeviceBase>> AdapterBase::CreateDeviceInternal(
-        const DeviceDescriptor* descriptor) {
-        ASSERT(descriptor != nullptr);
+    if (descriptor->requiredLimits != nullptr) {
+        DAWN_TRY_CONTEXT(ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
+                                        descriptor->requiredLimits->limits),
+                         "validating required limits");
 
-        for (uint32_t i = 0; i < descriptor->requiredFeaturesCount; ++i) {
-            wgpu::FeatureName f = descriptor->requiredFeatures[i];
-            DAWN_TRY(ValidateFeatureName(f));
-            DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(f),
-                            "Requested feature %s is not supported.", f);
-        }
-
-        if (descriptor->requiredLimits != nullptr) {
-            DAWN_TRY_CONTEXT(
-                ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
-                               descriptor->requiredLimits->limits),
-                "validating required limits");
-
-            DAWN_INVALID_IF(descriptor->requiredLimits->nextInChain != nullptr,
-                            "nextInChain is not nullptr.");
-        }
-        return CreateDeviceImpl(descriptor);
+        DAWN_INVALID_IF(descriptor->requiredLimits->nextInChain != nullptr,
+                        "nextInChain is not nullptr.");
     }
+    return CreateDeviceImpl(descriptor);
+}
 
-    void AdapterBase::SetUseTieredLimits(bool useTieredLimits) {
-        mUseTieredLimits = useTieredLimits;
-    }
+void AdapterBase::SetUseTieredLimits(bool useTieredLimits) {
+    mUseTieredLimits = useTieredLimits;
+}
 
-    void AdapterBase::ResetInternalDeviceForTesting() {
-        mInstance->ConsumedError(ResetInternalDeviceForTestingImpl());
-    }
+void AdapterBase::ResetInternalDeviceForTesting() {
+    mInstance->ConsumedError(ResetInternalDeviceForTestingImpl());
+}
 
-    MaybeError AdapterBase::ResetInternalDeviceForTestingImpl() {
-        return DAWN_INTERNAL_ERROR(
-            "ResetInternalDeviceForTesting should only be used with the D3D12 backend.");
-    }
+MaybeError AdapterBase::ResetInternalDeviceForTestingImpl() {
+    return DAWN_INTERNAL_ERROR(
+        "ResetInternalDeviceForTesting should only be used with the D3D12 backend.");
+}
 
 }  // namespace dawn::native
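
Note: CreateDeviceInternal() above is the common validation funnel behind both APICreateDevice()
and APIRequestDevice(). A minimal sketch of a caller driving it, assuming the dawn::native::Adapter
wrapper exposes CreateDevice() and that the adapter variable, feature choice, and limit value are
illustrative (error handling omitted):

    // Sketch only: assumes `adapter` is a valid dawn::native::Adapter.
    wgpu::FeatureName requiredFeatures[] = {wgpu::FeatureName::TimestampQuery};

    wgpu::RequiredLimits requiredLimits = {};
    requiredLimits.limits.maxStorageBufferBindingSize = 256 * 1024 * 1024;

    wgpu::DeviceDescriptor desc = {};
    desc.requiredFeaturesCount = 1;
    desc.requiredFeatures = requiredFeatures;
    desc.requiredLimits = &requiredLimits;

    // Fails with the DAWN_INVALID_IF diagnostics above if a feature is
    // unsupported or a required limit exceeds the (possibly tiered) limits.
    WGPUDevice device = adapter.CreateDevice(&desc);
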
diff --git a/src/dawn/native/Adapter.h b/src/dawn/native/Adapter.h
index d0f7cf1..9a1b24f 100644
--- a/src/dawn/native/Adapter.h
+++ b/src/dawn/native/Adapter.h
@@ -28,71 +28,70 @@
 
 namespace dawn::native {
 
-    class DeviceBase;
+class DeviceBase;
 
-    class AdapterBase : public RefCounted {
-      public:
-        AdapterBase(InstanceBase* instance, wgpu::BackendType backend);
-        virtual ~AdapterBase() = default;
+class AdapterBase : public RefCounted {
+  public:
+    AdapterBase(InstanceBase* instance, wgpu::BackendType backend);
+    virtual ~AdapterBase() = default;
 
-        MaybeError Initialize();
+    MaybeError Initialize();
 
-        // WebGPU API
-        bool APIGetLimits(SupportedLimits* limits) const;
-        void APIGetProperties(AdapterProperties* properties) const;
-        bool APIHasFeature(wgpu::FeatureName feature) const;
-        size_t APIEnumerateFeatures(wgpu::FeatureName* features) const;
-        void APIRequestDevice(const DeviceDescriptor* descriptor,
-                              WGPURequestDeviceCallback callback,
-                              void* userdata);
-        DeviceBase* APICreateDevice(const DeviceDescriptor* descriptor = nullptr);
+    // WebGPU API
+    bool APIGetLimits(SupportedLimits* limits) const;
+    void APIGetProperties(AdapterProperties* properties) const;
+    bool APIHasFeature(wgpu::FeatureName feature) const;
+    size_t APIEnumerateFeatures(wgpu::FeatureName* features) const;
+    void APIRequestDevice(const DeviceDescriptor* descriptor,
+                          WGPURequestDeviceCallback callback,
+                          void* userdata);
+    DeviceBase* APICreateDevice(const DeviceDescriptor* descriptor = nullptr);
 
-        uint32_t GetVendorId() const;
-        uint32_t GetDeviceId() const;
-        wgpu::BackendType GetBackendType() const;
-        InstanceBase* GetInstance() const;
+    uint32_t GetVendorId() const;
+    uint32_t GetDeviceId() const;
+    wgpu::BackendType GetBackendType() const;
+    InstanceBase* GetInstance() const;
 
-        void ResetInternalDeviceForTesting();
+    void ResetInternalDeviceForTesting();
 
-        FeaturesSet GetSupportedFeatures() const;
-        bool SupportsAllRequiredFeatures(
-            const ityp::span<size_t, const wgpu::FeatureName>& features) const;
-        WGPUDeviceProperties GetAdapterProperties() const;
+    FeaturesSet GetSupportedFeatures() const;
+    bool SupportsAllRequiredFeatures(
+        const ityp::span<size_t, const wgpu::FeatureName>& features) const;
+    WGPUDeviceProperties GetAdapterProperties() const;
 
-        bool GetLimits(SupportedLimits* limits) const;
+    bool GetLimits(SupportedLimits* limits) const;
 
-        void SetUseTieredLimits(bool useTieredLimits);
+    void SetUseTieredLimits(bool useTieredLimits);
 
-        virtual bool SupportsExternalImages() const = 0;
+    virtual bool SupportsExternalImages() const = 0;
 
-      protected:
-        uint32_t mVendorId = 0xFFFFFFFF;
-        uint32_t mDeviceId = 0xFFFFFFFF;
-        std::string mName;
-        wgpu::AdapterType mAdapterType = wgpu::AdapterType::Unknown;
-        std::string mDriverDescription;
-        FeaturesSet mSupportedFeatures;
+  protected:
+    uint32_t mVendorId = 0xFFFFFFFF;
+    uint32_t mDeviceId = 0xFFFFFFFF;
+    std::string mName;
+    wgpu::AdapterType mAdapterType = wgpu::AdapterType::Unknown;
+    std::string mDriverDescription;
+    FeaturesSet mSupportedFeatures;
 
-      private:
-        virtual ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
-            const DeviceDescriptor* descriptor) = 0;
+  private:
+    virtual ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) = 0;
 
-        virtual MaybeError InitializeImpl() = 0;
+    virtual MaybeError InitializeImpl() = 0;
 
-        // Check base WebGPU features and discover supported featurees.
-        virtual MaybeError InitializeSupportedFeaturesImpl() = 0;
+    // Check base WebGPU features and discover supported features.
+    virtual MaybeError InitializeSupportedFeaturesImpl() = 0;
 
-        // Check base WebGPU limits and populate supported limits.
-        virtual MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) = 0;
+    // Check base WebGPU limits and populate supported limits.
+    virtual MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) = 0;
 
-        ResultOrError<Ref<DeviceBase>> CreateDeviceInternal(const DeviceDescriptor* descriptor);
+    ResultOrError<Ref<DeviceBase>> CreateDeviceInternal(const DeviceDescriptor* descriptor);
 
-        virtual MaybeError ResetInternalDeviceForTestingImpl();
-        InstanceBase* mInstance = nullptr;
-        wgpu::BackendType mBackend;
-        CombinedLimits mLimits;
-        bool mUseTieredLimits = false;
-    };
+    virtual MaybeError ResetInternalDeviceForTestingImpl();
+    InstanceBase* mInstance = nullptr;
+    wgpu::BackendType mBackend;
+    CombinedLimits mLimits;
+    bool mUseTieredLimits = false;
+};
 
 }  // namespace dawn::native
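
Note: the header makes the backend contract explicit: four pure-virtual hooks plus
SupportsExternalImages(). A hypothetical skeleton, where NullAdapter and Device::Create are
illustrative names rather than the real backend classes:

    class NullAdapter : public AdapterBase {
      public:
        explicit NullAdapter(InstanceBase* instance)
            : AdapterBase(instance, wgpu::BackendType::Null) {}

        bool SupportsExternalImages() const override { return false; }

      private:
        MaybeError InitializeImpl() override { return {}; }
        MaybeError InitializeSupportedFeaturesImpl() override {
            // A real backend queries the driver; nothing is enabled here.
            return {};
        }
        MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override {
            GetDefaultLimits(&limits->v1);  // assumed helper; backends normally query the driver
            return {};
        }
        ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* desc) override {
            return Device::Create(this, desc);  // illustrative
        }
    };
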
 
diff --git a/src/dawn/native/AsyncTask.cpp b/src/dawn/native/AsyncTask.cpp
index b1af966..00e6a64 100644
--- a/src/dawn/native/AsyncTask.cpp
+++ b/src/dawn/native/AsyncTask.cpp
@@ -20,62 +20,61 @@
 
 namespace dawn::native {
 
-    AsyncTaskManager::AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool)
-        : mWorkerTaskPool(workerTaskPool) {
-    }
+AsyncTaskManager::AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool)
+    : mWorkerTaskPool(workerTaskPool) {}
 
-    void AsyncTaskManager::PostTask(AsyncTask asyncTask) {
-        // If these allocations becomes expensive, we can slab-allocate tasks.
-        Ref<WaitableTask> waitableTask = AcquireRef(new WaitableTask());
-        waitableTask->taskManager = this;
-        waitableTask->asyncTask = std::move(asyncTask);
+void AsyncTaskManager::PostTask(AsyncTask asyncTask) {
+    // If these allocations become expensive, we can slab-allocate tasks.
+    Ref<WaitableTask> waitableTask = AcquireRef(new WaitableTask());
+    waitableTask->taskManager = this;
+    waitableTask->asyncTask = std::move(asyncTask);
 
-        {
-            // We insert new waitableTask objects into mPendingTasks in main thread (PostTask()),
-            // and we may remove waitableTask objects from mPendingTasks in either main thread
-            // (WaitAllPendingTasks()) or sub-thread (TaskCompleted), so mPendingTasks should be
-            // protected by a mutex.
-            std::lock_guard<std::mutex> lock(mPendingTasksMutex);
-            mPendingTasks.emplace(waitableTask.Get(), waitableTask);
-        }
-
-        // Ref the task since it is accessed inside the worker function.
-        // The worker function will acquire and release the task upon completion.
-        waitableTask->Reference();
-        waitableTask->waitableEvent =
-            mWorkerTaskPool->PostWorkerTask(DoWaitableTask, waitableTask.Get());
-    }
-
-    void AsyncTaskManager::HandleTaskCompletion(WaitableTask* task) {
+    {
+        // We insert new waitableTask objects into mPendingTasks on the main thread
+        // (PostTask()), and may remove them on either the main thread
+        // (WaitAllPendingTasks()) or a worker thread (HandleTaskCompletion()), so
+        // mPendingTasks must be protected by a mutex.
         std::lock_guard<std::mutex> lock(mPendingTasksMutex);
-        auto iter = mPendingTasks.find(task);
-        if (iter != mPendingTasks.end()) {
-            mPendingTasks.erase(iter);
-        }
+        mPendingTasks.emplace(waitableTask.Get(), waitableTask);
     }
 
-    void AsyncTaskManager::WaitAllPendingTasks() {
-        std::unordered_map<WaitableTask*, Ref<WaitableTask>> allPendingTasks;
+    // Ref the task since it is accessed inside the worker function.
+    // The worker function will acquire and release the task upon completion.
+    waitableTask->Reference();
+    waitableTask->waitableEvent =
+        mWorkerTaskPool->PostWorkerTask(DoWaitableTask, waitableTask.Get());
+}
 
-        {
-            std::lock_guard<std::mutex> lock(mPendingTasksMutex);
-            allPendingTasks.swap(mPendingTasks);
-        }
-
-        for (auto& [_, task] : allPendingTasks) {
-            task->waitableEvent->Wait();
-        }
+void AsyncTaskManager::HandleTaskCompletion(WaitableTask* task) {
+    std::lock_guard<std::mutex> lock(mPendingTasksMutex);
+    auto iter = mPendingTasks.find(task);
+    if (iter != mPendingTasks.end()) {
+        mPendingTasks.erase(iter);
     }
+}
 
-    bool AsyncTaskManager::HasPendingTasks() {
+void AsyncTaskManager::WaitAllPendingTasks() {
+    std::unordered_map<WaitableTask*, Ref<WaitableTask>> allPendingTasks;
+
+    {
         std::lock_guard<std::mutex> lock(mPendingTasksMutex);
-        return !mPendingTasks.empty();
+        allPendingTasks.swap(mPendingTasks);
     }
 
-    void AsyncTaskManager::DoWaitableTask(void* task) {
-        Ref<WaitableTask> waitableTask = AcquireRef(static_cast<WaitableTask*>(task));
-        waitableTask->asyncTask();
-        waitableTask->taskManager->HandleTaskCompletion(waitableTask.Get());
+    for (auto& [_, task] : allPendingTasks) {
+        task->waitableEvent->Wait();
     }
+}
+
+bool AsyncTaskManager::HasPendingTasks() {
+    std::lock_guard<std::mutex> lock(mPendingTasksMutex);
+    return !mPendingTasks.empty();
+}
+
+void AsyncTaskManager::DoWaitableTask(void* task) {
+    Ref<WaitableTask> waitableTask = AcquireRef(static_cast<WaitableTask*>(task));
+    waitableTask->asyncTask();
+    waitableTask->taskManager->HandleTaskCompletion(waitableTask.Get());
+}
 
 }  // namespace dawn::native
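
Note: a usage sketch for the manager above. The worker pool comes from the platform layer; `pool`
and CompileShaderBlocking() are assumptions made for illustration:

    // Assumes `pool` is a dawn::platform::WorkerTaskPool* owned by the platform.
    AsyncTaskManager taskManager(pool);

    taskManager.PostTask([]() {
        // Runs on a worker thread. When it returns, DoWaitableTask() calls
        // HandleTaskCompletion(), which erases the bookkeeping entry.
        CompileShaderBlocking();  // hypothetical work item
    });

    // Shutdown path: block until every outstanding task has finished.
    taskManager.WaitAllPendingTasks();
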
diff --git a/src/dawn/native/AsyncTask.h b/src/dawn/native/AsyncTask.h
index b71c80e..d2c28fa 100644
--- a/src/dawn/native/AsyncTask.h
+++ b/src/dawn/native/AsyncTask.h
@@ -23,43 +23,43 @@
 #include "dawn/common/RefCounted.h"
 
 namespace dawn::platform {
-    class WaitableEvent;
-    class WorkerTaskPool;
+class WaitableEvent;
+class WorkerTaskPool;
 }  // namespace dawn::platform
 
 namespace dawn::native {
 
-    // TODO(crbug.com/dawn/826): we'll add additional things to AsyncTask in the future, like
-    // Cancel() and RunNow(). Cancelling helps avoid running the task's body when we are just
-    // shutting down the device. RunNow() could be used for more advanced scenarios, for example
-    // always doing ShaderModule initial compilation asynchronously, but being able to steal the
-    // task if we need it for synchronous pipeline compilation.
-    using AsyncTask = std::function<void()>;
+// TODO(crbug.com/dawn/826): we'll add additional things to AsyncTask in the future, like
+// Cancel() and RunNow(). Cancelling helps avoid running the task's body when we are just
+// shutting down the device. RunNow() could be used for more advanced scenarios, for example
+// always doing ShaderModule initial compilation asynchronously, but being able to steal the
+// task if we need it for synchronous pipeline compilation.
+using AsyncTask = std::function<void()>;
 
-    class AsyncTaskManager {
+class AsyncTaskManager {
+  public:
+    explicit AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool);
+
+    void PostTask(AsyncTask asyncTask);
+    void WaitAllPendingTasks();
+    bool HasPendingTasks();
+
+  private:
+    class WaitableTask : public RefCounted {
       public:
-        explicit AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool);
-
-        void PostTask(AsyncTask asyncTask);
-        void WaitAllPendingTasks();
-        bool HasPendingTasks();
-
-      private:
-        class WaitableTask : public RefCounted {
-          public:
-            AsyncTask asyncTask;
-            AsyncTaskManager* taskManager;
-            std::unique_ptr<dawn::platform::WaitableEvent> waitableEvent;
-        };
-
-        static void DoWaitableTask(void* task);
-        void HandleTaskCompletion(WaitableTask* task);
-
-        std::mutex mPendingTasksMutex;
-        std::unordered_map<WaitableTask*, Ref<WaitableTask>> mPendingTasks;
-        dawn::platform::WorkerTaskPool* mWorkerTaskPool;
+        AsyncTask asyncTask;
+        AsyncTaskManager* taskManager;
+        std::unique_ptr<dawn::platform::WaitableEvent> waitableEvent;
     };
 
+    static void DoWaitableTask(void* task);
+    void HandleTaskCompletion(WaitableTask* task);
+
+    std::mutex mPendingTasksMutex;
+    std::unordered_map<WaitableTask*, Ref<WaitableTask>> mPendingTasks;
+    dawn::platform::WorkerTaskPool* mWorkerTaskPool;
+};
+
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_ASYNCTASK_H_
diff --git a/src/dawn/native/AttachmentState.cpp b/src/dawn/native/AttachmentState.cpp
index 1e38d9d..bbb8ecd 100644
--- a/src/dawn/native/AttachmentState.cpp
+++ b/src/dawn/native/AttachmentState.cpp
@@ -21,155 +21,148 @@
 
 namespace dawn::native {
 
-    AttachmentStateBlueprint::AttachmentStateBlueprint(
-        const RenderBundleEncoderDescriptor* descriptor)
-        : mSampleCount(descriptor->sampleCount) {
-        ASSERT(descriptor->colorFormatsCount <= kMaxColorAttachments);
+AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor)
+    : mSampleCount(descriptor->sampleCount) {
+    ASSERT(descriptor->colorFormatsCount <= kMaxColorAttachments);
+    for (ColorAttachmentIndex i(uint8_t(0));
+         i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorFormatsCount)); ++i) {
+        wgpu::TextureFormat format = descriptor->colorFormats[static_cast<uint8_t>(i)];
+        if (format != wgpu::TextureFormat::Undefined) {
+            mColorAttachmentsSet.set(i);
+            mColorFormats[i] = format;
+        }
+    }
+    mDepthStencilFormat = descriptor->depthStencilFormat;
+}
+
+AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor)
+    : mSampleCount(descriptor->multisample.count) {
+    if (descriptor->fragment != nullptr) {
+        ASSERT(descriptor->fragment->targetCount <= kMaxColorAttachments);
         for (ColorAttachmentIndex i(uint8_t(0));
-             i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorFormatsCount)); ++i) {
-            wgpu::TextureFormat format = descriptor->colorFormats[static_cast<uint8_t>(i)];
+             i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->fragment->targetCount));
+             ++i) {
+            wgpu::TextureFormat format =
+                descriptor->fragment->targets[static_cast<uint8_t>(i)].format;
             if (format != wgpu::TextureFormat::Undefined) {
                 mColorAttachmentsSet.set(i);
                 mColorFormats[i] = format;
             }
         }
-        mDepthStencilFormat = descriptor->depthStencilFormat;
     }
+    if (descriptor->depthStencil != nullptr) {
+        mDepthStencilFormat = descriptor->depthStencil->format;
+    }
+}
 
-    AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor)
-        : mSampleCount(descriptor->multisample.count) {
-        if (descriptor->fragment != nullptr) {
-            ASSERT(descriptor->fragment->targetCount <= kMaxColorAttachments);
-            for (ColorAttachmentIndex i(uint8_t(0));
-                 i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->fragment->targetCount));
-                 ++i) {
-                wgpu::TextureFormat format =
-                    descriptor->fragment->targets[static_cast<uint8_t>(i)].format;
-                if (format != wgpu::TextureFormat::Undefined) {
-                    mColorAttachmentsSet.set(i);
-                    mColorFormats[i] = format;
-                }
-            }
+AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPassDescriptor* descriptor) {
+    for (ColorAttachmentIndex i(uint8_t(0));
+         i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount)); ++i) {
+        TextureViewBase* attachment = descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
+        if (attachment == nullptr) {
+            continue;
         }
-        if (descriptor->depthStencil != nullptr) {
-            mDepthStencilFormat = descriptor->depthStencil->format;
+        mColorAttachmentsSet.set(i);
+        mColorFormats[i] = attachment->GetFormat().format;
+        if (mSampleCount == 0) {
+            mSampleCount = attachment->GetTexture()->GetSampleCount();
+        } else {
+            ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
         }
     }
+    if (descriptor->depthStencilAttachment != nullptr) {
+        TextureViewBase* attachment = descriptor->depthStencilAttachment->view;
+        mDepthStencilFormat = attachment->GetFormat().format;
+        if (mSampleCount == 0) {
+            mSampleCount = attachment->GetTexture()->GetSampleCount();
+        } else {
+            ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
+        }
+    }
+    ASSERT(mSampleCount > 0);
+}
 
-    AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPassDescriptor* descriptor) {
-        for (ColorAttachmentIndex i(uint8_t(0));
-             i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount));
-             ++i) {
-            TextureViewBase* attachment =
-                descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
-            if (attachment == nullptr) {
-                continue;
-            }
-            mColorAttachmentsSet.set(i);
-            mColorFormats[i] = attachment->GetFormat().format;
-            if (mSampleCount == 0) {
-                mSampleCount = attachment->GetTexture()->GetSampleCount();
-            } else {
-                ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
-            }
-        }
-        if (descriptor->depthStencilAttachment != nullptr) {
-            TextureViewBase* attachment = descriptor->depthStencilAttachment->view;
-            mDepthStencilFormat = attachment->GetFormat().format;
-            if (mSampleCount == 0) {
-                mSampleCount = attachment->GetTexture()->GetSampleCount();
-            } else {
-                ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
-            }
-        }
-        ASSERT(mSampleCount > 0);
+AttachmentStateBlueprint::AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs) = default;
+
+size_t AttachmentStateBlueprint::HashFunc::operator()(
+    const AttachmentStateBlueprint* attachmentState) const {
+    size_t hash = 0;
+
+    // Hash color formats
+    HashCombine(&hash, attachmentState->mColorAttachmentsSet);
+    for (ColorAttachmentIndex i : IterateBitSet(attachmentState->mColorAttachmentsSet)) {
+        HashCombine(&hash, attachmentState->mColorFormats[i]);
     }
 
-    AttachmentStateBlueprint::AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs) =
-        default;
+    // Hash depth stencil attachment
+    HashCombine(&hash, attachmentState->mDepthStencilFormat);
 
-    size_t AttachmentStateBlueprint::HashFunc::operator()(
-        const AttachmentStateBlueprint* attachmentState) const {
-        size_t hash = 0;
+    // Hash sample count
+    HashCombine(&hash, attachmentState->mSampleCount);
 
-        // Hash color formats
-        HashCombine(&hash, attachmentState->mColorAttachmentsSet);
-        for (ColorAttachmentIndex i : IterateBitSet(attachmentState->mColorAttachmentsSet)) {
-            HashCombine(&hash, attachmentState->mColorFormats[i]);
-        }
+    return hash;
+}
 
-        // Hash depth stencil attachment
-        HashCombine(&hash, attachmentState->mDepthStencilFormat);
-
-        // Hash sample count
-        HashCombine(&hash, attachmentState->mSampleCount);
-
-        return hash;
+bool AttachmentStateBlueprint::EqualityFunc::operator()(const AttachmentStateBlueprint* a,
+                                                        const AttachmentStateBlueprint* b) const {
+    // Check set attachments
+    if (a->mColorAttachmentsSet != b->mColorAttachmentsSet) {
+        return false;
     }
 
-    bool AttachmentStateBlueprint::EqualityFunc::operator()(
-        const AttachmentStateBlueprint* a,
-        const AttachmentStateBlueprint* b) const {
-        // Check set attachments
-        if (a->mColorAttachmentsSet != b->mColorAttachmentsSet) {
+    // Check color formats
+    for (ColorAttachmentIndex i : IterateBitSet(a->mColorAttachmentsSet)) {
+        if (a->mColorFormats[i] != b->mColorFormats[i]) {
             return false;
         }
-
-        // Check color formats
-        for (ColorAttachmentIndex i : IterateBitSet(a->mColorAttachmentsSet)) {
-            if (a->mColorFormats[i] != b->mColorFormats[i]) {
-                return false;
-            }
-        }
-
-        // Check depth stencil format
-        if (a->mDepthStencilFormat != b->mDepthStencilFormat) {
-            return false;
-        }
-
-        // Check sample count
-        if (a->mSampleCount != b->mSampleCount) {
-            return false;
-        }
-
-        return true;
     }
 
-    AttachmentState::AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint)
-        : AttachmentStateBlueprint(blueprint), ObjectBase(device) {
+    // Check depth stencil format
+    if (a->mDepthStencilFormat != b->mDepthStencilFormat) {
+        return false;
     }
 
-    AttachmentState::~AttachmentState() {
-        GetDevice()->UncacheAttachmentState(this);
+    // Check sample count
+    if (a->mSampleCount != b->mSampleCount) {
+        return false;
     }
 
-    size_t AttachmentState::ComputeContentHash() {
-        // TODO(dawn:549): skip this traversal and reuse the blueprint.
-        return AttachmentStateBlueprint::HashFunc()(this);
-    }
+    return true;
+}
 
-    ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments>
-    AttachmentState::GetColorAttachmentsMask() const {
-        return mColorAttachmentsSet;
-    }
+AttachmentState::AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint)
+    : AttachmentStateBlueprint(blueprint), ObjectBase(device) {}
 
-    wgpu::TextureFormat AttachmentState::GetColorAttachmentFormat(
-        ColorAttachmentIndex index) const {
-        ASSERT(mColorAttachmentsSet[index]);
-        return mColorFormats[index];
-    }
+AttachmentState::~AttachmentState() {
+    GetDevice()->UncacheAttachmentState(this);
+}
 
-    bool AttachmentState::HasDepthStencilAttachment() const {
-        return mDepthStencilFormat != wgpu::TextureFormat::Undefined;
-    }
+size_t AttachmentState::ComputeContentHash() {
+    // TODO(dawn:549): skip this traversal and reuse the blueprint.
+    return AttachmentStateBlueprint::HashFunc()(this);
+}
 
-    wgpu::TextureFormat AttachmentState::GetDepthStencilFormat() const {
-        ASSERT(HasDepthStencilAttachment());
-        return mDepthStencilFormat;
-    }
+ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> AttachmentState::GetColorAttachmentsMask()
+    const {
+    return mColorAttachmentsSet;
+}
 
-    uint32_t AttachmentState::GetSampleCount() const {
-        return mSampleCount;
-    }
+wgpu::TextureFormat AttachmentState::GetColorAttachmentFormat(ColorAttachmentIndex index) const {
+    ASSERT(mColorAttachmentsSet[index]);
+    return mColorFormats[index];
+}
+
+bool AttachmentState::HasDepthStencilAttachment() const {
+    return mDepthStencilFormat != wgpu::TextureFormat::Undefined;
+}
+
+wgpu::TextureFormat AttachmentState::GetDepthStencilFormat() const {
+    ASSERT(HasDepthStencilAttachment());
+    return mDepthStencilFormat;
+}
+
+uint32_t AttachmentState::GetSampleCount() const {
+    return mSampleCount;
+}
 
 }  // namespace dawn::native
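
Note: the HashFunc/EqualityFunc pair exists so blueprints can key a deduplicating set. A sketch of
the cache shape these functors serve, assuming `renderPassDescriptor` is a validated descriptor and
`device` a DeviceBase*; the real DeviceBase cache also manages refcounts, elided here:

    using AttachmentStateCache = std::unordered_set<AttachmentStateBlueprint*,
                                                    AttachmentStateBlueprint::HashFunc,
                                                    AttachmentStateBlueprint::EqualityFunc>;
    AttachmentStateCache cache;

    // Blueprints with equal color formats, depth-stencil format, and sample
    // count hash and compare equal, so an existing entry is found and reused.
    AttachmentStateBlueprint blueprint(renderPassDescriptor);
    auto iter = cache.find(&blueprint);
    if (iter == cache.end()) {
        iter = cache.insert(new AttachmentState(device, blueprint)).first;  // Ref handling elided
    }
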
diff --git a/src/dawn/native/AttachmentState.h b/src/dawn/native/AttachmentState.h
index c1e6445..815ce29 100644
--- a/src/dawn/native/AttachmentState.h
+++ b/src/dawn/native/AttachmentState.h
@@ -29,54 +29,53 @@
 
 namespace dawn::native {
 
-    class DeviceBase;
+class DeviceBase;
 
-    // AttachmentStateBlueprint and AttachmentState are separated so the AttachmentState
-    // can be constructed by copying the blueprint state instead of traversing descriptors.
-    // Also, AttachmentStateBlueprint does not need a refcount like AttachmentState.
-    class AttachmentStateBlueprint {
-      public:
-        // Note: Descriptors must be validated before the AttachmentState is constructed.
-        explicit AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor);
-        explicit AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor);
-        explicit AttachmentStateBlueprint(const RenderPassDescriptor* descriptor);
+// AttachmentStateBlueprint and AttachmentState are separated so the AttachmentState
+// can be constructed by copying the blueprint state instead of traversing descriptors.
+// Also, AttachmentStateBlueprint does not need a refcount like AttachmentState.
+class AttachmentStateBlueprint {
+  public:
+    // Note: Descriptors must be validated before the AttachmentState is constructed.
+    explicit AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor);
+    explicit AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor);
+    explicit AttachmentStateBlueprint(const RenderPassDescriptor* descriptor);
 
-        AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs);
+    AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs);
 
-        // Functors necessary for the unordered_set<AttachmentState*>-based cache.
-        struct HashFunc {
-            size_t operator()(const AttachmentStateBlueprint* attachmentState) const;
-        };
-        struct EqualityFunc {
-            bool operator()(const AttachmentStateBlueprint* a,
-                            const AttachmentStateBlueprint* b) const;
-        };
-
-      protected:
-        ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> mColorAttachmentsSet;
-        ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> mColorFormats;
-        // Default (texture format Undefined) indicates there is no depth stencil attachment.
-        wgpu::TextureFormat mDepthStencilFormat = wgpu::TextureFormat::Undefined;
-        uint32_t mSampleCount = 0;
+    // Functors necessary for the unordered_set<AttachmentState*>-based cache.
+    struct HashFunc {
+        size_t operator()(const AttachmentStateBlueprint* attachmentState) const;
+    };
+    struct EqualityFunc {
+        bool operator()(const AttachmentStateBlueprint* a, const AttachmentStateBlueprint* b) const;
     };
 
-    class AttachmentState final : public AttachmentStateBlueprint,
-                                  public ObjectBase,
-                                  public CachedObject {
-      public:
-        AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint);
+  protected:
+    ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> mColorAttachmentsSet;
+    ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> mColorFormats;
+    // Default (texture format Undefined) indicates there is no depth stencil attachment.
+    wgpu::TextureFormat mDepthStencilFormat = wgpu::TextureFormat::Undefined;
+    uint32_t mSampleCount = 0;
+};
 
-        ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
-        wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex index) const;
-        bool HasDepthStencilAttachment() const;
-        wgpu::TextureFormat GetDepthStencilFormat() const;
-        uint32_t GetSampleCount() const;
+class AttachmentState final : public AttachmentStateBlueprint,
+                              public ObjectBase,
+                              public CachedObject {
+  public:
+    AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint);
 
-        size_t ComputeContentHash() override;
+    ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
+    wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex index) const;
+    bool HasDepthStencilAttachment() const;
+    wgpu::TextureFormat GetDepthStencilFormat() const;
+    uint32_t GetSampleCount() const;
 
-      private:
-        ~AttachmentState() override;
-    };
+    size_t ComputeContentHash() override;
+
+  private:
+    ~AttachmentState() override;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/BackendConnection.cpp b/src/dawn/native/BackendConnection.cpp
index abcc271..0c54731 100644
--- a/src/dawn/native/BackendConnection.cpp
+++ b/src/dawn/native/BackendConnection.cpp
@@ -16,21 +16,20 @@
 
 namespace dawn::native {
 
-    BackendConnection::BackendConnection(InstanceBase* instance, wgpu::BackendType type)
-        : mInstance(instance), mType(type) {
-    }
+BackendConnection::BackendConnection(InstanceBase* instance, wgpu::BackendType type)
+    : mInstance(instance), mType(type) {}
 
-    wgpu::BackendType BackendConnection::GetType() const {
-        return mType;
-    }
+wgpu::BackendType BackendConnection::GetType() const {
+    return mType;
+}
 
-    InstanceBase* BackendConnection::GetInstance() const {
-        return mInstance;
-    }
+InstanceBase* BackendConnection::GetInstance() const {
+    return mInstance;
+}
 
-    ResultOrError<std::vector<Ref<AdapterBase>>> BackendConnection::DiscoverAdapters(
-        const AdapterDiscoveryOptionsBase* options) {
-        return DAWN_FORMAT_VALIDATION_ERROR("DiscoverAdapters not implemented for this backend.");
-    }
+ResultOrError<std::vector<Ref<AdapterBase>>> BackendConnection::DiscoverAdapters(
+    const AdapterDiscoveryOptionsBase* options) {
+    return DAWN_FORMAT_VALIDATION_ERROR("DiscoverAdapters not implemented for this backend.");
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/BackendConnection.h b/src/dawn/native/BackendConnection.h
index d9f5dac..04fe35b 100644
--- a/src/dawn/native/BackendConnection.h
+++ b/src/dawn/native/BackendConnection.h
@@ -23,28 +23,28 @@
 
 namespace dawn::native {
 
-    // An common interface for all backends. Mostly used to create adapters for a particular
-    // backend.
-    class BackendConnection {
-      public:
-        BackendConnection(InstanceBase* instance, wgpu::BackendType type);
-        virtual ~BackendConnection() = default;
+// A common interface for all backends. Mostly used to create adapters for a particular
+// backend.
+class BackendConnection {
+  public:
+    BackendConnection(InstanceBase* instance, wgpu::BackendType type);
+    virtual ~BackendConnection() = default;
 
-        wgpu::BackendType GetType() const;
-        InstanceBase* GetInstance() const;
+    wgpu::BackendType GetType() const;
+    InstanceBase* GetInstance() const;
 
-        // Returns all the adapters for the system that can be created by the backend, without extra
-        // options (such as debug adapters, custom driver libraries, etc.)
-        virtual std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() = 0;
+    // Returns all the adapters for the system that can be created by the backend, without extra
+    // options (such as debug adapters, custom driver libraries, etc.)
+    virtual std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() = 0;
 
-        // Returns new adapters created with the backend-specific options.
-        virtual ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
-            const AdapterDiscoveryOptionsBase* options);
+    // Returns new adapters created with the backend-specific options.
+    virtual ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+        const AdapterDiscoveryOptionsBase* options);
 
-      private:
-        InstanceBase* mInstance = nullptr;
-        wgpu::BackendType mType;
-    };
+  private:
+    InstanceBase* mInstance = nullptr;
+    wgpu::BackendType mType;
+};
 
 }  // namespace dawn::native
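
Note: each backend registers one BackendConnection. A hypothetical skeleton for a backend exposing
only default adapters, where NullBackend and NullAdapter are illustrative names:

    class NullBackend : public BackendConnection {
      public:
        explicit NullBackend(InstanceBase* instance)
            : BackendConnection(instance, wgpu::BackendType::Null) {}

        std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override {
            std::vector<Ref<AdapterBase>> adapters;
            Ref<NullAdapter> adapter = AcquireRef(new NullAdapter(GetInstance()));
            if (!GetInstance()->ConsumedError(adapter->Initialize())) {
                adapters.push_back(std::move(adapter));
            }
            return adapters;
        }
        // DiscoverAdapters() is not overridden, so the base implementation
        // above reports "not implemented for this backend".
    };
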
 
diff --git a/src/dawn/native/BindGroup.cpp b/src/dawn/native/BindGroup.cpp
index df91fe7..802213c 100644
--- a/src/dawn/native/BindGroup.cpp
+++ b/src/dawn/native/BindGroup.cpp
@@ -29,517 +29,498 @@
 
 namespace dawn::native {
 
-    namespace {
+namespace {
 
-        // Helper functions to perform binding-type specific validation
+// Helper functions to perform binding-type specific validation
 
-        MaybeError ValidateBufferBinding(const DeviceBase* device,
-                                         const BindGroupEntry& entry,
-                                         const BindingInfo& bindingInfo) {
-            DAWN_INVALID_IF(entry.buffer == nullptr, "Binding entry buffer not set.");
+MaybeError ValidateBufferBinding(const DeviceBase* device,
+                                 const BindGroupEntry& entry,
+                                 const BindingInfo& bindingInfo) {
+    DAWN_INVALID_IF(entry.buffer == nullptr, "Binding entry buffer not set.");
 
-            DAWN_INVALID_IF(entry.sampler != nullptr || entry.textureView != nullptr,
-                            "Expected only buffer to be set for binding entry.");
+    DAWN_INVALID_IF(entry.sampler != nullptr || entry.textureView != nullptr,
+                    "Expected only buffer to be set for binding entry.");
 
-            DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
+    DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
 
-            DAWN_TRY(device->ValidateObject(entry.buffer));
+    DAWN_TRY(device->ValidateObject(entry.buffer));
 
-            ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
+    ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
 
-            wgpu::BufferUsage requiredUsage;
-            uint64_t maxBindingSize;
-            uint64_t requiredBindingAlignment;
-            switch (bindingInfo.buffer.type) {
-                case wgpu::BufferBindingType::Uniform:
-                    requiredUsage = wgpu::BufferUsage::Uniform;
-                    maxBindingSize = device->GetLimits().v1.maxUniformBufferBindingSize;
-                    requiredBindingAlignment =
-                        device->GetLimits().v1.minUniformBufferOffsetAlignment;
-                    break;
-                case wgpu::BufferBindingType::Storage:
-                case wgpu::BufferBindingType::ReadOnlyStorage:
-                    requiredUsage = wgpu::BufferUsage::Storage;
-                    maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
-                    requiredBindingAlignment =
-                        device->GetLimits().v1.minStorageBufferOffsetAlignment;
-                    break;
-                case kInternalStorageBufferBinding:
-                    requiredUsage = kInternalStorageBuffer;
-                    maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
-                    requiredBindingAlignment =
-                        device->GetLimits().v1.minStorageBufferOffsetAlignment;
-                    break;
-                case wgpu::BufferBindingType::Undefined:
-                    UNREACHABLE();
-            }
+    wgpu::BufferUsage requiredUsage;
+    uint64_t maxBindingSize;
+    uint64_t requiredBindingAlignment;
+    switch (bindingInfo.buffer.type) {
+        case wgpu::BufferBindingType::Uniform:
+            requiredUsage = wgpu::BufferUsage::Uniform;
+            maxBindingSize = device->GetLimits().v1.maxUniformBufferBindingSize;
+            requiredBindingAlignment = device->GetLimits().v1.minUniformBufferOffsetAlignment;
+            break;
+        case wgpu::BufferBindingType::Storage:
+        case wgpu::BufferBindingType::ReadOnlyStorage:
+            requiredUsage = wgpu::BufferUsage::Storage;
+            maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
+            requiredBindingAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
+            break;
+        case kInternalStorageBufferBinding:
+            requiredUsage = kInternalStorageBuffer;
+            maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
+            requiredBindingAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
+            break;
+        case wgpu::BufferBindingType::Undefined:
+            UNREACHABLE();
+    }
 
-            uint64_t bufferSize = entry.buffer->GetSize();
+    uint64_t bufferSize = entry.buffer->GetSize();
 
-            // Handle wgpu::WholeSize, avoiding overflows.
-            DAWN_INVALID_IF(entry.offset > bufferSize,
-                            "Binding offset (%u) is larger than the size (%u) of %s.", entry.offset,
-                            bufferSize, entry.buffer);
+    // Handle wgpu::WholeSize, avoiding overflows.
+    DAWN_INVALID_IF(entry.offset > bufferSize,
+                    "Binding offset (%u) is larger than the size (%u) of %s.", entry.offset,
+                    bufferSize, entry.buffer);
 
-            uint64_t bindingSize =
-                (entry.size == wgpu::kWholeSize) ? bufferSize - entry.offset : entry.size;
+    uint64_t bindingSize =
+        (entry.size == wgpu::kWholeSize) ? bufferSize - entry.offset : entry.size;
 
-            DAWN_INVALID_IF(bindingSize > bufferSize,
-                            "Binding size (%u) is larger than the size (%u) of %s.", bindingSize,
-                            bufferSize, entry.buffer);
+    DAWN_INVALID_IF(bindingSize > bufferSize,
+                    "Binding size (%u) is larger than the size (%u) of %s.", bindingSize,
+                    bufferSize, entry.buffer);
 
-            DAWN_INVALID_IF(bindingSize == 0, "Binding size is zero");
+    DAWN_INVALID_IF(bindingSize == 0, "Binding size is zero");
 
-            // Note that no overflow can happen because we already checked that
-            // bufferSize >= bindingSize
-            DAWN_INVALID_IF(
-                entry.offset > bufferSize - bindingSize,
-                "Binding range (offset: %u, size: %u) doesn't fit in the size (%u) of %s.",
-                entry.offset, bufferSize, bindingSize, entry.buffer);
+    // Note that no overflow can happen because we already checked that
+    // bufferSize >= bindingSize
+    DAWN_INVALID_IF(entry.offset > bufferSize - bindingSize,
+                    "Binding range (offset: %u, size: %u) doesn't fit in the size (%u) of %s.",
+                    entry.offset, bindingSize, bufferSize, entry.buffer);
 
-            DAWN_INVALID_IF(!IsAligned(entry.offset, requiredBindingAlignment),
-                            "Offset (%u) does not satisfy the minimum %s alignment (%u).",
-                            entry.offset, bindingInfo.buffer.type, requiredBindingAlignment);
+    DAWN_INVALID_IF(!IsAligned(entry.offset, requiredBindingAlignment),
+                    "Offset (%u) does not satisfy the minimum %s alignment (%u).", entry.offset,
+                    bindingInfo.buffer.type, requiredBindingAlignment);
 
-            DAWN_INVALID_IF(!(entry.buffer->GetUsage() & requiredUsage),
-                            "Binding usage (%s) of %s doesn't match expected usage (%s).",
-                            entry.buffer->GetUsageExternalOnly(), entry.buffer, requiredUsage);
+    DAWN_INVALID_IF(!(entry.buffer->GetUsage() & requiredUsage),
+                    "Binding usage (%s) of %s doesn't match expected usage (%s).",
+                    entry.buffer->GetUsageExternalOnly(), entry.buffer, requiredUsage);
 
-            DAWN_INVALID_IF(bindingSize < bindingInfo.buffer.minBindingSize,
-                            "Binding size (%u) is smaller than the minimum binding size (%u).",
-                            bindingSize, bindingInfo.buffer.minBindingSize);
+    DAWN_INVALID_IF(bindingSize < bindingInfo.buffer.minBindingSize,
+                    "Binding size (%u) is smaller than the minimum binding size (%u).", bindingSize,
+                    bindingInfo.buffer.minBindingSize);
 
-            DAWN_INVALID_IF(bindingSize > maxBindingSize,
-                            "Binding size (%u) is larger than the maximum binding size (%u).",
-                            bindingSize, maxBindingSize);
+    DAWN_INVALID_IF(bindingSize > maxBindingSize,
+                    "Binding size (%u) is larger than the maximum binding size (%u).", bindingSize,
+                    maxBindingSize);
 
-            return {};
-        }
+    return {};
+}
 
-        MaybeError ValidateTextureBinding(DeviceBase* device,
-                                          const BindGroupEntry& entry,
-                                          const BindingInfo& bindingInfo) {
-            DAWN_INVALID_IF(entry.textureView == nullptr, "Binding entry textureView not set.");
+MaybeError ValidateTextureBinding(DeviceBase* device,
+                                  const BindGroupEntry& entry,
+                                  const BindingInfo& bindingInfo) {
+    DAWN_INVALID_IF(entry.textureView == nullptr, "Binding entry textureView not set.");
 
-            DAWN_INVALID_IF(entry.sampler != nullptr || entry.buffer != nullptr,
-                            "Expected only textureView to be set for binding entry.");
+    DAWN_INVALID_IF(entry.sampler != nullptr || entry.buffer != nullptr,
+                    "Expected only textureView to be set for binding entry.");
 
-            DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
+    DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
 
-            DAWN_TRY(device->ValidateObject(entry.textureView));
+    DAWN_TRY(device->ValidateObject(entry.textureView));
 
-            TextureViewBase* view = entry.textureView;
+    TextureViewBase* view = entry.textureView;
 
-            Aspect aspect = view->GetAspects();
-            DAWN_INVALID_IF(!HasOneBit(aspect), "Multiple aspects (%s) selected in %s.", aspect,
-                            view);
+    Aspect aspect = view->GetAspects();
+    DAWN_INVALID_IF(!HasOneBit(aspect), "Multiple aspects (%s) selected in %s.", aspect, view);
 
-            TextureBase* texture = view->GetTexture();
-            switch (bindingInfo.bindingType) {
-                case BindingInfoType::Texture: {
-                    SampleTypeBit supportedTypes =
-                        texture->GetFormat().GetAspectInfo(aspect).supportedSampleTypes;
-                    SampleTypeBit requiredType =
-                        SampleTypeToSampleTypeBit(bindingInfo.texture.sampleType);
+    TextureBase* texture = view->GetTexture();
+    switch (bindingInfo.bindingType) {
+        case BindingInfoType::Texture: {
+            SampleTypeBit supportedTypes =
+                texture->GetFormat().GetAspectInfo(aspect).supportedSampleTypes;
+            SampleTypeBit requiredType = SampleTypeToSampleTypeBit(bindingInfo.texture.sampleType);
 
-                    DAWN_INVALID_IF(
-                        !(texture->GetUsage() & wgpu::TextureUsage::TextureBinding),
-                        "Usage (%s) of %s doesn't include TextureUsage::TextureBinding.",
-                        texture->GetUsage(), texture);
+            DAWN_INVALID_IF(!(texture->GetUsage() & wgpu::TextureUsage::TextureBinding),
+                            "Usage (%s) of %s doesn't include TextureUsage::TextureBinding.",
+                            texture->GetUsage(), texture);
 
-                    DAWN_INVALID_IF(
-                        texture->IsMultisampledTexture() != bindingInfo.texture.multisampled,
-                        "Sample count (%u) of %s doesn't match expectation (multisampled: %d).",
-                        texture->GetSampleCount(), texture, bindingInfo.texture.multisampled);
-
-                    DAWN_INVALID_IF(
-                        (supportedTypes & requiredType) == 0,
-                        "None of the supported sample types (%s) of %s match the expected sample "
-                        "types (%s).",
-                        supportedTypes, texture, requiredType);
-
-                    DAWN_INVALID_IF(
-                        entry.textureView->GetDimension() != bindingInfo.texture.viewDimension,
-                        "Dimension (%s) of %s doesn't match the expected dimension (%s).",
-                        entry.textureView->GetDimension(), entry.textureView,
-                        bindingInfo.texture.viewDimension);
-                    break;
-                }
-                case BindingInfoType::StorageTexture: {
-                    DAWN_INVALID_IF(
-                        !(texture->GetUsage() & wgpu::TextureUsage::StorageBinding),
-                        "Usage (%s) of %s doesn't include TextureUsage::StorageBinding.",
-                        texture->GetUsage(), texture);
-
-                    ASSERT(!texture->IsMultisampledTexture());
-
-                    DAWN_INVALID_IF(
-                        texture->GetFormat().format != bindingInfo.storageTexture.format,
-                        "Format (%s) of %s expected to be (%s).", texture->GetFormat().format,
-                        texture, bindingInfo.storageTexture.format);
-
-                    DAWN_INVALID_IF(
-                        entry.textureView->GetDimension() !=
-                            bindingInfo.storageTexture.viewDimension,
-                        "Dimension (%s) of %s doesn't match the expected dimension (%s).",
-                        entry.textureView->GetDimension(), entry.textureView,
-                        bindingInfo.storageTexture.viewDimension);
-
-                    DAWN_INVALID_IF(entry.textureView->GetLevelCount() != 1,
-                                    "mipLevelCount (%u) of %s expected to be 1.",
-                                    entry.textureView->GetLevelCount(), entry.textureView);
-                    break;
-                }
-                default:
-                    UNREACHABLE();
-                    break;
-            }
-
-            return {};
-        }
-
-        MaybeError ValidateSamplerBinding(const DeviceBase* device,
-                                          const BindGroupEntry& entry,
-                                          const BindingInfo& bindingInfo) {
-            DAWN_INVALID_IF(entry.sampler == nullptr, "Binding entry sampler not set.");
-
-            DAWN_INVALID_IF(entry.textureView != nullptr || entry.buffer != nullptr,
-                            "Expected only sampler to be set for binding entry.");
-
-            DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
-
-            DAWN_TRY(device->ValidateObject(entry.sampler));
-
-            ASSERT(bindingInfo.bindingType == BindingInfoType::Sampler);
-
-            switch (bindingInfo.sampler.type) {
-                case wgpu::SamplerBindingType::NonFiltering:
-                    DAWN_INVALID_IF(
-                        entry.sampler->IsFiltering(),
-                        "Filtering sampler %s is incompatible with non-filtering sampler "
-                        "binding.",
-                        entry.sampler);
-                    [[fallthrough]];
-                case wgpu::SamplerBindingType::Filtering:
-                    DAWN_INVALID_IF(
-                        entry.sampler->IsComparison(),
-                        "Comparison sampler %s is incompatible with non-comparison sampler "
-                        "binding.",
-                        entry.sampler);
-                    break;
-                case wgpu::SamplerBindingType::Comparison:
-                    DAWN_INVALID_IF(
-                        !entry.sampler->IsComparison(),
-                        "Non-comparison sampler %s is imcompatible with comparison sampler "
-                        "binding.",
-                        entry.sampler);
-                    break;
-                default:
-                    UNREACHABLE();
-                    break;
-            }
-
-            return {};
-        }
-
-        MaybeError ValidateExternalTextureBinding(
-            const DeviceBase* device,
-            const BindGroupEntry& entry,
-            const ExternalTextureBindingEntry* externalTextureBindingEntry,
-            const ExternalTextureBindingExpansionMap& expansions) {
-            DAWN_INVALID_IF(externalTextureBindingEntry == nullptr,
-                            "Binding entry external texture not set.");
+            DAWN_INVALID_IF(texture->IsMultisampledTexture() != bindingInfo.texture.multisampled,
+                            "Sample count (%u) of %s doesn't match expectation (multisampled: %d).",
+                            texture->GetSampleCount(), texture, bindingInfo.texture.multisampled);
 
             DAWN_INVALID_IF(
-                entry.sampler != nullptr || entry.textureView != nullptr || entry.buffer != nullptr,
-                "Expected only external texture to be set for binding entry.");
+                (supportedTypes & requiredType) == 0,
+                "None of the supported sample types (%s) of %s match the expected sample "
+                "types (%s).",
+                supportedTypes, texture, requiredType);
+
+            DAWN_INVALID_IF(entry.textureView->GetDimension() != bindingInfo.texture.viewDimension,
+                            "Dimension (%s) of %s doesn't match the expected dimension (%s).",
+                            entry.textureView->GetDimension(), entry.textureView,
+                            bindingInfo.texture.viewDimension);
+            break;
+        }
+        case BindingInfoType::StorageTexture: {
+            DAWN_INVALID_IF(!(texture->GetUsage() & wgpu::TextureUsage::StorageBinding),
+                            "Usage (%s) of %s doesn't include TextureUsage::StorageBinding.",
+                            texture->GetUsage(), texture);
+
+            ASSERT(!texture->IsMultisampledTexture());
+
+            DAWN_INVALID_IF(texture->GetFormat().format != bindingInfo.storageTexture.format,
+                            "Format (%s) of %s expected to be (%s).", texture->GetFormat().format,
+                            texture, bindingInfo.storageTexture.format);
 
             DAWN_INVALID_IF(
-                expansions.find(BindingNumber(entry.binding)) == expansions.end(),
-                "External texture binding entry %u is not present in the bind group layout.",
-                entry.binding);
+                entry.textureView->GetDimension() != bindingInfo.storageTexture.viewDimension,
+                "Dimension (%s) of %s doesn't match the expected dimension (%s).",
+                entry.textureView->GetDimension(), entry.textureView,
+                bindingInfo.storageTexture.viewDimension);
 
-            DAWN_TRY(ValidateSingleSType(externalTextureBindingEntry->nextInChain,
-                                         wgpu::SType::ExternalTextureBindingEntry));
+            DAWN_INVALID_IF(entry.textureView->GetLevelCount() != 1,
+                            "mipLevelCount (%u) of %s expected to be 1.",
+                            entry.textureView->GetLevelCount(), entry.textureView);
+            break;
+        }
+        default:
+            UNREACHABLE();
+            break;
+    }
 
-            DAWN_TRY(device->ValidateObject(externalTextureBindingEntry->externalTexture));
+    return {};
+}
 
-            return {};
+MaybeError ValidateSamplerBinding(const DeviceBase* device,
+                                  const BindGroupEntry& entry,
+                                  const BindingInfo& bindingInfo) {
+    DAWN_INVALID_IF(entry.sampler == nullptr, "Binding entry sampler not set.");
+
+    DAWN_INVALID_IF(entry.textureView != nullptr || entry.buffer != nullptr,
+                    "Expected only sampler to be set for binding entry.");
+
+    DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
+
+    DAWN_TRY(device->ValidateObject(entry.sampler));
+
+    ASSERT(bindingInfo.bindingType == BindingInfoType::Sampler);
+
+    switch (bindingInfo.sampler.type) {
+        case wgpu::SamplerBindingType::NonFiltering:
+            DAWN_INVALID_IF(entry.sampler->IsFiltering(),
+                            "Filtering sampler %s is incompatible with non-filtering sampler "
+                            "binding.",
+                            entry.sampler);
+            [[fallthrough]];
+        case wgpu::SamplerBindingType::Filtering:
+            DAWN_INVALID_IF(entry.sampler->IsComparison(),
+                            "Comparison sampler %s is incompatible with non-comparison sampler "
+                            "binding.",
+                            entry.sampler);
+            break;
+        case wgpu::SamplerBindingType::Comparison:
+            DAWN_INVALID_IF(!entry.sampler->IsComparison(),
+                            "Non-comparison sampler %s is imcompatible with comparison sampler "
+                            "binding.",
+                            entry.sampler);
+            break;
+        default:
+            UNREACHABLE();
+            break;
+    }
+
+    return {};
+}
+
+MaybeError ValidateExternalTextureBinding(
+    const DeviceBase* device,
+    const BindGroupEntry& entry,
+    const ExternalTextureBindingEntry* externalTextureBindingEntry,
+    const ExternalTextureBindingExpansionMap& expansions) {
+    DAWN_INVALID_IF(externalTextureBindingEntry == nullptr,
+                    "Binding entry external texture not set.");
+
+    DAWN_INVALID_IF(
+        entry.sampler != nullptr || entry.textureView != nullptr || entry.buffer != nullptr,
+        "Expected only external texture to be set for binding entry.");
+
+    DAWN_INVALID_IF(expansions.find(BindingNumber(entry.binding)) == expansions.end(),
+                    "External texture binding entry %u is not present in the bind group layout.",
+                    entry.binding);
+
+    DAWN_TRY(ValidateSingleSType(externalTextureBindingEntry->nextInChain,
+                                 wgpu::SType::ExternalTextureBindingEntry));
+
+    DAWN_TRY(device->ValidateObject(externalTextureBindingEntry->externalTexture));
+
+    return {};
+}
+
+}  // anonymous namespace
+
+MaybeError ValidateBindGroupDescriptor(DeviceBase* device, const BindGroupDescriptor* descriptor) {
+    DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+    DAWN_TRY(device->ValidateObject(descriptor->layout));
+
+    DAWN_INVALID_IF(
+        descriptor->entryCount != descriptor->layout->GetUnexpandedBindingCount(),
+        "Number of entries (%u) did not match the number of entries (%u) specified in %s."
+        "\nExpected layout: %s",
+        descriptor->entryCount,
+        static_cast<uint32_t>(descriptor->layout->GetUnexpandedBindingCount()),
+        descriptor->layout, descriptor->layout->EntriesToString());
+
+    const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
+    ASSERT(bindingMap.size() <= kMaxBindingsPerPipelineLayout);
+
+    ityp::bitset<BindingIndex, kMaxBindingsPerPipelineLayout> bindingsSet;
+    for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+        const BindGroupEntry& entry = descriptor->entries[i];
+
+        const auto& it = bindingMap.find(BindingNumber(entry.binding));
+        DAWN_INVALID_IF(it == bindingMap.end(),
+                        "In entries[%u], binding index %u not present in the bind group layout."
+                        "\nExpected layout: %s",
+                        i, entry.binding, descriptor->layout->EntriesToString());
+
+        BindingIndex bindingIndex = it->second;
+        ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
+
+        DAWN_INVALID_IF(bindingsSet[bindingIndex],
+                        "In entries[%u], binding index %u already used by a previous entry", i,
+                        entry.binding);
+
+        bindingsSet.set(bindingIndex);
+
+        // Below this block we validate entries based on the bind group layout, in which
+        // external textures have been expanded into their underlying contents. For this reason
+        // we must identify external texture binding entries by checking the bind group entry
+        // itself.
+        // TODO(dawn:1293): Store external textures in
+        // BindGroupLayoutBase::BindingDataPointers::bindings so checking external textures can
+        // be moved into the switch below.
+        const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
+        FindInChain(entry.nextInChain, &externalTextureBindingEntry);
+        if (externalTextureBindingEntry != nullptr) {
+            DAWN_TRY(ValidateExternalTextureBinding(
+                device, entry, externalTextureBindingEntry,
+                descriptor->layout->GetExternalTextureBindingExpansionMap()));
+            continue;
         }
 
-    }  // anonymous namespace
+        const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
 
-    MaybeError ValidateBindGroupDescriptor(DeviceBase* device,
-                                           const BindGroupDescriptor* descriptor) {
-        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+        // Perform binding-type specific validation.
+        switch (bindingInfo.bindingType) {
+            case BindingInfoType::Buffer:
+                DAWN_TRY_CONTEXT(ValidateBufferBinding(device, entry, bindingInfo),
+                                 "validating entries[%u] as a Buffer."
+                                 "\nExpected entry layout: %s",
+                                 i, bindingInfo);
+                break;
+            case BindingInfoType::Texture:
+            case BindingInfoType::StorageTexture:
+                DAWN_TRY_CONTEXT(ValidateTextureBinding(device, entry, bindingInfo),
+                                 "validating entries[%u] as a Texture."
+                                 "\nExpected entry layout: %s",
+                                 i, bindingInfo);
+                break;
+            case BindingInfoType::Sampler:
+                DAWN_TRY_CONTEXT(ValidateSamplerBinding(device, entry, bindingInfo),
+                                 "validating entries[%u] as a Sampler."
+                                 "\nExpected entry layout: %s",
+                                 i, bindingInfo);
+                break;
+            case BindingInfoType::ExternalTexture:
+                UNREACHABLE();
+                break;
+        }
+    }
 
-        DAWN_TRY(device->ValidateObject(descriptor->layout));
+    // This should always be true because:
+    //  - numBindings has to match between the bind group and its layout.
+    //  - Each binding must be set at most once.
+    //
+    // We don't validate the equality because it wouldn't be possible to cover the failure
+    // with a test.
+    ASSERT(bindingsSet.count() == descriptor->layout->GetUnexpandedBindingCount());
 
-        DAWN_INVALID_IF(
-            descriptor->entryCount != descriptor->layout->GetUnexpandedBindingCount(),
-            "Number of entries (%u) did not match the number of entries (%u) specified in %s."
-            "\nExpected layout: %s",
-            descriptor->entryCount, static_cast<uint32_t>(descriptor->layout->GetBindingCount()),
-            descriptor->layout, descriptor->layout->EntriesToString());
+    return {};
+}
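For orientation, a minimal descriptor that passes this validation against a two-entry layout,
as a sketch using the public webgpu_cpp API (`device`, `layout`, `buffer`, and `sampler` are
assumed handles):

    wgpu::BindGroupEntry entries[2] = {};
    entries[0].binding = 0;
    entries[0].buffer = buffer;
    entries[0].size = wgpu::kWholeSize;
    entries[1].binding = 1;
    entries[1].sampler = sampler;

    wgpu::BindGroupDescriptor desc = {};
    desc.layout = layout;
    desc.entryCount = 2;  // must equal the layout's unexpanded binding count
    desc.entries = entries;
    wgpu::BindGroup group = device.CreateBindGroup(&desc);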
 
-        const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
-        ASSERT(bindingMap.size() <= kMaxBindingsPerPipelineLayout);
+// BindGroup
 
-        ityp::bitset<BindingIndex, kMaxBindingsPerPipelineLayout> bindingsSet;
-        for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
-            const BindGroupEntry& entry = descriptor->entries[i];
+BindGroupBase::BindGroupBase(DeviceBase* device,
+                             const BindGroupDescriptor* descriptor,
+                             void* bindingDataStart)
+    : ApiObjectBase(device, descriptor->label),
+      mLayout(descriptor->layout),
+      mBindingData(mLayout->ComputeBindingDataPointers(bindingDataStart)) {
+    for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
+        // TODO(enga): Shouldn't be needed when bindings are tightly packed.
+        // This is to fill Ref<ObjectBase> holes with nullptrs.
+        new (&mBindingData.bindings[i]) Ref<ObjectBase>();
+    }
 
-            const auto& it = bindingMap.find(BindingNumber(entry.binding));
-            DAWN_INVALID_IF(it == bindingMap.end(),
-                            "In entries[%u], binding index %u not present in the bind group layout."
-                            "\nExpected layout: %s",
-                            i, entry.binding, descriptor->layout->EntriesToString());
+    for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+        const BindGroupEntry& entry = descriptor->entries[i];
 
-            BindingIndex bindingIndex = it->second;
-            ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
+        BindingIndex bindingIndex =
+            descriptor->layout->GetBindingIndex(BindingNumber(entry.binding));
+        ASSERT(bindingIndex < mLayout->GetBindingCount());
 
-            DAWN_INVALID_IF(bindingsSet[bindingIndex],
-                            "In entries[%u], binding index %u already used by a previous entry", i,
-                            entry.binding);
+        // Only a single binding type should be set, so once we find it we can skip to the
+        // next loop iteration.
 
-            bindingsSet.set(bindingIndex);
-
-            // Below this block we validate entries based on the bind group layout, in which
-            // external textures have been expanded into their underlying contents. For this reason
-            // we must identify external texture binding entries by checking the bind group entry
-            // itself.
-            // TODO(dawn:1293): Store external textures in
-            // BindGroupLayoutBase::BindingDataPointers::bindings so checking external textures can
-            // be moved in the switch below.
-            const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
-            FindInChain(entry.nextInChain, &externalTextureBindingEntry);
-            if (externalTextureBindingEntry != nullptr) {
-                DAWN_TRY(ValidateExternalTextureBinding(
-                    device, entry, externalTextureBindingEntry,
-                    descriptor->layout->GetExternalTextureBindingExpansionMap()));
-                continue;
-            }
-
-            const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
-
-            // Perform binding-type specific validation.
-            switch (bindingInfo.bindingType) {
-                case BindingInfoType::Buffer:
-                    DAWN_TRY_CONTEXT(ValidateBufferBinding(device, entry, bindingInfo),
-                                     "validating entries[%u] as a Buffer."
-                                     "\nExpected entry layout: %s",
-                                     i, bindingInfo);
-                    break;
-                case BindingInfoType::Texture:
-                case BindingInfoType::StorageTexture:
-                    DAWN_TRY_CONTEXT(ValidateTextureBinding(device, entry, bindingInfo),
-                                     "validating entries[%u] as a Texture."
-                                     "\nExpected entry layout: %s",
-                                     i, bindingInfo);
-                    break;
-                case BindingInfoType::Sampler:
-                    DAWN_TRY_CONTEXT(ValidateSamplerBinding(device, entry, bindingInfo),
-                                     "validating entries[%u] as a Sampler."
-                                     "\nExpected entry layout: %s",
-                                     i, bindingInfo);
-                    break;
-                case BindingInfoType::ExternalTexture:
-                    UNREACHABLE();
-                    break;
-            }
+        if (entry.buffer != nullptr) {
+            ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
+            mBindingData.bindings[bindingIndex] = entry.buffer;
+            mBindingData.bufferData[bindingIndex].offset = entry.offset;
+            uint64_t bufferSize = (entry.size == wgpu::kWholeSize)
+                                      ? entry.buffer->GetSize() - entry.offset
+                                      : entry.size;
+            mBindingData.bufferData[bindingIndex].size = bufferSize;
+            continue;
         }
 
-        // This should always be true because
-        //  - numBindings has to match between the bind group and its layout.
-        //  - Each binding must be set at most once
-        //
-        // We don't validate the equality because it wouldn't be possible to cover it with a test.
-        ASSERT(bindingsSet.count() == descriptor->layout->GetUnexpandedBindingCount());
+        if (entry.textureView != nullptr) {
+            ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
+            mBindingData.bindings[bindingIndex] = entry.textureView;
+            continue;
+        }
 
-        return {};
-    }
+        if (entry.sampler != nullptr) {
+            ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
+            mBindingData.bindings[bindingIndex] = entry.sampler;
+            continue;
+        }
 
-    // BindGroup
+        // Here we unpack external texture bindings into multiple additional bindings for the
+        // external texture's contents. The new binding locations, previously determined in the
+        // bind group layout, are created in this bind group and filled with the external
+        // texture's underlying resources.
+        const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
+        FindInChain(entry.nextInChain, &externalTextureBindingEntry);
+        if (externalTextureBindingEntry != nullptr) {
+            mBoundExternalTextures.push_back(externalTextureBindingEntry->externalTexture);
 
-    BindGroupBase::BindGroupBase(DeviceBase* device,
-                                 const BindGroupDescriptor* descriptor,
-                                 void* bindingDataStart)
-        : ApiObjectBase(device, descriptor->label),
-          mLayout(descriptor->layout),
-          mBindingData(mLayout->ComputeBindingDataPointers(bindingDataStart)) {
+            ExternalTextureBindingExpansionMap expansions =
+                mLayout->GetExternalTextureBindingExpansionMap();
+            ExternalTextureBindingExpansionMap::iterator it =
+                expansions.find(BindingNumber(entry.binding));
+
+            ASSERT(it != expansions.end());
+
+            BindingIndex plane0BindingIndex =
+                descriptor->layout->GetBindingIndex(it->second.plane0);
+            BindingIndex plane1BindingIndex =
+                descriptor->layout->GetBindingIndex(it->second.plane1);
+            BindingIndex paramsBindingIndex =
+                descriptor->layout->GetBindingIndex(it->second.params);
+
+            ASSERT(mBindingData.bindings[plane0BindingIndex] == nullptr);
+
+            mBindingData.bindings[plane0BindingIndex] =
+                externalTextureBindingEntry->externalTexture->GetTextureViews()[0];
+
+            ASSERT(mBindingData.bindings[plane1BindingIndex] == nullptr);
+            mBindingData.bindings[plane1BindingIndex] =
+                externalTextureBindingEntry->externalTexture->GetTextureViews()[1];
+
+            ASSERT(mBindingData.bindings[paramsBindingIndex] == nullptr);
+            mBindingData.bindings[paramsBindingIndex] =
+                externalTextureBindingEntry->externalTexture->GetParamsBuffer();
+            mBindingData.bufferData[paramsBindingIndex].offset = 0;
+            mBindingData.bufferData[paramsBindingIndex].size =
+                sizeof(dawn_native::ExternalTextureParams);
+
+            continue;
+        }
+    }
+
+    uint32_t packedIdx = 0;
+    for (BindingIndex bindingIndex{0}; bindingIndex < descriptor->layout->GetBufferCount();
+         ++bindingIndex) {
+        if (descriptor->layout->GetBindingInfo(bindingIndex).buffer.minBindingSize == 0) {
+            mBindingData.unverifiedBufferSizes[packedIdx] =
+                mBindingData.bufferData[bindingIndex].size;
+            ++packedIdx;
+        }
+    }
+
+    TrackInDevice();
+}
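The bufferSize computation above implements the WebGPU rule that wgpu::kWholeSize binds the
tail of the buffer past the offset; a standalone sketch of the same rule (not Dawn API):

    // Mirrors the constructor's size resolution.
    uint64_t ResolveBoundSize(uint64_t entrySize, uint64_t offset, uint64_t bufferSize) {
        return entrySize == wgpu::kWholeSize ? bufferSize - offset : entrySize;
    }
    // e.g. ResolveBoundSize(wgpu::kWholeSize, 256, 1024) == 768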
+
+BindGroupBase::BindGroupBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+    TrackInDevice();
+}
+
+BindGroupBase::~BindGroupBase() = default;
+
+void BindGroupBase::DestroyImpl() {
+    if (mLayout != nullptr) {
+        ASSERT(!IsError());
         for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
-            // TODO(enga): Shouldn't be needed when bindings are tightly packed.
-            // This is to fill Ref<ObjectBase> holes with nullptrs.
-            new (&mBindingData.bindings[i]) Ref<ObjectBase>();
-        }
-
-        for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
-            const BindGroupEntry& entry = descriptor->entries[i];
-
-            BindingIndex bindingIndex =
-                descriptor->layout->GetBindingIndex(BindingNumber(entry.binding));
-            ASSERT(bindingIndex < mLayout->GetBindingCount());
-
-            // Only a single binding type should be set, so once we found it we can skip to the
-            // next loop iteration.
-
-            if (entry.buffer != nullptr) {
-                ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
-                mBindingData.bindings[bindingIndex] = entry.buffer;
-                mBindingData.bufferData[bindingIndex].offset = entry.offset;
-                uint64_t bufferSize = (entry.size == wgpu::kWholeSize)
-                                          ? entry.buffer->GetSize() - entry.offset
-                                          : entry.size;
-                mBindingData.bufferData[bindingIndex].size = bufferSize;
-                continue;
-            }
-
-            if (entry.textureView != nullptr) {
-                ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
-                mBindingData.bindings[bindingIndex] = entry.textureView;
-                continue;
-            }
-
-            if (entry.sampler != nullptr) {
-                ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
-                mBindingData.bindings[bindingIndex] = entry.sampler;
-                continue;
-            }
-
-            // Here we unpack external texture bindings into multiple additional bindings for the
-            // external texture's contents. New binding locations previously determined in the bind
-            // group layout are created in this bind group and filled with the external texture's
-            // underlying resources.
-            const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
-            FindInChain(entry.nextInChain, &externalTextureBindingEntry);
-            if (externalTextureBindingEntry != nullptr) {
-                mBoundExternalTextures.push_back(externalTextureBindingEntry->externalTexture);
-
-                ExternalTextureBindingExpansionMap expansions =
-                    mLayout->GetExternalTextureBindingExpansionMap();
-                ExternalTextureBindingExpansionMap::iterator it =
-                    expansions.find(BindingNumber(entry.binding));
-
-                ASSERT(it != expansions.end());
-
-                BindingIndex plane0BindingIndex =
-                    descriptor->layout->GetBindingIndex(it->second.plane0);
-                BindingIndex plane1BindingIndex =
-                    descriptor->layout->GetBindingIndex(it->second.plane1);
-                BindingIndex paramsBindingIndex =
-                    descriptor->layout->GetBindingIndex(it->second.params);
-
-                ASSERT(mBindingData.bindings[plane0BindingIndex] == nullptr);
-
-                mBindingData.bindings[plane0BindingIndex] =
-                    externalTextureBindingEntry->externalTexture->GetTextureViews()[0];
-
-                ASSERT(mBindingData.bindings[plane1BindingIndex] == nullptr);
-                mBindingData.bindings[plane1BindingIndex] =
-                    externalTextureBindingEntry->externalTexture->GetTextureViews()[1];
-
-                ASSERT(mBindingData.bindings[paramsBindingIndex] == nullptr);
-                mBindingData.bindings[paramsBindingIndex] =
-                    externalTextureBindingEntry->externalTexture->GetParamsBuffer();
-                mBindingData.bufferData[paramsBindingIndex].offset = 0;
-                mBindingData.bufferData[paramsBindingIndex].size =
-                    sizeof(dawn_native::ExternalTextureParams);
-
-                continue;
-            }
-        }
-
-        uint32_t packedIdx = 0;
-        for (BindingIndex bindingIndex{0}; bindingIndex < descriptor->layout->GetBufferCount();
-             ++bindingIndex) {
-            if (descriptor->layout->GetBindingInfo(bindingIndex).buffer.minBindingSize == 0) {
-                mBindingData.unverifiedBufferSizes[packedIdx] =
-                    mBindingData.bufferData[bindingIndex].size;
-                ++packedIdx;
-            }
-        }
-
-        TrackInDevice();
-    }
-
-    BindGroupBase::BindGroupBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
-        TrackInDevice();
-    }
-
-    BindGroupBase::~BindGroupBase() = default;
-
-    void BindGroupBase::DestroyImpl() {
-        if (mLayout != nullptr) {
-            ASSERT(!IsError());
-            for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
-                mBindingData.bindings[i].~Ref<ObjectBase>();
-            }
+            mBindingData.bindings[i].~Ref<ObjectBase>();
         }
     }
+}
 
-    void BindGroupBase::DeleteThis() {
-        // Add another ref to the layout so that if this is the last ref, the layout
-        // is destroyed after the bind group. The bind group is slab-allocated inside
-        // memory owned by the layout (except for the null backend).
-        Ref<BindGroupLayoutBase> layout = mLayout;
-        ApiObjectBase::DeleteThis();
-    }
+void BindGroupBase::DeleteThis() {
+    // Add another ref to the layout so that if this is the last ref, the layout
+    // is destroyed after the bind group. The bind group is slab-allocated inside
+    // memory owned by the layout (except for the null backend).
+    Ref<BindGroupLayoutBase> layout = mLayout;
+    ApiObjectBase::DeleteThis();
+}
 
-    BindGroupBase::BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag), mBindingData() {
-    }
+BindGroupBase::BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag), mBindingData() {}
 
-    // static
-    BindGroupBase* BindGroupBase::MakeError(DeviceBase* device) {
-        return new BindGroupBase(device, ObjectBase::kError);
-    }
+// static
+BindGroupBase* BindGroupBase::MakeError(DeviceBase* device) {
+    return new BindGroupBase(device, ObjectBase::kError);
+}
 
-    ObjectType BindGroupBase::GetType() const {
-        return ObjectType::BindGroup;
-    }
+ObjectType BindGroupBase::GetType() const {
+    return ObjectType::BindGroup;
+}
 
-    BindGroupLayoutBase* BindGroupBase::GetLayout() {
-        ASSERT(!IsError());
-        return mLayout.Get();
-    }
+BindGroupLayoutBase* BindGroupBase::GetLayout() {
+    ASSERT(!IsError());
+    return mLayout.Get();
+}
 
-    const BindGroupLayoutBase* BindGroupBase::GetLayout() const {
-        ASSERT(!IsError());
-        return mLayout.Get();
-    }
+const BindGroupLayoutBase* BindGroupBase::GetLayout() const {
+    ASSERT(!IsError());
+    return mLayout.Get();
+}
 
-    const ityp::span<uint32_t, uint64_t>& BindGroupBase::GetUnverifiedBufferSizes() const {
-        ASSERT(!IsError());
-        return mBindingData.unverifiedBufferSizes;
-    }
+const ityp::span<uint32_t, uint64_t>& BindGroupBase::GetUnverifiedBufferSizes() const {
+    ASSERT(!IsError());
+    return mBindingData.unverifiedBufferSizes;
+}
 
-    BufferBinding BindGroupBase::GetBindingAsBufferBinding(BindingIndex bindingIndex) {
-        ASSERT(!IsError());
-        ASSERT(bindingIndex < mLayout->GetBindingCount());
-        ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Buffer);
-        BufferBase* buffer = static_cast<BufferBase*>(mBindingData.bindings[bindingIndex].Get());
-        return {buffer, mBindingData.bufferData[bindingIndex].offset,
-                mBindingData.bufferData[bindingIndex].size};
-    }
+BufferBinding BindGroupBase::GetBindingAsBufferBinding(BindingIndex bindingIndex) {
+    ASSERT(!IsError());
+    ASSERT(bindingIndex < mLayout->GetBindingCount());
+    ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Buffer);
+    BufferBase* buffer = static_cast<BufferBase*>(mBindingData.bindings[bindingIndex].Get());
+    return {buffer, mBindingData.bufferData[bindingIndex].offset,
+            mBindingData.bufferData[bindingIndex].size};
+}
 
-    SamplerBase* BindGroupBase::GetBindingAsSampler(BindingIndex bindingIndex) const {
-        ASSERT(!IsError());
-        ASSERT(bindingIndex < mLayout->GetBindingCount());
-        ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Sampler);
-        return static_cast<SamplerBase*>(mBindingData.bindings[bindingIndex].Get());
-    }
+SamplerBase* BindGroupBase::GetBindingAsSampler(BindingIndex bindingIndex) const {
+    ASSERT(!IsError());
+    ASSERT(bindingIndex < mLayout->GetBindingCount());
+    ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Sampler);
+    return static_cast<SamplerBase*>(mBindingData.bindings[bindingIndex].Get());
+}
 
-    TextureViewBase* BindGroupBase::GetBindingAsTextureView(BindingIndex bindingIndex) {
-        ASSERT(!IsError());
-        ASSERT(bindingIndex < mLayout->GetBindingCount());
-        ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Texture ||
-               mLayout->GetBindingInfo(bindingIndex).bindingType ==
-                   BindingInfoType::StorageTexture);
-        return static_cast<TextureViewBase*>(mBindingData.bindings[bindingIndex].Get());
-    }
+TextureViewBase* BindGroupBase::GetBindingAsTextureView(BindingIndex bindingIndex) {
+    ASSERT(!IsError());
+    ASSERT(bindingIndex < mLayout->GetBindingCount());
+    ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Texture ||
+           mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::StorageTexture);
+    return static_cast<TextureViewBase*>(mBindingData.bindings[bindingIndex].Get());
+}
 
-    const std::vector<Ref<ExternalTextureBase>>& BindGroupBase::GetBoundExternalTextures() const {
-        return mBoundExternalTextures;
-    }
+const std::vector<Ref<ExternalTextureBase>>& BindGroupBase::GetBoundExternalTextures() const {
+    return mBoundExternalTextures;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/BindGroup.h b/src/dawn/native/BindGroup.h
index 4826136..236e4fb 100644
--- a/src/dawn/native/BindGroup.h
+++ b/src/dawn/native/BindGroup.h
@@ -29,68 +29,67 @@
 
 namespace dawn::native {
 
-    class DeviceBase;
+class DeviceBase;
 
-    MaybeError ValidateBindGroupDescriptor(DeviceBase* device,
-                                           const BindGroupDescriptor* descriptor);
+MaybeError ValidateBindGroupDescriptor(DeviceBase* device, const BindGroupDescriptor* descriptor);
 
-    struct BufferBinding {
-        BufferBase* buffer;
-        uint64_t offset;
-        uint64_t size;
-    };
+struct BufferBinding {
+    BufferBase* buffer;
+    uint64_t offset;
+    uint64_t size;
+};
 
-    class BindGroupBase : public ApiObjectBase {
-      public:
-        static BindGroupBase* MakeError(DeviceBase* device);
+class BindGroupBase : public ApiObjectBase {
+  public:
+    static BindGroupBase* MakeError(DeviceBase* device);
 
-        ObjectType GetType() const override;
+    ObjectType GetType() const override;
 
-        BindGroupLayoutBase* GetLayout();
-        const BindGroupLayoutBase* GetLayout() const;
-        BufferBinding GetBindingAsBufferBinding(BindingIndex bindingIndex);
-        SamplerBase* GetBindingAsSampler(BindingIndex bindingIndex) const;
-        TextureViewBase* GetBindingAsTextureView(BindingIndex bindingIndex);
-        const ityp::span<uint32_t, uint64_t>& GetUnverifiedBufferSizes() const;
-        const std::vector<Ref<ExternalTextureBase>>& GetBoundExternalTextures() const;
+    BindGroupLayoutBase* GetLayout();
+    const BindGroupLayoutBase* GetLayout() const;
+    BufferBinding GetBindingAsBufferBinding(BindingIndex bindingIndex);
+    SamplerBase* GetBindingAsSampler(BindingIndex bindingIndex) const;
+    TextureViewBase* GetBindingAsTextureView(BindingIndex bindingIndex);
+    const ityp::span<uint32_t, uint64_t>& GetUnverifiedBufferSizes() const;
+    const std::vector<Ref<ExternalTextureBase>>& GetBoundExternalTextures() const;
 
-      protected:
-        // To save memory, the size of a bind group is dynamically determined and the bind group is
-        // placement-allocated into memory big enough to hold the bind group with its
-        // dynamically-sized bindings after it. The pointer of the memory of the beginning of the
-        // binding data should be passed as |bindingDataStart|.
-        BindGroupBase(DeviceBase* device,
-                      const BindGroupDescriptor* descriptor,
-                      void* bindingDataStart);
+  protected:
+    // To save memory, the size of a bind group is dynamically determined and the bind group is
+    // placement-allocated into memory big enough to hold the bind group with its
+    // dynamically-sized bindings after it. A pointer to the beginning of the binding data
+    // memory should be passed as |bindingDataStart|.
+    BindGroupBase(DeviceBase* device,
+                  const BindGroupDescriptor* descriptor,
+                  void* bindingDataStart);
 
-        // Helper to instantiate BindGroupBase. We pass in |derived| because BindGroupBase may not
-        // be first in the allocation. The binding data is stored after the Derived class.
-        template <typename Derived>
-        BindGroupBase(Derived* derived, DeviceBase* device, const BindGroupDescriptor* descriptor)
-            : BindGroupBase(device,
-                            descriptor,
-                            AlignPtr(reinterpret_cast<char*>(derived) + sizeof(Derived),
-                                     descriptor->layout->GetBindingDataAlignment())) {
-            static_assert(std::is_base_of<BindGroupBase, Derived>::value);
-        }
+    // Helper to instantiate BindGroupBase. We pass in |derived| because BindGroupBase may not
+    // be first in the allocation. The binding data is stored after the Derived class.
+    template <typename Derived>
+    BindGroupBase(Derived* derived, DeviceBase* device, const BindGroupDescriptor* descriptor)
+        : BindGroupBase(device,
+                        descriptor,
+                        AlignPtr(reinterpret_cast<char*>(derived) + sizeof(Derived),
+                                 descriptor->layout->GetBindingDataAlignment())) {
+        static_assert(std::is_base_of<BindGroupBase, Derived>::value);
+    }
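A sketch of how a backend uses this helper, with a hypothetical BackendBindGroup standing in
for the real backend types; the slab it is constructed into must be large enough for the
object plus the layout's binding data:

    class BackendBindGroup final : public BindGroupBase {
      public:
        BackendBindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor)
            : BindGroupBase(this, device, descriptor) {}
    };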
 
-        // Constructor used only for mocking and testing.
-        explicit BindGroupBase(DeviceBase* device);
-        void DestroyImpl() override;
+    // Constructor used only for mocking and testing.
+    explicit BindGroupBase(DeviceBase* device);
+    void DestroyImpl() override;
 
-        ~BindGroupBase() override;
+    ~BindGroupBase() override;
 
-      private:
-        BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-        void DeleteThis() override;
+  private:
+    BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+    void DeleteThis() override;
 
-        Ref<BindGroupLayoutBase> mLayout;
-        BindGroupLayoutBase::BindingDataPointers mBindingData;
+    Ref<BindGroupLayoutBase> mLayout;
+    BindGroupLayoutBase::BindingDataPointers mBindingData;
 
-        // TODO(dawn:1293): Store external textures in
-        // BindGroupLayoutBase::BindingDataPointers::bindings
-        std::vector<Ref<ExternalTextureBase>> mBoundExternalTextures;
-    };
+    // TODO(dawn:1293): Store external textures in
+    // BindGroupLayoutBase::BindingDataPointers::bindings
+    std::vector<Ref<ExternalTextureBase>> mBoundExternalTextures;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/BindGroupLayout.cpp b/src/dawn/native/BindGroupLayout.cpp
index f2c4e72..b57cd69 100644
--- a/src/dawn/native/BindGroupLayout.cpp
+++ b/src/dawn/native/BindGroupLayout.cpp
@@ -31,648 +31,639 @@
 
 namespace dawn::native {
 
-    namespace {
-        MaybeError ValidateStorageTextureFormat(DeviceBase* device,
-                                                wgpu::TextureFormat storageTextureFormat) {
-            const Format* format = nullptr;
-            DAWN_TRY_ASSIGN(format, device->GetInternalFormat(storageTextureFormat));
+namespace {
+MaybeError ValidateStorageTextureFormat(DeviceBase* device,
+                                        wgpu::TextureFormat storageTextureFormat) {
+    const Format* format = nullptr;
+    DAWN_TRY_ASSIGN(format, device->GetInternalFormat(storageTextureFormat));
 
-            ASSERT(format != nullptr);
-            DAWN_INVALID_IF(!format->supportsStorageUsage,
-                            "Texture format (%s) does not support storage textures.",
-                            storageTextureFormat);
+    ASSERT(format != nullptr);
+    DAWN_INVALID_IF(!format->supportsStorageUsage,
+                    "Texture format (%s) does not support storage textures.", storageTextureFormat);
 
+    return {};
+}
+
+MaybeError ValidateStorageTextureViewDimension(wgpu::TextureViewDimension dimension) {
+    switch (dimension) {
+        case wgpu::TextureViewDimension::Cube:
+        case wgpu::TextureViewDimension::CubeArray:
+            return DAWN_FORMAT_VALIDATION_ERROR(
+                "%s texture views cannot be used as storage textures.", dimension);
+
+        case wgpu::TextureViewDimension::e1D:
+        case wgpu::TextureViewDimension::e2D:
+        case wgpu::TextureViewDimension::e2DArray:
+        case wgpu::TextureViewDimension::e3D:
             return {};
+
+        case wgpu::TextureViewDimension::Undefined:
+            break;
+    }
+    UNREACHABLE();
+}
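A storage-texture layout entry that passes both checks above, as a sketch with the public
types (whether the format is usable still depends on ValidateStorageTextureFormat):

    wgpu::BindGroupLayoutEntry entry = {};
    entry.binding = 0;
    entry.visibility = wgpu::ShaderStage::Compute;
    entry.storageTexture.access = wgpu::StorageTextureAccess::WriteOnly;
    entry.storageTexture.format = wgpu::TextureFormat::RGBA8Unorm;
    entry.storageTexture.viewDimension = wgpu::TextureViewDimension::e2D;  // Cube* is rejected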
+
+MaybeError ValidateBindGroupLayoutEntry(DeviceBase* device,
+                                        const BindGroupLayoutEntry& entry,
+                                        bool allowInternalBinding) {
+    DAWN_TRY(ValidateShaderStage(entry.visibility));
+
+    int bindingMemberCount = 0;
+    BindingInfoType bindingType;
+    wgpu::ShaderStage allowedStages = kAllStages;
+
+    if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
+        bindingMemberCount++;
+        bindingType = BindingInfoType::Buffer;
+        const BufferBindingLayout& buffer = entry.buffer;
+
+        // The kInternalStorageBufferBinding is used internally and not a value
+        // in wgpu::BufferBindingType.
+        if (buffer.type == kInternalStorageBufferBinding) {
+            DAWN_INVALID_IF(!allowInternalBinding, "Internal binding types are disallowed.");
+        } else {
+            DAWN_TRY(ValidateBufferBindingType(buffer.type));
         }
 
-        MaybeError ValidateStorageTextureViewDimension(wgpu::TextureViewDimension dimension) {
-            switch (dimension) {
-                case wgpu::TextureViewDimension::Cube:
-                case wgpu::TextureViewDimension::CubeArray:
-                    return DAWN_FORMAT_VALIDATION_ERROR(
-                        "%s texture views cannot be used as storage textures.", dimension);
-
-                case wgpu::TextureViewDimension::e1D:
-                case wgpu::TextureViewDimension::e2D:
-                case wgpu::TextureViewDimension::e2DArray:
-                case wgpu::TextureViewDimension::e3D:
-                    return {};
-
-                case wgpu::TextureViewDimension::Undefined:
-                    break;
-            }
-            UNREACHABLE();
+        if (buffer.type == wgpu::BufferBindingType::Storage ||
+            buffer.type == kInternalStorageBufferBinding) {
+            allowedStages &= ~wgpu::ShaderStage::Vertex;
         }
-
-        MaybeError ValidateBindGroupLayoutEntry(DeviceBase* device,
-                                                const BindGroupLayoutEntry& entry,
-                                                bool allowInternalBinding) {
-            DAWN_TRY(ValidateShaderStage(entry.visibility));
-
-            int bindingMemberCount = 0;
-            BindingInfoType bindingType;
-            wgpu::ShaderStage allowedStages = kAllStages;
-
-            if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
-                bindingMemberCount++;
-                bindingType = BindingInfoType::Buffer;
-                const BufferBindingLayout& buffer = entry.buffer;
-
-                // The kInternalStorageBufferBinding is used internally and not a value
-                // in wgpu::BufferBindingType.
-                if (buffer.type == kInternalStorageBufferBinding) {
-                    DAWN_INVALID_IF(!allowInternalBinding, "Internal binding types are disallowed");
-                } else {
-                    DAWN_TRY(ValidateBufferBindingType(buffer.type));
-                }
-
-                if (buffer.type == wgpu::BufferBindingType::Storage ||
-                    buffer.type == kInternalStorageBufferBinding) {
-                    allowedStages &= ~wgpu::ShaderStage::Vertex;
-                }
-            }
-
-            if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
-                bindingMemberCount++;
-                bindingType = BindingInfoType::Sampler;
-                DAWN_TRY(ValidateSamplerBindingType(entry.sampler.type));
-            }
-
-            if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
-                bindingMemberCount++;
-                bindingType = BindingInfoType::Texture;
-                const TextureBindingLayout& texture = entry.texture;
-                DAWN_TRY(ValidateTextureSampleType(texture.sampleType));
-
-                // viewDimension defaults to 2D if left undefined, needs validation otherwise.
-                wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D;
-                if (texture.viewDimension != wgpu::TextureViewDimension::Undefined) {
-                    DAWN_TRY(ValidateTextureViewDimension(texture.viewDimension));
-                    viewDimension = texture.viewDimension;
-                }
-
-                DAWN_INVALID_IF(
-                    texture.multisampled && viewDimension != wgpu::TextureViewDimension::e2D,
-                    "View dimension (%s) for a multisampled texture bindings was not %s.",
-                    viewDimension, wgpu::TextureViewDimension::e2D);
-            }
-
-            if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
-                bindingMemberCount++;
-                bindingType = BindingInfoType::StorageTexture;
-                const StorageTextureBindingLayout& storageTexture = entry.storageTexture;
-                DAWN_TRY(ValidateStorageTextureAccess(storageTexture.access));
-                DAWN_TRY(ValidateStorageTextureFormat(device, storageTexture.format));
-
-                // viewDimension defaults to 2D if left undefined, needs validation otherwise.
-                if (storageTexture.viewDimension != wgpu::TextureViewDimension::Undefined) {
-                    DAWN_TRY(ValidateTextureViewDimension(storageTexture.viewDimension));
-                    DAWN_TRY(ValidateStorageTextureViewDimension(storageTexture.viewDimension));
-                }
-
-                if (storageTexture.access == wgpu::StorageTextureAccess::WriteOnly) {
-                    allowedStages &= ~wgpu::ShaderStage::Vertex;
-                }
-            }
-
-            const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
-            FindInChain(entry.nextInChain, &externalTextureBindingLayout);
-            if (externalTextureBindingLayout != nullptr) {
-                bindingMemberCount++;
-                bindingType = BindingInfoType::ExternalTexture;
-            }
-
-            DAWN_INVALID_IF(bindingMemberCount == 0,
-                            "BindGroupLayoutEntry had none of buffer, sampler, texture, "
-                            "storageTexture, or externalTexture set");
-
-            DAWN_INVALID_IF(bindingMemberCount != 1,
-                            "BindGroupLayoutEntry had more than one of buffer, sampler, texture, "
-                            "storageTexture, or externalTexture set");
-
-            DAWN_INVALID_IF(
-                !IsSubset(entry.visibility, allowedStages),
-                "%s bindings cannot be used with a visibility of %s. Only %s are allowed.",
-                bindingType, entry.visibility, allowedStages);
-
-            return {};
-        }
-
-        BindGroupLayoutEntry CreateSampledTextureBindingForExternalTexture(
-            uint32_t binding,
-            wgpu::ShaderStage visibility) {
-            BindGroupLayoutEntry entry;
-            entry.binding = binding;
-            entry.visibility = visibility;
-            entry.texture.viewDimension = wgpu::TextureViewDimension::e2D;
-            entry.texture.multisampled = false;
-            entry.texture.sampleType = wgpu::TextureSampleType::Float;
-            return entry;
-        }
-
-        BindGroupLayoutEntry CreateUniformBindingForExternalTexture(uint32_t binding,
-                                                                    wgpu::ShaderStage visibility) {
-            BindGroupLayoutEntry entry;
-            entry.binding = binding;
-            entry.visibility = visibility;
-            entry.buffer.hasDynamicOffset = false;
-            entry.buffer.type = wgpu::BufferBindingType::Uniform;
-            return entry;
-        }
-
-        std::vector<BindGroupLayoutEntry> ExtractAndExpandBglEntries(
-            const BindGroupLayoutDescriptor* descriptor,
-            BindingCounts* bindingCounts,
-            ExternalTextureBindingExpansionMap* externalTextureBindingExpansions) {
-            std::vector<BindGroupLayoutEntry> expandedOutput;
-
-            // When new bgl entries are created, we use binding numbers larger than
-            // kMaxBindingNumber to ensure there are no collisions.
-            uint32_t nextOpenBindingNumberForNewEntry = kMaxBindingNumber + 1;
-            for (uint32_t i = 0; i < descriptor->entryCount; i++) {
-                const BindGroupLayoutEntry& entry = descriptor->entries[i];
-                const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
-                FindInChain(entry.nextInChain, &externalTextureBindingLayout);
-                // External textures are expanded from a texture_external into two sampled texture
-                // bindings and one uniform buffer binding. The original binding number is used
-                // for the first sampled texture.
-                if (externalTextureBindingLayout != nullptr) {
-                    for (SingleShaderStage stage : IterateStages(entry.visibility)) {
-                        // External textures are not fully implemented, which means that expanding
-                        // the external texture at this time will not occupy the same number of
-                        // binding slots as defined in the WebGPU specification. Here we prematurely
-                        // increment the binding counts for an additional sampled textures and a
-                        // sampler so that an external texture will occupy the correct number of
-                        // slots for correct validation of shader binding limits.
-                        // TODO(dawn:1082): Consider removing this and instead making a change to
-                        // the validation.
-                        constexpr uint32_t kUnimplementedSampledTexturesPerExternalTexture = 2;
-                        constexpr uint32_t kUnimplementedSamplersPerExternalTexture = 1;
-                        bindingCounts->perStage[stage].sampledTextureCount +=
-                            kUnimplementedSampledTexturesPerExternalTexture;
-                        bindingCounts->perStage[stage].samplerCount +=
-                            kUnimplementedSamplersPerExternalTexture;
-                    }
-
-                    dawn_native::ExternalTextureBindingExpansion bindingExpansion;
-
-                    BindGroupLayoutEntry plane0Entry =
-                        CreateSampledTextureBindingForExternalTexture(entry.binding,
-                                                                      entry.visibility);
-                    bindingExpansion.plane0 = BindingNumber(plane0Entry.binding);
-                    expandedOutput.push_back(plane0Entry);
-
-                    BindGroupLayoutEntry plane1Entry =
-                        CreateSampledTextureBindingForExternalTexture(
-                            nextOpenBindingNumberForNewEntry++, entry.visibility);
-                    bindingExpansion.plane1 = BindingNumber(plane1Entry.binding);
-                    expandedOutput.push_back(plane1Entry);
-
-                    BindGroupLayoutEntry paramsEntry = CreateUniformBindingForExternalTexture(
-                        nextOpenBindingNumberForNewEntry++, entry.visibility);
-                    bindingExpansion.params = BindingNumber(paramsEntry.binding);
-                    expandedOutput.push_back(paramsEntry);
-
-                    externalTextureBindingExpansions->insert(
-                        {BindingNumber(entry.binding), bindingExpansion});
-                } else {
-                    expandedOutput.push_back(entry);
-                }
-            }
-
-            return expandedOutput;
-        }
-    }  // anonymous namespace
-
-    MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
-                                                 const BindGroupLayoutDescriptor* descriptor,
-                                                 bool allowInternalBinding) {
-        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
-
-        std::set<BindingNumber> bindingsSet;
-        BindingCounts bindingCounts = {};
-
-        for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
-            const BindGroupLayoutEntry& entry = descriptor->entries[i];
-            BindingNumber bindingNumber = BindingNumber(entry.binding);
-
-            DAWN_INVALID_IF(bindingNumber > kMaxBindingNumberTyped,
-                            "Binding number (%u) exceeds the maximum binding number (%u).",
-                            uint32_t(bindingNumber), uint32_t(kMaxBindingNumberTyped));
-            DAWN_INVALID_IF(bindingsSet.count(bindingNumber) != 0,
-                            "On entries[%u]: binding index (%u) was specified by a previous entry.",
-                            i, entry.binding);
-
-            DAWN_TRY_CONTEXT(ValidateBindGroupLayoutEntry(device, entry, allowInternalBinding),
-                             "validating entries[%u]", i);
-
-            IncrementBindingCounts(&bindingCounts, entry);
-
-            bindingsSet.insert(bindingNumber);
-        }
-
-        DAWN_TRY_CONTEXT(ValidateBindingCounts(bindingCounts), "validating binding counts");
-
-        return {};
     }
 
-    namespace {
+    if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
+        bindingMemberCount++;
+        bindingType = BindingInfoType::Sampler;
+        DAWN_TRY(ValidateSamplerBindingType(entry.sampler.type));
+    }
 
-        bool operator!=(const BindingInfo& a, const BindingInfo& b) {
-            if (a.visibility != b.visibility || a.bindingType != b.bindingType) {
-                return true;
-            }
+    if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
+        bindingMemberCount++;
+        bindingType = BindingInfoType::Texture;
+        const TextureBindingLayout& texture = entry.texture;
+        DAWN_TRY(ValidateTextureSampleType(texture.sampleType));
 
-            switch (a.bindingType) {
-                case BindingInfoType::Buffer:
-                    return a.buffer.type != b.buffer.type ||
-                           a.buffer.hasDynamicOffset != b.buffer.hasDynamicOffset ||
-                           a.buffer.minBindingSize != b.buffer.minBindingSize;
-                case BindingInfoType::Sampler:
-                    return a.sampler.type != b.sampler.type;
-                case BindingInfoType::Texture:
-                    return a.texture.sampleType != b.texture.sampleType ||
-                           a.texture.viewDimension != b.texture.viewDimension ||
-                           a.texture.multisampled != b.texture.multisampled;
-                case BindingInfoType::StorageTexture:
-                    return a.storageTexture.access != b.storageTexture.access ||
-                           a.storageTexture.viewDimension != b.storageTexture.viewDimension ||
-                           a.storageTexture.format != b.storageTexture.format;
-                case BindingInfoType::ExternalTexture:
-                    return false;
-            }
-            UNREACHABLE();
+        // viewDimension defaults to 2D if left undefined; it needs validation otherwise.
+        wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D;
+        if (texture.viewDimension != wgpu::TextureViewDimension::Undefined) {
+            DAWN_TRY(ValidateTextureViewDimension(texture.viewDimension));
+            viewDimension = texture.viewDimension;
         }
 
-        bool IsBufferBinding(const BindGroupLayoutEntry& binding) {
-            return binding.buffer.type != wgpu::BufferBindingType::Undefined;
+        DAWN_INVALID_IF(texture.multisampled && viewDimension != wgpu::TextureViewDimension::e2D,
+                        "View dimension (%s) for a multisampled texture bindings was not %s.",
+                        viewDimension, wgpu::TextureViewDimension::e2D);
+    }
+
+    if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+        bindingMemberCount++;
+        bindingType = BindingInfoType::StorageTexture;
+        const StorageTextureBindingLayout& storageTexture = entry.storageTexture;
+        DAWN_TRY(ValidateStorageTextureAccess(storageTexture.access));
+        DAWN_TRY(ValidateStorageTextureFormat(device, storageTexture.format));
+
+        // viewDimension defaults to 2D if left undefined; it needs validation otherwise.
+        if (storageTexture.viewDimension != wgpu::TextureViewDimension::Undefined) {
+            DAWN_TRY(ValidateTextureViewDimension(storageTexture.viewDimension));
+            DAWN_TRY(ValidateStorageTextureViewDimension(storageTexture.viewDimension));
         }
 
-        bool BindingHasDynamicOffset(const BindGroupLayoutEntry& binding) {
-            if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
-                return binding.buffer.hasDynamicOffset;
+        if (storageTexture.access == wgpu::StorageTextureAccess::WriteOnly) {
+            allowedStages &= ~wgpu::ShaderStage::Vertex;
+        }
+    }
+
+    const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
+    FindInChain(entry.nextInChain, &externalTextureBindingLayout);
+    if (externalTextureBindingLayout != nullptr) {
+        bindingMemberCount++;
+        bindingType = BindingInfoType::ExternalTexture;
+    }
+
+    DAWN_INVALID_IF(bindingMemberCount == 0,
+                    "BindGroupLayoutEntry had none of buffer, sampler, texture, "
+                    "storageTexture, or externalTexture set");
+
+    DAWN_INVALID_IF(bindingMemberCount != 1,
+                    "BindGroupLayoutEntry had more than one of buffer, sampler, texture, "
+                    "storageTexture, or externalTexture set");
+
+    DAWN_INVALID_IF(!IsSubset(entry.visibility, allowedStages),
+                    "%s bindings cannot be used with a visibility of %s. Only %s are allowed.",
+                    bindingType, entry.visibility, allowedStages);
+
+    return {};
+}
+
+BindGroupLayoutEntry CreateSampledTextureBindingForExternalTexture(uint32_t binding,
+                                                                   wgpu::ShaderStage visibility) {
+    BindGroupLayoutEntry entry;
+    entry.binding = binding;
+    entry.visibility = visibility;
+    entry.texture.viewDimension = wgpu::TextureViewDimension::e2D;
+    entry.texture.multisampled = false;
+    entry.texture.sampleType = wgpu::TextureSampleType::Float;
+    return entry;
+}
+
+BindGroupLayoutEntry CreateUniformBindingForExternalTexture(uint32_t binding,
+                                                            wgpu::ShaderStage visibility) {
+    BindGroupLayoutEntry entry;
+    entry.binding = binding;
+    entry.visibility = visibility;
+    entry.buffer.hasDynamicOffset = false;
+    entry.buffer.type = wgpu::BufferBindingType::Uniform;
+    return entry;
+}
+
+std::vector<BindGroupLayoutEntry> ExtractAndExpandBglEntries(
+    const BindGroupLayoutDescriptor* descriptor,
+    BindingCounts* bindingCounts,
+    ExternalTextureBindingExpansionMap* externalTextureBindingExpansions) {
+    std::vector<BindGroupLayoutEntry> expandedOutput;
+
+    // When new bgl entries are created, we use binding numbers larger than
+    // kMaxBindingNumber to ensure there are no collisions.
+    uint32_t nextOpenBindingNumberForNewEntry = kMaxBindingNumber + 1;
+    for (uint32_t i = 0; i < descriptor->entryCount; i++) {
+        const BindGroupLayoutEntry& entry = descriptor->entries[i];
+        const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
+        FindInChain(entry.nextInChain, &externalTextureBindingLayout);
+        // External textures are expanded from a texture_external into two sampled texture
+        // bindings and one uniform buffer binding. The original binding number is used
+        // for the first sampled texture.
+        if (externalTextureBindingLayout != nullptr) {
+            for (SingleShaderStage stage : IterateStages(entry.visibility)) {
+                // External textures are not fully implemented, which means that expanding
+                // the external texture at this time will not occupy the same number of
+                // binding slots as defined in the WebGPU specification. Here we prematurely
+                // increment the binding counts for additional sampled textures and a
+                // sampler so that an external texture will occupy the correct number of
+                // slots for correct validation of shader binding limits.
+                // TODO(dawn:1082): Consider removing this and instead making a change to
+                // the validation.
+                constexpr uint32_t kUnimplementedSampledTexturesPerExternalTexture = 2;
+                constexpr uint32_t kUnimplementedSamplersPerExternalTexture = 1;
+                bindingCounts->perStage[stage].sampledTextureCount +=
+                    kUnimplementedSampledTexturesPerExternalTexture;
+                bindingCounts->perStage[stage].samplerCount +=
+                    kUnimplementedSamplersPerExternalTexture;
             }
+
+            dawn_native::ExternalTextureBindingExpansion bindingExpansion;
+
+            BindGroupLayoutEntry plane0Entry =
+                CreateSampledTextureBindingForExternalTexture(entry.binding, entry.visibility);
+            bindingExpansion.plane0 = BindingNumber(plane0Entry.binding);
+            expandedOutput.push_back(plane0Entry);
+
+            BindGroupLayoutEntry plane1Entry = CreateSampledTextureBindingForExternalTexture(
+                nextOpenBindingNumberForNewEntry++, entry.visibility);
+            bindingExpansion.plane1 = BindingNumber(plane1Entry.binding);
+            expandedOutput.push_back(plane1Entry);
+
+            BindGroupLayoutEntry paramsEntry = CreateUniformBindingForExternalTexture(
+                nextOpenBindingNumberForNewEntry++, entry.visibility);
+            bindingExpansion.params = BindingNumber(paramsEntry.binding);
+            expandedOutput.push_back(paramsEntry);
+
+            externalTextureBindingExpansions->insert(
+                {BindingNumber(entry.binding), bindingExpansion});
+        } else {
+            expandedOutput.push_back(entry);
+        }
+    }
+
+    return expandedOutput;
+}
+}  // anonymous namespace
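To make the expansion concrete: one chained external texture layout entry becomes three
internal entries, with the extra binding numbers drawn from above kMaxBindingNumber (a sketch
using the webgpu_cpp wrapper):

    wgpu::ExternalTextureBindingLayout externalLayout;

    wgpu::BindGroupLayoutEntry entry = {};
    entry.binding = 3;
    entry.visibility = wgpu::ShaderStage::Fragment;
    entry.nextInChain = &externalLayout;

    // ExtractAndExpandBglEntries produces, for this entry:
    //   binding 3             -> plane0 sampled texture
    //   kMaxBindingNumber + 1 -> plane1 sampled texture
    //   kMaxBindingNumber + 2 -> params uniform buffer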
+
+MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
+                                             const BindGroupLayoutDescriptor* descriptor,
+                                             bool allowInternalBinding) {
+    DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+    std::set<BindingNumber> bindingsSet;
+    BindingCounts bindingCounts = {};
+
+    for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+        const BindGroupLayoutEntry& entry = descriptor->entries[i];
+        BindingNumber bindingNumber = BindingNumber(entry.binding);
+
+        DAWN_INVALID_IF(bindingNumber > kMaxBindingNumberTyped,
+                        "Binding number (%u) exceeds the maximum binding number (%u).",
+                        uint32_t(bindingNumber), uint32_t(kMaxBindingNumberTyped));
+        DAWN_INVALID_IF(bindingsSet.count(bindingNumber) != 0,
+                        "On entries[%u]: binding index (%u) was specified by a previous entry.", i,
+                        entry.binding);
+
+        DAWN_TRY_CONTEXT(ValidateBindGroupLayoutEntry(device, entry, allowInternalBinding),
+                         "validating entries[%u]", i);
+
+        IncrementBindingCounts(&bindingCounts, entry);
+
+        bindingsSet.insert(bindingNumber);
+    }
+
+    DAWN_TRY_CONTEXT(ValidateBindingCounts(bindingCounts), "validating binding counts");
+
+    return {};
+}
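Each entry must set exactly one of buffer, sampler, texture, storageTexture, or a chained
externalTexture; a minimal layout that satisfies the loop above (sketch, public API, `device`
assumed):

    wgpu::BindGroupLayoutEntry entries[2] = {};
    entries[0].binding = 0;
    entries[0].visibility = wgpu::ShaderStage::Fragment;
    entries[0].buffer.type = wgpu::BufferBindingType::Uniform;      // only .buffer set
    entries[1].binding = 1;
    entries[1].visibility = wgpu::ShaderStage::Fragment;
    entries[1].sampler.type = wgpu::SamplerBindingType::Filtering;  // only .sampler set

    wgpu::BindGroupLayoutDescriptor desc = {};
    desc.entryCount = 2;
    desc.entries = entries;
    wgpu::BindGroupLayout layout = device.CreateBindGroupLayout(&desc);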
+
+namespace {
+
+bool operator!=(const BindingInfo& a, const BindingInfo& b) {
+    if (a.visibility != b.visibility || a.bindingType != b.bindingType) {
+        return true;
+    }
+
+    switch (a.bindingType) {
+        case BindingInfoType::Buffer:
+            return a.buffer.type != b.buffer.type ||
+                   a.buffer.hasDynamicOffset != b.buffer.hasDynamicOffset ||
+                   a.buffer.minBindingSize != b.buffer.minBindingSize;
+        case BindingInfoType::Sampler:
+            return a.sampler.type != b.sampler.type;
+        case BindingInfoType::Texture:
+            return a.texture.sampleType != b.texture.sampleType ||
+                   a.texture.viewDimension != b.texture.viewDimension ||
+                   a.texture.multisampled != b.texture.multisampled;
+        case BindingInfoType::StorageTexture:
+            return a.storageTexture.access != b.storageTexture.access ||
+                   a.storageTexture.viewDimension != b.storageTexture.viewDimension ||
+                   a.storageTexture.format != b.storageTexture.format;
+        case BindingInfoType::ExternalTexture:
             return false;
+    }
+    UNREACHABLE();
+}
+
+bool IsBufferBinding(const BindGroupLayoutEntry& binding) {
+    return binding.buffer.type != wgpu::BufferBindingType::Undefined;
+}
+
+bool BindingHasDynamicOffset(const BindGroupLayoutEntry& binding) {
+    if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
+        return binding.buffer.hasDynamicOffset;
+    }
+    return false;
+}
+
+BindingInfo CreateBindGroupLayoutInfo(const BindGroupLayoutEntry& binding) {
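+    // The binding type is inferred from whichever nested layout member is not Undefined;
+    // external textures are detected via the chained ExternalTextureBindingLayout instead.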
+    BindingInfo bindingInfo;
+    bindingInfo.binding = BindingNumber(binding.binding);
+    bindingInfo.visibility = binding.visibility;
+
+    if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
+        bindingInfo.bindingType = BindingInfoType::Buffer;
+        bindingInfo.buffer = binding.buffer;
+    } else if (binding.sampler.type != wgpu::SamplerBindingType::Undefined) {
+        bindingInfo.bindingType = BindingInfoType::Sampler;
+        bindingInfo.sampler = binding.sampler;
+    } else if (binding.texture.sampleType != wgpu::TextureSampleType::Undefined) {
+        bindingInfo.bindingType = BindingInfoType::Texture;
+        bindingInfo.texture = binding.texture;
+
+        if (binding.texture.viewDimension == wgpu::TextureViewDimension::Undefined) {
+            bindingInfo.texture.viewDimension = wgpu::TextureViewDimension::e2D;
         }
+    } else if (binding.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+        bindingInfo.bindingType = BindingInfoType::StorageTexture;
+        bindingInfo.storageTexture = binding.storageTexture;
 
-        BindingInfo CreateBindGroupLayoutInfo(const BindGroupLayoutEntry& binding) {
-            BindingInfo bindingInfo;
-            bindingInfo.binding = BindingNumber(binding.binding);
-            bindingInfo.visibility = binding.visibility;
-
-            if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
-                bindingInfo.bindingType = BindingInfoType::Buffer;
-                bindingInfo.buffer = binding.buffer;
-            } else if (binding.sampler.type != wgpu::SamplerBindingType::Undefined) {
-                bindingInfo.bindingType = BindingInfoType::Sampler;
-                bindingInfo.sampler = binding.sampler;
-            } else if (binding.texture.sampleType != wgpu::TextureSampleType::Undefined) {
-                bindingInfo.bindingType = BindingInfoType::Texture;
-                bindingInfo.texture = binding.texture;
-
-                if (binding.texture.viewDimension == wgpu::TextureViewDimension::Undefined) {
-                    bindingInfo.texture.viewDimension = wgpu::TextureViewDimension::e2D;
-                }
-            } else if (binding.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
-                bindingInfo.bindingType = BindingInfoType::StorageTexture;
-                bindingInfo.storageTexture = binding.storageTexture;
-
-                if (binding.storageTexture.viewDimension == wgpu::TextureViewDimension::Undefined) {
-                    bindingInfo.storageTexture.viewDimension = wgpu::TextureViewDimension::e2D;
-                }
-            } else {
-                const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
-                FindInChain(binding.nextInChain, &externalTextureBindingLayout);
-                if (externalTextureBindingLayout != nullptr) {
-                    bindingInfo.bindingType = BindingInfoType::ExternalTexture;
-                }
-            }
-
-            return bindingInfo;
+        if (binding.storageTexture.viewDimension == wgpu::TextureViewDimension::Undefined) {
+            bindingInfo.storageTexture.viewDimension = wgpu::TextureViewDimension::e2D;
         }
+    } else {
+        const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
+        FindInChain(binding.nextInChain, &externalTextureBindingLayout);
+        if (externalTextureBindingLayout != nullptr) {
+            bindingInfo.bindingType = BindingInfoType::ExternalTexture;
+        }
+    }
 
-        bool SortBindingsCompare(const BindGroupLayoutEntry& a, const BindGroupLayoutEntry& b) {
-            const bool aIsBuffer = IsBufferBinding(a);
-            const bool bIsBuffer = IsBufferBinding(b);
-            if (aIsBuffer != bIsBuffer) {
-                // Always place buffers first.
-                return aIsBuffer;
-            }
+    return bindingInfo;
+}
 
-            if (aIsBuffer) {
-                bool aHasDynamicOffset = BindingHasDynamicOffset(a);
-                bool bHasDynamicOffset = BindingHasDynamicOffset(b);
-                ASSERT(bIsBuffer);
-                if (aHasDynamicOffset != bHasDynamicOffset) {
-                    // Buffers with dynamic offsets should come before those without.
-                    // This makes it easy to iterate over the dynamic buffer bindings
-                    // [0, dynamicBufferCount) during validation.
-                    return aHasDynamicOffset;
-                }
-                if (aHasDynamicOffset) {
-                    ASSERT(bHasDynamicOffset);
-                    ASSERT(a.binding != b.binding);
-                    // Above, we ensured that dynamic buffers are first. Now, ensure that
-                    // dynamic buffer bindings are in increasing order. This is because dynamic
-                    // buffer offsets are applied in increasing order of binding number.
-                    return a.binding < b.binding;
-                }
-            }
+bool SortBindingsCompare(const BindGroupLayoutEntry& a, const BindGroupLayoutEntry& b) {
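+    // Strict weak ordering used to pack entries: buffers come first, with dynamic-offset
+    // buffers ahead of the rest and ordered among themselves by increasing binding
+    // number; remaining entries sort by binding type, visibility, type-specific
+    // parameters, and finally binding number as the tiebreaker.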
+    const bool aIsBuffer = IsBufferBinding(a);
+    const bool bIsBuffer = IsBufferBinding(b);
+    if (aIsBuffer != bIsBuffer) {
+        // Always place buffers first.
+        return aIsBuffer;
+    }
 
-            // This applies some defaults and gives us a single value to check for the binding type.
-            BindingInfo aInfo = CreateBindGroupLayoutInfo(a);
-            BindingInfo bInfo = CreateBindGroupLayoutInfo(b);
-
-            // Sort by type.
-            if (aInfo.bindingType != bInfo.bindingType) {
-                return aInfo.bindingType < bInfo.bindingType;
-            }
-
-            if (a.visibility != b.visibility) {
-                return a.visibility < b.visibility;
-            }
-
-            switch (aInfo.bindingType) {
-                case BindingInfoType::Buffer:
-                    if (aInfo.buffer.minBindingSize != bInfo.buffer.minBindingSize) {
-                        return aInfo.buffer.minBindingSize < bInfo.buffer.minBindingSize;
-                    }
-                    break;
-                case BindingInfoType::Sampler:
-                    if (aInfo.sampler.type != bInfo.sampler.type) {
-                        return aInfo.sampler.type < bInfo.sampler.type;
-                    }
-                    break;
-                case BindingInfoType::Texture:
-                    if (aInfo.texture.multisampled != bInfo.texture.multisampled) {
-                        return aInfo.texture.multisampled < bInfo.texture.multisampled;
-                    }
-                    if (aInfo.texture.viewDimension != bInfo.texture.viewDimension) {
-                        return aInfo.texture.viewDimension < bInfo.texture.viewDimension;
-                    }
-                    if (aInfo.texture.sampleType != bInfo.texture.sampleType) {
-                        return aInfo.texture.sampleType < bInfo.texture.sampleType;
-                    }
-                    break;
-                case BindingInfoType::StorageTexture:
-                    if (aInfo.storageTexture.access != bInfo.storageTexture.access) {
-                        return aInfo.storageTexture.access < bInfo.storageTexture.access;
-                    }
-                    if (aInfo.storageTexture.viewDimension != bInfo.storageTexture.viewDimension) {
-                        return aInfo.storageTexture.viewDimension <
-                               bInfo.storageTexture.viewDimension;
-                    }
-                    if (aInfo.storageTexture.format != bInfo.storageTexture.format) {
-                        return aInfo.storageTexture.format < bInfo.storageTexture.format;
-                    }
-                    break;
-                case BindingInfoType::ExternalTexture:
-                    break;
-            }
+    if (aIsBuffer) {
+        bool aHasDynamicOffset = BindingHasDynamicOffset(a);
+        bool bHasDynamicOffset = BindingHasDynamicOffset(b);
+        ASSERT(bIsBuffer);
+        if (aHasDynamicOffset != bHasDynamicOffset) {
+            // Buffers with dynamic offsets should come before those without.
+            // This makes it easy to iterate over the dynamic buffer bindings
+            // [0, dynamicBufferCount) during validation.
+            return aHasDynamicOffset;
+        }
+        if (aHasDynamicOffset) {
+            ASSERT(bHasDynamicOffset);
+            ASSERT(a.binding != b.binding);
+            // Above, we ensured that dynamic buffers are first. Now, ensure that
+            // dynamic buffer bindings are in increasing order. This is because dynamic
+            // buffer offsets are applied in increasing order of binding number.
             return a.binding < b.binding;
         }
+    }
 
-        // This is a utility function to help ASSERT that the BGL-binding comparator places buffers
-        // first.
-        bool CheckBufferBindingsFirst(ityp::span<BindingIndex, const BindingInfo> bindings) {
-            BindingIndex lastBufferIndex{0};
-            BindingIndex firstNonBufferIndex = std::numeric_limits<BindingIndex>::max();
-            for (BindingIndex i{0}; i < bindings.size(); ++i) {
-                if (bindings[i].bindingType == BindingInfoType::Buffer) {
-                    lastBufferIndex = std::max(i, lastBufferIndex);
-                } else {
-                    firstNonBufferIndex = std::min(i, firstNonBufferIndex);
-                }
+    // This applies some defaults, normalizing each entry into a single BindingInfo whose
+    // binding type can be compared directly.
+    BindingInfo aInfo = CreateBindGroupLayoutInfo(a);
+    BindingInfo bInfo = CreateBindGroupLayoutInfo(b);
+
+    // Sort by type.
+    if (aInfo.bindingType != bInfo.bindingType) {
+        return aInfo.bindingType < bInfo.bindingType;
+    }
+
+    if (a.visibility != b.visibility) {
+        return a.visibility < b.visibility;
+    }
+
+    switch (aInfo.bindingType) {
+        case BindingInfoType::Buffer:
+            if (aInfo.buffer.minBindingSize != bInfo.buffer.minBindingSize) {
+                return aInfo.buffer.minBindingSize < bInfo.buffer.minBindingSize;
             }
-
-            // If there are no buffers, then |lastBufferIndex| is initialized to 0 and
-            // |firstNonBufferIndex| gets set to 0.
-            return firstNonBufferIndex >= lastBufferIndex;
-        }
-
-    }  // namespace
-
-    // BindGroupLayoutBase
-
-    BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
-                                             const BindGroupLayoutDescriptor* descriptor,
-                                             PipelineCompatibilityToken pipelineCompatibilityToken,
-                                             ApiObjectBase::UntrackedByDeviceTag tag)
-        : ApiObjectBase(device, descriptor->label),
-          mPipelineCompatibilityToken(pipelineCompatibilityToken),
-          mUnexpandedBindingCount(descriptor->entryCount) {
-        std::vector<BindGroupLayoutEntry> sortedBindings = ExtractAndExpandBglEntries(
-            descriptor, &mBindingCounts, &mExternalTextureBindingExpansionMap);
-
-        std::sort(sortedBindings.begin(), sortedBindings.end(), SortBindingsCompare);
-
-        for (uint32_t i = 0; i < sortedBindings.size(); ++i) {
-            const BindGroupLayoutEntry& binding = sortedBindings[static_cast<uint32_t>(i)];
-
-            mBindingInfo.push_back(CreateBindGroupLayoutInfo(binding));
-
-            if (IsBufferBinding(binding)) {
-                // Buffers must be contiguously packed at the start of the binding info.
-                ASSERT(GetBufferCount() == BindingIndex(i));
+            break;
+        case BindingInfoType::Sampler:
+            if (aInfo.sampler.type != bInfo.sampler.type) {
+                return aInfo.sampler.type < bInfo.sampler.type;
             }
-            IncrementBindingCounts(&mBindingCounts, binding);
-
-            const auto& [_, inserted] = mBindingMap.emplace(BindingNumber(binding.binding), i);
-            ASSERT(inserted);
-        }
-        ASSERT(CheckBufferBindingsFirst({mBindingInfo.data(), GetBindingCount()}));
-        ASSERT(mBindingInfo.size() <= kMaxBindingsPerPipelineLayoutTyped);
+            break;
+        case BindingInfoType::Texture:
+            if (aInfo.texture.multisampled != bInfo.texture.multisampled) {
+                return aInfo.texture.multisampled < bInfo.texture.multisampled;
+            }
+            if (aInfo.texture.viewDimension != bInfo.texture.viewDimension) {
+                return aInfo.texture.viewDimension < bInfo.texture.viewDimension;
+            }
+            if (aInfo.texture.sampleType != bInfo.texture.sampleType) {
+                return aInfo.texture.sampleType < bInfo.texture.sampleType;
+            }
+            break;
+        case BindingInfoType::StorageTexture:
+            if (aInfo.storageTexture.access != bInfo.storageTexture.access) {
+                return aInfo.storageTexture.access < bInfo.storageTexture.access;
+            }
+            if (aInfo.storageTexture.viewDimension != bInfo.storageTexture.viewDimension) {
+                return aInfo.storageTexture.viewDimension < bInfo.storageTexture.viewDimension;
+            }
+            if (aInfo.storageTexture.format != bInfo.storageTexture.format) {
+                return aInfo.storageTexture.format < bInfo.storageTexture.format;
+            }
+            break;
+        case BindingInfoType::ExternalTexture:
+            break;
     }
+    return a.binding < b.binding;
+}
 
-    BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
-                                             const BindGroupLayoutDescriptor* descriptor,
-                                             PipelineCompatibilityToken pipelineCompatibilityToken)
-        : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken, kUntrackedByDevice) {
-        TrackInDevice();
-    }
-
-    BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag) {
-    }
-
-    BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device)
-        : ApiObjectBase(device, kLabelNotImplemented) {
-        TrackInDevice();
-    }
-
-    BindGroupLayoutBase::~BindGroupLayoutBase() = default;
-
-    void BindGroupLayoutBase::DestroyImpl() {
-        if (IsCachedReference()) {
-            // Do not uncache the actual cached object if we are a blueprint.
-            GetDevice()->UncacheBindGroupLayout(this);
+// This is a utility function to help ASSERT that the BGL-binding comparator places buffers
+// first.
+bool CheckBufferBindingsFirst(ityp::span<BindingIndex, const BindingInfo> bindings) {
+    BindingIndex lastBufferIndex{0};
+    BindingIndex firstNonBufferIndex = std::numeric_limits<BindingIndex>::max();
+    for (BindingIndex i{0}; i < bindings.size(); ++i) {
+        if (bindings[i].bindingType == BindingInfoType::Buffer) {
+            lastBufferIndex = std::max(i, lastBufferIndex);
+        } else {
+            firstNonBufferIndex = std::min(i, firstNonBufferIndex);
         }
     }
 
-    // static
-    BindGroupLayoutBase* BindGroupLayoutBase::MakeError(DeviceBase* device) {
-        return new BindGroupLayoutBase(device, ObjectBase::kError);
-    }
+    // If there are no buffers, then |lastBufferIndex| is initialized to 0 and
+    // |firstNonBufferIndex| gets set to 0.
+    return firstNonBufferIndex >= lastBufferIndex;
+}
 
-    ObjectType BindGroupLayoutBase::GetType() const {
-        return ObjectType::BindGroupLayout;
-    }
+}  // namespace
 
-    const BindGroupLayoutBase::BindingMap& BindGroupLayoutBase::GetBindingMap() const {
-        ASSERT(!IsError());
-        return mBindingMap;
-    }
+// BindGroupLayoutBase
 
-    bool BindGroupLayoutBase::HasBinding(BindingNumber bindingNumber) const {
-        return mBindingMap.count(bindingNumber) != 0;
-    }
+BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
+                                         const BindGroupLayoutDescriptor* descriptor,
+                                         PipelineCompatibilityToken pipelineCompatibilityToken,
+                                         ApiObjectBase::UntrackedByDeviceTag tag)
+    : ApiObjectBase(device, descriptor->label),
+      mPipelineCompatibilityToken(pipelineCompatibilityToken),
+      mUnexpandedBindingCount(descriptor->entryCount) {
+    std::vector<BindGroupLayoutEntry> sortedBindings = ExtractAndExpandBglEntries(
+        descriptor, &mBindingCounts, &mExternalTextureBindingExpansionMap);
 
-    BindingIndex BindGroupLayoutBase::GetBindingIndex(BindingNumber bindingNumber) const {
-        ASSERT(!IsError());
-        const auto& it = mBindingMap.find(bindingNumber);
-        ASSERT(it != mBindingMap.end());
-        return it->second;
-    }
+    std::sort(sortedBindings.begin(), sortedBindings.end(), SortBindingsCompare);
 
-    size_t BindGroupLayoutBase::ComputeContentHash() {
-        ObjectContentHasher recorder;
-        recorder.Record(mPipelineCompatibilityToken);
+    for (uint32_t i = 0; i < sortedBindings.size(); ++i) {
+        const BindGroupLayoutEntry& binding = sortedBindings[i];
 
-        // std::map is sorted by key, so two BGLs constructed in different orders
-        // will still record the same.
-        for (const auto [id, index] : mBindingMap) {
-            recorder.Record(id, index);
+        mBindingInfo.push_back(CreateBindGroupLayoutInfo(binding));
 
-            const BindingInfo& info = mBindingInfo[index];
-            recorder.Record(info.buffer.hasDynamicOffset, info.visibility, info.bindingType,
-                            info.buffer.type, info.buffer.minBindingSize, info.sampler.type,
-                            info.texture.sampleType, info.texture.viewDimension,
-                            info.texture.multisampled, info.storageTexture.access,
-                            info.storageTexture.format, info.storageTexture.viewDimension);
+        if (IsBufferBinding(binding)) {
+            // Buffers must be contiguously packed at the start of the binding info.
+            ASSERT(GetBufferCount() == BindingIndex(i));
         }
+        IncrementBindingCounts(&mBindingCounts, binding);
 
-        return recorder.GetContentHash();
+        const auto& [_, inserted] = mBindingMap.emplace(BindingNumber(binding.binding), i);
+        ASSERT(inserted);
+    }
+    ASSERT(CheckBufferBindingsFirst({mBindingInfo.data(), GetBindingCount()}));
+    ASSERT(mBindingInfo.size() <= kMaxBindingsPerPipelineLayoutTyped);
+}
+
+BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
+                                         const BindGroupLayoutDescriptor* descriptor,
+                                         PipelineCompatibilityToken pipelineCompatibilityToken)
+    : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken, kUntrackedByDevice) {
+    TrackInDevice();
+}
+
+BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag) {}
+
+BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device)
+    : ApiObjectBase(device, kLabelNotImplemented) {
+    TrackInDevice();
+}
+
+BindGroupLayoutBase::~BindGroupLayoutBase() = default;
+
+void BindGroupLayoutBase::DestroyImpl() {
+    if (IsCachedReference()) {
+        // Do not uncache the actual cached object if we are a blueprint.
+        GetDevice()->UncacheBindGroupLayout(this);
+    }
+}
+
+// static
+BindGroupLayoutBase* BindGroupLayoutBase::MakeError(DeviceBase* device) {
+    return new BindGroupLayoutBase(device, ObjectBase::kError);
+}
+
+ObjectType BindGroupLayoutBase::GetType() const {
+    return ObjectType::BindGroupLayout;
+}
+
+const BindGroupLayoutBase::BindingMap& BindGroupLayoutBase::GetBindingMap() const {
+    ASSERT(!IsError());
+    return mBindingMap;
+}
+
+bool BindGroupLayoutBase::HasBinding(BindingNumber bindingNumber) const {
+    return mBindingMap.count(bindingNumber) != 0;
+}
+
+BindingIndex BindGroupLayoutBase::GetBindingIndex(BindingNumber bindingNumber) const {
+    ASSERT(!IsError());
+    const auto& it = mBindingMap.find(bindingNumber);
+    ASSERT(it != mBindingMap.end());
+    return it->second;
+}
+
+size_t BindGroupLayoutBase::ComputeContentHash() {
+    ObjectContentHasher recorder;
+    recorder.Record(mPipelineCompatibilityToken);
+
+    // std::map is sorted by key, so two BGLs constructed in different orders
+    // will still record the same hash.
+    for (const auto [id, index] : mBindingMap) {
+        recorder.Record(id, index);
+
+        const BindingInfo& info = mBindingInfo[index];
+        recorder.Record(info.buffer.hasDynamicOffset, info.visibility, info.bindingType,
+                        info.buffer.type, info.buffer.minBindingSize, info.sampler.type,
+                        info.texture.sampleType, info.texture.viewDimension,
+                        info.texture.multisampled, info.storageTexture.access,
+                        info.storageTexture.format, info.storageTexture.viewDimension);
     }
 
-    bool BindGroupLayoutBase::EqualityFunc::operator()(const BindGroupLayoutBase* a,
-                                                       const BindGroupLayoutBase* b) const {
-        return a->IsLayoutEqual(b);
-    }
+    return recorder.GetContentHash();
+}
 
-    BindingIndex BindGroupLayoutBase::GetBindingCount() const {
-        return mBindingInfo.size();
-    }
+bool BindGroupLayoutBase::EqualityFunc::operator()(const BindGroupLayoutBase* a,
+                                                   const BindGroupLayoutBase* b) const {
+    return a->IsLayoutEqual(b);
+}
 
-    BindingIndex BindGroupLayoutBase::GetBufferCount() const {
-        return BindingIndex(mBindingCounts.bufferCount);
-    }
+BindingIndex BindGroupLayoutBase::GetBindingCount() const {
+    return mBindingInfo.size();
+}
 
-    BindingIndex BindGroupLayoutBase::GetDynamicBufferCount() const {
-        // This is a binding index because dynamic buffers are packed at the front of the binding
-        // info.
-        return static_cast<BindingIndex>(mBindingCounts.dynamicStorageBufferCount +
-                                         mBindingCounts.dynamicUniformBufferCount);
-    }
+BindingIndex BindGroupLayoutBase::GetBufferCount() const {
+    return BindingIndex(mBindingCounts.bufferCount);
+}
 
-    uint32_t BindGroupLayoutBase::GetUnverifiedBufferCount() const {
-        return mBindingCounts.unverifiedBufferCount;
-    }
+BindingIndex BindGroupLayoutBase::GetDynamicBufferCount() const {
+    // This is a binding index because dynamic buffers are packed at the front of the binding
+    // info.
+    return static_cast<BindingIndex>(mBindingCounts.dynamicStorageBufferCount +
+                                     mBindingCounts.dynamicUniformBufferCount);
+}
 
-    uint32_t BindGroupLayoutBase::GetExternalTextureBindingCount() const {
-        return mExternalTextureBindingExpansionMap.size();
-    }
+uint32_t BindGroupLayoutBase::GetUnverifiedBufferCount() const {
+    return mBindingCounts.unverifiedBufferCount;
+}
 
-    const BindingCounts& BindGroupLayoutBase::GetBindingCountInfo() const {
-        return mBindingCounts;
-    }
+uint32_t BindGroupLayoutBase::GetExternalTextureBindingCount() const {
+    return mExternalTextureBindingExpansionMap.size();
+}
 
-    const ExternalTextureBindingExpansionMap&
-    BindGroupLayoutBase::GetExternalTextureBindingExpansionMap() const {
-        return mExternalTextureBindingExpansionMap;
-    }
+const BindingCounts& BindGroupLayoutBase::GetBindingCountInfo() const {
+    return mBindingCounts;
+}
 
-    uint32_t BindGroupLayoutBase::GetUnexpandedBindingCount() const {
-        return mUnexpandedBindingCount;
-    }
+const ExternalTextureBindingExpansionMap&
+BindGroupLayoutBase::GetExternalTextureBindingExpansionMap() const {
+    return mExternalTextureBindingExpansionMap;
+}
 
-    bool BindGroupLayoutBase::IsLayoutEqual(const BindGroupLayoutBase* other,
-                                            bool excludePipelineCompatibiltyToken) const {
-        if (!excludePipelineCompatibiltyToken &&
-            GetPipelineCompatibilityToken() != other->GetPipelineCompatibilityToken()) {
+uint32_t BindGroupLayoutBase::GetUnexpandedBindingCount() const {
+    return mUnexpandedBindingCount;
+}
+
+bool BindGroupLayoutBase::IsLayoutEqual(const BindGroupLayoutBase* other,
+                                        bool excludePipelineCompatibilityToken) const {
+    if (!excludePipelineCompatibilityToken &&
+        GetPipelineCompatibilityToken() != other->GetPipelineCompatibilityToken()) {
+        return false;
+    }
+    if (GetBindingCount() != other->GetBindingCount()) {
+        return false;
+    }
+    for (BindingIndex i{0}; i < GetBindingCount(); ++i) {
+        if (mBindingInfo[i] != other->mBindingInfo[i]) {
             return false;
         }
-        if (GetBindingCount() != other->GetBindingCount()) {
+    }
+    return mBindingMap == other->mBindingMap;
+}
+
+PipelineCompatibilityToken BindGroupLayoutBase::GetPipelineCompatibilityToken() const {
+    return mPipelineCompatibilityToken;
+}
+
+size_t BindGroupLayoutBase::GetBindingDataSize() const {
+    // | ------ buffer-specific ----------| ------------ object pointers -------------|
+    // | --- offsets + sizes -------------| --------------- Ref<ObjectBase> ----------|
+    // Followed by:
+    // |---------buffer size array--------|
+    // |-uint64_t[mUnverifiedBufferCount]-|
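+    //
+    // Worked example (hypothetical counts, assuming Ref<ObjectBase> is one pointer on a
+    // 64-bit build): 2 buffers and 5 total bindings with 1 unverified buffer give
+    // 2 * 16 + 5 * 8 = 72 bytes, already 8-byte aligned, plus 1 * 8 = 80 bytes total.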
+    size_t objectPointerStart = mBindingCounts.bufferCount * sizeof(BufferBindingData);
+    ASSERT(IsAligned(objectPointerStart, alignof(Ref<ObjectBase>)));
+    size_t bufferSizeArrayStart = Align(
+        objectPointerStart + mBindingCounts.totalCount * sizeof(Ref<ObjectBase>), sizeof(uint64_t));
+    ASSERT(IsAligned(bufferSizeArrayStart, alignof(uint64_t)));
+    return bufferSizeArrayStart + mBindingCounts.unverifiedBufferCount * sizeof(uint64_t);
+}
+
+BindGroupLayoutBase::BindingDataPointers BindGroupLayoutBase::ComputeBindingDataPointers(
+    void* dataStart) const {
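+    // Mirrors the layout described in GetBindingDataSize(): buffer offset/size pairs,
+    // then the object refs, then the 8-byte aligned unverified buffer size array.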
+    BufferBindingData* bufferData = reinterpret_cast<BufferBindingData*>(dataStart);
+    auto bindings = reinterpret_cast<Ref<ObjectBase>*>(bufferData + mBindingCounts.bufferCount);
+    uint64_t* unverifiedBufferSizes = AlignPtr(
+        reinterpret_cast<uint64_t*>(bindings + mBindingCounts.totalCount), sizeof(uint64_t));
+
+    ASSERT(IsPtrAligned(bufferData, alignof(BufferBindingData)));
+    ASSERT(IsPtrAligned(bindings, alignof(Ref<ObjectBase>)));
+    ASSERT(IsPtrAligned(unverifiedBufferSizes, alignof(uint64_t)));
+
+    return {{bufferData, GetBufferCount()},
+            {bindings, GetBindingCount()},
+            {unverifiedBufferSizes, mBindingCounts.unverifiedBufferCount}};
+}
+
+bool BindGroupLayoutBase::IsStorageBufferBinding(BindingIndex bindingIndex) const {
+    ASSERT(bindingIndex < GetBufferCount());
+    switch (GetBindingInfo(bindingIndex).buffer.type) {
+        case wgpu::BufferBindingType::Uniform:
             return false;
-        }
-        for (BindingIndex i{0}; i < GetBindingCount(); ++i) {
-            if (mBindingInfo[i] != other->mBindingInfo[i]) {
-                return false;
-            }
-        }
-        return mBindingMap == other->mBindingMap;
+        case kInternalStorageBufferBinding:
+        case wgpu::BufferBindingType::Storage:
+        case wgpu::BufferBindingType::ReadOnlyStorage:
+            return true;
+        case wgpu::BufferBindingType::Undefined:
+            break;
     }
+    UNREACHABLE();
+}
 
-    PipelineCompatibilityToken BindGroupLayoutBase::GetPipelineCompatibilityToken() const {
-        return mPipelineCompatibilityToken;
+std::string BindGroupLayoutBase::EntriesToString() const {
+    std::string entries = "[";
+    std::string sep = "";
+    const BindGroupLayoutBase::BindingMap& bindingMap = GetBindingMap();
+    for (const auto [bindingNumber, bindingIndex] : bindingMap) {
+        const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+        entries += absl::StrFormat("%s%s", sep, bindingInfo);
+        sep = ", ";
     }
-
-    size_t BindGroupLayoutBase::GetBindingDataSize() const {
-        // | ------ buffer-specific ----------| ------------ object pointers -------------|
-        // | --- offsets + sizes -------------| --------------- Ref<ObjectBase> ----------|
-        // Followed by:
-        // |---------buffer size array--------|
-        // |-uint64_t[mUnverifiedBufferCount]-|
-        size_t objectPointerStart = mBindingCounts.bufferCount * sizeof(BufferBindingData);
-        ASSERT(IsAligned(objectPointerStart, alignof(Ref<ObjectBase>)));
-        size_t bufferSizeArrayStart =
-            Align(objectPointerStart + mBindingCounts.totalCount * sizeof(Ref<ObjectBase>),
-                  sizeof(uint64_t));
-        ASSERT(IsAligned(bufferSizeArrayStart, alignof(uint64_t)));
-        return bufferSizeArrayStart + mBindingCounts.unverifiedBufferCount * sizeof(uint64_t);
-    }
-
-    BindGroupLayoutBase::BindingDataPointers BindGroupLayoutBase::ComputeBindingDataPointers(
-        void* dataStart) const {
-        BufferBindingData* bufferData = reinterpret_cast<BufferBindingData*>(dataStart);
-        auto bindings = reinterpret_cast<Ref<ObjectBase>*>(bufferData + mBindingCounts.bufferCount);
-        uint64_t* unverifiedBufferSizes = AlignPtr(
-            reinterpret_cast<uint64_t*>(bindings + mBindingCounts.totalCount), sizeof(uint64_t));
-
-        ASSERT(IsPtrAligned(bufferData, alignof(BufferBindingData)));
-        ASSERT(IsPtrAligned(bindings, alignof(Ref<ObjectBase>)));
-        ASSERT(IsPtrAligned(unverifiedBufferSizes, alignof(uint64_t)));
-
-        return {{bufferData, GetBufferCount()},
-                {bindings, GetBindingCount()},
-                {unverifiedBufferSizes, mBindingCounts.unverifiedBufferCount}};
-    }
-
-    bool BindGroupLayoutBase::IsStorageBufferBinding(BindingIndex bindingIndex) const {
-        ASSERT(bindingIndex < GetBufferCount());
-        switch (GetBindingInfo(bindingIndex).buffer.type) {
-            case wgpu::BufferBindingType::Uniform:
-                return false;
-            case kInternalStorageBufferBinding:
-            case wgpu::BufferBindingType::Storage:
-            case wgpu::BufferBindingType::ReadOnlyStorage:
-                return true;
-            case wgpu::BufferBindingType::Undefined:
-                break;
-        }
-        UNREACHABLE();
-    }
-
-    std::string BindGroupLayoutBase::EntriesToString() const {
-        std::string entries = "[";
-        std::string sep = "";
-        const BindGroupLayoutBase::BindingMap& bindingMap = GetBindingMap();
-        for (const auto [bindingNumber, bindingIndex] : bindingMap) {
-            const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
-            entries += absl::StrFormat("%s%s", sep, bindingInfo);
-            sep = ", ";
-        }
-        entries += "]";
-        return entries;
-    }
+    entries += "]";
+    return entries;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/BindGroupLayout.h b/src/dawn/native/BindGroupLayout.h
index 3b909d1..a218877 100644
--- a/src/dawn/native/BindGroupLayout.h
+++ b/src/dawn/native/BindGroupLayout.h
@@ -34,139 +34,137 @@
 #include "dawn/native/dawn_platform.h"
 
 namespace dawn::native {
-    // TODO(dawn:1082): Minor optimization to use BindingIndex instead of BindingNumber
-    struct ExternalTextureBindingExpansion {
-        BindingNumber plane0;
-        BindingNumber plane1;
-        BindingNumber params;
+// TODO(dawn:1082): Minor optimization to use BindingIndex instead of BindingNumber
+struct ExternalTextureBindingExpansion {
+    BindingNumber plane0;
+    BindingNumber plane1;
+    BindingNumber params;
+};
+
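+// Maps the BindingNumber of the original external texture entry to the bindings it
+// was expanded into.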
+using ExternalTextureBindingExpansionMap = std::map<BindingNumber, ExternalTextureBindingExpansion>;
+
+MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
+                                             const BindGroupLayoutDescriptor* descriptor,
+                                             bool allowInternalBinding = false);
+
+// Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
+// These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
+// into a contiguous range of |BindingIndex| integers.
+class BindGroupLayoutBase : public ApiObjectBase, public CachedObject {
+  public:
+    BindGroupLayoutBase(DeviceBase* device,
+                        const BindGroupLayoutDescriptor* descriptor,
+                        PipelineCompatibilityToken pipelineCompatibilityToken,
+                        ApiObjectBase::UntrackedByDeviceTag tag);
+    BindGroupLayoutBase(DeviceBase* device,
+                        const BindGroupLayoutDescriptor* descriptor,
+                        PipelineCompatibilityToken pipelineCompatibilityToken);
+    ~BindGroupLayoutBase() override;
+
+    static BindGroupLayoutBase* MakeError(DeviceBase* device);
+
+    ObjectType GetType() const override;
+
+    // A map from the BindingNumber to its packed BindingIndex.
+    using BindingMap = std::map<BindingNumber, BindingIndex>;
+
+    const BindingInfo& GetBindingInfo(BindingIndex bindingIndex) const {
+        ASSERT(!IsError());
+        ASSERT(bindingIndex < mBindingInfo.size());
+        return mBindingInfo[bindingIndex];
+    }
+    const BindingMap& GetBindingMap() const;
+    bool HasBinding(BindingNumber bindingNumber) const;
+    BindingIndex GetBindingIndex(BindingNumber bindingNumber) const;
+
+    // Functions necessary for the unordered_set<BGLBase*>-based cache.
+    size_t ComputeContentHash() override;
+
+    struct EqualityFunc {
+        bool operator()(const BindGroupLayoutBase* a, const BindGroupLayoutBase* b) const;
     };
 
-    using ExternalTextureBindingExpansionMap =
-        std::map<BindingNumber, ExternalTextureBindingExpansion>;
+    BindingIndex GetBindingCount() const;
+    // Returns |BindingIndex| because buffers are packed at the front.
+    BindingIndex GetBufferCount() const;
+    // Returns |BindingIndex| because dynamic buffers are packed at the front.
+    BindingIndex GetDynamicBufferCount() const;
+    uint32_t GetUnverifiedBufferCount() const;
 
-    MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
-                                                 const BindGroupLayoutDescriptor* descriptor,
-                                                 bool allowInternalBinding = false);
+    // Used to get counts and validate them in pipeline layout creation. Other getters
+    // should be used to get typed integer counts.
+    const BindingCounts& GetBindingCountInfo() const;
 
-    // Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
-    // These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
-    // into a packed range of |BindingIndex| integers.
-    class BindGroupLayoutBase : public ApiObjectBase, public CachedObject {
-      public:
-        BindGroupLayoutBase(DeviceBase* device,
-                            const BindGroupLayoutDescriptor* descriptor,
-                            PipelineCompatibilityToken pipelineCompatibilityToken,
-                            ApiObjectBase::UntrackedByDeviceTag tag);
-        BindGroupLayoutBase(DeviceBase* device,
-                            const BindGroupLayoutDescriptor* descriptor,
-                            PipelineCompatibilityToken pipelineCompatibilityToken);
-        ~BindGroupLayoutBase() override;
+    uint32_t GetExternalTextureBindingCount() const;
 
-        static BindGroupLayoutBase* MakeError(DeviceBase* device);
+    // Used to specify unpacked external texture binding slots when transforming shader modules.
+    const ExternalTextureBindingExpansionMap& GetExternalTextureBindingExpansionMap() const;
 
-        ObjectType GetType() const override;
+    uint32_t GetUnexpandedBindingCount() const;
 
-        // A map from the BindingNumber to its packed BindingIndex.
-        using BindingMap = std::map<BindingNumber, BindingIndex>;
+    // Tests that the BindingInfo of two bind group layouts are equal. The pipeline
+    // compatibility tokens are also compared unless |excludePipelineCompatibilityToken|
+    // is true.
+    bool IsLayoutEqual(const BindGroupLayoutBase* other,
+                       bool excludePipelineCompatibilityToken = false) const;
+    PipelineCompatibilityToken GetPipelineCompatibilityToken() const;
 
-        const BindingInfo& GetBindingInfo(BindingIndex bindingIndex) const {
-            ASSERT(!IsError());
-            ASSERT(bindingIndex < mBindingInfo.size());
-            return mBindingInfo[bindingIndex];
-        }
-        const BindingMap& GetBindingMap() const;
-        bool HasBinding(BindingNumber bindingNumber) const;
-        BindingIndex GetBindingIndex(BindingNumber bindingNumber) const;
-
-        // Functions necessary for the unordered_set<BGLBase*>-based cache.
-        size_t ComputeContentHash() override;
-
-        struct EqualityFunc {
-            bool operator()(const BindGroupLayoutBase* a, const BindGroupLayoutBase* b) const;
-        };
-
-        BindingIndex GetBindingCount() const;
-        // Returns |BindingIndex| because buffers are packed at the front.
-        BindingIndex GetBufferCount() const;
-        // Returns |BindingIndex| because dynamic buffers are packed at the front.
-        BindingIndex GetDynamicBufferCount() const;
-        uint32_t GetUnverifiedBufferCount() const;
-
-        // Used to get counts and validate them in pipeline layout creation. Other getters
-        // should be used to get typed integer counts.
-        const BindingCounts& GetBindingCountInfo() const;
-
-        uint32_t GetExternalTextureBindingCount() const;
-
-        // Used to specify unpacked external texture binding slots when transforming shader modules.
-        const ExternalTextureBindingExpansionMap& GetExternalTextureBindingExpansionMap() const;
-
-        uint32_t GetUnexpandedBindingCount() const;
-
-        // Tests that the BindingInfo of two bind groups are equal,
-        // ignoring their compatibility groups.
-        bool IsLayoutEqual(const BindGroupLayoutBase* other,
-                           bool excludePipelineCompatibiltyToken = false) const;
-        PipelineCompatibilityToken GetPipelineCompatibilityToken() const;
-
-        struct BufferBindingData {
-            uint64_t offset;
-            uint64_t size;
-        };
-
-        struct BindingDataPointers {
-            ityp::span<BindingIndex, BufferBindingData> const bufferData = {};
-            ityp::span<BindingIndex, Ref<ObjectBase>> const bindings = {};
-            ityp::span<uint32_t, uint64_t> const unverifiedBufferSizes = {};
-        };
-
-        // Compute the amount of space / alignment required to store bindings for a bind group of
-        // this layout.
-        size_t GetBindingDataSize() const;
-        static constexpr size_t GetBindingDataAlignment() {
-            static_assert(alignof(Ref<ObjectBase>) <= alignof(BufferBindingData));
-            return alignof(BufferBindingData);
-        }
-
-        BindingDataPointers ComputeBindingDataPointers(void* dataStart) const;
-
-        bool IsStorageBufferBinding(BindingIndex bindingIndex) const;
-
-        // Returns a detailed string representation of the layout entries for use in error messages.
-        std::string EntriesToString() const;
-
-      protected:
-        // Constructor used only for mocking and testing.
-        explicit BindGroupLayoutBase(DeviceBase* device);
-        void DestroyImpl() override;
-
-        template <typename BindGroup>
-        SlabAllocator<BindGroup> MakeFrontendBindGroupAllocator(size_t size) {
-            return SlabAllocator<BindGroup>(
-                size,  // bytes
-                Align(sizeof(BindGroup), GetBindingDataAlignment()) + GetBindingDataSize(),  // size
-                std::max(alignof(BindGroup), GetBindingDataAlignment())  // alignment
-            );
-        }
-
-      private:
-        BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
-        BindingCounts mBindingCounts = {};
-        ityp::vector<BindingIndex, BindingInfo> mBindingInfo;
-
-        // Map from BindGroupLayoutEntry.binding to packed indices.
-        BindingMap mBindingMap;
-
-        ExternalTextureBindingExpansionMap mExternalTextureBindingExpansionMap;
-
-        // Non-0 if this BindGroupLayout was created as part of a default PipelineLayout.
-        const PipelineCompatibilityToken mPipelineCompatibilityToken =
-            PipelineCompatibilityToken(0);
-
-        uint32_t mUnexpandedBindingCount;
+    struct BufferBindingData {
+        uint64_t offset;
+        uint64_t size;
     };
 
+    struct BindingDataPointers {
+        ityp::span<BindingIndex, BufferBindingData> const bufferData = {};
+        ityp::span<BindingIndex, Ref<ObjectBase>> const bindings = {};
+        ityp::span<uint32_t, uint64_t> const unverifiedBufferSizes = {};
+    };
+
+    // Compute the amount of space / alignment required to store bindings for a bind group of
+    // this layout.
+    size_t GetBindingDataSize() const;
+    static constexpr size_t GetBindingDataAlignment() {
+        static_assert(alignof(Ref<ObjectBase>) <= alignof(BufferBindingData));
+        return alignof(BufferBindingData);
+    }
+
+    BindingDataPointers ComputeBindingDataPointers(void* dataStart) const;
+
+    bool IsStorageBufferBinding(BindingIndex bindingIndex) const;
+
+    // Returns a detailed string representation of the layout entries for use in error messages.
+    std::string EntriesToString() const;
+
+  protected:
+    // Constructor used only for mocking and testing.
+    explicit BindGroupLayoutBase(DeviceBase* device);
+    void DestroyImpl() override;
+
+    template <typename BindGroup>
+    SlabAllocator<BindGroup> MakeFrontendBindGroupAllocator(size_t size) {
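+        // Each slab slot holds the BindGroup object followed by its binding data, so the
+        // slot size is the aligned object size plus GetBindingDataSize().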
+        return SlabAllocator<BindGroup>(
+            size,                                                                        // bytes
+            Align(sizeof(BindGroup), GetBindingDataAlignment()) + GetBindingDataSize(),  // size
+            std::max(alignof(BindGroup), GetBindingDataAlignment())  // alignment
+        );
+    }
+
+  private:
+    BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+    BindingCounts mBindingCounts = {};
+    ityp::vector<BindingIndex, BindingInfo> mBindingInfo;
+
+    // Map from BindGroupLayoutEntry.binding to packed indices.
+    BindingMap mBindingMap;
+
+    ExternalTextureBindingExpansionMap mExternalTextureBindingExpansionMap;
+
+    // Non-0 if this BindGroupLayout was created as part of a default PipelineLayout.
+    const PipelineCompatibilityToken mPipelineCompatibilityToken = PipelineCompatibilityToken(0);
+
+    uint32_t mUnexpandedBindingCount;
+};
+
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_BINDGROUPLAYOUT_H_
diff --git a/src/dawn/native/BindGroupTracker.h b/src/dawn/native/BindGroupTracker.h
index ce081e9..cd8254c 100644
--- a/src/dawn/native/BindGroupTracker.h
+++ b/src/dawn/native/BindGroupTracker.h
@@ -25,119 +25,117 @@
 
 namespace dawn::native {
 
-    // Keeps track of the dirty bind groups so they can be lazily applied when we know the
-    // pipeline state or it changes.
-    // |DynamicOffset| is a template parameter because offsets in Vulkan are uint32_t but uint64_t
-    // in other backends.
-    template <bool CanInheritBindGroups, typename DynamicOffset>
-    class BindGroupTrackerBase {
-      public:
-        void OnSetBindGroup(BindGroupIndex index,
-                            BindGroupBase* bindGroup,
-                            uint32_t dynamicOffsetCount,
-                            uint32_t* dynamicOffsets) {
-            ASSERT(index < kMaxBindGroupsTyped);
+// Keeps track of the dirty bind groups so they can be lazily applied when the pipeline
+// state is known or when it changes.
+// |DynamicOffset| is a template parameter because offsets in Vulkan are uint32_t but uint64_t
+// in other backends.
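+//
+// Expected call pattern for a derived backend tracker (a sketch based on the comments
+// below): record state via OnSetBindGroup()/OnSetPipeline() as commands are encoded,
+// then bracket the actual bind group application with BeforeApply() and AfterApply()
+// around each draw or dispatch.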
+template <bool CanInheritBindGroups, typename DynamicOffset>
+class BindGroupTrackerBase {
+  public:
+    void OnSetBindGroup(BindGroupIndex index,
+                        BindGroupBase* bindGroup,
+                        uint32_t dynamicOffsetCount,
+                        uint32_t* dynamicOffsets) {
+        ASSERT(index < kMaxBindGroupsTyped);
 
-            if (mBindGroupLayoutsMask[index]) {
-                // It is okay to only dirty bind groups that are used by the current pipeline
-                // layout. If the pipeline layout changes, then the bind groups it uses will
-                // become dirty.
+        if (mBindGroupLayoutsMask[index]) {
+            // It is okay to only dirty bind groups that are used by the current pipeline
+            // layout. If the pipeline layout changes, then the bind groups it uses will
+            // become dirty.
 
-                if (mBindGroups[index] != bindGroup) {
-                    mDirtyBindGroups.set(index);
-                    mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
-                }
-
-                if (dynamicOffsetCount > 0) {
-                    mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
-                }
+            if (mBindGroups[index] != bindGroup) {
+                mDirtyBindGroups.set(index);
+                mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
             }
 
-            mBindGroups[index] = bindGroup;
-            mDynamicOffsetCounts[index] = dynamicOffsetCount;
-            SetDynamicOffsets(mDynamicOffsets[index].data(), dynamicOffsetCount, dynamicOffsets);
-        }
-
-        void OnSetPipeline(PipelineBase* pipeline) {
-            mPipelineLayout = pipeline->GetLayout();
-        }
-
-      protected:
-        // The Derived class should call this before it applies bind groups.
-        void BeforeApply() {
-            if (mLastAppliedPipelineLayout == mPipelineLayout) {
-                return;
-            }
-
-            // Use the bind group layout mask to avoid marking unused bind groups as dirty.
-            mBindGroupLayoutsMask = mPipelineLayout->GetBindGroupLayoutsMask();
-
-            // Changing the pipeline layout sets bind groups as dirty. If CanInheritBindGroups,
-            // the first |k| matching bind groups may be inherited.
-            if (CanInheritBindGroups && mLastAppliedPipelineLayout != nullptr) {
-                // Dirty bind groups that cannot be inherited.
-                BindGroupLayoutMask dirtiedGroups =
-                    ~mPipelineLayout->InheritedGroupsMask(mLastAppliedPipelineLayout);
-
-                mDirtyBindGroups |= dirtiedGroups;
-                mDirtyBindGroupsObjectChangedOrIsDynamic |= dirtiedGroups;
-
-                // Clear any bind groups not in the mask.
-                mDirtyBindGroups &= mBindGroupLayoutsMask;
-                mDirtyBindGroupsObjectChangedOrIsDynamic &= mBindGroupLayoutsMask;
-            } else {
-                mDirtyBindGroups = mBindGroupLayoutsMask;
-                mDirtyBindGroupsObjectChangedOrIsDynamic = mBindGroupLayoutsMask;
-            }
-        }
-
-        // The Derived class should call this after it applies bind groups.
-        void AfterApply() {
-            // Reset all dirty bind groups. Dirty bind groups not in the bind group layout mask
-            // will be dirtied again by the next pipeline change.
-            mDirtyBindGroups.reset();
-            mDirtyBindGroupsObjectChangedOrIsDynamic.reset();
-            // Keep track of the last applied pipeline layout. This allows us to avoid computing
-            // the intersection of the dirty bind groups and bind group layout mask in next Draw
-            // or Dispatch (which is very hot code) until the layout is changed again.
-            mLastAppliedPipelineLayout = mPipelineLayout;
-        }
-
-        BindGroupLayoutMask mDirtyBindGroups = 0;
-        BindGroupLayoutMask mDirtyBindGroupsObjectChangedOrIsDynamic = 0;
-        BindGroupLayoutMask mBindGroupLayoutsMask = 0;
-        ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindGroups = {};
-        ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mDynamicOffsetCounts = {};
-        ityp::array<BindGroupIndex,
-                    std::array<DynamicOffset, kMaxDynamicBuffersPerPipelineLayout>,
-                    kMaxBindGroups>
-            mDynamicOffsets = {};
-
-        // |mPipelineLayout| is the current pipeline layout set on the command buffer.
-        // |mLastAppliedPipelineLayout| is the last pipeline layout for which we applied changes
-        // to the bind group bindings.
-        PipelineLayoutBase* mPipelineLayout = nullptr;
-        PipelineLayoutBase* mLastAppliedPipelineLayout = nullptr;
-
-      private:
-        // We have two overloads here because offsets in Vulkan are uint32_t but uint64_t
-        // in other backends.
-        static void SetDynamicOffsets(uint64_t* data,
-                                      uint32_t dynamicOffsetCount,
-                                      uint32_t* dynamicOffsets) {
-            for (uint32_t i = 0; i < dynamicOffsetCount; ++i) {
-                data[i] = static_cast<uint64_t>(dynamicOffsets[i]);
-            }
-        }
-
-        static void SetDynamicOffsets(uint32_t* data,
-                                      uint32_t dynamicOffsetCount,
-                                      uint32_t* dynamicOffsets) {
             if (dynamicOffsetCount > 0) {
-                memcpy(data, dynamicOffsets, sizeof(uint32_t) * dynamicOffsetCount);
+                mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
             }
         }
-    };
+
+        mBindGroups[index] = bindGroup;
+        mDynamicOffsetCounts[index] = dynamicOffsetCount;
+        SetDynamicOffsets(mDynamicOffsets[index].data(), dynamicOffsetCount, dynamicOffsets);
+    }
+
+    void OnSetPipeline(PipelineBase* pipeline) { mPipelineLayout = pipeline->GetLayout(); }
+
+  protected:
+    // The Derived class should call this before it applies bind groups.
+    void BeforeApply() {
+        if (mLastAppliedPipelineLayout == mPipelineLayout) {
+            return;
+        }
+
+        // Use the bind group layout mask to avoid marking unused bind groups as dirty.
+        mBindGroupLayoutsMask = mPipelineLayout->GetBindGroupLayoutsMask();
+
+        // Changing the pipeline layout sets bind groups as dirty. If CanInheritBindGroups,
+        // the first |k| matching bind groups may be inherited.
+        if (CanInheritBindGroups && mLastAppliedPipelineLayout != nullptr) {
+            // Dirty bind groups that cannot be inherited.
+            BindGroupLayoutMask dirtiedGroups =
+                ~mPipelineLayout->InheritedGroupsMask(mLastAppliedPipelineLayout);
+
+            mDirtyBindGroups |= dirtiedGroups;
+            mDirtyBindGroupsObjectChangedOrIsDynamic |= dirtiedGroups;
+
+            // Clear any bind groups not in the mask.
+            mDirtyBindGroups &= mBindGroupLayoutsMask;
+            mDirtyBindGroupsObjectChangedOrIsDynamic &= mBindGroupLayoutsMask;
+        } else {
+            mDirtyBindGroups = mBindGroupLayoutsMask;
+            mDirtyBindGroupsObjectChangedOrIsDynamic = mBindGroupLayoutsMask;
+        }
+    }
+
+    // The Derived class should call this after it applies bind groups.
+    void AfterApply() {
+        // Reset all dirty bind groups. Dirty bind groups not in the bind group layout mask
+        // will be dirtied again by the next pipeline change.
+        mDirtyBindGroups.reset();
+        mDirtyBindGroupsObjectChangedOrIsDynamic.reset();
+        // Keep track of the last applied pipeline layout. This allows us to avoid computing
+        // the intersection of the dirty bind groups and bind group layout mask in the next
+        // Draw or Dispatch (which is very hot code) until the layout is changed again.
+        mLastAppliedPipelineLayout = mPipelineLayout;
+    }
+
+    BindGroupLayoutMask mDirtyBindGroups = 0;
+    BindGroupLayoutMask mDirtyBindGroupsObjectChangedOrIsDynamic = 0;
+    BindGroupLayoutMask mBindGroupLayoutsMask = 0;
+    ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindGroups = {};
+    ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mDynamicOffsetCounts = {};
+    ityp::array<BindGroupIndex,
+                std::array<DynamicOffset, kMaxDynamicBuffersPerPipelineLayout>,
+                kMaxBindGroups>
+        mDynamicOffsets = {};
+
+    // |mPipelineLayout| is the current pipeline layout set on the command buffer.
+    // |mLastAppliedPipelineLayout| is the last pipeline layout for which we applied changes
+    // to the bind group bindings.
+    PipelineLayoutBase* mPipelineLayout = nullptr;
+    PipelineLayoutBase* mLastAppliedPipelineLayout = nullptr;
+
+  private:
+    // We have two overloads here because offsets in Vulkan are uint32_t but uint64_t
+    // in other backends.
+    static void SetDynamicOffsets(uint64_t* data,
+                                  uint32_t dynamicOffsetCount,
+                                  uint32_t* dynamicOffsets) {
+        for (uint32_t i = 0; i < dynamicOffsetCount; ++i) {
+            data[i] = static_cast<uint64_t>(dynamicOffsets[i]);
+        }
+    }
+
+    static void SetDynamicOffsets(uint32_t* data,
+                                  uint32_t dynamicOffsetCount,
+                                  uint32_t* dynamicOffsets) {
+        if (dynamicOffsetCount > 0) {
+            memcpy(data, dynamicOffsets, sizeof(uint32_t) * dynamicOffsetCount);
+        }
+    }
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/BindingInfo.cpp b/src/dawn/native/BindingInfo.cpp
index 009735c..1d4b60d 100644
--- a/src/dawn/native/BindingInfo.cpp
+++ b/src/dawn/native/BindingInfo.cpp
@@ -18,178 +18,172 @@
 
 namespace dawn::native {
 
-    void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry) {
-        bindingCounts->totalCount += 1;
+void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry) {
+    bindingCounts->totalCount += 1;
 
-        uint32_t PerStageBindingCounts::*perStageBindingCountMember = nullptr;
+    uint32_t PerStageBindingCounts::*perStageBindingCountMember = nullptr;
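+    // Pointer-to-member: the binding-type dispatch below selects which per-stage counter
+    // to bump, and the single loop at the end applies it for every visible stage.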
 
-        if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
-            ++bindingCounts->bufferCount;
-            const BufferBindingLayout& buffer = entry.buffer;
+    if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
+        ++bindingCounts->bufferCount;
+        const BufferBindingLayout& buffer = entry.buffer;
 
-            if (buffer.minBindingSize == 0) {
-                ++bindingCounts->unverifiedBufferCount;
-            }
-
-            switch (buffer.type) {
-                case wgpu::BufferBindingType::Uniform:
-                    if (buffer.hasDynamicOffset) {
-                        ++bindingCounts->dynamicUniformBufferCount;
-                    }
-                    perStageBindingCountMember = &PerStageBindingCounts::uniformBufferCount;
-                    break;
-
-                case wgpu::BufferBindingType::Storage:
-                case kInternalStorageBufferBinding:
-                case wgpu::BufferBindingType::ReadOnlyStorage:
-                    if (buffer.hasDynamicOffset) {
-                        ++bindingCounts->dynamicStorageBufferCount;
-                    }
-                    perStageBindingCountMember = &PerStageBindingCounts::storageBufferCount;
-                    break;
-
-                case wgpu::BufferBindingType::Undefined:
-                    // Can't get here due to the enclosing if statement.
-                    UNREACHABLE();
-                    break;
-            }
-        } else if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
-            perStageBindingCountMember = &PerStageBindingCounts::samplerCount;
-        } else if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
-            perStageBindingCountMember = &PerStageBindingCounts::sampledTextureCount;
-        } else if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
-            perStageBindingCountMember = &PerStageBindingCounts::storageTextureCount;
-        } else {
-            const ExternalTextureBindingLayout* externalTextureBindingLayout;
-            FindInChain(entry.nextInChain, &externalTextureBindingLayout);
-            if (externalTextureBindingLayout != nullptr) {
-                perStageBindingCountMember = &PerStageBindingCounts::externalTextureCount;
-            }
+        if (buffer.minBindingSize == 0) {
+            ++bindingCounts->unverifiedBufferCount;
         }
 
-        ASSERT(perStageBindingCountMember != nullptr);
-        for (SingleShaderStage stage : IterateStages(entry.visibility)) {
-            ++(bindingCounts->perStage[stage].*perStageBindingCountMember);
+        switch (buffer.type) {
+            case wgpu::BufferBindingType::Uniform:
+                if (buffer.hasDynamicOffset) {
+                    ++bindingCounts->dynamicUniformBufferCount;
+                }
+                perStageBindingCountMember = &PerStageBindingCounts::uniformBufferCount;
+                break;
+
+            case wgpu::BufferBindingType::Storage:
+            case kInternalStorageBufferBinding:
+            case wgpu::BufferBindingType::ReadOnlyStorage:
+                if (buffer.hasDynamicOffset) {
+                    ++bindingCounts->dynamicStorageBufferCount;
+                }
+                perStageBindingCountMember = &PerStageBindingCounts::storageBufferCount;
+                break;
+
+            case wgpu::BufferBindingType::Undefined:
+                // Can't get here due to the enclosing if statement.
+                UNREACHABLE();
+                break;
+        }
+    } else if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
+        perStageBindingCountMember = &PerStageBindingCounts::samplerCount;
+    } else if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
+        perStageBindingCountMember = &PerStageBindingCounts::sampledTextureCount;
+    } else if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+        perStageBindingCountMember = &PerStageBindingCounts::storageTextureCount;
+    } else {
+        const ExternalTextureBindingLayout* externalTextureBindingLayout;
+        FindInChain(entry.nextInChain, &externalTextureBindingLayout);
+        if (externalTextureBindingLayout != nullptr) {
+            perStageBindingCountMember = &PerStageBindingCounts::externalTextureCount;
         }
     }
 
-    void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs) {
-        bindingCounts->totalCount += rhs.totalCount;
-        bindingCounts->bufferCount += rhs.bufferCount;
-        bindingCounts->unverifiedBufferCount += rhs.unverifiedBufferCount;
-        bindingCounts->dynamicUniformBufferCount += rhs.dynamicUniformBufferCount;
-        bindingCounts->dynamicStorageBufferCount += rhs.dynamicStorageBufferCount;
-
-        for (SingleShaderStage stage : IterateStages(kAllStages)) {
-            bindingCounts->perStage[stage].sampledTextureCount +=
-                rhs.perStage[stage].sampledTextureCount;
-            bindingCounts->perStage[stage].samplerCount += rhs.perStage[stage].samplerCount;
-            bindingCounts->perStage[stage].storageBufferCount +=
-                rhs.perStage[stage].storageBufferCount;
-            bindingCounts->perStage[stage].storageTextureCount +=
-                rhs.perStage[stage].storageTextureCount;
-            bindingCounts->perStage[stage].uniformBufferCount +=
-                rhs.perStage[stage].uniformBufferCount;
-            bindingCounts->perStage[stage].externalTextureCount +=
-                rhs.perStage[stage].externalTextureCount;
-        }
+    ASSERT(perStageBindingCountMember != nullptr);
+    for (SingleShaderStage stage : IterateStages(entry.visibility)) {
+        ++(bindingCounts->perStage[stage].*perStageBindingCountMember);
     }
+}
 
-    MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts) {
+void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs) {
+    bindingCounts->totalCount += rhs.totalCount;
+    bindingCounts->bufferCount += rhs.bufferCount;
+    bindingCounts->unverifiedBufferCount += rhs.unverifiedBufferCount;
+    bindingCounts->dynamicUniformBufferCount += rhs.dynamicUniformBufferCount;
+    bindingCounts->dynamicStorageBufferCount += rhs.dynamicStorageBufferCount;
+
+    for (SingleShaderStage stage : IterateStages(kAllStages)) {
+        bindingCounts->perStage[stage].sampledTextureCount +=
+            rhs.perStage[stage].sampledTextureCount;
+        bindingCounts->perStage[stage].samplerCount += rhs.perStage[stage].samplerCount;
+        bindingCounts->perStage[stage].storageBufferCount += rhs.perStage[stage].storageBufferCount;
+        bindingCounts->perStage[stage].storageTextureCount +=
+            rhs.perStage[stage].storageTextureCount;
+        bindingCounts->perStage[stage].uniformBufferCount += rhs.perStage[stage].uniformBufferCount;
+        bindingCounts->perStage[stage].externalTextureCount +=
+            rhs.perStage[stage].externalTextureCount;
+    }
+}
+
+MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts) {
+    DAWN_INVALID_IF(
+        bindingCounts.dynamicUniformBufferCount > kMaxDynamicUniformBuffersPerPipelineLayout,
+        "The number of dynamic uniform buffers (%u) exceeds the maximum per-pipeline-layout "
+        "limit (%u).",
+        bindingCounts.dynamicUniformBufferCount, kMaxDynamicUniformBuffersPerPipelineLayout);
+
+    DAWN_INVALID_IF(
+        bindingCounts.dynamicStorageBufferCount > kMaxDynamicStorageBuffersPerPipelineLayout,
+        "The number of dynamic storage buffers (%u) exceeds the maximum per-pipeline-layout "
+        "limit (%u).",
+        bindingCounts.dynamicStorageBufferCount, kMaxDynamicStorageBuffersPerPipelineLayout);
+
+    for (SingleShaderStage stage : IterateStages(kAllStages)) {
         DAWN_INVALID_IF(
-            bindingCounts.dynamicUniformBufferCount > kMaxDynamicUniformBuffersPerPipelineLayout,
-            "The number of dynamic uniform buffers (%u) exceeds the maximum per-pipeline-layout "
-            "limit (%u).",
-            bindingCounts.dynamicUniformBufferCount, kMaxDynamicUniformBuffersPerPipelineLayout);
+            bindingCounts.perStage[stage].sampledTextureCount > kMaxSampledTexturesPerShaderStage,
+            "The number of sampled textures (%u) in the %s stage exceeds the maximum "
+            "per-stage limit (%u).",
+            bindingCounts.perStage[stage].sampledTextureCount, stage,
+            kMaxSampledTexturesPerShaderStage);
+
+        // The per-stage number of external textures is bound by the maximum sampled textures
+        // per stage.
+        DAWN_INVALID_IF(bindingCounts.perStage[stage].externalTextureCount >
+                            kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture,
+                        "The number of external textures (%u) in the %s stage exceeds the maximum "
+                        "per-stage limit (%u).",
+                        bindingCounts.perStage[stage].externalTextureCount, stage,
+                        kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture);
 
         DAWN_INVALID_IF(
-            bindingCounts.dynamicStorageBufferCount > kMaxDynamicStorageBuffersPerPipelineLayout,
-            "The number of dynamic storage buffers (%u) exceeds the maximum per-pipeline-layout "
+            bindingCounts.perStage[stage].sampledTextureCount +
+                    (bindingCounts.perStage[stage].externalTextureCount *
+                     kSampledTexturesPerExternalTexture) >
+                kMaxSampledTexturesPerShaderStage,
+            "The combination of sampled textures (%u) and external textures (%u) in the %s "
+            "stage exceeds the maximum per-stage limit (%u).",
+            bindingCounts.perStage[stage].sampledTextureCount,
+            bindingCounts.perStage[stage].externalTextureCount, stage,
+            kMaxSampledTexturesPerShaderStage);
+
+        DAWN_INVALID_IF(
+            bindingCounts.perStage[stage].samplerCount > kMaxSamplersPerShaderStage,
+            "The number of samplers (%u) in the %s stage exceeds the maximum per-stage limit "
+            "(%u).",
+            bindingCounts.perStage[stage].samplerCount, stage, kMaxSamplersPerShaderStage);
+
+        DAWN_INVALID_IF(
+            bindingCounts.perStage[stage].samplerCount +
+                    (bindingCounts.perStage[stage].externalTextureCount *
+                     kSamplersPerExternalTexture) >
+                kMaxSamplersPerShaderStage,
+            "The combination of samplers (%u) and external textures (%u) in the %s stage "
+            "exceeds the maximum per-stage limit (%u).",
+            bindingCounts.perStage[stage].samplerCount,
+            bindingCounts.perStage[stage].externalTextureCount, stage, kMaxSamplersPerShaderStage);
+
+        DAWN_INVALID_IF(
+            bindingCounts.perStage[stage].storageBufferCount > kMaxStorageBuffersPerShaderStage,
+            "The number of storage buffers (%u) in the %s stage exceeds the maximum per-stage "
             "limit (%u).",
-            bindingCounts.dynamicStorageBufferCount, kMaxDynamicStorageBuffersPerPipelineLayout);
+            bindingCounts.perStage[stage].storageBufferCount, stage,
+            kMaxStorageBuffersPerShaderStage);
 
-        for (SingleShaderStage stage : IterateStages(kAllStages)) {
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].sampledTextureCount >
-                    kMaxSampledTexturesPerShaderStage,
-                "The number of sampled textures (%u) in the %s stage exceeds the maximum "
-                "per-stage limit (%u).",
-                bindingCounts.perStage[stage].sampledTextureCount, stage,
-                kMaxSampledTexturesPerShaderStage);
+        DAWN_INVALID_IF(
+            bindingCounts.perStage[stage].storageTextureCount > kMaxStorageTexturesPerShaderStage,
+            "The number of storage textures (%u) in the %s stage exceeds the maximum per-stage "
+            "limit (%u).",
+            bindingCounts.perStage[stage].storageTextureCount, stage,
+            kMaxStorageTexturesPerShaderStage);
 
-            // The per-stage number of external textures is bound by the maximum sampled textures
-            // per stage.
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].externalTextureCount >
-                    kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture,
-                "The number of external textures (%u) in the %s stage exceeds the maximum "
-                "per-stage limit (%u).",
-                bindingCounts.perStage[stage].externalTextureCount, stage,
-                kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture);
+        DAWN_INVALID_IF(
+            bindingCounts.perStage[stage].uniformBufferCount > kMaxUniformBuffersPerShaderStage,
+            "The number of uniform buffers (%u) in the %s stage exceeds the maximum per-stage "
+            "limit (%u).",
+            bindingCounts.perStage[stage].uniformBufferCount, stage,
+            kMaxUniformBuffersPerShaderStage);
 
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].sampledTextureCount +
-                        (bindingCounts.perStage[stage].externalTextureCount *
-                         kSampledTexturesPerExternalTexture) >
-                    kMaxSampledTexturesPerShaderStage,
-                "The combination of sampled textures (%u) and external textures (%u) in the %s "
-                "stage exceeds the maximum per-stage limit (%u).",
-                bindingCounts.perStage[stage].sampledTextureCount,
-                bindingCounts.perStage[stage].externalTextureCount, stage,
-                kMaxSampledTexturesPerShaderStage);
-
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].samplerCount > kMaxSamplersPerShaderStage,
-                "The number of samplers (%u) in the %s stage exceeds the maximum per-stage limit "
-                "(%u).",
-                bindingCounts.perStage[stage].samplerCount, stage, kMaxSamplersPerShaderStage);
-
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].samplerCount +
-                        (bindingCounts.perStage[stage].externalTextureCount *
-                         kSamplersPerExternalTexture) >
-                    kMaxSamplersPerShaderStage,
-                "The combination of samplers (%u) and external textures (%u) in the %s stage "
-                "exceeds the maximum per-stage limit (%u).",
-                bindingCounts.perStage[stage].samplerCount,
-                bindingCounts.perStage[stage].externalTextureCount, stage,
-                kMaxSamplersPerShaderStage);
-
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].storageBufferCount > kMaxStorageBuffersPerShaderStage,
-                "The number of storage buffers (%u) in the %s stage exceeds the maximum per-stage "
-                "limit (%u).",
-                bindingCounts.perStage[stage].storageBufferCount, stage,
-                kMaxStorageBuffersPerShaderStage);
-
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].storageTextureCount >
-                    kMaxStorageTexturesPerShaderStage,
-                "The number of storage textures (%u) in the %s stage exceeds the maximum per-stage "
-                "limit (%u).",
-                bindingCounts.perStage[stage].storageTextureCount, stage,
-                kMaxStorageTexturesPerShaderStage);
-
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].uniformBufferCount > kMaxUniformBuffersPerShaderStage,
-                "The number of uniform buffers (%u) in the %s stage exceeds the maximum per-stage "
-                "limit (%u).",
-                bindingCounts.perStage[stage].uniformBufferCount, stage,
-                kMaxUniformBuffersPerShaderStage);
-
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].uniformBufferCount +
-                        (bindingCounts.perStage[stage].externalTextureCount *
-                         kUniformsPerExternalTexture) >
-                    kMaxUniformBuffersPerShaderStage,
-                "The combination of uniform buffers (%u) and external textures (%u) in the %s "
-                "stage exceeds the maximum per-stage limit (%u).",
-                bindingCounts.perStage[stage].uniformBufferCount,
-                bindingCounts.perStage[stage].externalTextureCount, stage,
-                kMaxUniformBuffersPerShaderStage);
-        }
-
-        return {};
+        DAWN_INVALID_IF(
+            bindingCounts.perStage[stage].uniformBufferCount +
+                    (bindingCounts.perStage[stage].externalTextureCount *
+                     kUniformsPerExternalTexture) >
+                kMaxUniformBuffersPerShaderStage,
+            "The combination of uniform buffers (%u) and external textures (%u) in the %s "
+            "stage exceeds the maximum per-stage limit (%u).",
+            bindingCounts.perStage[stage].uniformBufferCount,
+            bindingCounts.perStage[stage].externalTextureCount, stage,
+            kMaxUniformBuffersPerShaderStage);
     }
 
+    return {};
+}
+
 }  // namespace dawn::native
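
A note on the combined checks in ValidateBindingCounts: each external texture is counted as
a fixed number of sampled textures (and samplers, and uniform buffers), so the combined
DAWN_INVALID_IF conditions fold that exchange rate into a single budget. A self-contained
sketch of the arithmetic; the limit values below are illustrative assumptions, not Dawn's
authoritative constants:

    #include <cstdint>
    #include <iostream>

    // Illustrative values; Dawn's real limits live in its constants header.
    constexpr uint32_t kMaxSampledTexturesPerStage = 16;
    constexpr uint32_t kSampledTexturesPerExternalTex = 4;

    // Mirrors the shape of the combined DAWN_INVALID_IF condition: external
    // textures draw from the sampled-texture budget at a fixed rate.
    bool SampledTextureBudgetOk(uint32_t sampledTextures, uint32_t externalTextures) {
        return sampledTextures + externalTextures * kSampledTexturesPerExternalTex <=
               kMaxSampledTexturesPerStage;
    }

    int main() {
        std::cout << SampledTextureBudgetOk(8, 2) << "\n";  // 1: 8 + 2 * 4 == 16
        std::cout << SampledTextureBudgetOk(9, 2) << "\n";  // 0: 17 > 16
    }
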
diff --git a/src/dawn/native/BindingInfo.h b/src/dawn/native/BindingInfo.h
index 1798eb6..9d32b05 100644
--- a/src/dawn/native/BindingInfo.h
+++ b/src/dawn/native/BindingInfo.h
@@ -29,70 +29,70 @@
 
 namespace dawn::native {
 
-    // Not a real WebGPU limit, but the sum of the two limits is useful for internal optimizations.
-    static constexpr uint32_t kMaxDynamicBuffersPerPipelineLayout =
-        kMaxDynamicUniformBuffersPerPipelineLayout + kMaxDynamicStorageBuffersPerPipelineLayout;
+// Not a real WebGPU limit, but the sum of the two limits is useful for internal optimizations.
+static constexpr uint32_t kMaxDynamicBuffersPerPipelineLayout =
+    kMaxDynamicUniformBuffersPerPipelineLayout + kMaxDynamicStorageBuffersPerPipelineLayout;
 
-    static constexpr BindingIndex kMaxDynamicBuffersPerPipelineLayoutTyped =
-        BindingIndex(kMaxDynamicBuffersPerPipelineLayout);
+static constexpr BindingIndex kMaxDynamicBuffersPerPipelineLayoutTyped =
+    BindingIndex(kMaxDynamicBuffersPerPipelineLayout);
 
-    // Not a real WebGPU limit, but used to optimize parts of Dawn which expect valid usage of the
-    // API. There should never be more bindings than the max per stage, for each stage.
-    static constexpr uint32_t kMaxBindingsPerPipelineLayout =
-        3 * (kMaxSampledTexturesPerShaderStage + kMaxSamplersPerShaderStage +
-             kMaxStorageBuffersPerShaderStage + kMaxStorageTexturesPerShaderStage +
-             kMaxUniformBuffersPerShaderStage);
+// Not a real WebGPU limit, but used to optimize parts of Dawn which expect valid usage of the
+// API. There should never be more bindings than the max per stage, for each stage.
+static constexpr uint32_t kMaxBindingsPerPipelineLayout =
+    3 * (kMaxSampledTexturesPerShaderStage + kMaxSamplersPerShaderStage +
+         kMaxStorageBuffersPerShaderStage + kMaxStorageTexturesPerShaderStage +
+         kMaxUniformBuffersPerShaderStage);
 
-    static constexpr BindingIndex kMaxBindingsPerPipelineLayoutTyped =
-        BindingIndex(kMaxBindingsPerPipelineLayout);
+static constexpr BindingIndex kMaxBindingsPerPipelineLayoutTyped =
+    BindingIndex(kMaxBindingsPerPipelineLayout);
 
-    // TODO(enga): Figure out a good number for this.
-    static constexpr uint32_t kMaxOptimalBindingsPerGroup = 32;
+// TODO(enga): Figure out a good number for this.
+static constexpr uint32_t kMaxOptimalBindingsPerGroup = 32;
 
-    enum class BindingInfoType { Buffer, Sampler, Texture, StorageTexture, ExternalTexture };
+enum class BindingInfoType { Buffer, Sampler, Texture, StorageTexture, ExternalTexture };
 
-    struct BindingInfo {
-        BindingNumber binding;
-        wgpu::ShaderStage visibility;
+struct BindingInfo {
+    BindingNumber binding;
+    wgpu::ShaderStage visibility;
 
-        BindingInfoType bindingType;
+    BindingInfoType bindingType;
 
-        // TODO(dawn:527): These four values could be made into a union.
-        BufferBindingLayout buffer;
-        SamplerBindingLayout sampler;
-        TextureBindingLayout texture;
-        StorageTextureBindingLayout storageTexture;
-    };
+    // TODO(dawn:527): These four values could be made into a union.
+    BufferBindingLayout buffer;
+    SamplerBindingLayout sampler;
+    TextureBindingLayout texture;
+    StorageTextureBindingLayout storageTexture;
+};
 
-    struct BindingSlot {
-        BindGroupIndex group;
-        BindingNumber binding;
-    };
+struct BindingSlot {
+    BindGroupIndex group;
+    BindingNumber binding;
+};
 
-    struct PerStageBindingCounts {
-        uint32_t sampledTextureCount;
-        uint32_t samplerCount;
-        uint32_t storageBufferCount;
-        uint32_t storageTextureCount;
-        uint32_t uniformBufferCount;
-        uint32_t externalTextureCount;
-    };
+struct PerStageBindingCounts {
+    uint32_t sampledTextureCount;
+    uint32_t samplerCount;
+    uint32_t storageBufferCount;
+    uint32_t storageTextureCount;
+    uint32_t uniformBufferCount;
+    uint32_t externalTextureCount;
+};
 
-    struct BindingCounts {
-        uint32_t totalCount;
-        uint32_t bufferCount;
-        uint32_t unverifiedBufferCount;  // Buffers with minimum buffer size unspecified
-        uint32_t dynamicUniformBufferCount;
-        uint32_t dynamicStorageBufferCount;
-        PerStage<PerStageBindingCounts> perStage;
-    };
+struct BindingCounts {
+    uint32_t totalCount;
+    uint32_t bufferCount;
+    uint32_t unverifiedBufferCount;  // Buffers with minimum buffer size unspecified
+    uint32_t dynamicUniformBufferCount;
+    uint32_t dynamicStorageBufferCount;
+    PerStage<PerStageBindingCounts> perStage;
+};
 
-    void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry);
-    void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs);
-    MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts);
+void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry);
+void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs);
+MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts);
 
-    // For buffer size validation
-    using RequiredBufferSizes = ityp::array<BindGroupIndex, std::vector<uint64_t>, kMaxBindGroups>;
+// For buffer size validation
+using RequiredBufferSizes = ityp::array<BindGroupIndex, std::vector<uint64_t>, kMaxBindGroups>;
 
 }  // namespace dawn::native
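
For the kMaxBindingsPerPipelineLayout constant above: the factor of 3 is the number of
shader stages (vertex, fragment, compute), multiplied by the sum of the five per-stage
limits. A compile-time sketch of the same shape, using made-up limit values:

    #include <cstdint>

    // Hypothetical per-stage limits for illustration only.
    constexpr uint32_t kSampledTex = 16;
    constexpr uint32_t kSamplers = 16;
    constexpr uint32_t kStorageBuf = 8;
    constexpr uint32_t kStorageTex = 4;
    constexpr uint32_t kUniformBuf = 12;

    // Three stages times the per-stage binding budget, as in BindingInfo.h.
    constexpr uint32_t kMaxBindings =
        3 * (kSampledTex + kSamplers + kStorageBuf + kStorageTex + kUniformBuf);
    static_assert(kMaxBindings == 168, "3 * 56 with the values above");

    int main() {}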
 
diff --git a/src/dawn/native/BlobCache.cpp b/src/dawn/native/BlobCache.cpp
index c566695..9e27d2d 100644
--- a/src/dawn/native/BlobCache.cpp
+++ b/src/dawn/native/BlobCache.cpp
@@ -21,73 +21,72 @@
 
 namespace dawn::native {
 
-    CachedBlob::CachedBlob(size_t size) {
-        if (size != 0) {
-            Reset(size);
-        }
+CachedBlob::CachedBlob(size_t size) {
+    if (size != 0) {
+        Reset(size);
     }
+}
 
-    bool CachedBlob::Empty() const {
-        return mSize == 0;
-    }
+bool CachedBlob::Empty() const {
+    return mSize == 0;
+}
 
-    const uint8_t* CachedBlob::Data() const {
-        return mData.get();
-    }
+const uint8_t* CachedBlob::Data() const {
+    return mData.get();
+}
 
-    uint8_t* CachedBlob::Data() {
-        return mData.get();
-    }
+uint8_t* CachedBlob::Data() {
+    return mData.get();
+}
 
-    size_t CachedBlob::Size() const {
-        return mSize;
-    }
+size_t CachedBlob::Size() const {
+    return mSize;
+}
 
-    void CachedBlob::Reset(size_t size) {
-        mSize = size;
-        mData = std::make_unique<uint8_t[]>(size);
-    }
+void CachedBlob::Reset(size_t size) {
+    mSize = size;
+    mData = std::make_unique<uint8_t[]>(size);
+}
 
-    BlobCache::BlobCache(dawn::platform::CachingInterface* cachingInterface)
-        : mCache(cachingInterface) {
-    }
+BlobCache::BlobCache(dawn::platform::CachingInterface* cachingInterface)
+    : mCache(cachingInterface) {}
 
-    CachedBlob BlobCache::Load(const CacheKey& key) {
-        std::lock_guard<std::mutex> lock(mMutex);
-        return LoadInternal(key);
-    }
+CachedBlob BlobCache::Load(const CacheKey& key) {
+    std::lock_guard<std::mutex> lock(mMutex);
+    return LoadInternal(key);
+}
 
-    void BlobCache::Store(const CacheKey& key, size_t valueSize, const void* value) {
-        std::lock_guard<std::mutex> lock(mMutex);
-        StoreInternal(key, valueSize, value);
-    }
+void BlobCache::Store(const CacheKey& key, size_t valueSize, const void* value) {
+    std::lock_guard<std::mutex> lock(mMutex);
+    StoreInternal(key, valueSize, value);
+}
 
-    void BlobCache::Store(const CacheKey& key, const CachedBlob& value) {
-        Store(key, value.Size(), value.Data());
-    }
+void BlobCache::Store(const CacheKey& key, const CachedBlob& value) {
+    Store(key, value.Size(), value.Data());
+}
 
-    CachedBlob BlobCache::LoadInternal(const CacheKey& key) {
-        CachedBlob result;
-        if (mCache == nullptr) {
-            return result;
-        }
-        const size_t expectedSize = mCache->LoadData(nullptr, key.data(), key.size(), nullptr, 0);
-        if (expectedSize > 0) {
-            result.Reset(expectedSize);
-            const size_t actualSize =
-                mCache->LoadData(nullptr, key.data(), key.size(), result.Data(), expectedSize);
-            ASSERT(expectedSize == actualSize);
-        }
+CachedBlob BlobCache::LoadInternal(const CacheKey& key) {
+    CachedBlob result;
+    if (mCache == nullptr) {
         return result;
     }
-
-    void BlobCache::StoreInternal(const CacheKey& key, size_t valueSize, const void* value) {
-        ASSERT(value != nullptr);
-        ASSERT(valueSize > 0);
-        if (mCache == nullptr) {
-            return;
-        }
-        mCache->StoreData(nullptr, key.data(), key.size(), value, valueSize);
+    const size_t expectedSize = mCache->LoadData(nullptr, key.data(), key.size(), nullptr, 0);
+    if (expectedSize > 0) {
+        result.Reset(expectedSize);
+        const size_t actualSize =
+            mCache->LoadData(nullptr, key.data(), key.size(), result.Data(), expectedSize);
+        ASSERT(expectedSize == actualSize);
     }
+    return result;
+}
+
+void BlobCache::StoreInternal(const CacheKey& key, size_t valueSize, const void* value) {
+    ASSERT(value != nullptr);
+    ASSERT(valueSize > 0);
+    if (mCache == nullptr) {
+        return;
+    }
+    mCache->StoreData(nullptr, key.data(), key.size(), value, valueSize);
+}
 
 }  // namespace dawn::native
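
LoadInternal above uses a common two-call protocol: call LoadData once with a null buffer
to learn the entry size, then again with a buffer of exactly that size. A minimal sketch
of the protocol against a hypothetical cache type (not dawn::platform's interface):

    #include <cstddef>
    #include <cstring>
    #include <memory>
    #include <vector>

    // Hypothetical stand-in for a size-probing cache interface.
    struct FakeCache {
        std::vector<unsigned char> stored;
        // Returns the entry size; copies up to bufSize bytes when buf is non-null.
        std::size_t LoadData(void* buf, std::size_t bufSize) const {
            if (buf != nullptr && !stored.empty()) {
                std::memcpy(buf, stored.data(),
                            bufSize < stored.size() ? bufSize : stored.size());
            }
            return stored.size();
        }
    };

    std::unique_ptr<unsigned char[]> Load(const FakeCache& cache, std::size_t* outSize) {
        *outSize = cache.LoadData(nullptr, 0);  // first call: probe the size
        if (*outSize == 0) {
            return nullptr;
        }
        auto data = std::make_unique<unsigned char[]>(*outSize);
        cache.LoadData(data.get(), *outSize);  // second call: fill the buffer
        return data;
    }

    int main() {
        FakeCache cache{{1, 2, 3}};
        std::size_t size = 0;
        auto blob = Load(cache, &size);
        return (size == 3 && blob[2] == 3) ? 0 : 1;
    }
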
diff --git a/src/dawn/native/BlobCache.h b/src/dawn/native/BlobCache.h
index 4e92fba..f4fb965 100644
--- a/src/dawn/native/BlobCache.h
+++ b/src/dawn/native/BlobCache.h
@@ -19,57 +19,57 @@
 #include <mutex>
 
 namespace dawn::platform {
-    class CachingInterface;
+class CachingInterface;
 }
 
 namespace dawn::native {
 
-    class BlobCache;
-    class CacheKey;
-    class InstanceBase;
+class BlobCache;
+class CacheKey;
+class InstanceBase;
 
-    class CachedBlob {
-      public:
-        explicit CachedBlob(size_t size = 0);
+class CachedBlob {
+  public:
+    explicit CachedBlob(size_t size = 0);
 
-        bool Empty() const;
-        const uint8_t* Data() const;
-        uint8_t* Data();
-        size_t Size() const;
-        void Reset(size_t size);
+    bool Empty() const;
+    const uint8_t* Data() const;
+    uint8_t* Data();
+    size_t Size() const;
+    void Reset(size_t size);
 
-      private:
-        std::unique_ptr<uint8_t[]> mData = nullptr;
-        size_t mSize = 0;
-    };
+  private:
+    std::unique_ptr<uint8_t[]> mData = nullptr;
+    size_t mSize = 0;
+};
 
-    // This class should always be thread-safe because it may be called asynchronously. Its purpose
-    // is to wrap the CachingInterface provided via a platform.
-    class BlobCache {
-      public:
-        explicit BlobCache(dawn::platform::CachingInterface* cachingInterface = nullptr);
+// This class should always be thread-safe because it may be called asynchronously. Its purpose
+// is to wrap the CachingInterface provided via a platform.
+class BlobCache {
+  public:
+    explicit BlobCache(dawn::platform::CachingInterface* cachingInterface = nullptr);
 
-        // Returns empty blob if the key is not found in the cache.
-        CachedBlob Load(const CacheKey& key);
+    // Returns an empty blob if the key is not found in the cache.
+    CachedBlob Load(const CacheKey& key);
 
-        // Value to store must be non-empty/non-null.
-        void Store(const CacheKey& key, size_t valueSize, const void* value);
-        void Store(const CacheKey& key, const CachedBlob& value);
+    // Value to store must be non-empty/non-null.
+    void Store(const CacheKey& key, size_t valueSize, const void* value);
+    void Store(const CacheKey& key, const CachedBlob& value);
 
-      private:
-        // Non-thread safe internal implementations of load and store. Exposed callers that use
-        // these helpers need to make sure that these are entered with `mMutex` held.
-        CachedBlob LoadInternal(const CacheKey& key);
-        void StoreInternal(const CacheKey& key, size_t valueSize, const void* value);
+  private:
+    // Non-thread-safe internal implementations of load and store. Callers that use
+    // these helpers must ensure they are entered with `mMutex` held.
+    CachedBlob LoadInternal(const CacheKey& key);
+    void StoreInternal(const CacheKey& key, size_t valueSize, const void* value);
 
-        // Protects thread safety of access to mCache.
-        std::mutex mMutex;
+    // Protects thread safety of access to mCache.
+    std::mutex mMutex;
 
-        // TODO(dawn:549): Current CachingInterface declaration requires passing a device to each
-        //   call, but this might be unnecessary. This class just passes nullptr for those calls
-        //   right now. Eventually we can just change the interface to be more generic.
-        dawn::platform::CachingInterface* mCache;
-    };
+    // TODO(dawn:549): Current CachingInterface declaration requires passing a device to each
+    //   call, but this might be unnecessary. This class just passes nullptr for those calls
+    //   right now. Eventually we can just change the interface to be more generic.
+    dawn::platform::CachingInterface* mCache;
+};
 
 }  // namespace dawn::native
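
The Load/LoadInternal split documented above is the usual locked-wrapper pattern: public
methods take mMutex once, and the private helpers assume it is already held, which avoids
double-locking when one public entry point needs several helpers. A generic sketch of the
pattern (placeholder types, not Dawn code):

    #include <mutex>
    #include <string>
    #include <unordered_map>

    class ThreadSafeMap {
      public:
        std::string Get(const std::string& key) {
            std::lock_guard<std::mutex> lock(mMutex);
            return GetInternal(key);
        }

      private:
        // Callers must hold mMutex. Keeping the helper lock-free lets other
        // locked public methods reuse it without recursive locking.
        std::string GetInternal(const std::string& key) {
            auto it = mMap.find(key);
            return it == mMap.end() ? std::string() : it->second;
        }

        std::mutex mMutex;
        std::unordered_map<std::string, std::string> mMap;
    };

    int main() {
        ThreadSafeMap map;
        return map.Get("missing").empty() ? 0 : 1;
    }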
 
diff --git a/src/dawn/native/BuddyAllocator.cpp b/src/dawn/native/BuddyAllocator.cpp
index 76d7a65..2d7de75 100644
--- a/src/dawn/native/BuddyAllocator.cpp
+++ b/src/dawn/native/BuddyAllocator.cpp
@@ -19,246 +19,246 @@
 
 namespace dawn::native {
 
-    BuddyAllocator::BuddyAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {
-        ASSERT(IsPowerOfTwo(maxSize));
+BuddyAllocator::BuddyAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {
+    ASSERT(IsPowerOfTwo(maxSize));
 
-        mFreeLists.resize(Log2(mMaxBlockSize) + 1);
+    mFreeLists.resize(Log2(mMaxBlockSize) + 1);
 
-        // Insert the level0 free block.
-        mRoot = new BuddyBlock(maxSize, /*offset*/ 0);
-        mFreeLists[0] = {mRoot};
+    // Insert the level0 free block.
+    mRoot = new BuddyBlock(maxSize, /*offset*/ 0);
+    mFreeLists[0] = {mRoot};
+}
+
+BuddyAllocator::~BuddyAllocator() {
+    if (mRoot) {
+        DeleteBlock(mRoot);
     }
+}
 
-    BuddyAllocator::~BuddyAllocator() {
-        if (mRoot) {
-            DeleteBlock(mRoot);
+uint64_t BuddyAllocator::ComputeTotalNumOfFreeBlocksForTesting() const {
+    return ComputeNumOfFreeBlocks(mRoot);
+}
+
+uint64_t BuddyAllocator::ComputeNumOfFreeBlocks(BuddyBlock* block) const {
+    if (block->mState == BlockState::Free) {
+        return 1;
+    } else if (block->mState == BlockState::Split) {
+        return ComputeNumOfFreeBlocks(block->split.pLeft) +
+               ComputeNumOfFreeBlocks(block->split.pLeft->pBuddy);
+    }
+    return 0;
+}
+
+uint32_t BuddyAllocator::ComputeLevelFromBlockSize(uint64_t blockSize) const {
+    // Every level in the buddy system can be indexed by order-n where n = log2(blockSize).
+    // However, mFreeList is zero-indexed by level.
+    // For example, blockSize=4 is Level1 if MAX_BLOCK is 8.
+    return Log2(mMaxBlockSize) - Log2(blockSize);
+}
+
+uint64_t BuddyAllocator::GetNextFreeAlignedBlock(size_t allocationBlockLevel,
+                                                 uint64_t alignment) const {
+    ASSERT(IsPowerOfTwo(alignment));
+    // The current level is the level that corresponds to the allocation size. The free list may
+    // not contain a block at that level until a larger one gets allocated (and splits).
+    // Continue to go up the tree until such a larger block exists.
+    //
+    // Even if the block exists at the level, it cannot be used if its offset is unaligned.
+    // When the alignment is also a power-of-two, we simply use the next free block whose size
+    // is greater than or equal to the alignment value.
+    //
+    //  After one 8-byte allocation:
+    //
+    //  Level          --------------------------------
+    //      0       32 |               S              |
+    //                 --------------------------------
+    //      1       16 |       S       |       F2     |       S - split
+    //                 --------------------------------       F - free
+    //      2       8  |   Aa  |   F1  |              |       A - allocated
+    //                 --------------------------------
+    //
+    //  Allocate(size=8, alignment=8) will be satisfied by using F1.
+    //  Allocate(size=8, alignment=4) will be satisfied by using F1.
+    //  Allocate(size=8, alignment=16) will be satisfied by using F2.
+    //
+    for (size_t ii = 0; ii <= allocationBlockLevel; ++ii) {
+        size_t currLevel = allocationBlockLevel - ii;
+        BuddyBlock* freeBlock = mFreeLists[currLevel].head;
+        if (freeBlock && (freeBlock->mOffset % alignment == 0)) {
+            return currLevel;
         }
     }
+    return kInvalidOffset;  // No free block exists at any level.
+}
 
-    uint64_t BuddyAllocator::ComputeTotalNumOfFreeBlocksForTesting() const {
-        return ComputeNumOfFreeBlocks(mRoot);
+// Inserts existing free block into the free-list.
+// Called by allocate upon splitting to insert a child block into a free-list.
+// Note: Always insert at the head of the free-list: when a larger free block at a lower
+// level was split, there were no smaller free blocks at a higher level to allocate.
+void BuddyAllocator::InsertFreeBlock(BuddyBlock* block, size_t level) {
+    ASSERT(block->mState == BlockState::Free);
+
+    // Inserted block is now the front (no prev).
+    block->free.pPrev = nullptr;
+
+    // Old head is now the inserted block's next.
+    block->free.pNext = mFreeLists[level].head;
+
+    // Block already in HEAD position (e.g. right child was inserted first).
+    if (mFreeLists[level].head != nullptr) {
+        // Old head's previous is the inserted block.
+        mFreeLists[level].head->free.pPrev = block;
     }
 
-    uint64_t BuddyAllocator::ComputeNumOfFreeBlocks(BuddyBlock* block) const {
-        if (block->mState == BlockState::Free) {
-            return 1;
-        } else if (block->mState == BlockState::Split) {
-            return ComputeNumOfFreeBlocks(block->split.pLeft) +
-                   ComputeNumOfFreeBlocks(block->split.pLeft->pBuddy);
-        }
-        return 0;
-    }
+    mFreeLists[level].head = block;
+}
 
-    uint32_t BuddyAllocator::ComputeLevelFromBlockSize(uint64_t blockSize) const {
-        // Every level in the buddy system can be indexed by order-n where n = log2(blockSize).
-        // However, mFreeList zero-indexed by level.
-        // For example, blockSize=4 is Level1 if MAX_BLOCK is 8.
-        return Log2(mMaxBlockSize) - Log2(blockSize);
-    }
+void BuddyAllocator::RemoveFreeBlock(BuddyBlock* block, size_t level) {
+    ASSERT(block->mState == BlockState::Free);
 
-    uint64_t BuddyAllocator::GetNextFreeAlignedBlock(size_t allocationBlockLevel,
-                                                     uint64_t alignment) const {
-        ASSERT(IsPowerOfTwo(alignment));
-        // The current level is the level that corresponds to the allocation size. The free list may
-        // not contain a block at that level until a larger one gets allocated (and splits).
-        // Continue to go up the tree until such a larger block exists.
-        //
-        // Even if the block exists at the level, it cannot be used if it's offset is unaligned.
-        // When the alignment is also a power-of-two, we simply use the next free block whose size
-        // is greater than or equal to the alignment value.
-        //
-        //  After one 8-byte allocation:
-        //
-        //  Level          --------------------------------
-        //      0       32 |               S              |
-        //                 --------------------------------
-        //      1       16 |       S       |       F2     |       S - split
-        //                 --------------------------------       F - free
-        //      2       8  |   Aa  |   F1  |              |       A - allocated
-        //                 --------------------------------
-        //
-        //  Allocate(size=8, alignment=8) will be satisfied by using F1.
-        //  Allocate(size=8, alignment=4) will be satified by using F1.
-        //  Allocate(size=8, alignment=16) will be satisified by using F2.
-        //
-        for (size_t ii = 0; ii <= allocationBlockLevel; ++ii) {
-            size_t currLevel = allocationBlockLevel - ii;
-            BuddyBlock* freeBlock = mFreeLists[currLevel].head;
-            if (freeBlock && (freeBlock->mOffset % alignment == 0)) {
-                return currLevel;
-            }
-        }
-        return kInvalidOffset;  // No free block exists at any level.
-    }
+    if (mFreeLists[level].head == block) {
+        // Block is in HEAD position.
+        mFreeLists[level].head = mFreeLists[level].head->free.pNext;
+    } else {
+        // Block is after HEAD position.
+        BuddyBlock* pPrev = block->free.pPrev;
+        BuddyBlock* pNext = block->free.pNext;
 
-    // Inserts existing free block into the free-list.
-    // Called by allocate upon splitting to insert a child block into a free-list.
-    // Note: Always insert into the head of the free-list. As when a larger free block at a lower
-    // level was split, there were no smaller free blocks at a higher level to allocate.
-    void BuddyAllocator::InsertFreeBlock(BuddyBlock* block, size_t level) {
-        ASSERT(block->mState == BlockState::Free);
+        ASSERT(pPrev != nullptr);
+        ASSERT(pPrev->mState == BlockState::Free);
 
-        // Inserted block is now the front (no prev).
-        block->free.pPrev = nullptr;
+        pPrev->free.pNext = pNext;
 
-        // Old head is now the inserted block's next.
-        block->free.pNext = mFreeLists[level].head;
-
-        // Block already in HEAD position (ex. right child was inserted first).
-        if (mFreeLists[level].head != nullptr) {
-            // Old head's previous is the inserted block.
-            mFreeLists[level].head->free.pPrev = block;
-        }
-
-        mFreeLists[level].head = block;
-    }
-
-    void BuddyAllocator::RemoveFreeBlock(BuddyBlock* block, size_t level) {
-        ASSERT(block->mState == BlockState::Free);
-
-        if (mFreeLists[level].head == block) {
-            // Block is in HEAD position.
-            mFreeLists[level].head = mFreeLists[level].head->free.pNext;
-        } else {
-            // Block is after HEAD position.
-            BuddyBlock* pPrev = block->free.pPrev;
-            BuddyBlock* pNext = block->free.pNext;
-
-            ASSERT(pPrev != nullptr);
-            ASSERT(pPrev->mState == BlockState::Free);
-
-            pPrev->free.pNext = pNext;
-
-            if (pNext != nullptr) {
-                ASSERT(pNext->mState == BlockState::Free);
-                pNext->free.pPrev = pPrev;
-            }
+        if (pNext != nullptr) {
+            ASSERT(pNext->mState == BlockState::Free);
+            pNext->free.pPrev = pPrev;
         }
     }
+}
 
-    uint64_t BuddyAllocator::Allocate(uint64_t allocationSize, uint64_t alignment) {
-        if (allocationSize == 0 || allocationSize > mMaxBlockSize) {
-            return kInvalidOffset;
-        }
+uint64_t BuddyAllocator::Allocate(uint64_t allocationSize, uint64_t alignment) {
+    if (allocationSize == 0 || allocationSize > mMaxBlockSize) {
+        return kInvalidOffset;
+    }
 
-        // Compute the level
-        const uint32_t allocationSizeToLevel = ComputeLevelFromBlockSize(allocationSize);
+    // Compute the level
+    const uint32_t allocationSizeToLevel = ComputeLevelFromBlockSize(allocationSize);
 
-        ASSERT(allocationSizeToLevel < mFreeLists.size());
+    ASSERT(allocationSizeToLevel < mFreeLists.size());
 
-        uint64_t currBlockLevel = GetNextFreeAlignedBlock(allocationSizeToLevel, alignment);
+    uint64_t currBlockLevel = GetNextFreeAlignedBlock(allocationSizeToLevel, alignment);
 
-        // Error when no free blocks exist (allocator is full)
-        if (currBlockLevel == kInvalidOffset) {
-            return kInvalidOffset;
-        }
+    // Error when no free blocks exist (allocator is full)
+    if (currBlockLevel == kInvalidOffset) {
+        return kInvalidOffset;
+    }
 
-        // Split free blocks level-by-level.
-        // Terminate when the current block level is equal to the computed level of the requested
-        // allocation.
-        BuddyBlock* currBlock = mFreeLists[currBlockLevel].head;
+    // Split free blocks level-by-level.
+    // Terminate when the current block level is equal to the computed level of the requested
+    // allocation.
+    BuddyBlock* currBlock = mFreeLists[currBlockLevel].head;
 
-        for (; currBlockLevel < allocationSizeToLevel; currBlockLevel++) {
-            ASSERT(currBlock->mState == BlockState::Free);
+    for (; currBlockLevel < allocationSizeToLevel; currBlockLevel++) {
+        ASSERT(currBlock->mState == BlockState::Free);
 
-            // Remove curr block (about to be split).
-            RemoveFreeBlock(currBlock, currBlockLevel);
-
-            // Create two free child blocks (the buddies).
-            const uint64_t nextLevelSize = currBlock->mSize / 2;
-            BuddyBlock* leftChildBlock = new BuddyBlock(nextLevelSize, currBlock->mOffset);
-            BuddyBlock* rightChildBlock =
-                new BuddyBlock(nextLevelSize, currBlock->mOffset + nextLevelSize);
-
-            // Remember the parent to merge these back upon de-allocation.
-            rightChildBlock->pParent = currBlock;
-            leftChildBlock->pParent = currBlock;
-
-            // Make them buddies.
-            leftChildBlock->pBuddy = rightChildBlock;
-            rightChildBlock->pBuddy = leftChildBlock;
-
-            // Insert the children back into the free list into the next level.
-            // The free list does not require a specific order. However, an order is specified as
-            // it's ideal to allocate lower addresses first by having the leftmost child in HEAD.
-            InsertFreeBlock(rightChildBlock, currBlockLevel + 1);
-            InsertFreeBlock(leftChildBlock, currBlockLevel + 1);
-
-            // Curr block is now split.
-            currBlock->mState = BlockState::Split;
-            currBlock->split.pLeft = leftChildBlock;
-
-            // Decend down into the next level.
-            currBlock = leftChildBlock;
-        }
-
-        // Remove curr block from free-list (now allocated).
+        // Remove curr block (about to be split).
         RemoveFreeBlock(currBlock, currBlockLevel);
-        currBlock->mState = BlockState::Allocated;
 
-        return currBlock->mOffset;
+        // Create two free child blocks (the buddies).
+        const uint64_t nextLevelSize = currBlock->mSize / 2;
+        BuddyBlock* leftChildBlock = new BuddyBlock(nextLevelSize, currBlock->mOffset);
+        BuddyBlock* rightChildBlock =
+            new BuddyBlock(nextLevelSize, currBlock->mOffset + nextLevelSize);
+
+        // Remember the parent to merge these back upon de-allocation.
+        rightChildBlock->pParent = currBlock;
+        leftChildBlock->pParent = currBlock;
+
+        // Make them buddies.
+        leftChildBlock->pBuddy = rightChildBlock;
+        rightChildBlock->pBuddy = leftChildBlock;
+
+        // Insert the children back into the free list at the next level.
+        // The free list does not require a specific order, but keeping the leftmost child
+        // at HEAD means lower addresses are allocated first, which is ideal.
+        InsertFreeBlock(rightChildBlock, currBlockLevel + 1);
+        InsertFreeBlock(leftChildBlock, currBlockLevel + 1);
+
+        // Curr block is now split.
+        currBlock->mState = BlockState::Split;
+        currBlock->split.pLeft = leftChildBlock;
+
+        // Descend into the next level.
+        currBlock = leftChildBlock;
     }
 
-    void BuddyAllocator::Deallocate(uint64_t offset) {
-        BuddyBlock* curr = mRoot;
+    // Remove curr block from free-list (now allocated).
+    RemoveFreeBlock(currBlock, currBlockLevel);
+    currBlock->mState = BlockState::Allocated;
 
-        // TODO(crbug.com/dawn/827): Optimize de-allocation.
-        // Passing allocationSize directly will avoid the following level-by-level search;
-        // however, it requires the size information to be stored outside the allocator.
+    return currBlock->mOffset;
+}
 
-        // Search for the free block node that corresponds to the block offset.
-        size_t currBlockLevel = 0;
-        while (curr->mState == BlockState::Split) {
-            if (offset < curr->split.pLeft->pBuddy->mOffset) {
-                curr = curr->split.pLeft;
-            } else {
-                curr = curr->split.pLeft->pBuddy;
-            }
+void BuddyAllocator::Deallocate(uint64_t offset) {
+    BuddyBlock* curr = mRoot;
 
-            currBlockLevel++;
+    // TODO(crbug.com/dawn/827): Optimize de-allocation.
+    // Passing allocationSize directly will avoid the following level-by-level search;
+    // however, it requires the size information to be stored outside the allocator.
+
+    // Search for the free block node that corresponds to the block offset.
+    size_t currBlockLevel = 0;
+    while (curr->mState == BlockState::Split) {
+        if (offset < curr->split.pLeft->pBuddy->mOffset) {
+            curr = curr->split.pLeft;
+        } else {
+            curr = curr->split.pLeft->pBuddy;
         }
 
-        ASSERT(curr->mState == BlockState::Allocated);
-
-        // Ensure the block is at the correct level
-        ASSERT(currBlockLevel == ComputeLevelFromBlockSize(curr->mSize));
-
-        // Mark curr free so we can merge.
-        curr->mState = BlockState::Free;
-
-        // Merge the buddies (LevelN-to-Level0).
-        while (currBlockLevel > 0 && curr->pBuddy->mState == BlockState::Free) {
-            // Remove the buddy.
-            RemoveFreeBlock(curr->pBuddy, currBlockLevel);
-
-            BuddyBlock* parent = curr->pParent;
-
-            // The buddies were inserted in a specific order but
-            // could be deleted in any order.
-            DeleteBlock(curr->pBuddy);
-            DeleteBlock(curr);
-
-            // Parent is now free.
-            parent->mState = BlockState::Free;
-
-            // Ascend up to the next level (parent block).
-            curr = parent;
-            currBlockLevel--;
-        }
-
-        InsertFreeBlock(curr, currBlockLevel);
+        currBlockLevel++;
     }
 
-    // Helper which deletes a block in the tree recursively (post-order).
-    void BuddyAllocator::DeleteBlock(BuddyBlock* block) {
-        ASSERT(block != nullptr);
+    ASSERT(curr->mState == BlockState::Allocated);
 
-        if (block->mState == BlockState::Split) {
-            // Delete the pair in same order we inserted.
-            DeleteBlock(block->split.pLeft->pBuddy);
-            DeleteBlock(block->split.pLeft);
-        }
-        delete block;
+    // Ensure the block is at the correct level
+    ASSERT(currBlockLevel == ComputeLevelFromBlockSize(curr->mSize));
+
+    // Mark curr free so we can merge.
+    curr->mState = BlockState::Free;
+
+    // Merge the buddies (LevelN-to-Level0).
+    while (currBlockLevel > 0 && curr->pBuddy->mState == BlockState::Free) {
+        // Remove the buddy.
+        RemoveFreeBlock(curr->pBuddy, currBlockLevel);
+
+        BuddyBlock* parent = curr->pParent;
+
+        // The buddies were inserted in a specific order but
+        // could be deleted in any order.
+        DeleteBlock(curr->pBuddy);
+        DeleteBlock(curr);
+
+        // Parent is now free.
+        parent->mState = BlockState::Free;
+
+        // Ascend to the next level (parent block).
+        curr = parent;
+        currBlockLevel--;
     }
 
+    InsertFreeBlock(curr, currBlockLevel);
+}
+
+// Helper which deletes a block in the tree recursively (post-order).
+void BuddyAllocator::DeleteBlock(BuddyBlock* block) {
+    ASSERT(block != nullptr);
+
+    if (block->mState == BlockState::Split) {
+        // Delete the pair in same order we inserted.
+        DeleteBlock(block->split.pLeft->pBuddy);
+        DeleteBlock(block->split.pLeft);
+    }
+    delete block;
+}
+
 }  // namespace dawn::native
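
The key invariant in ComputeLevelFromBlockSize above: a block's free-list level is
Log2(mMaxBlockSize) - Log2(blockSize), so the root sits at level 0 and each halving adds
one, matching the diagram in GetNextFreeAlignedBlock. A tiny standalone check of that
arithmetic (Log2 here is a simplified stand-in for Dawn's helper):

    #include <cassert>
    #include <cstdint>

    // Log2 of an exact power of two.
    uint32_t Log2(uint64_t v) {
        uint32_t n = 0;
        while (v > 1) {
            v >>= 1;
            ++n;
        }
        return n;
    }

    uint32_t LevelFromBlockSize(uint64_t maxBlockSize, uint64_t blockSize) {
        return Log2(maxBlockSize) - Log2(blockSize);
    }

    int main() {
        // With a 32-byte root: 32 -> level 0, 16 -> level 1, 8 -> level 2.
        assert(LevelFromBlockSize(32, 32) == 0);
        assert(LevelFromBlockSize(32, 16) == 1);
        assert(LevelFromBlockSize(32, 8) == 2);
    }
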
diff --git a/src/dawn/native/BuddyAllocator.h b/src/dawn/native/BuddyAllocator.h
index d22bd58..e0c478b 100644
--- a/src/dawn/native/BuddyAllocator.h
+++ b/src/dawn/native/BuddyAllocator.h
@@ -22,96 +22,96 @@
 
 namespace dawn::native {
 
-    // Buddy allocator uses the buddy memory allocation technique to satisfy an allocation request.
-    // Memory is split into halves until just large enough to fit to the request. This
-    // requires the allocation size to be a power-of-two value. The allocator "allocates" a block by
-    // returning the starting offset whose size is guaranteed to be greater than or equal to the
-    // allocation size. To deallocate, the same offset is used to find the corresponding block.
-    //
-    // Internally, it manages a free list to track free blocks in a full binary tree.
-    // Every index in the free list corresponds to a level in the tree. That level also determines
-    // the size of the block to be used to satisfy the request. The first level (index=0) represents
-    // the root whose size is also called the max block size.
-    //
-    class BuddyAllocator {
-      public:
-        explicit BuddyAllocator(uint64_t maxSize);
-        ~BuddyAllocator();
+// The buddy allocator uses the buddy memory allocation technique to satisfy an allocation
+// request. Memory is split into halves until just large enough to fit the request. This
+// requires the allocation size to be a power-of-two value. The allocator "allocates" a block by
+// returning the starting offset of a block whose size is guaranteed to be greater than or equal
+// to the allocation size. To deallocate, the same offset is used to find the corresponding block.
+//
+// Internally, it manages a free list to track free blocks in a full binary tree.
+// Every index in the free list corresponds to a level in the tree. That level also determines
+// the size of the block to be used to satisfy the request. The first level (index=0) represents
+// the root whose size is also called the max block size.
+//
+class BuddyAllocator {
+  public:
+    explicit BuddyAllocator(uint64_t maxSize);
+    ~BuddyAllocator();
 
-        // Required methods.
-        uint64_t Allocate(uint64_t allocationSize, uint64_t alignment = 1);
-        void Deallocate(uint64_t offset);
+    // Required methods.
+    uint64_t Allocate(uint64_t allocationSize, uint64_t alignment = 1);
+    void Deallocate(uint64_t offset);
 
-        // For testing purposes only.
-        uint64_t ComputeTotalNumOfFreeBlocksForTesting() const;
+    // For testing purposes only.
+    uint64_t ComputeTotalNumOfFreeBlocksForTesting() const;
 
-        static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
+    static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
 
-      private:
-        uint32_t ComputeLevelFromBlockSize(uint64_t blockSize) const;
-        uint64_t GetNextFreeAlignedBlock(size_t allocationBlockLevel, uint64_t alignment) const;
+  private:
+    uint32_t ComputeLevelFromBlockSize(uint64_t blockSize) const;
+    uint64_t GetNextFreeAlignedBlock(size_t allocationBlockLevel, uint64_t alignment) const;
 
-        enum class BlockState { Free, Split, Allocated };
+    enum class BlockState { Free, Split, Allocated };
 
-        struct BuddyBlock {
-            BuddyBlock(uint64_t size, uint64_t offset)
-                : mOffset(offset), mSize(size), mState(BlockState::Free) {
-                free.pPrev = nullptr;
-                free.pNext = nullptr;
-            }
+    struct BuddyBlock {
+        BuddyBlock(uint64_t size, uint64_t offset)
+            : mOffset(offset), mSize(size), mState(BlockState::Free) {
+            free.pPrev = nullptr;
+            free.pNext = nullptr;
+        }
 
-            uint64_t mOffset;
-            uint64_t mSize;
+        uint64_t mOffset;
+        uint64_t mSize;
 
-            // Pointer to this block's buddy, iff parent is split.
-            // Used to quickly merge buddy blocks upon de-allocate.
-            BuddyBlock* pBuddy = nullptr;
-            BuddyBlock* pParent = nullptr;
+        // Pointer to this block's buddy, iff parent is split.
+        // Used to quickly merge buddy blocks upon de-allocate.
+        BuddyBlock* pBuddy = nullptr;
+        BuddyBlock* pParent = nullptr;
 
-            // Track whether this block has been split or not.
-            BlockState mState;
+        // Track whether this block has been split or not.
+        BlockState mState;
 
-            struct FreeLinks {
-                BuddyBlock* pPrev;
-                BuddyBlock* pNext;
-            };
-
-            struct SplitLink {
-                BuddyBlock* pLeft;
-            };
-
-            union {
-                // Used upon allocation.
-                // Avoids searching for the next free block.
-                FreeLinks free;
-
-                // Used upon de-allocation.
-                // Had this block split upon allocation, it and it's buddy is to be deleted.
-                SplitLink split;
-            };
+        struct FreeLinks {
+            BuddyBlock* pPrev;
+            BuddyBlock* pNext;
         };
 
-        void InsertFreeBlock(BuddyBlock* block, size_t level);
-        void RemoveFreeBlock(BuddyBlock* block, size_t level);
-        void DeleteBlock(BuddyBlock* block);
-
-        uint64_t ComputeNumOfFreeBlocks(BuddyBlock* block) const;
-
-        // Keep track the head and tail (for faster insertion/removal).
-        struct BlockList {
-            BuddyBlock* head = nullptr;  // First free block in level.
-            // TODO(crbug.com/dawn/827): Track the tail.
+        struct SplitLink {
+            BuddyBlock* pLeft;
         };
 
-        BuddyBlock* mRoot = nullptr;  // Used to deallocate non-free blocks.
+        union {
+            // Used upon allocation.
+            // Avoids searching for the next free block.
+            FreeLinks free;
 
-        uint64_t mMaxBlockSize = 0;
-
-        // List of linked-lists of free blocks where the index is a level that
-        // corresponds to a power-of-two sized block.
-        std::vector<BlockList> mFreeLists;
+            // Used upon de-allocation.
+            // If this block was split upon allocation, it and its buddy are to be deleted.
+            SplitLink split;
+        };
     };
 
+    void InsertFreeBlock(BuddyBlock* block, size_t level);
+    void RemoveFreeBlock(BuddyBlock* block, size_t level);
+    void DeleteBlock(BuddyBlock* block);
+
+    uint64_t ComputeNumOfFreeBlocks(BuddyBlock* block) const;
+
+    // Keep track of the head and tail (for faster insertion/removal).
+    struct BlockList {
+        BuddyBlock* head = nullptr;  // First free block in level.
+        // TODO(crbug.com/dawn/827): Track the tail.
+    };
+
+    BuddyBlock* mRoot = nullptr;  // Used to deallocate non-free blocks.
+
+    uint64_t mMaxBlockSize = 0;
+
+    // List of linked-lists of free blocks where the index is a level that
+    // corresponds to a power-of-two sized block.
+    std::vector<BlockList> mFreeLists;
+};
+
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_BUDDYALLOCATOR_H_
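
A usage-level view of the interface above, replaying the 8-byte example from the comment in
BuddyAllocator.cpp. The expected offsets follow from that diagram; treat this as a
hypothetical call sequence, not a test from the tree:

    #include "dawn/native/BuddyAllocator.h"  // assumed include path

    #include <cassert>
    #include <cstdint>

    void Example() {
        dawn::native::BuddyAllocator allocator(32);  // 32-byte root block

        // Splits 32 -> 16 -> 8 and hands out the leftmost 8-byte block.
        uint64_t a = allocator.Allocate(8, /*alignment=*/8);
        assert(a == 0);

        // F1 (offset 8) is free but misaligned for 16, so F2 (offset 16) is split.
        uint64_t b = allocator.Allocate(8, /*alignment=*/16);
        assert(b == 16);

        allocator.Deallocate(a);  // buddies merge back up as they become free
        allocator.Deallocate(b);
    }

    int main() {
        Example();
    }
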
diff --git a/src/dawn/native/BuddyMemoryAllocator.cpp b/src/dawn/native/BuddyMemoryAllocator.cpp
index df3874e..d21ecf1 100644
--- a/src/dawn/native/BuddyMemoryAllocator.cpp
+++ b/src/dawn/native/BuddyMemoryAllocator.cpp
@@ -21,102 +21,102 @@
 
 namespace dawn::native {
 
-    BuddyMemoryAllocator::BuddyMemoryAllocator(uint64_t maxSystemSize,
-                                               uint64_t memoryBlockSize,
-                                               ResourceHeapAllocator* heapAllocator)
-        : mMemoryBlockSize(memoryBlockSize),
-          mBuddyBlockAllocator(maxSystemSize),
-          mHeapAllocator(heapAllocator) {
-        ASSERT(memoryBlockSize <= maxSystemSize);
-        ASSERT(IsPowerOfTwo(mMemoryBlockSize));
-        ASSERT(maxSystemSize % mMemoryBlockSize == 0);
+BuddyMemoryAllocator::BuddyMemoryAllocator(uint64_t maxSystemSize,
+                                           uint64_t memoryBlockSize,
+                                           ResourceHeapAllocator* heapAllocator)
+    : mMemoryBlockSize(memoryBlockSize),
+      mBuddyBlockAllocator(maxSystemSize),
+      mHeapAllocator(heapAllocator) {
+    ASSERT(memoryBlockSize <= maxSystemSize);
+    ASSERT(IsPowerOfTwo(mMemoryBlockSize));
+    ASSERT(maxSystemSize % mMemoryBlockSize == 0);
 
-        mTrackedSubAllocations.resize(maxSystemSize / mMemoryBlockSize);
+    mTrackedSubAllocations.resize(maxSystemSize / mMemoryBlockSize);
+}
+
+uint64_t BuddyMemoryAllocator::GetMemoryIndex(uint64_t offset) const {
+    ASSERT(offset != BuddyAllocator::kInvalidOffset);
+    return offset / mMemoryBlockSize;
+}
+
+ResultOrError<ResourceMemoryAllocation> BuddyMemoryAllocator::Allocate(uint64_t allocationSize,
+                                                                       uint64_t alignment) {
+    ResourceMemoryAllocation invalidAllocation = ResourceMemoryAllocation{};
+
+    if (allocationSize == 0) {
+        return std::move(invalidAllocation);
     }
 
-    uint64_t BuddyMemoryAllocator::GetMemoryIndex(uint64_t offset) const {
-        ASSERT(offset != BuddyAllocator::kInvalidOffset);
-        return offset / mMemoryBlockSize;
+    // Check the unaligned size to avoid overflowing NextPowerOfTwo.
+    if (allocationSize > mMemoryBlockSize) {
+        return std::move(invalidAllocation);
     }
 
-    ResultOrError<ResourceMemoryAllocation> BuddyMemoryAllocator::Allocate(uint64_t allocationSize,
-                                                                           uint64_t alignment) {
-        ResourceMemoryAllocation invalidAllocation = ResourceMemoryAllocation{};
+    // Round allocation size to nearest power-of-two.
+    allocationSize = NextPowerOfTwo(allocationSize);
 
-        if (allocationSize == 0) {
-            return std::move(invalidAllocation);
-        }
-
-        // Check the unaligned size to avoid overflowing NextPowerOfTwo.
-        if (allocationSize > mMemoryBlockSize) {
-            return std::move(invalidAllocation);
-        }
-
-        // Round allocation size to nearest power-of-two.
-        allocationSize = NextPowerOfTwo(allocationSize);
-
-        // Allocation cannot exceed the memory size.
-        if (allocationSize > mMemoryBlockSize) {
-            return std::move(invalidAllocation);
-        }
-
-        // Attempt to sub-allocate a block of the requested size.
-        const uint64_t blockOffset = mBuddyBlockAllocator.Allocate(allocationSize, alignment);
-        if (blockOffset == BuddyAllocator::kInvalidOffset) {
-            return std::move(invalidAllocation);
-        }
-
-        const uint64_t memoryIndex = GetMemoryIndex(blockOffset);
-        if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
-            // Transfer ownership to this allocator
-            std::unique_ptr<ResourceHeapBase> memory;
-            DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(mMemoryBlockSize));
-            mTrackedSubAllocations[memoryIndex] = {/*refcount*/ 0, std::move(memory)};
-        }
-
-        mTrackedSubAllocations[memoryIndex].refcount++;
-
-        AllocationInfo info;
-        info.mBlockOffset = blockOffset;
-        info.mMethod = AllocationMethod::kSubAllocated;
-
-        // Allocation offset is always local to the memory.
-        const uint64_t memoryOffset = blockOffset % mMemoryBlockSize;
-
-        return ResourceMemoryAllocation{
-            info, memoryOffset, mTrackedSubAllocations[memoryIndex].mMemoryAllocation.get()};
+    // Allocation cannot exceed the memory size.
+    if (allocationSize > mMemoryBlockSize) {
+        return std::move(invalidAllocation);
     }
 
-    void BuddyMemoryAllocator::Deallocate(const ResourceMemoryAllocation& allocation) {
-        const AllocationInfo info = allocation.GetInfo();
+    // Attempt to sub-allocate a block of the requested size.
+    const uint64_t blockOffset = mBuddyBlockAllocator.Allocate(allocationSize, alignment);
+    if (blockOffset == BuddyAllocator::kInvalidOffset) {
+        return std::move(invalidAllocation);
+    }
 
-        ASSERT(info.mMethod == AllocationMethod::kSubAllocated);
+    const uint64_t memoryIndex = GetMemoryIndex(blockOffset);
+    if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
+        // Transfer ownership to this allocator
+        std::unique_ptr<ResourceHeapBase> memory;
+        DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(mMemoryBlockSize));
+        mTrackedSubAllocations[memoryIndex] = {/*refcount*/ 0, std::move(memory)};
+    }
 
-        const uint64_t memoryIndex = GetMemoryIndex(info.mBlockOffset);
+    mTrackedSubAllocations[memoryIndex].refcount++;
 
-        ASSERT(mTrackedSubAllocations[memoryIndex].refcount > 0);
-        mTrackedSubAllocations[memoryIndex].refcount--;
+    AllocationInfo info;
+    info.mBlockOffset = blockOffset;
+    info.mMethod = AllocationMethod::kSubAllocated;
 
-        if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
-            mHeapAllocator->DeallocateResourceHeap(
-                std::move(mTrackedSubAllocations[memoryIndex].mMemoryAllocation));
+    // Allocation offset is always local to the memory.
+    const uint64_t memoryOffset = blockOffset % mMemoryBlockSize;
+
+    return ResourceMemoryAllocation{info, memoryOffset,
+                                    mTrackedSubAllocations[memoryIndex].mMemoryAllocation.get()};
+}
+
+void BuddyMemoryAllocator::Deallocate(const ResourceMemoryAllocation& allocation) {
+    const AllocationInfo info = allocation.GetInfo();
+
+    ASSERT(info.mMethod == AllocationMethod::kSubAllocated);
+
+    const uint64_t memoryIndex = GetMemoryIndex(info.mBlockOffset);
+
+    ASSERT(mTrackedSubAllocations[memoryIndex].refcount > 0);
+    mTrackedSubAllocations[memoryIndex].refcount--;
+
+    if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
+        mHeapAllocator->DeallocateResourceHeap(
+            std::move(mTrackedSubAllocations[memoryIndex].mMemoryAllocation));
+    }
+
+    mBuddyBlockAllocator.Deallocate(info.mBlockOffset);
+}
+
+uint64_t BuddyMemoryAllocator::GetMemoryBlockSize() const {
+    return mMemoryBlockSize;
+}
+
+uint64_t BuddyMemoryAllocator::ComputeTotalNumOfHeapsForTesting() const {
+    uint64_t count = 0;
+    for (const TrackedSubAllocations& allocation : mTrackedSubAllocations) {
+        if (allocation.refcount > 0) {
+            count++;
         }
-
-        mBuddyBlockAllocator.Deallocate(info.mBlockOffset);
     }
-
-    uint64_t BuddyMemoryAllocator::GetMemoryBlockSize() const {
-        return mMemoryBlockSize;
-    }
-
-    uint64_t BuddyMemoryAllocator::ComputeTotalNumOfHeapsForTesting() const {
-        uint64_t count = 0;
-        for (const TrackedSubAllocations& allocation : mTrackedSubAllocations) {
-            if (allocation.refcount > 0) {
-                count++;
-            }
-        }
-        return count;
-    }
+    return count;
+}
 
 }  // namespace dawn::native
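
The Allocate and Deallocate paths above both hinge on GetMemoryIndex: each memoryBlockSize-sized
slice of the buddy address space maps to at most one backing heap, created on the first
sub-allocation landing in that slice and released when its refcount drops back to zero. Below is
a self-contained sketch of just that bookkeeping; HeapTracker is a made-up name for illustration,
and the actual heap creation and buddy block allocation are elided:

#include <cassert>
#include <cstdint>
#include <vector>

struct HeapTracker {
    HeapTracker(uint64_t maxSystemSize, uint64_t blockSize)
        : memoryBlockSize(blockSize), refcounts(maxSystemSize / blockSize) {}

    uint64_t MemoryIndex(uint64_t blockOffset) const { return blockOffset / memoryBlockSize; }

    void OnAllocate(uint64_t blockOffset) {
        uint64_t i = MemoryIndex(blockOffset);
        if (refcounts[i]++ == 0) {
            // First sub-allocation in this slice: the real allocator creates a
            // ResourceHeap of memoryBlockSize here.
        }
    }

    void OnDeallocate(uint64_t blockOffset) {
        uint64_t i = MemoryIndex(blockOffset);
        assert(refcounts[i] > 0);
        if (--refcounts[i] == 0) {
            // Last sub-allocation in this slice is gone: the heap is released.
        }
    }

    uint64_t memoryBlockSize;
    std::vector<size_t> refcounts;  // One slot per potential backing heap.
};
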
diff --git a/src/dawn/native/BuddyMemoryAllocator.h b/src/dawn/native/BuddyMemoryAllocator.h
index 299dc3d..a58cbae 100644
--- a/src/dawn/native/BuddyMemoryAllocator.h
+++ b/src/dawn/native/BuddyMemoryAllocator.h
@@ -24,51 +24,50 @@
 
 namespace dawn::native {
 
-    class ResourceHeapAllocator;
+class ResourceHeapAllocator;
 
-    // BuddyMemoryAllocator uses the buddy allocator to sub-allocate blocks of device
-    // memory created by MemoryAllocator clients. It creates a very large buddy system
-    // where backing device memory blocks equal a specified level in the system.
-    //
-    // Upon sub-allocating, the offset gets mapped to device memory by computing the corresponding
-    // memory index; should the memory not exist, it is created. If two sub-allocations share the
-    // same memory index, the memory refcount is incremented to ensure de-allocating one doesn't
-    // release the other prematurely.
-    //
-    // The MemoryAllocator should return ResourceHeaps that are all compatible with each other.
-    // It should also outlive all the resources that are in the buddy allocator.
-    class BuddyMemoryAllocator {
-      public:
-        BuddyMemoryAllocator(uint64_t maxSystemSize,
-                             uint64_t memoryBlockSize,
-                             ResourceHeapAllocator* heapAllocator);
-        ~BuddyMemoryAllocator() = default;
+// BuddyMemoryAllocator uses the buddy allocator to sub-allocate blocks of device
+// memory created by MemoryAllocator clients. It creates a very large buddy system
+// where backing device memory blocks equal a specified level in the system.
+//
+// Upon sub-allocating, the offset gets mapped to device memory by computing the corresponding
+// memory index; should the memory not exist, it is created. If two sub-allocations share the
+// same memory index, the memory refcount is incremented to ensure de-allocating one doesn't
+// release the other prematurely.
+//
+// The MemoryAllocator should return ResourceHeaps that are all compatible with each other.
+// It should also outlive all the resources that are in the buddy allocator.
+class BuddyMemoryAllocator {
+  public:
+    BuddyMemoryAllocator(uint64_t maxSystemSize,
+                         uint64_t memoryBlockSize,
+                         ResourceHeapAllocator* heapAllocator);
+    ~BuddyMemoryAllocator() = default;
 
-        ResultOrError<ResourceMemoryAllocation> Allocate(uint64_t allocationSize,
-                                                         uint64_t alignment);
-        void Deallocate(const ResourceMemoryAllocation& allocation);
+    ResultOrError<ResourceMemoryAllocation> Allocate(uint64_t allocationSize, uint64_t alignment);
+    void Deallocate(const ResourceMemoryAllocation& allocation);
 
-        uint64_t GetMemoryBlockSize() const;
+    uint64_t GetMemoryBlockSize() const;
 
-        // For testing purposes.
-        uint64_t ComputeTotalNumOfHeapsForTesting() const;
+    // For testing purposes.
+    uint64_t ComputeTotalNumOfHeapsForTesting() const;
 
-      private:
-        uint64_t GetMemoryIndex(uint64_t offset) const;
+  private:
+    uint64_t GetMemoryIndex(uint64_t offset) const;
 
-        uint64_t mMemoryBlockSize = 0;
+    uint64_t mMemoryBlockSize = 0;
 
-        BuddyAllocator mBuddyBlockAllocator;
-        ResourceHeapAllocator* mHeapAllocator;
+    BuddyAllocator mBuddyBlockAllocator;
+    ResourceHeapAllocator* mHeapAllocator;
 
-        struct TrackedSubAllocations {
-            size_t refcount = 0;
-            std::unique_ptr<ResourceHeapBase> mMemoryAllocation;
-        };
-
-        std::vector<TrackedSubAllocations> mTrackedSubAllocations;
+    struct TrackedSubAllocations {
+        size_t refcount = 0;
+        std::unique_ptr<ResourceHeapBase> mMemoryAllocation;
     };
 
+    std::vector<TrackedSubAllocations> mTrackedSubAllocations;
+};
+
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_BUDDYMEMORYALLOCATOR_H_
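
A hypothetical usage of this class follows, assuming the surrounding Dawn types and some
ResourceHeapAllocator implementation named myHeapAllocator (not shown); the sizes respect the
constructor's asserts, i.e. memoryBlockSize is a power of two and evenly divides maxSystemSize:

// Hypothetical usage sketch, not Dawn test code.
constexpr uint64_t kMaxSystemSize = 64ull * 1024 * 1024;   // 64 MiB of buddy address space.
constexpr uint64_t kMemoryBlockSize = 4ull * 1024 * 1024;  // Each backing heap is 4 MiB.
BuddyMemoryAllocator allocator(kMaxSystemSize, kMemoryBlockSize, &myHeapAllocator);

// A 100 KiB request is rounded up to the next power of two (128 KiB) and
// sub-allocated; at most kMaxSystemSize / kMemoryBlockSize = 16 heaps can exist.
ResultOrError<ResourceMemoryAllocation> result = allocator.Allocate(100 * 1024, /*alignment=*/256);
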
diff --git a/src/dawn/native/Buffer.cpp b/src/dawn/native/Buffer.cpp
index 9161f11..80e905b 100644
--- a/src/dawn/native/Buffer.cpp
+++ b/src/dawn/native/Buffer.cpp
@@ -33,543 +33,526 @@
 
 namespace dawn::native {
 
-    namespace {
-        struct MapRequestTask : QueueBase::TaskInFlight {
-            MapRequestTask(Ref<BufferBase> buffer, MapRequestID id)
-                : buffer(std::move(buffer)), id(id) {
-            }
-            void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) override {
-                TRACE_EVENT1(platform, General, "Buffer::TaskInFlight::Finished", "serial",
-                             uint64_t(serial));
-                buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_Success);
-            }
-            void HandleDeviceLoss() override {
-                buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_DeviceLost);
-            }
-            ~MapRequestTask() override = default;
-
-          private:
-            Ref<BufferBase> buffer;
-            MapRequestID id;
-        };
-
-        class ErrorBuffer final : public BufferBase {
-          public:
-            ErrorBuffer(DeviceBase* device, const BufferDescriptor* descriptor)
-                : BufferBase(device, descriptor, ObjectBase::kError) {
-                if (descriptor->mappedAtCreation) {
-                    // Check that the size can be used to allocate mFakeMappedData. A malloc(0)
-                    // is invalid, and on 32-bit systems we should avoid a narrowing conversion
-                    // that would make size = (1 << 32) + 1 allocate one byte.
-                    bool isValidSize =
-                        descriptor->size != 0 &&
-                        descriptor->size < uint64_t(std::numeric_limits<size_t>::max());
-
-                    if (isValidSize) {
-                        mFakeMappedData =
-                            std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(descriptor->size));
-                    }
-                    // Since error buffers in this case may allocate memory, we need to track them
-                    // for destruction on the device.
-                    TrackInDevice();
-                }
-            }
-
-          private:
-            bool IsCPUWritableAtCreation() const override {
-                UNREACHABLE();
-            }
-
-            MaybeError MapAtCreationImpl() override {
-                UNREACHABLE();
-            }
-
-            MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override {
-                UNREACHABLE();
-            }
-
-            void* GetMappedPointerImpl() override {
-                return mFakeMappedData.get();
-            }
-
-            void UnmapImpl() override {
-                mFakeMappedData.reset();
-            }
-
-            std::unique_ptr<uint8_t[]> mFakeMappedData;
-        };
-
-    }  // anonymous namespace
-
-    MaybeError ValidateBufferDescriptor(DeviceBase*, const BufferDescriptor* descriptor) {
-        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
-        DAWN_TRY(ValidateBufferUsage(descriptor->usage));
-
-        wgpu::BufferUsage usage = descriptor->usage;
-
-        DAWN_INVALID_IF(usage == wgpu::BufferUsage::None, "Buffer usages must not be 0.");
-
-        const wgpu::BufferUsage kMapWriteAllowedUsages =
-            wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
-        DAWN_INVALID_IF(
-            usage & wgpu::BufferUsage::MapWrite && !IsSubset(usage, kMapWriteAllowedUsages),
-            "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
-            "usage is %s.",
-            usage, wgpu::BufferUsage::MapWrite, wgpu::BufferUsage::CopySrc);
-
-        const wgpu::BufferUsage kMapReadAllowedUsages =
-            wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
-        DAWN_INVALID_IF(
-            usage & wgpu::BufferUsage::MapRead && !IsSubset(usage, kMapReadAllowedUsages),
-            "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
-            "usage is %s.",
-            usage, wgpu::BufferUsage::MapRead, wgpu::BufferUsage::CopyDst);
-
-        DAWN_INVALID_IF(descriptor->mappedAtCreation && descriptor->size % 4 != 0,
-                        "Buffer is mapped at creation but its size (%u) is not a multiple of 4.",
-                        descriptor->size);
-
-        return {};
+namespace {
+struct MapRequestTask : QueueBase::TaskInFlight {
+    MapRequestTask(Ref<BufferBase> buffer, MapRequestID id) : buffer(std::move(buffer)), id(id) {}
+    void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) override {
+        TRACE_EVENT1(platform, General, "Buffer::TaskInFlight::Finished", "serial",
+                     uint64_t(serial));
+        buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_Success);
     }
-
-    // Buffer
-
-    BufferBase::BufferBase(DeviceBase* device, const BufferDescriptor* descriptor)
-        : ApiObjectBase(device, descriptor->label),
-          mSize(descriptor->size),
-          mUsage(descriptor->usage),
-          mState(BufferState::Unmapped) {
-        // Add readonly storage usage if the buffer has a storage usage. The validation rules in
-        // ValidateSyncScopeResourceUsage will make sure we don't use both at the same time.
-        if (mUsage & wgpu::BufferUsage::Storage) {
-            mUsage |= kReadOnlyStorageBuffer;
-        }
-
-        // The query resolve buffer needs to be used as a storage buffer in the internal compute
-        // pipeline which does timestamp uint conversion for timestamp queries, so it requires the
-        // buffer to have Storage usage in the bind group. Implicitly add an InternalStorage usage,
-        // which is only compatible with the InternalStorageBuffer binding type in the BGL. It
-        // shouldn't be compatible with the StorageBuffer binding type, and the query resolve
-        // buffer cannot be bound as a storage buffer if it's created without Storage usage.
-        if (mUsage & wgpu::BufferUsage::QueryResolve) {
-            mUsage |= kInternalStorageBuffer;
-        }
-
-        // We also add internal storage usage for Indirect buffers for some transformations before
-        // DispatchIndirect calls on the backend (e.g. validations, support of [[num_workgroups]] on
-        // D3D12), since these transformations involve binding them as storage buffers for use in a
-        // compute pass.
-        if (mUsage & wgpu::BufferUsage::Indirect) {
-            mUsage |= kInternalStorageBuffer;
-        }
-
-        TrackInDevice();
+    void HandleDeviceLoss() override {
+        buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_DeviceLost);
     }
+    ~MapRequestTask() override = default;
 
-    BufferBase::BufferBase(DeviceBase* device,
-                           const BufferDescriptor* descriptor,
-                           ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag), mSize(descriptor->size), mState(BufferState::Unmapped) {
+  private:
+    Ref<BufferBase> buffer;
+    MapRequestID id;
+};
+
+class ErrorBuffer final : public BufferBase {
+  public:
+    ErrorBuffer(DeviceBase* device, const BufferDescriptor* descriptor)
+        : BufferBase(device, descriptor, ObjectBase::kError) {
         if (descriptor->mappedAtCreation) {
-            mState = BufferState::MappedAtCreation;
-            mMapOffset = 0;
-            mMapSize = mSize;
-        }
-    }
+            // Check that the size can be used to allocate mFakeMappedData. A malloc(0)
+            // is invalid, and on 32-bit systems we should avoid a narrowing conversion
+            // that would make size = (1 << 32) + 1 allocate one byte.
+            bool isValidSize = descriptor->size != 0 &&
+                               descriptor->size < uint64_t(std::numeric_limits<size_t>::max());
 
-    BufferBase::BufferBase(DeviceBase* device, BufferState state)
-        : ApiObjectBase(device, kLabelNotImplemented), mState(state) {
-        TrackInDevice();
-    }
-
-    BufferBase::~BufferBase() {
-        ASSERT(mState == BufferState::Unmapped || mState == BufferState::Destroyed);
-    }
-
-    void BufferBase::DestroyImpl() {
-        if (mState == BufferState::Mapped) {
-            UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
-        } else if (mState == BufferState::MappedAtCreation) {
-            if (mStagingBuffer != nullptr) {
-                mStagingBuffer.reset();
-            } else if (mSize != 0) {
-                UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
+            if (isValidSize) {
+                mFakeMappedData =
+                    std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(descriptor->size));
             }
+            // Since error buffers in this case may allocate memory, we need to track them
+            // for destruction on the device.
+            TrackInDevice();
         }
-        mState = BufferState::Destroyed;
     }
 
-    // static
-    BufferBase* BufferBase::MakeError(DeviceBase* device, const BufferDescriptor* descriptor) {
-        return new ErrorBuffer(device, descriptor);
+  private:
+    bool IsCPUWritableAtCreation() const override { UNREACHABLE(); }
+
+    MaybeError MapAtCreationImpl() override { UNREACHABLE(); }
+
+    MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override {
+        UNREACHABLE();
     }
 
-    ObjectType BufferBase::GetType() const {
-        return ObjectType::Buffer;
+    void* GetMappedPointerImpl() override { return mFakeMappedData.get(); }
+
+    void UnmapImpl() override { mFakeMappedData.reset(); }
+
+    std::unique_ptr<uint8_t[]> mFakeMappedData;
+};
+
+}  // anonymous namespace
+
+MaybeError ValidateBufferDescriptor(DeviceBase*, const BufferDescriptor* descriptor) {
+    DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
+    DAWN_TRY(ValidateBufferUsage(descriptor->usage));
+
+    wgpu::BufferUsage usage = descriptor->usage;
+
+    DAWN_INVALID_IF(usage == wgpu::BufferUsage::None, "Buffer usages must not be 0.");
+
+    const wgpu::BufferUsage kMapWriteAllowedUsages =
+        wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+    DAWN_INVALID_IF(
+        usage & wgpu::BufferUsage::MapWrite && !IsSubset(usage, kMapWriteAllowedUsages),
+        "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
+        "usage is %s.",
+        usage, wgpu::BufferUsage::MapWrite, wgpu::BufferUsage::CopySrc);
+
+    const wgpu::BufferUsage kMapReadAllowedUsages =
+        wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
+    DAWN_INVALID_IF(
+        usage & wgpu::BufferUsage::MapRead && !IsSubset(usage, kMapReadAllowedUsages),
+        "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
+        "usage is %s.",
+        usage, wgpu::BufferUsage::MapRead, wgpu::BufferUsage::CopyDst);
+
+    DAWN_INVALID_IF(descriptor->mappedAtCreation && descriptor->size % 4 != 0,
+                    "Buffer is mapped at creation but its size (%u) is not a multiple of 4.",
+                    descriptor->size);
+
+    return {};
+}
+
+// Buffer
+
+BufferBase::BufferBase(DeviceBase* device, const BufferDescriptor* descriptor)
+    : ApiObjectBase(device, descriptor->label),
+      mSize(descriptor->size),
+      mUsage(descriptor->usage),
+      mState(BufferState::Unmapped) {
+    // Add readonly storage usage if the buffer has a storage usage. The validation rules in
+    // ValidateSyncScopeResourceUsage will make sure we don't use both at the same time.
+    if (mUsage & wgpu::BufferUsage::Storage) {
+        mUsage |= kReadOnlyStorageBuffer;
     }
 
-    uint64_t BufferBase::GetSize() const {
-        ASSERT(!IsError());
-        return mSize;
+    // The query resolve buffer needs to be used as a storage buffer in the internal compute
+    // pipeline which does timestamp uint conversion for timestamp queries, so it requires the
+    // buffer to have Storage usage in the bind group. Implicitly add an InternalStorage usage,
+    // which is only compatible with the InternalStorageBuffer binding type in the BGL. It
+    // shouldn't be compatible with the StorageBuffer binding type, and the query resolve buffer
+    // cannot be bound as a storage buffer if it's created without Storage usage.
+    if (mUsage & wgpu::BufferUsage::QueryResolve) {
+        mUsage |= kInternalStorageBuffer;
     }
 
-    uint64_t BufferBase::GetAllocatedSize() const {
-        ASSERT(!IsError());
-        // The backend must initialize this value.
-        ASSERT(mAllocatedSize != 0);
-        return mAllocatedSize;
+    // We also add internal storage usage for Indirect buffers for some transformations before
+    // DispatchIndirect calls on the backend (e.g. validations, support of [[num_workgroups]] on
+    // D3D12), since these transformations involve binding them as storage buffers for use in a
+    // compute pass.
+    if (mUsage & wgpu::BufferUsage::Indirect) {
+        mUsage |= kInternalStorageBuffer;
     }
 
-    wgpu::BufferUsage BufferBase::GetUsage() const {
-        ASSERT(!IsError());
-        return mUsage;
-    }
+    TrackInDevice();
+}
 
-    wgpu::BufferUsage BufferBase::GetUsageExternalOnly() const {
-        return GetUsage() & ~kAllInternalBufferUsages;
-    }
-
-    MaybeError BufferBase::MapAtCreation() {
-        DAWN_TRY(MapAtCreationInternal());
-
-        void* ptr;
-        size_t size;
-        if (mSize == 0) {
-            return {};
-        } else if (mStagingBuffer) {
-            // If there is a staging buffer for initialization, clear its contents directly.
-            // It should be exactly as large as the buffer allocation.
-            ptr = mStagingBuffer->GetMappedPointer();
-            size = mStagingBuffer->GetSize();
-            ASSERT(size == GetAllocatedSize());
-        } else {
-            // Otherwise, the buffer is directly mappable on the CPU.
-            ptr = GetMappedPointerImpl();
-            size = GetAllocatedSize();
-        }
-
-        DeviceBase* device = GetDevice();
-        if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
-            memset(ptr, uint8_t(0u), size);
-            SetIsDataInitialized();
-            device->IncrementLazyClearCountForTesting();
-        } else if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
-            memset(ptr, uint8_t(1u), size);
-        }
-
-        return {};
-    }
-
-    MaybeError BufferBase::MapAtCreationInternal() {
-        ASSERT(!IsError());
+BufferBase::BufferBase(DeviceBase* device,
+                       const BufferDescriptor* descriptor,
+                       ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag), mSize(descriptor->size), mState(BufferState::Unmapped) {
+    if (descriptor->mappedAtCreation) {
+        mState = BufferState::MappedAtCreation;
         mMapOffset = 0;
         mMapSize = mSize;
-
-        // 0-sized buffers are not supposed to be written to. Return any non-null pointer.
-        // Skip handling 0-sized buffers so we don't try to map them in the backend.
-        if (mSize != 0) {
-            // Mappable buffers don't use a staging buffer and are just as if mapped through
-            // MapAsync.
-            if (IsCPUWritableAtCreation()) {
-                DAWN_TRY(MapAtCreationImpl());
-            } else {
-                // If any of these fail, the buffer will be deleted and replaced with an error
-                // buffer. The staging buffer is used to return mappable data to initialize the
-                // buffer contents. Allocate one as large as the real buffer size so that every byte
-                // is initialized.
-                // TODO(crbug.com/dawn/828): Suballocate and reuse memory from a larger staging
-                // buffer so we don't create many small buffers.
-                DAWN_TRY_ASSIGN(mStagingBuffer,
-                                GetDevice()->CreateStagingBuffer(GetAllocatedSize()));
-            }
-        }
-
-        // Only set the state to mapped at creation if we did not fail at any point in this helper.
-        // Otherwise, if we override the default unmapped state before succeeding to create a
-        // staging buffer, we will have issues when we try to destroy the buffer.
-        mState = BufferState::MappedAtCreation;
-        return {};
     }
+}
 
-    MaybeError BufferBase::ValidateCanUseOnQueueNow() const {
-        ASSERT(!IsError());
+BufferBase::BufferBase(DeviceBase* device, BufferState state)
+    : ApiObjectBase(device, kLabelNotImplemented), mState(state) {
+    TrackInDevice();
+}
 
-        switch (mState) {
-            case BufferState::Destroyed:
-                return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while destroyed.", this);
-            case BufferState::Mapped:
-            case BufferState::MappedAtCreation:
-                return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while mapped.", this);
-            case BufferState::Unmapped:
-                return {};
-        }
-        UNREACHABLE();
-    }
+BufferBase::~BufferBase() {
+    ASSERT(mState == BufferState::Unmapped || mState == BufferState::Destroyed);
+}
 
-    void BufferBase::CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
-        ASSERT(!IsError());
-        if (mMapCallback != nullptr && mapID == mLastMapID) {
-            // Tag the callback as fired before firing it, otherwise it could fire a second time if
-            // for example buffer.Unmap() is called inside the application-provided callback.
-            WGPUBufferMapCallback callback = mMapCallback;
-            mMapCallback = nullptr;
-
-            if (GetDevice()->IsLost()) {
-                callback(WGPUBufferMapAsyncStatus_DeviceLost, mMapUserdata);
-            } else {
-                callback(status, mMapUserdata);
-            }
-        }
-    }
-
-    void BufferBase::APIMapAsync(wgpu::MapMode mode,
-                                 size_t offset,
-                                 size_t size,
-                                 WGPUBufferMapCallback callback,
-                                 void* userdata) {
-        // Handle the defaulting of size required by WebGPU, even though in webgpu_cpp.h it is
-        // not possible to default the function argument (because the callback comes later in the
-        // argument list).
-        if ((size == wgpu::kWholeMapSize) && (offset <= mSize)) {
-            size = mSize - offset;
-        }
-
-        WGPUBufferMapAsyncStatus status;
-        if (GetDevice()->ConsumedError(ValidateMapAsync(mode, offset, size, &status),
-                                       "calling %s.MapAsync(%s, %u, %u, ...).", this, mode, offset,
-                                       size)) {
-            if (callback) {
-                callback(status, userdata);
-            }
-            return;
-        }
-        ASSERT(!IsError());
-
-        mLastMapID++;
-        mMapMode = mode;
-        mMapOffset = offset;
-        mMapSize = size;
-        mMapCallback = callback;
-        mMapUserdata = userdata;
-        mState = BufferState::Mapped;
-
-        if (GetDevice()->ConsumedError(MapAsyncImpl(mode, offset, size))) {
-            CallMapCallback(mLastMapID, WGPUBufferMapAsyncStatus_DeviceLost);
-            return;
-        }
-        std::unique_ptr<MapRequestTask> request =
-            std::make_unique<MapRequestTask>(this, mLastMapID);
-        TRACE_EVENT1(GetDevice()->GetPlatform(), General, "Buffer::APIMapAsync", "serial",
-                     uint64_t(GetDevice()->GetPendingCommandSerial()));
-        GetDevice()->GetQueue()->TrackTask(std::move(request),
-                                           GetDevice()->GetPendingCommandSerial());
-    }
-
-    void* BufferBase::APIGetMappedRange(size_t offset, size_t size) {
-        return GetMappedRange(offset, size, true);
-    }
-
-    const void* BufferBase::APIGetConstMappedRange(size_t offset, size_t size) {
-        return GetMappedRange(offset, size, false);
-    }
-
-    void* BufferBase::GetMappedRange(size_t offset, size_t size, bool writable) {
-        if (!CanGetMappedRange(writable, offset, size)) {
-            return nullptr;
-        }
-
+void BufferBase::DestroyImpl() {
+    if (mState == BufferState::Mapped) {
+        UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
+    } else if (mState == BufferState::MappedAtCreation) {
         if (mStagingBuffer != nullptr) {
-            return static_cast<uint8_t*>(mStagingBuffer->GetMappedPointer()) + offset;
+            mStagingBuffer.reset();
+        } else if (mSize != 0) {
+            UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
         }
-        if (mSize == 0) {
-            return reinterpret_cast<uint8_t*>(intptr_t(0xCAFED00D));
-        }
-        uint8_t* start = static_cast<uint8_t*>(GetMappedPointerImpl());
-        return start == nullptr ? nullptr : start + offset;
     }
+    mState = BufferState::Destroyed;
+}
 
-    void BufferBase::APIDestroy() {
-        Destroy();
-    }
+// static
+BufferBase* BufferBase::MakeError(DeviceBase* device, const BufferDescriptor* descriptor) {
+    return new ErrorBuffer(device, descriptor);
+}
 
-    MaybeError BufferBase::CopyFromStagingBuffer() {
-        ASSERT(mStagingBuffer);
-        if (mSize == 0) {
-            // The staging buffer is not created if the size is zero.
-            ASSERT(mStagingBuffer == nullptr);
-            return {};
-        }
+ObjectType BufferBase::GetType() const {
+    return ObjectType::Buffer;
+}
 
-        DAWN_TRY(GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0,
-                                                      GetAllocatedSize()));
+uint64_t BufferBase::GetSize() const {
+    ASSERT(!IsError());
+    return mSize;
+}
 
-        DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
-        uploader->ReleaseStagingBuffer(std::move(mStagingBuffer));
+uint64_t BufferBase::GetAllocatedSize() const {
+    ASSERT(!IsError());
+    // The backend must initialize this value.
+    ASSERT(mAllocatedSize != 0);
+    return mAllocatedSize;
+}
 
+wgpu::BufferUsage BufferBase::GetUsage() const {
+    ASSERT(!IsError());
+    return mUsage;
+}
+
+wgpu::BufferUsage BufferBase::GetUsageExternalOnly() const {
+    return GetUsage() & ~kAllInternalBufferUsages;
+}
+
+MaybeError BufferBase::MapAtCreation() {
+    DAWN_TRY(MapAtCreationInternal());
+
+    void* ptr;
+    size_t size;
+    if (mSize == 0) {
         return {};
+    } else if (mStagingBuffer) {
+        // If there is a staging buffer for initialization, clear its contents directly.
+        // It should be exactly as large as the buffer allocation.
+        ptr = mStagingBuffer->GetMappedPointer();
+        size = mStagingBuffer->GetSize();
+        ASSERT(size == GetAllocatedSize());
+    } else {
+        // Otherwise, the buffer is directly mappable on the CPU.
+        ptr = GetMappedPointerImpl();
+        size = GetAllocatedSize();
     }
 
-    void BufferBase::APIUnmap() {
-        if (GetDevice()->ConsumedError(ValidateUnmap(), "calling %s.Unmap().", this)) {
-            return;
-        }
-        Unmap();
+    DeviceBase* device = GetDevice();
+    if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+        memset(ptr, uint8_t(0u), size);
+        SetIsDataInitialized();
+        device->IncrementLazyClearCountForTesting();
+    } else if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+        memset(ptr, uint8_t(1u), size);
     }
 
-    void BufferBase::Unmap() {
-        UnmapInternal(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback);
-    }
+    return {};
+}
 
-    void BufferBase::UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus) {
-        if (mState == BufferState::Mapped) {
-            // A map request's callback can only be fired once, so this will fire only if the
-            // request wasn't completed before the Unmap.
-            // Callbacks are not fired if there is no callback registered, so this is correct for
-            // mappedAtCreation = true.
-            CallMapCallback(mLastMapID, callbackStatus);
-            UnmapImpl();
+MaybeError BufferBase::MapAtCreationInternal() {
+    ASSERT(!IsError());
+    mMapOffset = 0;
+    mMapSize = mSize;
 
-            mMapCallback = nullptr;
-            mMapUserdata = 0;
-        } else if (mState == BufferState::MappedAtCreation) {
-            if (mStagingBuffer != nullptr) {
-                GetDevice()->ConsumedError(CopyFromStagingBuffer());
-            } else if (mSize != 0) {
-                UnmapImpl();
-            }
-        }
-
-        mState = BufferState::Unmapped;
-    }
-
-    MaybeError BufferBase::ValidateMapAsync(wgpu::MapMode mode,
-                                            size_t offset,
-                                            size_t size,
-                                            WGPUBufferMapAsyncStatus* status) const {
-        *status = WGPUBufferMapAsyncStatus_DeviceLost;
-        DAWN_TRY(GetDevice()->ValidateIsAlive());
-
-        *status = WGPUBufferMapAsyncStatus_Error;
-        DAWN_TRY(GetDevice()->ValidateObject(this));
-
-        DAWN_INVALID_IF(uint64_t(offset) > mSize,
-                        "Mapping offset (%u) is larger than the size (%u) of %s.", offset, mSize,
-                        this);
-
-        DAWN_INVALID_IF(offset % 8 != 0, "Offset (%u) must be a multiple of 8.", offset);
-        DAWN_INVALID_IF(size % 4 != 0, "Size (%u) must be a multiple of 4.", size);
-
-        DAWN_INVALID_IF(uint64_t(size) > mSize - uint64_t(offset),
-                        "Mapping range (offset:%u, size: %u) doesn't fit in the size (%u) of %s.",
-                        offset, size, mSize, this);
-
-        switch (mState) {
-            case BufferState::Mapped:
-            case BufferState::MappedAtCreation:
-                return DAWN_FORMAT_VALIDATION_ERROR("%s is already mapped.", this);
-            case BufferState::Destroyed:
-                return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
-            case BufferState::Unmapped:
-                break;
-        }
-
-        bool isReadMode = mode & wgpu::MapMode::Read;
-        bool isWriteMode = mode & wgpu::MapMode::Write;
-        DAWN_INVALID_IF(!(isReadMode ^ isWriteMode), "Map mode (%s) is not one of %s or %s.", mode,
-                        wgpu::MapMode::Write, wgpu::MapMode::Read);
-
-        if (mode & wgpu::MapMode::Read) {
-            DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapRead),
-                            "The buffer usages (%s) do not contain %s.", mUsage,
-                            wgpu::BufferUsage::MapRead);
+    // 0-sized buffers are not supposed to be written to. Return any non-null pointer.
+    // Skip handling 0-sized buffers so we don't try to map them in the backend.
+    if (mSize != 0) {
+        // Mappable buffers don't use a staging buffer and are just as if mapped through
+        // MapAsync.
+        if (IsCPUWritableAtCreation()) {
+            DAWN_TRY(MapAtCreationImpl());
         } else {
-            ASSERT(mode & wgpu::MapMode::Write);
-            DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapWrite),
-                            "The buffer usages (%s) do not contain %s.", mUsage,
-                            wgpu::BufferUsage::MapWrite);
+            // If any of these fail, the buffer will be deleted and replaced with an error
+            // buffer. The staging buffer is used to return mappable data to initialize the
+            // buffer contents. Allocate one as large as the real buffer size so that every byte
+            // is initialized.
+            // TODO(crbug.com/dawn/828): Suballocate and reuse memory from a larger staging
+            // buffer so we don't create many small buffers.
+            DAWN_TRY_ASSIGN(mStagingBuffer, GetDevice()->CreateStagingBuffer(GetAllocatedSize()));
         }
+    }
 
-        *status = WGPUBufferMapAsyncStatus_Success;
+    // Only set the state to mapped at creation if we did not fail at any point in this helper.
+    // Otherwise, if we override the default unmapped state before succeeding to create a
+    // staging buffer, we will have issues when we try to destroy the buffer.
+    mState = BufferState::MappedAtCreation;
+    return {};
+}
+
+MaybeError BufferBase::ValidateCanUseOnQueueNow() const {
+    ASSERT(!IsError());
+
+    switch (mState) {
+        case BufferState::Destroyed:
+            return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while destroyed.", this);
+        case BufferState::Mapped:
+        case BufferState::MappedAtCreation:
+            return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while mapped.", this);
+        case BufferState::Unmapped:
+            return {};
+    }
+    UNREACHABLE();
+}
+
+void BufferBase::CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
+    ASSERT(!IsError());
+    if (mMapCallback != nullptr && mapID == mLastMapID) {
+        // Tag the callback as fired before firing it, otherwise it could fire a second time if
+        // for example buffer.Unmap() is called inside the application-provided callback.
+        WGPUBufferMapCallback callback = mMapCallback;
+        mMapCallback = nullptr;
+
+        if (GetDevice()->IsLost()) {
+            callback(WGPUBufferMapAsyncStatus_DeviceLost, mMapUserdata);
+        } else {
+            callback(status, mMapUserdata);
+        }
+    }
+}
+
+void BufferBase::APIMapAsync(wgpu::MapMode mode,
+                             size_t offset,
+                             size_t size,
+                             WGPUBufferMapCallback callback,
+                             void* userdata) {
+    // Handle the defaulting of size required by WebGPU, even though in webgpu_cpp.h it is not
+    // possible to default the function argument (because the callback comes later in the
+    // argument list).
+    if ((size == wgpu::kWholeMapSize) && (offset <= mSize)) {
+        size = mSize - offset;
+    }
+
+    WGPUBufferMapAsyncStatus status;
+    if (GetDevice()->ConsumedError(ValidateMapAsync(mode, offset, size, &status),
+                                   "calling %s.MapAsync(%s, %u, %u, ...).", this, mode, offset,
+                                   size)) {
+        if (callback) {
+            callback(status, userdata);
+        }
+        return;
+    }
+    ASSERT(!IsError());
+
+    mLastMapID++;
+    mMapMode = mode;
+    mMapOffset = offset;
+    mMapSize = size;
+    mMapCallback = callback;
+    mMapUserdata = userdata;
+    mState = BufferState::Mapped;
+
+    if (GetDevice()->ConsumedError(MapAsyncImpl(mode, offset, size))) {
+        CallMapCallback(mLastMapID, WGPUBufferMapAsyncStatus_DeviceLost);
+        return;
+    }
+    std::unique_ptr<MapRequestTask> request = std::make_unique<MapRequestTask>(this, mLastMapID);
+    TRACE_EVENT1(GetDevice()->GetPlatform(), General, "Buffer::APIMapAsync", "serial",
+                 uint64_t(GetDevice()->GetPendingCommandSerial()));
+    GetDevice()->GetQueue()->TrackTask(std::move(request), GetDevice()->GetPendingCommandSerial());
+}
+
+void* BufferBase::APIGetMappedRange(size_t offset, size_t size) {
+    return GetMappedRange(offset, size, true);
+}
+
+const void* BufferBase::APIGetConstMappedRange(size_t offset, size_t size) {
+    return GetMappedRange(offset, size, false);
+}
+
+void* BufferBase::GetMappedRange(size_t offset, size_t size, bool writable) {
+    if (!CanGetMappedRange(writable, offset, size)) {
+        return nullptr;
+    }
+
+    if (mStagingBuffer != nullptr) {
+        return static_cast<uint8_t*>(mStagingBuffer->GetMappedPointer()) + offset;
+    }
+    if (mSize == 0) {
+        return reinterpret_cast<uint8_t*>(intptr_t(0xCAFED00D));
+    }
+    uint8_t* start = static_cast<uint8_t*>(GetMappedPointerImpl());
+    return start == nullptr ? nullptr : start + offset;
+}
+
+void BufferBase::APIDestroy() {
+    Destroy();
+}
+
+MaybeError BufferBase::CopyFromStagingBuffer() {
+    ASSERT(mStagingBuffer);
+    if (mSize == 0) {
+        // The staging buffer is not created if the size is zero.
+        ASSERT(mStagingBuffer == nullptr);
         return {};
     }
 
-    bool BufferBase::CanGetMappedRange(bool writable, size_t offset, size_t size) const {
-        if (offset % 8 != 0 || offset < mMapOffset || offset > mSize) {
+    DAWN_TRY(
+        GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0, GetAllocatedSize()));
+
+    DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
+    uploader->ReleaseStagingBuffer(std::move(mStagingBuffer));
+
+    return {};
+}
+
+void BufferBase::APIUnmap() {
+    if (GetDevice()->ConsumedError(ValidateUnmap(), "calling %s.Unmap().", this)) {
+        return;
+    }
+    Unmap();
+}
+
+void BufferBase::Unmap() {
+    UnmapInternal(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback);
+}
+
+void BufferBase::UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus) {
+    if (mState == BufferState::Mapped) {
+        // A map request's callback can only be fired once, so this will fire only if the
+        // request wasn't completed before the Unmap.
+        // Callbacks are not fired if there is no callback registered, so this is correct for
+        // mappedAtCreation = true.
+        CallMapCallback(mLastMapID, callbackStatus);
+        UnmapImpl();
+
+        mMapCallback = nullptr;
+        mMapUserdata = 0;
+    } else if (mState == BufferState::MappedAtCreation) {
+        if (mStagingBuffer != nullptr) {
+            GetDevice()->ConsumedError(CopyFromStagingBuffer());
+        } else if (mSize != 0) {
+            UnmapImpl();
+        }
+    }
+
+    mState = BufferState::Unmapped;
+}
+
+MaybeError BufferBase::ValidateMapAsync(wgpu::MapMode mode,
+                                        size_t offset,
+                                        size_t size,
+                                        WGPUBufferMapAsyncStatus* status) const {
+    *status = WGPUBufferMapAsyncStatus_DeviceLost;
+    DAWN_TRY(GetDevice()->ValidateIsAlive());
+
+    *status = WGPUBufferMapAsyncStatus_Error;
+    DAWN_TRY(GetDevice()->ValidateObject(this));
+
+    DAWN_INVALID_IF(uint64_t(offset) > mSize,
+                    "Mapping offset (%u) is larger than the size (%u) of %s.", offset, mSize, this);
+
+    DAWN_INVALID_IF(offset % 8 != 0, "Offset (%u) must be a multiple of 8.", offset);
+    DAWN_INVALID_IF(size % 4 != 0, "Size (%u) must be a multiple of 4.", size);
+
+    DAWN_INVALID_IF(uint64_t(size) > mSize - uint64_t(offset),
+                    "Mapping range (offset:%u, size: %u) doesn't fit in the size (%u) of %s.",
+                    offset, size, mSize, this);
+
+    switch (mState) {
+        case BufferState::Mapped:
+        case BufferState::MappedAtCreation:
+            return DAWN_FORMAT_VALIDATION_ERROR("%s is already mapped.", this);
+        case BufferState::Destroyed:
+            return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
+        case BufferState::Unmapped:
+            break;
+    }
+
+    bool isReadMode = mode & wgpu::MapMode::Read;
+    bool isWriteMode = mode & wgpu::MapMode::Write;
+    DAWN_INVALID_IF(!(isReadMode ^ isWriteMode), "Map mode (%s) is not one of %s or %s.", mode,
+                    wgpu::MapMode::Write, wgpu::MapMode::Read);
+
+    if (mode & wgpu::MapMode::Read) {
+        DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapRead),
+                        "The buffer usages (%s) do not contain %s.", mUsage,
+                        wgpu::BufferUsage::MapRead);
+    } else {
+        ASSERT(mode & wgpu::MapMode::Write);
+        DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapWrite),
+                        "The buffer usages (%s) do not contain %s.", mUsage,
+                        wgpu::BufferUsage::MapWrite);
+    }
+
+    *status = WGPUBufferMapAsyncStatus_Success;
+    return {};
+}
+
+bool BufferBase::CanGetMappedRange(bool writable, size_t offset, size_t size) const {
+    if (offset % 8 != 0 || offset < mMapOffset || offset > mSize) {
+        return false;
+    }
+
+    size_t rangeSize = size == WGPU_WHOLE_MAP_SIZE ? mSize - offset : size;
+
+    if (rangeSize % 4 != 0 || rangeSize > mMapSize) {
+        return false;
+    }
+
+    size_t offsetInMappedRange = offset - mMapOffset;
+    if (offsetInMappedRange > mMapSize - rangeSize) {
+        return false;
+    }
+
+    // Note that:
+    //
+    //   - We don't check that the device is alive because the application can ask for the
+    //     mapped pointer before it knows, and even Dawn knows, that the device was lost, and
+    //     still needs to work properly.
+    //   - We don't check that the object is alive because we need to return mapped pointers
+    //     for error buffers too.
+
+    switch (mState) {
+        // Writeable Buffer::GetMappedRange is always allowed when mapped at creation.
+        case BufferState::MappedAtCreation:
+            return true;
+
+        case BufferState::Mapped:
+            ASSERT(bool{mMapMode & wgpu::MapMode::Read} ^ bool{mMapMode & wgpu::MapMode::Write});
+            return !writable || (mMapMode & wgpu::MapMode::Write);
+
+        case BufferState::Unmapped:
+        case BufferState::Destroyed:
             return false;
-        }
-
-        size_t rangeSize = size == WGPU_WHOLE_MAP_SIZE ? mSize - offset : size;
-
-        if (rangeSize % 4 != 0 || rangeSize > mMapSize) {
-            return false;
-        }
-
-        size_t offsetInMappedRange = offset - mMapOffset;
-        if (offsetInMappedRange > mMapSize - rangeSize) {
-            return false;
-        }
-
-        // Note that:
-        //
-        //   - We don't check that the device is alive because the application can ask for the
-        //     mapped pointer before it knows, and even Dawn knows, that the device was lost, and
-        //     still needs to work properly.
-        //   - We don't check that the object is alive because we need to return mapped pointers
-        //     for error buffers too.
-
-        switch (mState) {
-            // Writeable Buffer::GetMappedRange is always allowed when mapped at creation.
-            case BufferState::MappedAtCreation:
-                return true;
-
-            case BufferState::Mapped:
-                ASSERT(bool{mMapMode & wgpu::MapMode::Read} ^
-                       bool{mMapMode & wgpu::MapMode::Write});
-                return !writable || (mMapMode & wgpu::MapMode::Write);
-
-            case BufferState::Unmapped:
-            case BufferState::Destroyed:
-                return false;
-        }
-        UNREACHABLE();
     }
+    UNREACHABLE();
+}
 
-    MaybeError BufferBase::ValidateUnmap() const {
-        DAWN_TRY(GetDevice()->ValidateIsAlive());
+MaybeError BufferBase::ValidateUnmap() const {
+    DAWN_TRY(GetDevice()->ValidateIsAlive());
 
-        switch (mState) {
-            case BufferState::Mapped:
-            case BufferState::MappedAtCreation:
-                // A buffer may be in the Mapped state if it was created with mappedAtCreation
-                // even if it did not have a mappable usage.
-                return {};
-            case BufferState::Unmapped:
-                return DAWN_FORMAT_VALIDATION_ERROR("%s is unmapped.", this);
-            case BufferState::Destroyed:
-                return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
-        }
-        UNREACHABLE();
+    switch (mState) {
+        case BufferState::Mapped:
+        case BufferState::MappedAtCreation:
+            // A buffer may be in the Mapped state if it was created with mappedAtCreation
+            // even if it did not have a mappable usage.
+            return {};
+        case BufferState::Unmapped:
+            return DAWN_FORMAT_VALIDATION_ERROR("%s is unmapped.", this);
+        case BufferState::Destroyed:
+            return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
     }
+    UNREACHABLE();
+}
 
-    void BufferBase::OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
-        CallMapCallback(mapID, status);
-    }
+void BufferBase::OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
+    CallMapCallback(mapID, status);
+}
 
-    bool BufferBase::NeedsInitialization() const {
-        return !mIsDataInitialized &&
-               GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse);
-    }
+bool BufferBase::NeedsInitialization() const {
+    return !mIsDataInitialized && GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse);
+}
 
-    bool BufferBase::IsDataInitialized() const {
-        return mIsDataInitialized;
-    }
+bool BufferBase::IsDataInitialized() const {
+    return mIsDataInitialized;
+}
 
-    void BufferBase::SetIsDataInitialized() {
-        mIsDataInitialized = true;
-    }
+void BufferBase::SetIsDataInitialized() {
+    mIsDataInitialized = true;
+}
 
-    bool BufferBase::IsFullBufferRange(uint64_t offset, uint64_t size) const {
-        return offset == 0 && size == GetSize();
-    }
+bool BufferBase::IsFullBufferRange(uint64_t offset, uint64_t size) const {
+    return offset == 0 && size == GetSize();
+}
 
 }  // namespace dawn::native
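
ValidateMapAsync and APIMapAsync above combine WebGPU's size defaulting with alignment and range
checks. Below is a standalone restatement of just that arithmetic; the constant and function
names are stand-ins for illustration, not Dawn API:

#include <cstdint>

constexpr uint64_t kWholeMapSize = UINT64_MAX;  // Stand-in for wgpu::kWholeMapSize.

bool IsValidMapRange(uint64_t bufferSize, uint64_t offset, uint64_t size) {
    if (size == kWholeMapSize && offset <= bufferSize) {
        size = bufferSize - offset;  // Default size to "the rest of the buffer".
    }
    return offset % 8 == 0 &&            // Offset must be a multiple of 8.
           size % 4 == 0 &&              // Size must be a multiple of 4.
           offset <= bufferSize &&       // Offset is within the buffer...
           size <= bufferSize - offset;  // ...and the range fits without overflow.
}
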
diff --git a/src/dawn/native/Buffer.h b/src/dawn/native/Buffer.h
index fd138c2..e5c6150 100644
--- a/src/dawn/native/Buffer.h
+++ b/src/dawn/native/Buffer.h
@@ -26,114 +26,112 @@
 
 namespace dawn::native {
 
-    struct CopyTextureToBufferCmd;
+struct CopyTextureToBufferCmd;
 
-    enum class MapType : uint32_t;
+enum class MapType : uint32_t;
 
-    MaybeError ValidateBufferDescriptor(DeviceBase* device, const BufferDescriptor* descriptor);
+MaybeError ValidateBufferDescriptor(DeviceBase* device, const BufferDescriptor* descriptor);
 
-    static constexpr wgpu::BufferUsage kReadOnlyBufferUsages =
-        wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Index |
-        wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform | kReadOnlyStorageBuffer |
-        wgpu::BufferUsage::Indirect;
+static constexpr wgpu::BufferUsage kReadOnlyBufferUsages =
+    wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Index |
+    wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform | kReadOnlyStorageBuffer |
+    wgpu::BufferUsage::Indirect;
 
-    static constexpr wgpu::BufferUsage kMappableBufferUsages =
-        wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite;
+static constexpr wgpu::BufferUsage kMappableBufferUsages =
+    wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite;
 
-    class BufferBase : public ApiObjectBase {
-      public:
-        enum class BufferState {
-            Unmapped,
-            Mapped,
-            MappedAtCreation,
-            Destroyed,
-        };
-        BufferBase(DeviceBase* device, const BufferDescriptor* descriptor);
-
-        static BufferBase* MakeError(DeviceBase* device, const BufferDescriptor* descriptor);
-
-        ObjectType GetType() const override;
-
-        uint64_t GetSize() const;
-        uint64_t GetAllocatedSize() const;
-
-        // |GetUsageExternalOnly| returns the usage with which the buffer was created using the
-        // base WebGPU API. Additional usages may be added for internal state tracking. |GetUsage|
-        // returns the union of base usage and the usages added internally.
-        wgpu::BufferUsage GetUsage() const;
-        wgpu::BufferUsage GetUsageExternalOnly() const;
-
-        MaybeError MapAtCreation();
-        void OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
-
-        MaybeError ValidateCanUseOnQueueNow() const;
-
-        bool IsFullBufferRange(uint64_t offset, uint64_t size) const;
-        bool NeedsInitialization() const;
-        bool IsDataInitialized() const;
-        void SetIsDataInitialized();
-
-        void* GetMappedRange(size_t offset, size_t size, bool writable = true);
-        void Unmap();
-
-        // Dawn API
-        void APIMapAsync(wgpu::MapMode mode,
-                         size_t offset,
-                         size_t size,
-                         WGPUBufferMapCallback callback,
-                         void* userdata);
-        void* APIGetMappedRange(size_t offset, size_t size);
-        const void* APIGetConstMappedRange(size_t offset, size_t size);
-        void APIUnmap();
-        void APIDestroy();
-
-      protected:
-        BufferBase(DeviceBase* device,
-                   const BufferDescriptor* descriptor,
-                   ObjectBase::ErrorTag tag);
-
-        // Constructor used only for mocking and testing.
-        BufferBase(DeviceBase* device, BufferState state);
-        void DestroyImpl() override;
-
-        ~BufferBase() override;
-
-        MaybeError MapAtCreationInternal();
-
-        uint64_t mAllocatedSize = 0;
-
-      private:
-        virtual MaybeError MapAtCreationImpl() = 0;
-        virtual MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) = 0;
-        virtual void UnmapImpl() = 0;
-        virtual void* GetMappedPointerImpl() = 0;
-
-        virtual bool IsCPUWritableAtCreation() const = 0;
-        MaybeError CopyFromStagingBuffer();
-        void CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
-
-        MaybeError ValidateMapAsync(wgpu::MapMode mode,
-                                    size_t offset,
-                                    size_t size,
-                                    WGPUBufferMapAsyncStatus* status) const;
-        MaybeError ValidateUnmap() const;
-        bool CanGetMappedRange(bool writable, size_t offset, size_t size) const;
-        void UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus);
-
-        uint64_t mSize = 0;
-        wgpu::BufferUsage mUsage = wgpu::BufferUsage::None;
-        BufferState mState;
-        bool mIsDataInitialized = false;
-
-        std::unique_ptr<StagingBufferBase> mStagingBuffer;
-
-        WGPUBufferMapCallback mMapCallback = nullptr;
-        void* mMapUserdata = 0;
-        MapRequestID mLastMapID = MapRequestID(0);
-        wgpu::MapMode mMapMode = wgpu::MapMode::None;
-        size_t mMapOffset = 0;
-        size_t mMapSize = 0;
+class BufferBase : public ApiObjectBase {
+  public:
+    enum class BufferState {
+        Unmapped,
+        Mapped,
+        MappedAtCreation,
+        Destroyed,
     };
+    BufferBase(DeviceBase* device, const BufferDescriptor* descriptor);
+
+    static BufferBase* MakeError(DeviceBase* device, const BufferDescriptor* descriptor);
+
+    ObjectType GetType() const override;
+
+    uint64_t GetSize() const;
+    uint64_t GetAllocatedSize() const;
+
+    // |GetUsageExternalOnly| returns the usage with which the buffer was created using the
+    // base WebGPU API. Additional usages may be added for internal state tracking. |GetUsage|
+    // returns the union of base usage and the usages added internally.
+    wgpu::BufferUsage GetUsage() const;
+    wgpu::BufferUsage GetUsageExternalOnly() const;
+
+    MaybeError MapAtCreation();
+    void OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
+
+    MaybeError ValidateCanUseOnQueueNow() const;
+
+    bool IsFullBufferRange(uint64_t offset, uint64_t size) const;
+    bool NeedsInitialization() const;
+    bool IsDataInitialized() const;
+    void SetIsDataInitialized();
+
+    void* GetMappedRange(size_t offset, size_t size, bool writable = true);
+    void Unmap();
+
+    // Dawn API
+    void APIMapAsync(wgpu::MapMode mode,
+                     size_t offset,
+                     size_t size,
+                     WGPUBufferMapCallback callback,
+                     void* userdata);
+    void* APIGetMappedRange(size_t offset, size_t size);
+    const void* APIGetConstMappedRange(size_t offset, size_t size);
+    void APIUnmap();
+    void APIDestroy();
+
+  protected:
+    BufferBase(DeviceBase* device, const BufferDescriptor* descriptor, ObjectBase::ErrorTag tag);
+
+    // Constructor used only for mocking and testing.
+    BufferBase(DeviceBase* device, BufferState state);
+    void DestroyImpl() override;
+
+    ~BufferBase() override;
+
+    MaybeError MapAtCreationInternal();
+
+    uint64_t mAllocatedSize = 0;
+
+  private:
+    virtual MaybeError MapAtCreationImpl() = 0;
+    virtual MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) = 0;
+    virtual void UnmapImpl() = 0;
+    virtual void* GetMappedPointerImpl() = 0;
+
+    virtual bool IsCPUWritableAtCreation() const = 0;
+    MaybeError CopyFromStagingBuffer();
+    void CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
+
+    MaybeError ValidateMapAsync(wgpu::MapMode mode,
+                                size_t offset,
+                                size_t size,
+                                WGPUBufferMapAsyncStatus* status) const;
+    MaybeError ValidateUnmap() const;
+    bool CanGetMappedRange(bool writable, size_t offset, size_t size) const;
+    void UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus);
+
+    uint64_t mSize = 0;
+    wgpu::BufferUsage mUsage = wgpu::BufferUsage::None;
+    BufferState mState;
+    bool mIsDataInitialized = false;
+
+    std::unique_ptr<StagingBufferBase> mStagingBuffer;
+
+    WGPUBufferMapCallback mMapCallback = nullptr;
+    void* mMapUserdata = 0;
+    MapRequestID mLastMapID = MapRequestID(0);
+    wgpu::MapMode mMapMode = wgpu::MapMode::None;
+    size_t mMapOffset = 0;
+    size_t mMapSize = 0;
+};
 
 }  // namespace dawn::native
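
For context only, not part of this CL: a minimal sketch of the mapping flow that the
BufferBase entry points above implement. StartRead, OnMapped, and the 256-byte range are
hypothetical; the types and methods are the ones declared in the class.

    // Hypothetical driver of the mapping entry points (sketch only). Assumes
    // `buffer` was created with wgpu::BufferUsage::MapRead and outlives the
    // request; error handling is elided.
    void OnMapped(WGPUBufferMapAsyncStatus status, void* userdata) {
        if (status != WGPUBufferMapAsyncStatus_Success) {
            return;  // The request failed or the buffer was destroyed first.
        }
        BufferBase* buffer = static_cast<BufferBase*>(userdata);
        // Read-only view of the mapped bytes, valid until APIUnmap/APIDestroy.
        const void* data = buffer->APIGetConstMappedRange(0, 256);
        // ... consume the 256 bytes at `data` ...
        buffer->APIUnmap();  // BufferState::Mapped -> BufferState::Unmapped.
    }

    void StartRead(BufferBase* buffer) {
        // The callback runs once the map request completes (or is shut down).
        buffer->APIMapAsync(wgpu::MapMode::Read, 0, 256, OnMapped, buffer);
    }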
 
diff --git a/src/dawn/native/CacheKey.cpp b/src/dawn/native/CacheKey.cpp
index dea67f8..495b013 100644
--- a/src/dawn/native/CacheKey.cpp
+++ b/src/dawn/native/CacheKey.cpp
@@ -18,26 +18,26 @@
 
 namespace dawn::native {
 
-    std::ostream& operator<<(std::ostream& os, const CacheKey& key) {
-        os << std::hex;
-        for (const int b : key) {
-            os << std::setfill('0') << std::setw(2) << b << " ";
-        }
-        os << std::dec;
-        return os;
+std::ostream& operator<<(std::ostream& os, const CacheKey& key) {
+    os << std::hex;
+    for (const int b : key) {
+        os << std::setfill('0') << std::setw(2) << b << " ";
     }
+    os << std::dec;
+    return os;
+}
 
-    template <>
-    void CacheKeySerializer<std::string>::Serialize(CacheKey* key, const std::string& t) {
-        key->Record(static_cast<size_t>(t.length()));
-        key->insert(key->end(), t.begin(), t.end());
-    }
+template <>
+void CacheKeySerializer<std::string>::Serialize(CacheKey* key, const std::string& t) {
+    key->Record(static_cast<size_t>(t.length()));
+    key->insert(key->end(), t.begin(), t.end());
+}
 
-    template <>
-    void CacheKeySerializer<CacheKey>::Serialize(CacheKey* key, const CacheKey& t) {
-        // For nested cache keys, we do not record the length, and just copy the key so that it
-        // appears we just flatten the keys into a single key.
-        key->insert(key->end(), t.begin(), t.end());
-    }
+template <>
+void CacheKeySerializer<CacheKey>::Serialize(CacheKey* key, const CacheKey& t) {
+    // For nested cache keys, we do not record the length; we just copy the contents so that
+    // nested keys are flattened into a single key.
+    key->insert(key->end(), t.begin(), t.end());
+}
 
 }  // namespace dawn::native
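
As an aside, not part of this CL: the debugging operator above prints each byte as two
zero-padded hex digits. A hypothetical sketch, assuming <iostream> is available:

    // Hypothetical: dump a CacheKey while debugging. A key holding the bytes
    // {0x0a, 0xff, 0x01} prints as "0a ff 01 " (note the trailing space).
    void DumpKey(const CacheKey& key) {
        std::cerr << "CacheKey: " << key << "\n";
    }
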
diff --git a/src/dawn/native/CacheKey.h b/src/dawn/native/CacheKey.h
index 8b920b5..357ce4b 100644
--- a/src/dawn/native/CacheKey.h
+++ b/src/dawn/native/CacheKey.h
@@ -27,179 +27,175 @@
 
 namespace dawn::native {
 
-    // Forward declare classes because of co-dependency.
-    class CacheKey;
-    class CachedObject;
+// Forward declare classes because of co-dependency.
+class CacheKey;
+class CachedObject;
 
-    // Stream operator for CacheKey for debugging.
-    std::ostream& operator<<(std::ostream& os, const CacheKey& key);
+// Stream operator for CacheKey for debugging.
+std::ostream& operator<<(std::ostream& os, const CacheKey& key);
 
-    // Overridable serializer struct that should be implemented for cache key serializable
-    // types/classes.
-    template <typename T, typename SFINAE = void>
-    class CacheKeySerializer {
-      public:
-        static void Serialize(CacheKey* key, const T& t);
-    };
+// Overridable serializer class that should be specialized for cache-key-serializable
+// types/classes.
+template <typename T, typename SFINAE = void>
+class CacheKeySerializer {
+  public:
+    static void Serialize(CacheKey* key, const T& t);
+};
 
-    class CacheKey : public std::vector<uint8_t> {
-      public:
-        using std::vector<uint8_t>::vector;
+class CacheKey : public std::vector<uint8_t> {
+  public:
+    using std::vector<uint8_t>::vector;
 
-        enum class Type { ComputePipeline, RenderPipeline, Shader };
+    enum class Type { ComputePipeline, RenderPipeline, Shader };
 
-        template <typename T>
-        CacheKey& Record(const T& t) {
-            CacheKeySerializer<T>::Serialize(this, t);
-            return *this;
-        }
-        template <typename T, typename... Args>
-        CacheKey& Record(const T& t, const Args&... args) {
-            CacheKeySerializer<T>::Serialize(this, t);
-            return Record(args...);
-        }
-
-        // Records iterables by prepending the number of elements. Some common iterables are have a
-        // CacheKeySerializer implemented to avoid needing to split them out when recording, i.e.
-        // strings and CacheKeys, but they fundamentally do the same as this function.
-        template <typename IterableT>
-        CacheKey& RecordIterable(const IterableT& iterable) {
-            // Always record the size of generic iterables as a size_t for now.
-            Record(static_cast<size_t>(iterable.size()));
-            for (auto it = iterable.begin(); it != iterable.end(); ++it) {
-                Record(*it);
-            }
-            return *this;
-        }
-        template <typename Index, typename Value, size_t Size>
-        CacheKey& RecordIterable(const ityp::array<Index, Value, Size>& iterable) {
-            Record(static_cast<Index>(iterable.size()));
-            for (auto it = iterable.begin(); it != iterable.end(); ++it) {
-                Record(*it);
-            }
-            return *this;
-        }
-        template <typename Ptr>
-        CacheKey& RecordIterable(const Ptr* ptr, size_t n) {
-            Record(n);
-            for (size_t i = 0; i < n; ++i) {
-                Record(ptr[i]);
-            }
-            return *this;
-        }
-    };
-
-    // Specialized overload for fundamental types.
     template <typename T>
-    class CacheKeySerializer<T, std::enable_if_t<std::is_fundamental_v<T>>> {
-      public:
-        static void Serialize(CacheKey* key, const T t) {
-            const char* it = reinterpret_cast<const char*>(&t);
-            key->insert(key->end(), it, (it + sizeof(T)));
-        }
-    };
+    CacheKey& Record(const T& t) {
+        CacheKeySerializer<T>::Serialize(this, t);
+        return *this;
+    }
+    template <typename T, typename... Args>
+    CacheKey& Record(const T& t, const Args&... args) {
+        CacheKeySerializer<T>::Serialize(this, t);
+        return Record(args...);
+    }
 
-    // Specialized overload for bitsets that are smaller than 64.
-    template <size_t N>
-    class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N <= 64)>> {
-      public:
-        static void Serialize(CacheKey* key, const std::bitset<N>& t) {
-            key->Record(t.to_ullong());
+    // Records iterables by prepending the number of elements. Some common iterables have a
+    // CacheKeySerializer implemented to avoid needing to split them out when recording, e.g.
+    // strings and CacheKeys, but they fundamentally do the same thing as this function.
+    template <typename IterableT>
+    CacheKey& RecordIterable(const IterableT& iterable) {
+        // Always record the size of generic iterables as a size_t for now.
+        Record(static_cast<size_t>(iterable.size()));
+        for (auto it = iterable.begin(); it != iterable.end(); ++it) {
+            Record(*it);
         }
-    };
+        return *this;
+    }
+    template <typename Index, typename Value, size_t Size>
+    CacheKey& RecordIterable(const ityp::array<Index, Value, Size>& iterable) {
+        Record(static_cast<Index>(iterable.size()));
+        for (auto it = iterable.begin(); it != iterable.end(); ++it) {
+            Record(*it);
+        }
+        return *this;
+    }
+    template <typename Ptr>
+    CacheKey& RecordIterable(const Ptr* ptr, size_t n) {
+        Record(n);
+        for (size_t i = 0; i < n; ++i) {
+            Record(ptr[i]);
+        }
+        return *this;
+    }
+};
 
-    // Specialized overload for bitsets since using the built-in to_ullong have a size limit.
-    template <size_t N>
-    class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N > 64)>> {
-      public:
-        static void Serialize(CacheKey* key, const std::bitset<N>& t) {
-            // Serializes the bitset into series of uint8_t, along with recording the size.
-            static_assert(N > 0);
-            key->Record(static_cast<size_t>(N));
-            uint8_t value = 0;
-            for (size_t i = 0; i < N; i++) {
-                value <<= 1;
-                // Explicitly convert to numeric since MSVC doesn't like mixing of bools.
-                value |= t[i] ? 1 : 0;
-                if (i % 8 == 7) {
-                    // Whenever we fill an 8 bit value, record it and zero it out.
-                    key->Record(value);
-                    value = 0;
-                }
-            }
-            // Serialize the last value if we are not a multiple of 8.
-            if (N % 8 != 0) {
+// Specialized overload for fundamental types.
+template <typename T>
+class CacheKeySerializer<T, std::enable_if_t<std::is_fundamental_v<T>>> {
+  public:
+    static void Serialize(CacheKey* key, const T t) {
+        const char* it = reinterpret_cast<const char*>(&t);
+        key->insert(key->end(), it, (it + sizeof(T)));
+    }
+};
+
+// Specialized overload for bitsets that are 64 bits or smaller.
+template <size_t N>
+class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N <= 64)>> {
+  public:
+    static void Serialize(CacheKey* key, const std::bitset<N>& t) { key->Record(t.to_ullong()); }
+};
+
+// Specialized overload for larger bitsets, since the built-in to_ullong has a size limit.
+template <size_t N>
+class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N > 64)>> {
+  public:
+    static void Serialize(CacheKey* key, const std::bitset<N>& t) {
+        // Serializes the bitset into a series of uint8_t, along with recording the size.
+        static_assert(N > 0);
+        key->Record(static_cast<size_t>(N));
+        uint8_t value = 0;
+        for (size_t i = 0; i < N; i++) {
+            value <<= 1;
+            // Explicitly convert to numeric since MSVC doesn't like mixing of bools.
+            value |= t[i] ? 1 : 0;
+            if (i % 8 == 7) {
+                // Whenever we fill an 8 bit value, record it and zero it out.
                 key->Record(value);
+                value = 0;
             }
         }
-    };
-
-    // Specialized overload for enums.
-    template <typename T>
-    class CacheKeySerializer<T, std::enable_if_t<std::is_enum_v<T>>> {
-      public:
-        static void Serialize(CacheKey* key, const T t) {
-            CacheKeySerializer<std::underlying_type_t<T>>::Serialize(
-                key, static_cast<std::underlying_type_t<T>>(t));
+        // Serialize the last (partial) value if N is not a multiple of 8.
+        if (N % 8 != 0) {
+            key->Record(value);
         }
-    };
+    }
+};
 
-    // Specialized overload for TypedInteger.
-    template <typename Tag, typename Integer>
-    class CacheKeySerializer<::detail::TypedIntegerImpl<Tag, Integer>> {
-      public:
-        static void Serialize(CacheKey* key, const ::detail::TypedIntegerImpl<Tag, Integer> t) {
-            CacheKeySerializer<Integer>::Serialize(key, static_cast<Integer>(t));
-        }
-    };
+// Specialized overload for enums.
+template <typename T>
+class CacheKeySerializer<T, std::enable_if_t<std::is_enum_v<T>>> {
+  public:
+    static void Serialize(CacheKey* key, const T t) {
+        CacheKeySerializer<std::underlying_type_t<T>>::Serialize(
+            key, static_cast<std::underlying_type_t<T>>(t));
+    }
+};
 
-    // Specialized overload for pointers. Since we are serializing for a cache key, we always
-    // serialize via value, not by pointer. To handle nullptr scenarios, we always serialize whether
-    // the pointer was nullptr followed by the contents if applicable.
-    template <typename T>
-    class CacheKeySerializer<T, std::enable_if_t<std::is_pointer_v<T>>> {
-      public:
-        static void Serialize(CacheKey* key, const T t) {
-            key->Record(t == nullptr);
-            if (t != nullptr) {
-                CacheKeySerializer<std::remove_cv_t<std::remove_pointer_t<T>>>::Serialize(key, *t);
-            }
-        }
-    };
+// Specialized overload for TypedInteger.
+template <typename Tag, typename Integer>
+class CacheKeySerializer<::detail::TypedIntegerImpl<Tag, Integer>> {
+  public:
+    static void Serialize(CacheKey* key, const ::detail::TypedIntegerImpl<Tag, Integer> t) {
+        CacheKeySerializer<Integer>::Serialize(key, static_cast<Integer>(t));
+    }
+};
 
-    // Specialized overload for fixed arrays of primitives.
-    template <typename T, size_t N>
-    class CacheKeySerializer<T[N], std::enable_if_t<std::is_fundamental_v<T>>> {
-      public:
-        static void Serialize(CacheKey* key, const T (&t)[N]) {
-            static_assert(N > 0);
-            key->Record(static_cast<size_t>(N));
-            const char* it = reinterpret_cast<const char*>(t);
-            key->insert(key->end(), it, it + sizeof(t));
+// Specialized overload for pointers. Since we are serializing for a cache key, we always
+// serialize by value, not by pointer. To handle nullptr, we first serialize whether the
+// pointer was nullptr, followed by the contents if applicable.
+template <typename T>
+class CacheKeySerializer<T, std::enable_if_t<std::is_pointer_v<T>>> {
+  public:
+    static void Serialize(CacheKey* key, const T t) {
+        key->Record(t == nullptr);
+        if (t != nullptr) {
+            CacheKeySerializer<std::remove_cv_t<std::remove_pointer_t<T>>>::Serialize(key, *t);
         }
-    };
+    }
+};
 
-    // Specialized overload for fixed arrays of non-primitives.
-    template <typename T, size_t N>
-    class CacheKeySerializer<T[N], std::enable_if_t<!std::is_fundamental_v<T>>> {
-      public:
-        static void Serialize(CacheKey* key, const T (&t)[N]) {
-            static_assert(N > 0);
-            key->Record(static_cast<size_t>(N));
-            for (size_t i = 0; i < N; i++) {
-                key->Record(t[i]);
-            }
-        }
-    };
+// Specialized overload for fixed arrays of primitives.
+template <typename T, size_t N>
+class CacheKeySerializer<T[N], std::enable_if_t<std::is_fundamental_v<T>>> {
+  public:
+    static void Serialize(CacheKey* key, const T (&t)[N]) {
+        static_assert(N > 0);
+        key->Record(static_cast<size_t>(N));
+        const char* it = reinterpret_cast<const char*>(t);
+        key->insert(key->end(), it, it + sizeof(t));
+    }
+};
 
-    // Specialized overload for CachedObjects.
-    template <typename T>
-    class CacheKeySerializer<T, std::enable_if_t<std::is_base_of_v<CachedObject, T>>> {
-      public:
-        static void Serialize(CacheKey* key, const T& t) {
-            key->Record(t.GetCacheKey());
+// Specialized overload for fixed arrays of non-primitives.
+template <typename T, size_t N>
+class CacheKeySerializer<T[N], std::enable_if_t<!std::is_fundamental_v<T>>> {
+  public:
+    static void Serialize(CacheKey* key, const T (&t)[N]) {
+        static_assert(N > 0);
+        key->Record(static_cast<size_t>(N));
+        for (size_t i = 0; i < N; i++) {
+            key->Record(t[i]);
         }
-    };
+    }
+};
+
+// Specialized overload for CachedObjects.
+template <typename T>
+class CacheKeySerializer<T, std::enable_if_t<std::is_base_of_v<CachedObject, T>>> {
+  public:
+    static void Serialize(CacheKey* key, const T& t) { key->Record(t.GetCacheKey()); }
+};
 
 }  // namespace dawn::native
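
For illustration only, not part of this CL: a sketch of how the Record overloads above
compose. MakeExampleKey and the recorded values are hypothetical; <bitset>, <string>, and
<vector> are assumed to be included.

    // Hypothetical composition of the serializers declared above.
    CacheKey MakeExampleKey() {
        CacheKey key;
        // Fundamental types are copied in as raw bytes; enums are recorded via
        // their underlying type.
        key.Record(uint32_t(42), 3.0f);
        // Strings record their length, then their characters.
        key.Record(std::string("main"));
        // A bitset with N > 64 records N, then ceil(N / 8) packed uint8_t
        // values; e.g. a bitset<72> contributes a size_t plus nine bytes.
        key.Record(std::bitset<72>());
        // Generic iterables record their element count, then each element.
        std::vector<uint32_t> sizes = {1, 2, 3};
        key.RecordIterable(sizes);
        return key;
    }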
 
diff --git a/src/dawn/native/CachedObject.cpp b/src/dawn/native/CachedObject.cpp
index e7e7cd8..26c58b9 100644
--- a/src/dawn/native/CachedObject.cpp
+++ b/src/dawn/native/CachedObject.cpp
@@ -19,35 +19,35 @@
 
 namespace dawn::native {
 
-    bool CachedObject::IsCachedReference() const {
-        return mIsCachedReference;
-    }
+bool CachedObject::IsCachedReference() const {
+    return mIsCachedReference;
+}
 
-    void CachedObject::SetIsCachedReference() {
-        mIsCachedReference = true;
-    }
+void CachedObject::SetIsCachedReference() {
+    mIsCachedReference = true;
+}
 
-    size_t CachedObject::HashFunc::operator()(const CachedObject* obj) const {
-        return obj->GetContentHash();
-    }
+size_t CachedObject::HashFunc::operator()(const CachedObject* obj) const {
+    return obj->GetContentHash();
+}
 
-    size_t CachedObject::GetContentHash() const {
-        ASSERT(mIsContentHashInitialized);
-        return mContentHash;
-    }
+size_t CachedObject::GetContentHash() const {
+    ASSERT(mIsContentHashInitialized);
+    return mContentHash;
+}
 
-    void CachedObject::SetContentHash(size_t contentHash) {
-        ASSERT(!mIsContentHashInitialized);
-        mContentHash = contentHash;
-        mIsContentHashInitialized = true;
-    }
+void CachedObject::SetContentHash(size_t contentHash) {
+    ASSERT(!mIsContentHashInitialized);
+    mContentHash = contentHash;
+    mIsContentHashInitialized = true;
+}
 
-    const CacheKey& CachedObject::GetCacheKey() const {
-        return mCacheKey;
-    }
+const CacheKey& CachedObject::GetCacheKey() const {
+    return mCacheKey;
+}
 
-    CacheKey* CachedObject::GetCacheKey() {
-        return &mCacheKey;
-    }
+CacheKey* CachedObject::GetCacheKey() {
+    return &mCacheKey;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/CachedObject.h b/src/dawn/native/CachedObject.h
index f275e02..6fda516 100644
--- a/src/dawn/native/CachedObject.h
+++ b/src/dawn/native/CachedObject.h
@@ -23,43 +23,43 @@
 
 namespace dawn::native {
 
-    // Some objects are cached so that instead of creating new duplicate objects,
-    // we increase the refcount of an existing object.
-    // When an object is successfully created, the device should call
-    // SetIsCachedReference() and insert the object into the cache.
-    class CachedObject {
-      public:
-        bool IsCachedReference() const;
+// Some objects are cached so that instead of creating new duplicate objects,
+// we increase the refcount of an existing object.
+// When an object is successfully created, the device should call
+// SetIsCachedReference() and insert the object into the cache.
+class CachedObject {
+  public:
+    bool IsCachedReference() const;
 
-        // Functor necessary for the unordered_set<CachedObject*>-based cache.
-        struct HashFunc {
-            size_t operator()(const CachedObject* obj) const;
-        };
-
-        size_t GetContentHash() const;
-        void SetContentHash(size_t contentHash);
-
-        // Returns the cache key for the object only, i.e. without device/adapter information.
-        const CacheKey& GetCacheKey() const;
-
-      protected:
-        // Protected accessor for derived classes to access and modify the key.
-        CacheKey* GetCacheKey();
-
-      private:
-        friend class DeviceBase;
-        void SetIsCachedReference();
-
-        bool mIsCachedReference = false;
-
-        // Called by ObjectContentHasher upon creation to record the object.
-        virtual size_t ComputeContentHash() = 0;
-
-        size_t mContentHash = 0;
-        bool mIsContentHashInitialized = false;
-        CacheKey mCacheKey;
+    // Functor necessary for the unordered_set<CachedObject*>-based cache.
+    struct HashFunc {
+        size_t operator()(const CachedObject* obj) const;
     };
 
+    size_t GetContentHash() const;
+    void SetContentHash(size_t contentHash);
+
+    // Returns the cache key for the object only, i.e. without device/adapter information.
+    const CacheKey& GetCacheKey() const;
+
+  protected:
+    // Protected accessor for derived classes to access and modify the key.
+    CacheKey* GetCacheKey();
+
+  private:
+    friend class DeviceBase;
+    void SetIsCachedReference();
+
+    bool mIsCachedReference = false;
+
+    // Called by ObjectContentHasher upon creation to record the object.
+    virtual size_t ComputeContentHash() = 0;
+
+    size_t mContentHash = 0;
+    bool mIsContentHashInitialized = false;
+    CacheKey mCacheKey;
+};
+
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_CACHEDOBJECT_H_
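
For context only, not part of this CL: a hypothetical sketch of the pattern CachedObject
enables. FakeSampler and its single field are illustrative; <functional> is assumed.

    // Hypothetical cached object. The device computes the content hash once,
    // looks the object up in an unordered_set keyed by HashFunc, and on a hit
    // drops the duplicate and references the existing entry instead.
    class FakeSampler : public CachedObject {
      public:
        explicit FakeSampler(uint32_t filter) : mFilter(filter) {}

      private:
        size_t ComputeContentHash() override {
            // A real override combines every field that defines equality.
            return std::hash<uint32_t>()(mFilter);
        }
        uint32_t mFilter;
    };
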
diff --git a/src/dawn/native/CallbackTaskManager.cpp b/src/dawn/native/CallbackTaskManager.cpp
index d9589a7..40146f8 100644
--- a/src/dawn/native/CallbackTaskManager.cpp
+++ b/src/dawn/native/CallbackTaskManager.cpp
@@ -18,22 +18,22 @@
 
 namespace dawn::native {
 
-    bool CallbackTaskManager::IsEmpty() {
-        std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
-        return mCallbackTaskQueue.empty();
-    }
+bool CallbackTaskManager::IsEmpty() {
+    std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+    return mCallbackTaskQueue.empty();
+}
 
-    std::vector<std::unique_ptr<CallbackTask>> CallbackTaskManager::AcquireCallbackTasks() {
-        std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+std::vector<std::unique_ptr<CallbackTask>> CallbackTaskManager::AcquireCallbackTasks() {
+    std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
 
-        std::vector<std::unique_ptr<CallbackTask>> allTasks;
-        allTasks.swap(mCallbackTaskQueue);
-        return allTasks;
-    }
+    std::vector<std::unique_ptr<CallbackTask>> allTasks;
+    allTasks.swap(mCallbackTaskQueue);
+    return allTasks;
+}
 
-    void CallbackTaskManager::AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask) {
-        std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
-        mCallbackTaskQueue.push_back(std::move(callbackTask));
-    }
+void CallbackTaskManager::AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask) {
+    std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+    mCallbackTaskQueue.push_back(std::move(callbackTask));
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/CallbackTaskManager.h b/src/dawn/native/CallbackTaskManager.h
index 0a4253a..479ba01 100644
--- a/src/dawn/native/CallbackTaskManager.h
+++ b/src/dawn/native/CallbackTaskManager.h
@@ -21,24 +21,24 @@
 
 namespace dawn::native {
 
-    struct CallbackTask {
-      public:
-        virtual ~CallbackTask() = default;
-        virtual void Finish() = 0;
-        virtual void HandleShutDown() = 0;
-        virtual void HandleDeviceLoss() = 0;
-    };
+struct CallbackTask {
+  public:
+    virtual ~CallbackTask() = default;
+    virtual void Finish() = 0;
+    virtual void HandleShutDown() = 0;
+    virtual void HandleDeviceLoss() = 0;
+};
 
-    class CallbackTaskManager {
-      public:
-        void AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask);
-        bool IsEmpty();
-        std::vector<std::unique_ptr<CallbackTask>> AcquireCallbackTasks();
+class CallbackTaskManager {
+  public:
+    void AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask);
+    bool IsEmpty();
+    std::vector<std::unique_ptr<CallbackTask>> AcquireCallbackTasks();
 
-      private:
-        std::mutex mCallbackTaskQueueMutex;
-        std::vector<std::unique_ptr<CallbackTask>> mCallbackTaskQueue;
-    };
+  private:
+    std::mutex mCallbackTaskQueueMutex;
+    std::vector<std::unique_ptr<CallbackTask>> mCallbackTaskQueue;
+};
 
 }  // namespace dawn::native
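
For illustration only, not part of this CL: a hypothetical task plus the drain loop a
device tick might run. Only AcquireCallbackTasks() holds the queue lock; the callbacks
themselves run outside it.

    // Hypothetical task: one deferred user callback with three completion paths.
    struct LoggingTask : CallbackTask {
        void Finish() override { /* deliver the success callback */ }
        void HandleShutDown() override { /* deliver a shutdown error status */ }
        void HandleDeviceLoss() override { /* deliver a device-lost status */ }
    };

    // Hypothetical drain: swap the whole queue out under the lock, then run
    // the callbacks lock-free.
    void FlushCallbacks(CallbackTaskManager* manager) {
        for (auto& task : manager->AcquireCallbackTasks()) {
            task->Finish();
        }
    }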
 
diff --git a/src/dawn/native/CommandAllocator.cpp b/src/dawn/native/CommandAllocator.cpp
index 587e1af..7f1c022 100644
--- a/src/dawn/native/CommandAllocator.cpp
+++ b/src/dawn/native/CommandAllocator.cpp
@@ -24,205 +24,203 @@
 
 namespace dawn::native {
 
-    // TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator
+// TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator
 
-    CommandIterator::CommandIterator() {
-        Reset();
+CommandIterator::CommandIterator() {
+    Reset();
+}
+
+CommandIterator::~CommandIterator() {
+    ASSERT(IsEmpty());
+}
+
+CommandIterator::CommandIterator(CommandIterator&& other) {
+    if (!other.IsEmpty()) {
+        mBlocks = std::move(other.mBlocks);
+        other.Reset();
     }
+    Reset();
+}
 
-    CommandIterator::~CommandIterator() {
-        ASSERT(IsEmpty());
+CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
+    ASSERT(IsEmpty());
+    if (!other.IsEmpty()) {
+        mBlocks = std::move(other.mBlocks);
+        other.Reset();
     }
+    Reset();
+    return *this;
+}
 
-    CommandIterator::CommandIterator(CommandIterator&& other) {
-        if (!other.IsEmpty()) {
-            mBlocks = std::move(other.mBlocks);
-            other.Reset();
-        }
-        Reset();
-    }
+CommandIterator::CommandIterator(CommandAllocator allocator) : mBlocks(allocator.AcquireBlocks()) {
+    Reset();
+}
 
-    CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
-        ASSERT(IsEmpty());
-        if (!other.IsEmpty()) {
-            mBlocks = std::move(other.mBlocks);
-            other.Reset();
-        }
-        Reset();
-        return *this;
-    }
-
-    CommandIterator::CommandIterator(CommandAllocator allocator)
-        : mBlocks(allocator.AcquireBlocks()) {
-        Reset();
-    }
-
-    void CommandIterator::AcquireCommandBlocks(std::vector<CommandAllocator> allocators) {
-        ASSERT(IsEmpty());
-        mBlocks.clear();
-        for (CommandAllocator& allocator : allocators) {
-            CommandBlocks blocks = allocator.AcquireBlocks();
-            if (!blocks.empty()) {
-                mBlocks.reserve(mBlocks.size() + blocks.size());
-                for (BlockDef& block : blocks) {
-                    mBlocks.push_back(std::move(block));
-                }
+void CommandIterator::AcquireCommandBlocks(std::vector<CommandAllocator> allocators) {
+    ASSERT(IsEmpty());
+    mBlocks.clear();
+    for (CommandAllocator& allocator : allocators) {
+        CommandBlocks blocks = allocator.AcquireBlocks();
+        if (!blocks.empty()) {
+            mBlocks.reserve(mBlocks.size() + blocks.size());
+            for (BlockDef& block : blocks) {
+                mBlocks.push_back(std::move(block));
             }
         }
+    }
+    Reset();
+}
+
+bool CommandIterator::NextCommandIdInNewBlock(uint32_t* commandId) {
+    mCurrentBlock++;
+    if (mCurrentBlock >= mBlocks.size()) {
         Reset();
+        *commandId = detail::kEndOfBlock;
+        return false;
+    }
+    mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block, alignof(uint32_t));
+    return NextCommandId(commandId);
+}
+
+void CommandIterator::Reset() {
+    mCurrentBlock = 0;
+
+    if (mBlocks.empty()) {
+        // This will cause the first NextCommandId call to try to move to the next block and
+        // stop the iteration immediately, without special-casing the initialization.
+        mCurrentPtr = reinterpret_cast<uint8_t*>(&mEndOfBlock);
+        mBlocks.emplace_back();
+        mBlocks[0].size = sizeof(mEndOfBlock);
+        mBlocks[0].block = mCurrentPtr;
+    } else {
+        mCurrentPtr = AlignPtr(mBlocks[0].block, alignof(uint32_t));
+    }
+}
+
+void CommandIterator::MakeEmptyAsDataWasDestroyed() {
+    if (IsEmpty()) {
+        return;
     }
 
-    bool CommandIterator::NextCommandIdInNewBlock(uint32_t* commandId) {
-        mCurrentBlock++;
-        if (mCurrentBlock >= mBlocks.size()) {
-            Reset();
-            *commandId = detail::kEndOfBlock;
-            return false;
-        }
-        mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block, alignof(uint32_t));
-        return NextCommandId(commandId);
+    for (BlockDef& block : mBlocks) {
+        free(block.block);
     }
+    mBlocks.clear();
+    Reset();
+    ASSERT(IsEmpty());
+}
 
-    void CommandIterator::Reset() {
-        mCurrentBlock = 0;
+bool CommandIterator::IsEmpty() const {
+    return mBlocks[0].block == reinterpret_cast<const uint8_t*>(&mEndOfBlock);
+}
 
-        if (mBlocks.empty()) {
-            // This will case the first NextCommandId call to try to move to the next block and stop
-            // the iteration immediately, without special casing the initialization.
-            mCurrentPtr = reinterpret_cast<uint8_t*>(&mEndOfBlock);
-            mBlocks.emplace_back();
-            mBlocks[0].size = sizeof(mEndOfBlock);
-            mBlocks[0].block = mCurrentPtr;
-        } else {
-            mCurrentPtr = AlignPtr(mBlocks[0].block, alignof(uint32_t));
-        }
-    }
+// Potential TODO(crbug.com/dawn/835):
+//  - Host the size and the pointer to the next block in the block itself to avoid having an
+//    allocation in the vector
+//  - Assume T's alignof is, say, 64 bits, static_assert it, and make commandAlignment a constant
+//    in Allocate
+//  - Be able to optimize allocation to one block, for command buffers expected to live long to
+//    avoid cache misses
+//  - Better block allocation, maybe have Dawn API to say command buffer is going to have size
+//    close to another
 
-    void CommandIterator::MakeEmptyAsDataWasDestroyed() {
-        if (IsEmpty()) {
-            return;
-        }
+CommandAllocator::CommandAllocator() {
+    ResetPointers();
+}
 
-        for (BlockDef& block : mBlocks) {
-            free(block.block);
-        }
-        mBlocks.clear();
-        Reset();
-        ASSERT(IsEmpty());
-    }
+CommandAllocator::~CommandAllocator() {
+    Reset();
+}
 
-    bool CommandIterator::IsEmpty() const {
-        return mBlocks[0].block == reinterpret_cast<const uint8_t*>(&mEndOfBlock);
-    }
-
-    // Potential TODO(crbug.com/dawn/835):
-    //  - Host the size and pointer to next block in the block itself to avoid having an allocation
-    //    in the vector
-    //  - Assume T's alignof is, say 64bits, static assert it, and make commandAlignment a constant
-    //    in Allocate
-    //  - Be able to optimize allocation to one block, for command buffers expected to live long to
-    //    avoid cache misses
-    //  - Better block allocation, maybe have Dawn API to say command buffer is going to have size
-    //    close to another
-
-    CommandAllocator::CommandAllocator() {
+CommandAllocator::CommandAllocator(CommandAllocator&& other)
+    : mBlocks(std::move(other.mBlocks)), mLastAllocationSize(other.mLastAllocationSize) {
+    other.mBlocks.clear();
+    if (!other.IsEmpty()) {
+        mCurrentPtr = other.mCurrentPtr;
+        mEndPtr = other.mEndPtr;
+    } else {
         ResetPointers();
     }
+    other.Reset();
+}
 
-    CommandAllocator::~CommandAllocator() {
-        Reset();
+CommandAllocator& CommandAllocator::operator=(CommandAllocator&& other) {
+    Reset();
+    if (!other.IsEmpty()) {
+        std::swap(mBlocks, other.mBlocks);
+        mLastAllocationSize = other.mLastAllocationSize;
+        mCurrentPtr = other.mCurrentPtr;
+        mEndPtr = other.mEndPtr;
+    }
+    other.Reset();
+    return *this;
+}
+
+void CommandAllocator::Reset() {
+    for (BlockDef& block : mBlocks) {
+        free(block.block);
+    }
+    mBlocks.clear();
+    mLastAllocationSize = kDefaultBaseAllocationSize;
+    ResetPointers();
+}
+
+bool CommandAllocator::IsEmpty() const {
+    return mCurrentPtr == reinterpret_cast<const uint8_t*>(&mPlaceholderEnum[0]);
+}
+
+CommandBlocks&& CommandAllocator::AcquireBlocks() {
+    ASSERT(mCurrentPtr != nullptr && mEndPtr != nullptr);
+    ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
+    ASSERT(mCurrentPtr + sizeof(uint32_t) <= mEndPtr);
+    *reinterpret_cast<uint32_t*>(mCurrentPtr) = detail::kEndOfBlock;
+
+    mCurrentPtr = nullptr;
+    mEndPtr = nullptr;
+    return std::move(mBlocks);
+}
+
+uint8_t* CommandAllocator::AllocateInNewBlock(uint32_t commandId,
+                                              size_t commandSize,
+                                              size_t commandAlignment) {
+    // When there is not enough space, we signal the kEndOfBlock, so that the iterator knows
+    // to move to the next one. kEndOfBlock on the last block means the end of the commands.
+    uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
+    *idAlloc = detail::kEndOfBlock;
+
+    // We'll request a block that can contain at least the command ID, the command and an
+    // additional ID to contain the kEndOfBlock tag.
+    size_t requestedBlockSize = commandSize + kWorstCaseAdditionalSize;
+
+    // The computation of the request could overflow.
+    if (DAWN_UNLIKELY(requestedBlockSize <= commandSize)) {
+        return nullptr;
     }
 
-    CommandAllocator::CommandAllocator(CommandAllocator&& other)
-        : mBlocks(std::move(other.mBlocks)), mLastAllocationSize(other.mLastAllocationSize) {
-        other.mBlocks.clear();
-        if (!other.IsEmpty()) {
-            mCurrentPtr = other.mCurrentPtr;
-            mEndPtr = other.mEndPtr;
-        } else {
-            ResetPointers();
-        }
-        other.Reset();
+    if (DAWN_UNLIKELY(!GetNewBlock(requestedBlockSize))) {
+        return nullptr;
+    }
+    return Allocate(commandId, commandSize, commandAlignment);
+}
+
+bool CommandAllocator::GetNewBlock(size_t minimumSize) {
+    // Allocate blocks with doubling sizes, up to a maximum of 16k (or at least minimumSize).
+    mLastAllocationSize = std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));
+
+    uint8_t* block = static_cast<uint8_t*>(malloc(mLastAllocationSize));
+    if (DAWN_UNLIKELY(block == nullptr)) {
+        return false;
     }
 
-    CommandAllocator& CommandAllocator::operator=(CommandAllocator&& other) {
-        Reset();
-        if (!other.IsEmpty()) {
-            std::swap(mBlocks, other.mBlocks);
-            mLastAllocationSize = other.mLastAllocationSize;
-            mCurrentPtr = other.mCurrentPtr;
-            mEndPtr = other.mEndPtr;
-        }
-        other.Reset();
-        return *this;
-    }
+    mBlocks.push_back({mLastAllocationSize, block});
+    mCurrentPtr = AlignPtr(block, alignof(uint32_t));
+    mEndPtr = block + mLastAllocationSize;
+    return true;
+}
 
-    void CommandAllocator::Reset() {
-        for (BlockDef& block : mBlocks) {
-            free(block.block);
-        }
-        mBlocks.clear();
-        mLastAllocationSize = kDefaultBaseAllocationSize;
-        ResetPointers();
-    }
-
-    bool CommandAllocator::IsEmpty() const {
-        return mCurrentPtr == reinterpret_cast<const uint8_t*>(&mPlaceholderEnum[0]);
-    }
-
-    CommandBlocks&& CommandAllocator::AcquireBlocks() {
-        ASSERT(mCurrentPtr != nullptr && mEndPtr != nullptr);
-        ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
-        ASSERT(mCurrentPtr + sizeof(uint32_t) <= mEndPtr);
-        *reinterpret_cast<uint32_t*>(mCurrentPtr) = detail::kEndOfBlock;
-
-        mCurrentPtr = nullptr;
-        mEndPtr = nullptr;
-        return std::move(mBlocks);
-    }
-
-    uint8_t* CommandAllocator::AllocateInNewBlock(uint32_t commandId,
-                                                  size_t commandSize,
-                                                  size_t commandAlignment) {
-        // When there is not enough space, we signal the kEndOfBlock, so that the iterator knows
-        // to move to the next one. kEndOfBlock on the last block means the end of the commands.
-        uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
-        *idAlloc = detail::kEndOfBlock;
-
-        // We'll request a block that can contain at least the command ID, the command and an
-        // additional ID to contain the kEndOfBlock tag.
-        size_t requestedBlockSize = commandSize + kWorstCaseAdditionalSize;
-
-        // The computation of the request could overflow.
-        if (DAWN_UNLIKELY(requestedBlockSize <= commandSize)) {
-            return nullptr;
-        }
-
-        if (DAWN_UNLIKELY(!GetNewBlock(requestedBlockSize))) {
-            return nullptr;
-        }
-        return Allocate(commandId, commandSize, commandAlignment);
-    }
-
-    bool CommandAllocator::GetNewBlock(size_t minimumSize) {
-        // Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize).
-        mLastAllocationSize =
-            std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));
-
-        uint8_t* block = static_cast<uint8_t*>(malloc(mLastAllocationSize));
-        if (DAWN_UNLIKELY(block == nullptr)) {
-            return false;
-        }
-
-        mBlocks.push_back({mLastAllocationSize, block});
-        mCurrentPtr = AlignPtr(block, alignof(uint32_t));
-        mEndPtr = block + mLastAllocationSize;
-        return true;
-    }
-
-    void CommandAllocator::ResetPointers() {
-        mCurrentPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[0]);
-        mEndPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[1]);
-    }
+void CommandAllocator::ResetPointers() {
+    mCurrentPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[0]);
+    mEndPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[1]);
+}
 
 }  // namespace dawn::native
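
For illustration only, not part of this CL: a standalone model of the sizing policy in
GetNewBlock above. NextBlockSize is hypothetical; the real state lives in
mLastAllocationSize, and <algorithm> is assumed.

    // Hypothetical model of GetNewBlock's policy: double the previous block
    // size, cap the doubling at 16k, but never return less than the minimum.
    size_t NextBlockSize(size_t last, size_t minimumSize) {
        return std::max(minimumSize, std::min(last * 2, size_t(16384)));
    }
    // NextBlockSize(2048, 64)     == 4096
    // NextBlockSize(8192, 64)     == 16384
    // NextBlockSize(16384, 64)    == 16384  // capped
    // NextBlockSize(16384, 65536) == 65536  // an oversized request wins
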
diff --git a/src/dawn/native/CommandAllocator.h b/src/dawn/native/CommandAllocator.h
index b9d4d15..c3e999e 100644
--- a/src/dawn/native/CommandAllocator.h
+++ b/src/dawn/native/CommandAllocator.h
@@ -26,248 +26,246 @@
 
 namespace dawn::native {
 
-    // Allocation for command buffers should be fast. To avoid doing an allocation per command
-    // or to avoid copying commands when reallocing, we use a linear allocator in a growing set
-    // of large memory blocks. We also use this to have the format to be (u32 commandId, command),
-    // so that iteration over the commands is easy.
+// Allocation for command buffers should be fast. To avoid doing an allocation per command
+// or to avoid copying commands when reallocing, we use a linear allocator in a growing set
+// of large memory blocks. We also use this to have the format to be (u32 commandId, command),
+// so that iteration over the commands is easy.
 
-    // Usage of the allocator and iterator:
-    //     CommandAllocator allocator;
-    //     DrawCommand* cmd = allocator.Allocate<DrawCommand>(CommandType::Draw);
-    //     // Fill command
-    //     // Repeat allocation and filling commands
-    //
-    //     CommandIterator commands(allocator);
-    //     CommandType type;
-    //     while(commands.NextCommandId(&type)) {
-    //         switch(type) {
-    //              case CommandType::Draw:
-    //                  DrawCommand* draw = commands.NextCommand<DrawCommand>();
-    //                  // Do the draw
-    //                  break;
-    //              // other cases
-    //         }
-    //     }
+// Usage of the allocator and iterator:
+//     CommandAllocator allocator;
+//     DrawCommand* cmd = allocator.Allocate<DrawCommand>(CommandType::Draw);
+//     // Fill command
+//     // Repeat allocation and filling commands
+//
+//     CommandIterator commands(allocator);
+//     CommandType type;
+//     while(commands.NextCommandId(&type)) {
+//         switch(type) {
+//              case CommandType::Draw:
+//                  DrawCommand* draw = commands.NextCommand<DrawCommand>();
+//                  // Do the draw
+//                  break;
+//              // other cases
+//         }
+//     }
 
-    // Note that you need to extract the commands from the CommandAllocator before destroying it
-    // and must tell the CommandIterator when the allocated commands have been processed for
-    // deletion.
+// Note that you need to extract the commands from the CommandAllocator before destroying it
+// and must tell the CommandIterator when the allocated commands have been processed for
+// deletion.
 
-    // These are the lists of blocks, should not be used directly, only through CommandAllocator
-    // and CommandIterator
-    struct BlockDef {
-        size_t size;
-        uint8_t* block;
-    };
-    using CommandBlocks = std::vector<BlockDef>;
+// These are the lists of blocks, should not be used directly, only through CommandAllocator
+// and CommandIterator
+struct BlockDef {
+    size_t size;
+    uint8_t* block;
+};
+using CommandBlocks = std::vector<BlockDef>;
 
-    namespace detail {
-        constexpr uint32_t kEndOfBlock = std::numeric_limits<uint32_t>::max();
-        constexpr uint32_t kAdditionalData = std::numeric_limits<uint32_t>::max() - 1;
-    }  // namespace detail
+namespace detail {
+constexpr uint32_t kEndOfBlock = std::numeric_limits<uint32_t>::max();
+constexpr uint32_t kAdditionalData = std::numeric_limits<uint32_t>::max() - 1;
+}  // namespace detail
 
-    class CommandAllocator;
+class CommandAllocator;
 
-    class CommandIterator : public NonCopyable {
-      public:
-        CommandIterator();
-        ~CommandIterator();
+class CommandIterator : public NonCopyable {
+  public:
+    CommandIterator();
+    ~CommandIterator();
 
-        CommandIterator(CommandIterator&& other);
-        CommandIterator& operator=(CommandIterator&& other);
+    CommandIterator(CommandIterator&& other);
+    CommandIterator& operator=(CommandIterator&& other);
 
-        // Shorthand constructor for acquiring CommandBlocks from a single CommandAllocator.
-        explicit CommandIterator(CommandAllocator allocator);
+    // Shorthand constructor for acquiring CommandBlocks from a single CommandAllocator.
+    explicit CommandIterator(CommandAllocator allocator);
 
-        void AcquireCommandBlocks(std::vector<CommandAllocator> allocators);
+    void AcquireCommandBlocks(std::vector<CommandAllocator> allocators);
 
-        template <typename E>
-        bool NextCommandId(E* commandId) {
-            return NextCommandId(reinterpret_cast<uint32_t*>(commandId));
+    template <typename E>
+    bool NextCommandId(E* commandId) {
+        return NextCommandId(reinterpret_cast<uint32_t*>(commandId));
+    }
+    template <typename T>
+    T* NextCommand() {
+        return static_cast<T*>(NextCommand(sizeof(T), alignof(T)));
+    }
+    template <typename T>
+    T* NextData(size_t count) {
+        return static_cast<T*>(NextData(sizeof(T) * count, alignof(T)));
+    }
+
+    // Sets the iterator to the beginning of the commands without emptying the list. This
+    // method can be used if iteration was stopped early and the iterator needs to be restarted.
+    void Reset();
+
+    // This method must be called after the commands have been deleted. It indicates that the
+    // commands have been submitted and are no longer valid.
+    void MakeEmptyAsDataWasDestroyed();
+
+  private:
+    bool IsEmpty() const;
+
+    DAWN_FORCE_INLINE bool NextCommandId(uint32_t* commandId) {
+        uint8_t* idPtr = AlignPtr(mCurrentPtr, alignof(uint32_t));
+        ASSERT(idPtr + sizeof(uint32_t) <=
+               mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
+
+        uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);
+
+        if (id != detail::kEndOfBlock) {
+            mCurrentPtr = idPtr + sizeof(uint32_t);
+            *commandId = id;
+            return true;
         }
-        template <typename T>
-        T* NextCommand() {
-            return static_cast<T*>(NextCommand(sizeof(T), alignof(T)));
+        return NextCommandIdInNewBlock(commandId);
+    }
+
+    bool NextCommandIdInNewBlock(uint32_t* commandId);
+
+    DAWN_FORCE_INLINE void* NextCommand(size_t commandSize, size_t commandAlignment) {
+        uint8_t* commandPtr = AlignPtr(mCurrentPtr, commandAlignment);
+        ASSERT(commandPtr + sizeof(commandSize) <=
+               mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
+
+        mCurrentPtr = commandPtr + commandSize;
+        return commandPtr;
+    }
+
+    DAWN_FORCE_INLINE void* NextData(size_t dataSize, size_t dataAlignment) {
+        uint32_t id;
+        bool hasId = NextCommandId(&id);
+        ASSERT(hasId);
+        ASSERT(id == detail::kAdditionalData);
+
+        return NextCommand(dataSize, dataAlignment);
+    }
+
+    CommandBlocks mBlocks;
+    uint8_t* mCurrentPtr = nullptr;
+    size_t mCurrentBlock = 0;
+    // Used to avoid a special case for empty iterators.
+    uint32_t mEndOfBlock = detail::kEndOfBlock;
+};
+
+class CommandAllocator : public NonCopyable {
+  public:
+    CommandAllocator();
+    ~CommandAllocator();
+
+    // NOTE: A moved-from CommandAllocator is reset to its initial empty state.
+    CommandAllocator(CommandAllocator&&);
+    CommandAllocator& operator=(CommandAllocator&&);
+
+    // Frees all blocks held by the allocator and restores it to its initial empty state.
+    void Reset();
+
+    bool IsEmpty() const;
+
+    template <typename T, typename E>
+    T* Allocate(E commandId) {
+        static_assert(sizeof(E) == sizeof(uint32_t));
+        static_assert(alignof(E) == alignof(uint32_t));
+        static_assert(alignof(T) <= kMaxSupportedAlignment);
+        T* result =
+            reinterpret_cast<T*>(Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
+        if (!result) {
+            return nullptr;
         }
-        template <typename T>
-        T* NextData(size_t count) {
-            return static_cast<T*>(NextData(sizeof(T) * count, alignof(T)));
+        new (result) T;
+        return result;
+    }
+
+    template <typename T>
+    T* AllocateData(size_t count) {
+        static_assert(alignof(T) <= kMaxSupportedAlignment);
+        T* result = reinterpret_cast<T*>(AllocateData(sizeof(T) * count, alignof(T)));
+        if (!result) {
+            return nullptr;
         }
-
-        // Sets iterator to the beginning of the commands without emptying the list. This method can
-        // be used if iteration was stopped early and the iterator needs to be restarted.
-        void Reset();
-
-        // This method must to be called after commands have been deleted. This indicates that the
-        // commands have been submitted and they are no longer valid.
-        void MakeEmptyAsDataWasDestroyed();
-
-      private:
-        bool IsEmpty() const;
-
-        DAWN_FORCE_INLINE bool NextCommandId(uint32_t* commandId) {
-            uint8_t* idPtr = AlignPtr(mCurrentPtr, alignof(uint32_t));
-            ASSERT(idPtr + sizeof(uint32_t) <=
-                   mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
-
-            uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);
-
-            if (id != detail::kEndOfBlock) {
-                mCurrentPtr = idPtr + sizeof(uint32_t);
-                *commandId = id;
-                return true;
-            }
-            return NextCommandIdInNewBlock(commandId);
+        for (size_t i = 0; i < count; i++) {
+            new (result + i) T;
         }
+        return result;
+    }
 
-        bool NextCommandIdInNewBlock(uint32_t* commandId);
+  private:
+    // This is used for some internal computations and can be any power of two as long as code
+    // using the CommandAllocator passes the static_asserts.
+    static constexpr size_t kMaxSupportedAlignment = 8;
 
-        DAWN_FORCE_INLINE void* NextCommand(size_t commandSize, size_t commandAlignment) {
-            uint8_t* commandPtr = AlignPtr(mCurrentPtr, commandAlignment);
-            ASSERT(commandPtr + sizeof(commandSize) <=
-                   mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
+    // To avoid checking for overflows at every step of the computations, we compute an upper
+    // bound of the space that will be needed in addition to the command data.
+    static constexpr size_t kWorstCaseAdditionalSize =
+        sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);
 
-            mCurrentPtr = commandPtr + commandSize;
-            return commandPtr;
+    // The default value of mLastAllocationSize.
+    static conste