Use `absl::InlinedVector` instead of `StackVector`

This patch replaces `StackVector` with `absl::InlinedVector` because in
Chromium, `base::StackVector` has been completely removed and replaced by
`absl::InlinedVector`.

Bug: dawn:443
Change-Id: I13e921d2ff1f84127015a7a69b05b8b31234b30e
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/187620
Reviewed-by: Austin Eng <enga@chromium.org>
Reviewed-by: Loko Kung <lokokung@google.com>
Commit-Queue: Jiawei Shao <jiawei.shao@intel.com>
diff --git a/src/dawn/common/BUILD.gn b/src/dawn/common/BUILD.gn
index 66ca53a..4efb268 100644
--- a/src/dawn/common/BUILD.gn
+++ b/src/dawn/common/BUILD.gn
@@ -232,6 +232,17 @@
   ]
 }
 
+group("abseil") {
+  # When build_with_chromium=true we need to include "//third_party/abseil-cpp:absl" while
+  # it's beneficial to be more specific with standalone Dawn, especially when it comes to
+  # including it as a dependency in other projects (such as Skia).
+  if (build_with_chromium) {
+    public_deps = [ "$dawn_abseil_dir:absl" ]
+  } else {
+    public_deps = [ "${dawn_root}/third_party/gn/abseil-cpp:inlined_vector" ]
+  }
+}
+
 # This GN file is discovered by all Chromium builds, but common doesn't support
 # all of Chromium's OSes so we explicitly make the target visible only on
 # systems we know Dawn is able to compile on.
@@ -288,7 +299,6 @@
       "SlabAllocator.cpp",
       "SlabAllocator.h",
       "StackAllocated.h",
-      "StackContainer.h",
       "SystemUtils.cpp",
       "SystemUtils.h",
       "TypeTraits.h",
@@ -309,6 +319,7 @@
     sources += get_target_outputs(":dawn_gpu_info_gen")
 
     public_deps = [
+      ":abseil",
       ":dawn_gpu_info_gen",
       ":dawn_version_gen",
       "${dawn_root}/src/dawn/partition_alloc:raw_ptr",
diff --git a/src/dawn/common/CMakeLists.txt b/src/dawn/common/CMakeLists.txt
index 32c4b95..db206920 100644
--- a/src/dawn/common/CMakeLists.txt
+++ b/src/dawn/common/CMakeLists.txt
@@ -95,7 +95,6 @@
     "SlabAllocator.cpp"
     "SlabAllocator.h"
     "StackAllocated.h"
-    "StackContainer.h"
     "SystemUtils.cpp"
     "SystemUtils.h"
     "TypeTraits.h"
@@ -131,6 +130,7 @@
 target_link_libraries(dawn_common
     PUBLIC dawncpp_headers
     PUBLIC partition_alloc
+    PRIVATE absl_inlined_vector
     PRIVATE dawn_internal_config
 )
 
diff --git a/src/dawn/common/ContentLessObjectCache.h b/src/dawn/common/ContentLessObjectCache.h
index afe52a1..8e0cca1 100644
--- a/src/dawn/common/ContentLessObjectCache.h
+++ b/src/dawn/common/ContentLessObjectCache.h
@@ -33,10 +33,10 @@
 #include <utility>
 
 #include "absl/container/flat_hash_set.h"
+#include "absl/container/inlined_vector.h"
 #include "dawn/common/ContentLessObjectCacheable.h"
 #include "dawn/common/Ref.h"
 #include "dawn/common/RefCounted.h"
-#include "dawn/common/StackContainer.h"
 #include "dawn/common/WeakRef.h"
 #include "partition_alloc/pointers/raw_ptr.h"
 
@@ -210,14 +210,14 @@
   private:
     friend struct CacheKeyFuncs::EqualityFunc;
 
-    void TrackTemporaryRef(Ref<RefCountedT> ref) { (*mTemporaryRefs)->push_back(std::move(ref)); }
+    void TrackTemporaryRef(Ref<RefCountedT> ref) { mTemporaryRefs->push_back(std::move(ref)); }
     template <typename F>
     auto WithLockAndCleanup(F func) {
         using RetType = decltype(func());
         RetType result;
 
-        // Creates and owns a temporary StackVector that we point to internally to track Refs.
-        StackVector<Ref<RefCountedT>, 4> temps;
+        // Creates and owns a temporary InlinedVector that we point to internally to track Refs.
+        absl::InlinedVector<Ref<RefCountedT>, 4> temps;
         {
             std::lock_guard<std::mutex> lock(mMutex);
             mTemporaryRefs = &temps;
@@ -233,13 +233,13 @@
                         typename CacheKeyFuncs::EqualityFunc>
         mCache;
 
-    // The cache has a pointer to a StackVector of temporary Refs that are by-products of Promotes
+    // The cache has a pointer to an InlinedVector of temporary Refs that are by-products of Promotes
     // inside the EqualityFunc. These Refs need to outlive the EqualityFunc calls because otherwise,
     // they could be the last living Ref of the object resulting in a re-entrant Erase call that
     // deadlocks on the mutex.
-    // Absl should make fewer than 1 equality checks per set operation, so a StackVector of length
+    // Absl should make fewer than one equality check per set operation, so an InlinedVector of length
     // 4 should be sufficient for most cases. See dawn:1993 for more details.
-    raw_ptr<StackVector<Ref<RefCountedT>, 4>> mTemporaryRefs = nullptr;
+    raw_ptr<absl::InlinedVector<Ref<RefCountedT>, 4>> mTemporaryRefs = nullptr;
 };
 
 }  // namespace dawn
diff --git a/src/dawn/common/GPUInfo.cpp b/src/dawn/common/GPUInfo.cpp
index 22b7f79..c3d6ca7 100644
--- a/src/dawn/common/GPUInfo.cpp
+++ b/src/dawn/common/GPUInfo.cpp
@@ -61,29 +61,29 @@
 
 DriverVersion::DriverVersion(const std::initializer_list<uint16_t>& version) {
     DAWN_ASSERT(version.size() <= kMaxVersionFields);
-    mDriverVersion->assign(version.begin(), version.end());
+    mDriverVersion.assign(version.begin(), version.end());
 }
 
 uint16_t& DriverVersion::operator[](size_t i) {
-    return mDriverVersion->operator[](i);
+    return mDriverVersion.operator[](i);
 }
 
 const uint16_t& DriverVersion::operator[](size_t i) const {
-    return mDriverVersion->operator[](i);
+    return mDriverVersion.operator[](i);
 }
 
 uint32_t DriverVersion::size() const {
-    return mDriverVersion->size();
+    return mDriverVersion.size();
 }
 
 std::string DriverVersion::ToString() const {
     std::ostringstream oss;
-    if (mDriverVersion->size() > 0) {
+    if (!mDriverVersion.empty()) {
         // Convert all but the last element to avoid a trailing "."
-        std::copy(mDriverVersion->begin(), mDriverVersion->end() - 1,
+        std::copy(mDriverVersion.begin(), mDriverVersion.end() - 1,
                   std::ostream_iterator<uint16_t>(oss, "."));
         // Add the last element
-        oss << mDriverVersion->back();
+        oss << mDriverVersion.back();
     }
 
     return oss.str();
diff --git a/src/dawn/common/GPUInfo.h b/src/dawn/common/GPUInfo.h
index 0328188..1ad6238 100644
--- a/src/dawn/common/GPUInfo.h
+++ b/src/dawn/common/GPUInfo.h
@@ -30,8 +30,8 @@
 
 #include <string>
 
+#include "absl/container/inlined_vector.h"
 #include "dawn/common/GPUInfo_autogen.h"
-#include "dawn/common/StackContainer.h"
 
 namespace dawn::gpu_info {
 
@@ -53,7 +53,7 @@
     std::string ToString() const;
 
   private:
-    StackVector<uint16_t, kMaxVersionFields> mDriverVersion;
+    absl::InlinedVector<uint16_t, kMaxVersionFields> mDriverVersion;
 };
 
 // Do comparison between two driver versions. Currently we only support the comparison between
diff --git a/src/dawn/common/StackContainer.h b/src/dawn/common/StackContainer.h
deleted file mode 100644
index 19d6806..0000000
--- a/src/dawn/common/StackContainer.h
+++ /dev/null
@@ -1,243 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is a modified copy of Chromium's /src/base/containers/stack_container.h
-
-#ifndef SRC_DAWN_COMMON_STACKCONTAINER_H_
-#define SRC_DAWN_COMMON_STACKCONTAINER_H_
-
-#include <cstddef>
-#include <memory>
-#include <vector>
-
-#include "dawn/common/Compiler.h"
-#include "partition_alloc/pointers/raw_ptr.h"
-
-namespace dawn {
-
-// This allocator can be used with STL containers to provide a stack buffer
-// from which to allocate memory and overflows onto the heap. This stack buffer
-// would be allocated on the stack and allows us to avoid heap operations in
-// some situations.
-//
-// STL likes to make copies of allocators, so the allocator itself can't hold
-// the data. Instead, we make the creator responsible for creating a
-// StackAllocator::Source which contains the data. Copying the allocator
-// merely copies the pointer to this shared source, so all allocators created
-// based on our allocator will share the same stack buffer.
-//
-// This stack buffer implementation is very simple. The first allocation that
-// fits in the stack buffer will use the stack buffer. Any subsequent
-// allocations will not use the stack buffer, even if there is unused room.
-// This makes it appropriate for array-like containers, but the caller should
-// be sure to reserve() in the container up to the stack buffer size. Otherwise
-// the container will allocate a small array which will "use up" the stack
-// buffer.
-template <typename T, size_t stack_capacity>
-class StackAllocator : public std::allocator<T> {
-  public:
-    typedef typename std::allocator_traits<std::allocator<T>>::pointer pointer;
-    typedef typename std::allocator_traits<std::allocator<T>>::size_type size_type;
-
-    // Backing store for the allocator. The container owner is responsible for
-    // maintaining this for as long as any containers using this allocator are
-    // live.
-    struct Source {
-        Source() : used_stack_buffer_(false) {}
-
-        // Casts the buffer in its right type.
-        T* stack_buffer() { return reinterpret_cast<T*>(stack_buffer_); }
-        const T* stack_buffer() const { return reinterpret_cast<const T*>(&stack_buffer_); }
-
-        // The buffer itself. It is not of type T because we don't want the
-        // constructors and destructors to be automatically called. Define a POD
-        // buffer of the right size instead.
-        alignas(T) char stack_buffer_[sizeof(T[stack_capacity])];
-#if DAWN_COMPILER_IS(GCC) && !defined(__x86_64__) && !defined(__i386__)
-        static_assert(alignof(T) <= 16, "http://crbug.com/115612");
-#endif
-
-        // Set when the stack buffer is used for an allocation. We do not track
-        // how much of the buffer is used, only that somebody is using it.
-        bool used_stack_buffer_;
-    };
-
-    // Used by containers when they want to refer to an allocator of type U.
-    template <typename U>
-    struct rebind {
-        typedef StackAllocator<U, stack_capacity> other;
-    };
-
-    // For the straight up copy c-tor, we can share storage.
-    StackAllocator(const StackAllocator<T, stack_capacity>& rhs)
-        : std::allocator<T>(), source_(rhs.source_) {}
-
-    // ISO C++ requires the following constructor to be defined,
-    // and std::vector in VC++2008SP1 Release fails with an error
-    // in the class _Container_base_aux_alloc_real (from <xutility>)
-    // if the constructor does not exist.
-    // For this constructor, we cannot share storage; there's
-    // no guarantee that the Source buffer of Ts is large enough
-    // for Us.
-    template <typename U, size_t other_capacity>
-    StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {}
-
-    // This constructor must exist. It creates a default allocator that doesn't
-    // actually have a stack buffer. glibc's std::string() will compare the
-    // current allocator against the default-constructed allocator, so this
-    // should be fast.
-    StackAllocator() : source_(nullptr) {}
-
-    explicit StackAllocator(Source* source) : source_(source) {}
-
-    // Actually do the allocation. Use the stack buffer if nobody has used it yet
-    // and the size requested fits. Otherwise, fall through to the standard
-    // allocator.
-    pointer allocate(size_type n) {
-        if (source_ && !source_->used_stack_buffer_ && n <= stack_capacity) {
-            source_->used_stack_buffer_ = true;
-            return source_->stack_buffer();
-        } else {
-            return std::allocator<T>::allocate(n);
-        }
-    }
-
-    // Free: when trying to free the stack buffer, just mark it as free. For
-    // non-stack-buffer pointers, just fall though to the standard allocator.
-    void deallocate(pointer p, size_type n) {
-        if (source_ && p == source_->stack_buffer()) {
-            source_->used_stack_buffer_ = false;
-        } else {
-            std::allocator<T>::deallocate(p, n);
-        }
-    }
-
-  private:
-    raw_ptr<Source> source_;
-};
-
-// A wrapper around STL containers that maintains a stack-sized buffer that the
-// initial capacity of the vector is based on. Growing the container beyond the
-// stack capacity will transparently overflow onto the heap. The container must
-// support reserve().
-//
-// This will not work with std::string since some implementations allocate
-// more bytes than requested in calls to reserve(), forcing the allocation onto
-// the heap.  http://crbug.com/709273
-//
-// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
-// type. This object is really intended to be used only internally. You'll want
-// to use the wrappers below for different types.
-template <typename TContainerType, size_t stack_capacity>
-class StackContainer {
-  public:
-    typedef TContainerType ContainerType;
-    typedef typename ContainerType::value_type ContainedType;
-    typedef StackAllocator<ContainedType, stack_capacity> Allocator;
-
-    // Allocator must be constructed before the container!
-    StackContainer() : allocator_(&stack_data_), container_(allocator_) {
-        // Make the container use the stack allocation by reserving our buffer size
-        // before doing anything else.
-        container_.reserve(stack_capacity);
-    }
-
-    // Getters for the actual container.
-    //
-    // Danger: any copies of this made using the copy constructor must have
-    // shorter lifetimes than the source. The copy will share the same allocator
-    // and therefore the same stack buffer as the original. Use std::copy to
-    // copy into a "real" container for longer-lived objects.
-    ContainerType& container() { return container_; }
-    const ContainerType& container() const { return container_; }
-
-    // Support operator-> to get to the container. This allows nicer syntax like:
-    //   StackContainer<...> foo;
-    //   std::sort(foo->begin(), foo->end());
-    ContainerType* operator->() { return &container_; }
-    const ContainerType* operator->() const { return &container_; }
-
-    // Retrieves the stack source so that that unit tests can verify that the
-    // buffer is being used properly.
-    const typename Allocator::Source& stack_data() const { return stack_data_; }
-
-  protected:
-    typename Allocator::Source stack_data_;
-    Allocator allocator_;
-    ContainerType container_;
-
-  private:
-    StackContainer(const StackContainer& rhs) = delete;
-    StackContainer& operator=(const StackContainer& rhs) = delete;
-    StackContainer(StackContainer&& rhs) = delete;
-    StackContainer& operator=(StackContainer&& rhs) = delete;
-};
-
-// Range-based iteration support for StackContainer.
-template <typename TContainerType, size_t stack_capacity>
-auto begin(const StackContainer<TContainerType, stack_capacity>& stack_container)
-    -> decltype(begin(stack_container.container())) {
-    return begin(stack_container.container());
-}
-
-template <typename TContainerType, size_t stack_capacity>
-auto begin(StackContainer<TContainerType, stack_capacity>& stack_container)
-    -> decltype(begin(stack_container.container())) {
-    return begin(stack_container.container());
-}
-
-template <typename TContainerType, size_t stack_capacity>
-auto end(StackContainer<TContainerType, stack_capacity>& stack_container)
-    -> decltype(end(stack_container.container())) {
-    return end(stack_container.container());
-}
-
-template <typename TContainerType, size_t stack_capacity>
-auto end(const StackContainer<TContainerType, stack_capacity>& stack_container)
-    -> decltype(end(stack_container.container())) {
-    return end(stack_container.container());
-}
-
-// StackVector -----------------------------------------------------------------
-
-// Example:
-//   StackVector<int, 16> foo;
-//   foo->push_back(22);  // we have overloaded operator->
-//   foo[0] = 10;         // as well as operator[]
-template <typename T, size_t stack_capacity>
-class StackVector
-    : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity> {
-  public:
-    StackVector()
-        : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {}
-
-    // We need to put this in STL containers sometimes, which requires a copy
-    // constructor. We can't call the regular copy constructor because that will
-    // take the stack buffer from the original. Here, we create an empty object
-    // and make a stack buffer of its own.
-    StackVector(const StackVector<T, stack_capacity>& other)
-        : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {
-        this->container().assign(other->begin(), other->end());
-    }
-
-    StackVector<T, stack_capacity>& operator=(const StackVector<T, stack_capacity>& other) {
-        this->container().assign(other->begin(), other->end());
-        return *this;
-    }
-
-    // Vectors are commonly indexed, which isn't very convenient even with
-    // operator-> (using "->at()" does exception stuff we don't want).
-    T& operator[](size_t i) { return this->container().operator[](i); }
-    const T& operator[](size_t i) const { return this->container().operator[](i); }
-
-  private:
-    // StackVector(const StackVector& rhs) = delete;
-    // StackVector& operator=(const StackVector& rhs) = delete;
-    StackVector(StackVector&& rhs) = delete;
-    StackVector& operator=(StackVector&& rhs) = delete;
-};
-
-}  // namespace dawn
-
-#endif  // SRC_DAWN_COMMON_STACKCONTAINER_H_
diff --git a/src/dawn/common/ityp_stack_vec.h b/src/dawn/common/ityp_stack_vec.h
index 427e656..f3eb16e 100644
--- a/src/dawn/common/ityp_stack_vec.h
+++ b/src/dawn/common/ityp_stack_vec.h
@@ -31,22 +31,21 @@
 #include <limits>
 #include <vector>
 
+#include "absl/container/inlined_vector.h"
 #include "dawn/common/Assert.h"
-#include "dawn/common/StackContainer.h"
 #include "dawn/common/UnderlyingType.h"
 
 namespace dawn::ityp {
 
 template <typename Index, typename Value, size_t StaticCapacity>
-class stack_vec : private StackVector<Value, StaticCapacity> {
+class stack_vec : private absl::InlinedVector<Value, StaticCapacity> {
     using I = UnderlyingType<Index>;
-    using Base = StackVector<Value, StaticCapacity>;
-    using VectorBase = std::vector<Value, StackAllocator<Value, StaticCapacity>>;
+    using Base = absl::InlinedVector<Value, StaticCapacity>;
     static_assert(StaticCapacity <= std::numeric_limits<I>::max());
 
   public:
     stack_vec() : Base() {}
-    explicit stack_vec(Index size) : Base() { this->container().resize(static_cast<I>(size)); }
+    explicit stack_vec(Index size) : Base() { Base::resize(static_cast<I>(size)); }
 
     Value& operator[](Index i) {
         DAWN_ASSERT(i < size());
@@ -58,31 +57,15 @@
         return Base::operator[](static_cast<I>(i));
     }
 
-    void resize(Index size) { this->container().resize(static_cast<I>(size)); }
+    void resize(Index size) { Base::resize(static_cast<I>(size)); }
 
-    void reserve(Index size) { this->container().reserve(static_cast<I>(size)); }
+    void reserve(Index size) { Base::reserve(static_cast<I>(size)); }
 
-    Value* data() { return this->container().data(); }
+    Value* data() { return Base::data(); }
 
-    const Value* data() const { return this->container().data(); }
+    const Value* data() const { return Base::data(); }
 
-    typename VectorBase::iterator begin() noexcept { return this->container().begin(); }
-
-    typename VectorBase::const_iterator begin() const noexcept { return this->container().begin(); }
-
-    typename VectorBase::iterator end() noexcept { return this->container().end(); }
-
-    typename VectorBase::const_iterator end() const noexcept { return this->container().end(); }
-
-    typename VectorBase::reference front() { return this->container().front(); }
-
-    typename VectorBase::const_reference front() const { return this->container().front(); }
-
-    typename VectorBase::reference back() { return this->container().back(); }
-
-    typename VectorBase::const_reference back() const { return this->container().back(); }
-
-    Index size() const { return Index(static_cast<I>(this->container().size())); }
+    Index size() const { return Index(static_cast<I>(Base::size())); }
 };
 
 }  // namespace dawn::ityp
diff --git a/src/dawn/native/BUILD.gn b/src/dawn/native/BUILD.gn
index 74b07c4..880c6f1 100644
--- a/src/dawn/native/BUILD.gn
+++ b/src/dawn/native/BUILD.gn
@@ -65,6 +65,7 @@
       "${dawn_root}/third_party/gn/abseil-cpp:bits",
       "${dawn_root}/third_party/gn/abseil-cpp:flat_hash_map",
       "${dawn_root}/third_party/gn/abseil-cpp:flat_hash_set",
+      "${dawn_root}/third_party/gn/abseil-cpp:inlined_vector",
       "${dawn_root}/third_party/gn/abseil-cpp:str_format",
       "${dawn_root}/third_party/gn/abseil-cpp:strings",
     ]
diff --git a/src/dawn/native/CMakeLists.txt b/src/dawn/native/CMakeLists.txt
index b46c606..a2f4274 100644
--- a/src/dawn/native/CMakeLists.txt
+++ b/src/dawn/native/CMakeLists.txt
@@ -261,6 +261,7 @@
             absl_str_format_internal
             absl_flat_hash_map
             absl_flat_hash_set
+            absl_inlined_vector
 )
 
 target_include_directories(dawn_native PRIVATE ${DAWN_ABSEIL_DIR})
diff --git a/src/dawn/native/CommandBufferStateTracker.cpp b/src/dawn/native/CommandBufferStateTracker.cpp
index cbc445c..8f6ae91 100644
--- a/src/dawn/native/CommandBufferStateTracker.cpp
+++ b/src/dawn/native/CommandBufferStateTracker.cpp
@@ -34,9 +34,9 @@
 #include <variant>
 
 #include "absl/container/flat_hash_map.h"
+#include "absl/container/inlined_vector.h"
 #include "dawn/common/Assert.h"
 #include "dawn/common/BitSetIterator.h"
-#include "dawn/common/StackContainer.h"
 #include "dawn/native/BindGroup.h"
 #include "dawn/native/ComputePassEncoder.h"
 #include "dawn/native/ComputePipeline.h"
@@ -109,14 +109,14 @@
     // Reduce the bindings array first to only preserve storage buffer bindings that could
     // potentially have ranges overlap.
     // There can at most be 8 storage buffer bindings (in default limits) per shader stage.
-    StackVector<BufferBinding, 8> storageBufferBindingsToCheck;
-    StackVector<std::pair<BindGroupIndex, BindingIndex>, 8> bufferBindingIndices;
+    absl::InlinedVector<BufferBinding, 8> storageBufferBindingsToCheck;
+    absl::InlinedVector<std::pair<BindGroupIndex, BindingIndex>, 8> bufferBindingIndices;
 
     // Reduce the bindings array first to only preserve writable storage texture bindings that could
     // potentially have ranges overlap.
     // There can at most be 8 storage texture bindings (in default limits) per shader stage.
-    StackVector<const TextureViewBase*, 8> storageTextureViewsToCheck;
-    StackVector<std::pair<BindGroupIndex, BindingIndex>, 8> textureBindingIndices;
+    absl::InlinedVector<const TextureViewBase*, 8> storageTextureViewsToCheck;
+    absl::InlinedVector<std::pair<BindGroupIndex, BindingIndex>, 8> textureBindingIndices;
 
     for (BindGroupIndex groupIndex : IterateBitSet(pipelineLayout->GetBindGroupLayoutsMask())) {
         BindGroupLayoutInternalBase* bgl = bindGroups[groupIndex]->GetLayout();
@@ -147,14 +147,14 @@
                 adjustedOffset += dynamicOffsets[groupIndex][static_cast<uint32_t>(bindingIndex)];
             }
 
-            storageBufferBindingsToCheck->push_back(BufferBinding{
+            storageBufferBindingsToCheck.push_back(BufferBinding{
                 bufferBinding.buffer,
                 adjustedOffset,
                 bufferBinding.size,
             });
 
             if constexpr (kProduceDetails) {
-                bufferBindingIndices->emplace_back(groupIndex, bindingIndex);
+                bufferBindingIndices.emplace_back(groupIndex, bindingIndex);
             }
         }
 
@@ -182,10 +182,10 @@
             const TextureViewBase* textureView =
                 bindGroups[groupIndex]->GetBindingAsTextureView(bindingIndex);
 
-            storageTextureViewsToCheck->push_back(textureView);
+            storageTextureViewsToCheck.push_back(textureView);
 
             if constexpr (kProduceDetails) {
-                textureBindingIndices->emplace_back(groupIndex, bindingIndex);
+                textureBindingIndices.emplace_back(groupIndex, bindingIndex);
             }
         }
     }
@@ -194,10 +194,10 @@
     // exists. Given that maxStorageBuffersPerShaderStage is 8, it doesn't seem too bad to do a
     // nested loop check.
     // TODO(dawn:1642): Maybe do algorithm optimization from O(N^2) to O(N*logN).
-    for (size_t i = 0; i < storageBufferBindingsToCheck->size(); i++) {
+    for (size_t i = 0; i < storageBufferBindingsToCheck.size(); i++) {
         const auto& bufferBinding0 = storageBufferBindingsToCheck[i];
 
-        for (size_t j = i + 1; j < storageBufferBindingsToCheck->size(); j++) {
+        for (size_t j = i + 1; j < storageBufferBindingsToCheck.size(); j++) {
             const auto& bufferBinding1 = storageBufferBindingsToCheck[j];
 
             if (bufferBinding0.buffer != bufferBinding1.buffer) {
@@ -225,7 +225,7 @@
     // Given that maxStorageTexturesPerShaderStage is 8,
     // it doesn't seem too bad to do a nested loop check.
     // TODO(dawn:1642): Maybe do algorithm optimization from O(N^2) to O(N*logN).
-    for (size_t i = 0; i < storageTextureViewsToCheck->size(); i++) {
+    for (size_t i = 0; i < storageTextureViewsToCheck.size(); i++) {
         const TextureViewBase* textureView0 = storageTextureViewsToCheck[i];
 
         DAWN_ASSERT(textureView0->GetAspects() == Aspect::Color);
@@ -235,7 +235,7 @@
         uint32_t baseArrayLayer0 = textureView0->GetBaseArrayLayer();
         uint32_t arrayLayerCount0 = textureView0->GetLayerCount();
 
-        for (size_t j = i + 1; j < storageTextureViewsToCheck->size(); j++) {
+        for (size_t j = i + 1; j < storageTextureViewsToCheck.size(); j++) {
             const TextureViewBase* textureView1 = storageTextureViewsToCheck[j];
 
             if (textureView0->GetTexture() != textureView1->GetTexture()) {
@@ -283,13 +283,13 @@
            a->GetLayerCount() == b->GetLayerCount();
 }
 
-using VectorOfTextureViews = StackVector<const TextureViewBase*, 8>;
+using VectorOfTextureViews = absl::InlinedVector<const TextureViewBase*, 8>;
 
 bool TextureViewsAllMatch(const VectorOfTextureViews& views) {
-    DAWN_ASSERT(!views->empty());
+    DAWN_ASSERT(!views.empty());
 
     const TextureViewBase* first = views[0];
-    for (size_t i = 1; i < views->size(); ++i) {
+    for (size_t i = 1; i < views.size(); ++i) {
         if (!TextureViewsMatch(first, views[i])) {
             return false;
         }
@@ -369,7 +369,7 @@
             const TextureViewBase* textureViewBase =
                 bindGroup->GetBindingAsTextureView(bindingIndex);
 
-            textureToViews[textureViewBase->GetTexture()]->push_back(textureViewBase);
+            textureToViews[textureViewBase->GetTexture()].push_back(textureViewBase);
         }
     }
 
@@ -380,8 +380,7 @@
             !TextureViewsAllMatch(views),
             "In compatibility mode, %s must not have different views in a single draw/dispatch "
             "command. texture views: %s",
-            texture,
-            ityp::span<size_t, const TextureViewBase* const>(views->data(), views->size()));
+            texture, ityp::span<size_t, const TextureViewBase* const>(views.data(), views.size()));
     }
 
     return {};
diff --git a/src/dawn/native/CommandEncoder.cpp b/src/dawn/native/CommandEncoder.cpp
index 9b84a30..8f1988f 100644
--- a/src/dawn/native/CommandEncoder.cpp
+++ b/src/dawn/native/CommandEncoder.cpp
@@ -31,6 +31,7 @@
 #include <utility>
 #include <vector>
 
+#include "absl/container/inlined_vector.h"
 #include "dawn/common/BitSetIterator.h"
 #include "dawn/common/Enumerator.h"
 #include "dawn/common/Math.h"
@@ -229,19 +230,19 @@
             record.depthOrArrayLayer = attachment->GetBaseArrayLayer();
         }
 
-        for (size_t i = 0; i < mRecords->size(); i++) {
+        for (size_t i = 0; i < mRecords.size(); i++) {
             DAWN_INVALID_IF(
                 mRecords[i] == record,
                 "The %s %s has read-write or write-write conflict with another attachment.",
                 attachmentTypeStr, attachment);
         }
 
-        mRecords->push_back(record);
+        mRecords.push_back(record);
 
         return {};
     }
 
-    bool HasAttachment() const { return mRecords->size() != 0; }
+    bool HasAttachment() const { return !mRecords.empty(); }
 
     bool IsValidState() const {
         return ((mRenderWidth > 0) && (mRenderHeight > 0) && (mSampleCount > 0) &&
@@ -277,7 +278,7 @@
     uint32_t mAttachmentValidationHeight = 0;
 
     // The records of the attachments that were validated in render pass.
-    StackVector<RecordedAttachment, kMaxColorAttachments> mRecords;
+    absl::InlinedVector<RecordedAttachment, kMaxColorAttachments> mRecords;
 
     bool mWillExpandResolveTexture = false;
 };
@@ -693,7 +694,7 @@
                                  const RenderPassPixelLocalStorage* pls,
                                  UsageValidationMode usageValidationMode,
                                  RenderPassValidationState* validationState) {
-    StackVector<StorageAttachmentInfoForValidation, 4> attachments;
+    absl::InlinedVector<StorageAttachmentInfoForValidation, 4> attachments;
 
     for (size_t i = 0; i < pls->storageAttachmentCount; i++) {
         const RenderPassStorageAttachment& attachment = pls->storageAttachments[i];
@@ -723,11 +724,11 @@
         DAWN_TRY(
             validationState->AddAttachment(attachment.storage, AttachmentType::StorageAttachment));
 
-        attachments->push_back({attachment.offset, attachment.storage->GetFormat().format});
+        attachments.push_back({attachment.offset, attachment.storage->GetFormat().format});
     }
 
     return ValidatePLSInfo(device, pls->totalPixelLocalStorageSize,
-                           {attachments->data(), attachments->size()});
+                           {attachments.data(), attachments.size()});
 }
 
 ResultOrError<UnpackedPtr<RenderPassDescriptor>> ValidateRenderPassDescriptor(
@@ -753,7 +754,7 @@
                                                            validationState),
                          "validating colorAttachments[%u].", i);
         if (attachment.view) {
-            colorAttachmentFormats->push_back(&attachment.view->GetFormat());
+            colorAttachmentFormats.push_back(&attachment.view->GetFormat());
         }
     }
     DAWN_TRY_CONTEXT(ValidateColorAttachmentBytesPerSample(device, colorAttachmentFormats),
diff --git a/src/dawn/native/CommandValidation.h b/src/dawn/native/CommandValidation.h
index c9abed2..b07ad87 100644
--- a/src/dawn/native/CommandValidation.h
+++ b/src/dawn/native/CommandValidation.h
@@ -30,8 +30,8 @@
 
 #include <vector>
 
+#include "absl/container/inlined_vector.h"
 #include "dawn/common/Constants.h"
-#include "dawn/common/StackContainer.h"
 #include "dawn/native/CommandAllocator.h"
 #include "dawn/native/Error.h"
 #include "dawn/native/Features.h"
@@ -115,7 +115,7 @@
                             UsageValidationMode mode);
 MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage);
 
-using ColorAttachmentFormats = StackVector<const Format*, kMaxColorAttachments>;
+using ColorAttachmentFormats = absl::InlinedVector<const Format*, kMaxColorAttachments>;
 MaybeError ValidateColorAttachmentBytesPerSample(DeviceBase* device,
                                                  const ColorAttachmentFormats& formats);
 
diff --git a/src/dawn/native/PipelineLayout.cpp b/src/dawn/native/PipelineLayout.cpp
index db43c4a..7df1072 100644
--- a/src/dawn/native/PipelineLayout.cpp
+++ b/src/dawn/native/PipelineLayout.cpp
@@ -31,6 +31,7 @@
 #include <map>
 #include <utility>
 
+#include "absl/container/inlined_vector.h"
 #include "dawn/common/Assert.h"
 #include "dawn/common/BitSetIterator.h"
 #include "dawn/common/Enumerator.h"
@@ -57,7 +58,7 @@
 
     // Validation for any pixel local storage.
     if (auto* pls = unpacked.Get<PipelineLayoutPixelLocalStorage>()) {
-        StackVector<StorageAttachmentInfoForValidation, 4> attachments;
+        absl::InlinedVector<StorageAttachmentInfoForValidation, 4> attachments;
         for (size_t i = 0; i < pls->storageAttachmentCount; i++) {
             const PipelineLayoutStorageAttachment& attachment = pls->storageAttachments[i];
 
@@ -68,11 +69,11 @@
                             "storageAttachments[%i]'s format (%s) cannot be used with %s.", i,
                             format->format, wgpu::TextureUsage::StorageAttachment);
 
-            attachments->push_back({attachment.offset, attachment.format});
+            attachments.push_back({attachment.offset, attachment.format});
         }
 
         DAWN_TRY(ValidatePLSInfo(device, pls->totalPixelLocalStorageSize,
-                                 {attachments->data(), attachments->size()}));
+                                 {attachments.data(), attachments.size()}));
     }
 
     DAWN_INVALID_IF(descriptor->bindGroupLayoutCount > kMaxBindGroups,
diff --git a/src/dawn/native/RenderBundleEncoder.cpp b/src/dawn/native/RenderBundleEncoder.cpp
index ddc9026..f6d86bf 100644
--- a/src/dawn/native/RenderBundleEncoder.cpp
+++ b/src/dawn/native/RenderBundleEncoder.cpp
@@ -29,7 +29,6 @@
 
 #include <utility>
 
-#include "dawn/common/StackContainer.h"
 #include "dawn/native/CommandValidation.h"
 #include "dawn/native/Commands.h"
 #include "dawn/native/Device.h"
@@ -81,7 +80,7 @@
         if (format != wgpu::TextureFormat::Undefined) {
             DAWN_TRY_CONTEXT(ValidateColorAttachmentFormat(device, format),
                              "validating colorFormats[%u]", i);
-            colorAttachmentFormats->push_back(&device->GetValidInternalFormat(format));
+            colorAttachmentFormats.push_back(&device->GetValidInternalFormat(format));
             allColorFormatsUndefined = false;
         }
     }
diff --git a/src/dawn/native/RenderPipeline.cpp b/src/dawn/native/RenderPipeline.cpp
index 78e0ccf..5a49efc 100644
--- a/src/dawn/native/RenderPipeline.cpp
+++ b/src/dawn/native/RenderPipeline.cpp
@@ -611,7 +611,7 @@
                                                   fragmentMetadata.fragmentOutputMask[i],
                                                   fragmentMetadata.fragmentOutputVariables[i]),
                          "validating targets[%u] framebuffer output.", i);
-        colorAttachmentFormats->push_back(&device->GetValidInternalFormat(targets[i].format));
+        colorAttachmentFormats.push_back(&device->GetValidInternalFormat(targets[i].format));
 
         if (fragmentMetadata.fragmentInputMask[i]) {
             DAWN_TRY_CONTEXT(ValidateFramebufferInput(device, format,
diff --git a/src/dawn/native/SharedResourceMemory.cpp b/src/dawn/native/SharedResourceMemory.cpp
index 296e993..f33640e 100644
--- a/src/dawn/native/SharedResourceMemory.cpp
+++ b/src/dawn/native/SharedResourceMemory.cpp
@@ -165,8 +165,7 @@
     DAWN_TRY(BeginAccessImpl(resource, descriptor));
 
     for (size_t i = 0; i < descriptor->fenceCount; ++i) {
-        mContents->mPendingFences->push_back(
-            {descriptor->fences[i], descriptor->signaledValues[i]});
+        mContents->mPendingFences.push_back({descriptor->fences[i], descriptor->signaledValues[i]});
     }
 
     DAWN_ASSERT(!resource->IsError());
@@ -275,14 +274,14 @@
     {
         ResultOrError<FenceAndSignalValue> result = EndAccessInternal(resource, state);
         if (result.IsSuccess()) {
-            fenceList->push_back(result.AcquireSuccess());
+            fenceList.push_back(result.AcquireSuccess());
         } else {
             err = result.AcquireError();
         }
     }
 
     // Copy the fences to the output state.
-    if (size_t fenceCount = fenceList->size()) {
+    if (size_t fenceCount = fenceList.size()) {
         auto* fences = new SharedFenceBase*[fenceCount];
         uint64_t* signaledValues = new uint64_t[fenceCount];
         for (size_t i = 0; i < fenceCount; ++i) {
@@ -325,7 +324,7 @@
 
 void SharedResourceMemoryContents::AcquirePendingFences(PendingFenceList* fences) {
     *fences = mPendingFences;
-    mPendingFences->clear();
+    mPendingFences.clear();
 }
 
 void SharedResourceMemoryContents::SetLastUsageSerial(ExecutionSerial lastUsageSerial) {
diff --git a/src/dawn/native/SharedResourceMemory.h b/src/dawn/native/SharedResourceMemory.h
index 632b98f..cfe3abf 100644
--- a/src/dawn/native/SharedResourceMemory.h
+++ b/src/dawn/native/SharedResourceMemory.h
@@ -28,7 +28,7 @@
 #ifndef SRC_DAWN_NATIVE_SHAREDRESOURCEMEMORY_H_
 #define SRC_DAWN_NATIVE_SHAREDRESOURCEMEMORY_H_
 
-#include "dawn/common/StackContainer.h"
+#include "absl/container/inlined_vector.h"
 #include "dawn/common/WeakRef.h"
 #include "dawn/common/WeakRefSupport.h"
 #include "dawn/native/Error.h"
@@ -64,7 +64,7 @@
 
 class SharedResourceMemory : public ApiObjectBase, public WeakRefSupport<SharedResourceMemory> {
   public:
-    using PendingFenceList = StackVector<FenceAndSignalValue, 1>;
+    using PendingFenceList = absl::InlinedVector<FenceAndSignalValue, 1>;
 
     ~SharedResourceMemory() override;
     void Initialize();
diff --git a/src/dawn/native/SharedTextureMemory.h b/src/dawn/native/SharedTextureMemory.h
index 7751b34..1346231 100644
--- a/src/dawn/native/SharedTextureMemory.h
+++ b/src/dawn/native/SharedTextureMemory.h
@@ -28,7 +28,6 @@
 #ifndef SRC_DAWN_NATIVE_SHAREDTEXTUREMEMORY_H_
 #define SRC_DAWN_NATIVE_SHAREDTEXTUREMEMORY_H_
 
-#include "dawn/common/StackContainer.h"
 #include "dawn/common/WeakRef.h"
 #include "dawn/common/WeakRefSupport.h"
 #include "dawn/native/Error.h"
diff --git a/src/dawn/native/WaitAnySystemEvent.h b/src/dawn/native/WaitAnySystemEvent.h
index 9cc2aa4..630850d 100644
--- a/src/dawn/native/WaitAnySystemEvent.h
+++ b/src/dawn/native/WaitAnySystemEvent.h
@@ -43,7 +43,7 @@
 #include <unistd.h>
 #endif
 
-#include "dawn/common/StackContainer.h"
+#include "absl/container/inlined_vector.h"
 #include "dawn/native/SystemEvent.h"
 
 namespace dawn::native {
@@ -78,13 +78,13 @@
         return false;
     }
 #if DAWN_PLATFORM_IS(WINDOWS)
-    StackVector<HANDLE, 4 /* avoid heap allocation for small waits */> handles;
-    handles->reserve(count);
+    absl::InlinedVector<HANDLE, 4 /* avoid heap allocation for small waits */> handles;
+    handles.reserve(count);
     for (auto it = begin; it != end; ++it) {
-        handles->push_back((*it).first.mPrimitive.Get());
+        handles.push_back((*it).first.mPrimitive.Get());
     }
-    DAWN_ASSERT(handles->size() <= MAXIMUM_WAIT_OBJECTS);
-    DWORD status = WaitForMultipleObjects(handles->size(), handles->data(), /*bWaitAll=*/false,
+    DAWN_ASSERT(handles.size() <= MAXIMUM_WAIT_OBJECTS);
+    DWORD status = WaitForMultipleObjects(handles.size(), handles.data(), /*bWaitAll=*/false,
                                           ToMilliseconds(timeout));
     if (status == WAIT_TIMEOUT) {
         return false;
@@ -95,12 +95,12 @@
     *(*(begin + completedIndex)).second = true;
     return true;
 #elif DAWN_PLATFORM_IS(POSIX)
-    StackVector<pollfd, 4 /* avoid heap allocation for small waits */> pollfds;
-    pollfds->reserve(count);
+    absl::InlinedVector<pollfd, 4 /* avoid heap allocation for small waits */> pollfds;
+    pollfds.reserve(count);
     for (auto it = begin; it != end; ++it) {
-        pollfds->push_back(pollfd{static_cast<int>((*it).first.mPrimitive.Get()), POLLIN, 0});
+        pollfds.push_back(pollfd{static_cast<int>((*it).first.mPrimitive.Get()), POLLIN, 0});
     }
-    int status = poll(pollfds->data(), pollfds->size(), ToMilliseconds(timeout));
+    int status = poll(pollfds.data(), pollfds.size(), ToMilliseconds(timeout));
 
     DAWN_CHECK(status >= 0);
     if (status == 0) {
diff --git a/src/dawn/native/d3d11/QueueD3D11.cpp b/src/dawn/native/d3d11/QueueD3D11.cpp
index f6fc1f4..e0ac29a 100644
--- a/src/dawn/native/d3d11/QueueD3D11.cpp
+++ b/src/dawn/native/d3d11/QueueD3D11.cpp
@@ -33,6 +33,7 @@
 #include <utility>
 #include <vector>
 
+#include "absl/container/inlined_vector.h"
 #include "dawn/common/Log.h"
 #include "dawn/native/WaitAnySystemEvent.h"
 #include "dawn/native/d3d/D3DError.h"
@@ -362,21 +363,21 @@
                 return GetLastSubmittedCommandSerial();
             }
 
-            StackVector<HANDLE, 8> handles;
+            absl::InlinedVector<HANDLE, 8> handles;
             const size_t numberOfHandles =
                 std::min(pendingEvents->size(), static_cast<size_t>(MAXIMUM_WAIT_OBJECTS));
-            handles->reserve(numberOfHandles);
+            handles.reserve(numberOfHandles);
             // Gather events in reversed order (from the most recent to the oldest events).
             std::for_each_n(pendingEvents->rbegin(), numberOfHandles, [&handles](const auto& e) {
-                handles->push_back(e.receiver.GetPrimitive().Get());
+                handles.push_back(e.receiver.GetPrimitive().Get());
             });
             DWORD result =
-                WaitForMultipleObjects(handles->size(), handles->data(), /*bWaitAll=*/false,
+                WaitForMultipleObjects(handles.size(), handles.data(), /*bWaitAll=*/false,
                                        /*dwMilliseconds=*/0);
             DAWN_INTERNAL_ERROR_IF(result == WAIT_FAILED, "WaitForMultipleObjects() failed");
 
             DAWN_INTERNAL_ERROR_IF(
-                result >= WAIT_ABANDONED_0 && result < WAIT_ABANDONED_0 + handles->size(),
+                result >= WAIT_ABANDONED_0 && result < WAIT_ABANDONED_0 + handles.size(),
                 "WaitForMultipleObjects() get abandoned event");
 
             if (result == WAIT_TIMEOUT) {
diff --git a/src/dawn/native/d3d12/TextureD3D12.cpp b/src/dawn/native/d3d12/TextureD3D12.cpp
index 8764b94..b47b0f9 100644
--- a/src/dawn/native/d3d12/TextureD3D12.cpp
+++ b/src/dawn/native/d3d12/TextureD3D12.cpp
@@ -441,8 +441,8 @@
     if (SharedResourceMemoryContents* contents = GetSharedResourceMemoryContents()) {
         SharedTextureMemoryBase::PendingFenceList fences;
         contents->AcquirePendingFences(&fences);
-        waitFences.insert(waitFences.end(), std::make_move_iterator(fences->begin()),
-                          std::make_move_iterator(fences->end()));
+        waitFences.insert(waitFences.end(), std::make_move_iterator(fences.begin()),
+                          std::make_move_iterator(fences.end()));
         contents->SetLastUsageSerial(queue->GetPendingCommandSerial());
     }
 
diff --git a/src/dawn/native/metal/SharedTextureMemoryMTL.h b/src/dawn/native/metal/SharedTextureMemoryMTL.h
index 238bd7e..7caf798 100644
--- a/src/dawn/native/metal/SharedTextureMemoryMTL.h
+++ b/src/dawn/native/metal/SharedTextureMemoryMTL.h
@@ -32,6 +32,7 @@
 #import <Metal/Metal.h>
 #include <vector>
 
+#include "absl/container/inlined_vector.h"
 #include "dawn/common/CoreFoundationRef.h"
 #include "dawn/common/NSRef.h"
 #include "dawn/native/Error.h"
@@ -51,7 +52,8 @@
         const SharedTextureMemoryIOSurfaceDescriptor* descriptor);
 
     IOSurfaceRef GetIOSurface() const;
-    const StackVector<NSPRef<id<MTLTexture>>, kMaxPlanesPerFormat>& GetMtlPlaneTextures() const;
+    const absl::InlinedVector<NSPRef<id<MTLTexture>>, kMaxPlanesPerFormat>& GetMtlPlaneTextures()
+        const;
     MTLTextureUsage GetMtlTextureUsage() const;
     MTLPixelFormat GetMtlPixelFormat() const;
 
@@ -74,7 +76,7 @@
                                                      UnpackedPtr<EndAccessState>& state) override;
     MaybeError CreateMtlTextures();
 
-    StackVector<NSPRef<id<MTLTexture>>, kMaxPlanesPerFormat> mMtlPlaneTextures;
+    absl::InlinedVector<NSPRef<id<MTLTexture>>, kMaxPlanesPerFormat> mMtlPlaneTextures;
     MTLPixelFormat mMtlFormat = MTLPixelFormatInvalid;
     MTLTextureUsage mMtlUsage;
     CFRef<IOSurfaceRef> mIOSurface;
diff --git a/src/dawn/native/metal/SharedTextureMemoryMTL.mm b/src/dawn/native/metal/SharedTextureMemoryMTL.mm
index a826d02..2dc9ead 100644
--- a/src/dawn/native/metal/SharedTextureMemoryMTL.mm
+++ b/src/dawn/native/metal/SharedTextureMemoryMTL.mm
@@ -155,7 +155,7 @@
     return mIOSurface.Get();
 }
 
-const StackVector<NSPRef<id<MTLTexture>>, kMaxPlanesPerFormat>&
+const absl::InlinedVector<NSPRef<id<MTLTexture>>, kMaxPlanesPerFormat>&
 SharedTextureMemory::GetMtlPlaneTextures() const {
     return mMtlPlaneTextures;
 }
@@ -240,7 +240,7 @@
 
         mMtlUsage = mtlDesc.usage;
         mMtlFormat = mtlDesc.pixelFormat;
-        mMtlPlaneTextures->resize(1);
+        mMtlPlaneTextures.resize(1);
         mMtlPlaneTextures[0] =
             AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc
                                                                  iosurface:mIOSurface.Get()
@@ -250,7 +250,7 @@
         // Multiplanar format doesn't have equivalent MTLPixelFormat so just set it to invalid.
         mMtlFormat = MTLPixelFormatInvalid;
         const size_t numPlanes = IOSurfaceGetPlaneCount(mIOSurface.Get());
-        mMtlPlaneTextures->resize(numPlanes);
+        mMtlPlaneTextures.resize(numPlanes);
         for (size_t plane = 0; plane < numPlanes; ++plane) {
             mMtlPlaneTextures[plane] = AcquireNSPRef(CreateTextureMtlForPlane(
                 mMtlUsage, *format, plane, device, /*sampleCount=*/1, mIOSurface.Get()));
diff --git a/src/dawn/native/metal/TextureMTL.h b/src/dawn/native/metal/TextureMTL.h
index 0471735..83ba7bc 100644
--- a/src/dawn/native/metal/TextureMTL.h
+++ b/src/dawn/native/metal/TextureMTL.h
@@ -34,9 +34,9 @@
 
 #include "dawn/native/Texture.h"
 
+#include "absl/container/inlined_vector.h"
 #include "dawn/common/CoreFoundationRef.h"
 #include "dawn/common/NSRef.h"
-#include "dawn/common/StackContainer.h"
 #include "dawn/native/DawnNative.h"
 #include "dawn/native/MetalBackend.h"
 
@@ -94,7 +94,7 @@
                             const SubresourceRange& range,
                             TextureBase::ClearValue clearValue);
 
-    StackVector<NSPRef<id<MTLTexture>>, kMaxPlanesPerFormat> mMtlPlaneTextures;
+    absl::InlinedVector<NSPRef<id<MTLTexture>>, kMaxPlanesPerFormat> mMtlPlaneTextures;
     MTLPixelFormat mMtlFormat = MTLPixelFormatInvalid;
 
     MTLTextureUsage mMtlUsage;
diff --git a/src/dawn/native/metal/TextureMTL.mm b/src/dawn/native/metal/TextureMTL.mm
index 999856e..cb2187e 100644
--- a/src/dawn/native/metal/TextureMTL.mm
+++ b/src/dawn/native/metal/TextureMTL.mm
@@ -342,7 +342,7 @@
         NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
         mMtlUsage = [*mtlDesc usage];
         mMtlFormat = [*mtlDesc pixelFormat];
-        mMtlPlaneTextures->resize(1);
+        mMtlPlaneTextures.resize(1);
         mMtlPlaneTextures[0] =
             AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc.Get()]);
 
@@ -369,7 +369,7 @@
         // Multiplanar format doesn't have equivalent MTLPixelFormat so just set it to invalid.
         mMtlFormat = MTLPixelFormatInvalid;
         const size_t numPlanes = IOSurfaceGetPlaneCount(GetIOSurface());
-        mMtlPlaneTextures->resize(numPlanes);
+        mMtlPlaneTextures.resize(numPlanes);
         for (size_t plane = 0; plane < numPlanes; ++plane) {
             mMtlPlaneTextures[plane] = AcquireNSPRef(CreateTextureMtlForPlane(
                 mMtlUsage, GetFormat(), plane, device, GetSampleCount(), GetIOSurface()));
@@ -389,7 +389,7 @@
     NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
     mMtlUsage = [*mtlDesc usage];
     mMtlFormat = [*mtlDesc pixelFormat];
-    mMtlPlaneTextures->resize(1);
+    mMtlPlaneTextures.resize(1);
     mMtlPlaneTextures[0] = std::move(wrapped);
     SetLabelImpl();
 }
@@ -421,7 +421,7 @@
             contents->SetLastUsageSerial(GetDevice()->GetQueue()->GetPendingCommandSerial());
         }
 
-        if (!mWaitEvents.empty() || !fences->empty()) {
+        if (!mWaitEvents.empty() || !fences.empty()) {
             // There may be an open blit encoder from a copy command or writeBuffer.
             // Wait events are only allowed if there is no encoder open.
             commandContext->EndBlit();
@@ -455,16 +455,16 @@
     //   is implicitly destroyed. This case is thread-safe because there are no
     //   other threads using the texture since there are no other live refs.
     TextureBase::DestroyImpl();
-    mMtlPlaneTextures->clear();
+    mMtlPlaneTextures.clear();
     mIOSurface = nullptr;
 }
 
 void Texture::SetLabelImpl() {
     if (!GetFormat().IsMultiPlanar()) {
-        DAWN_ASSERT(mMtlPlaneTextures->size() == 1);
+        DAWN_ASSERT(mMtlPlaneTextures.size() == 1);
         SetDebugName(GetDevice(), mMtlPlaneTextures[0].Get(), "Dawn_Texture", GetLabel());
     } else {
-        for (size_t i = 0; i < mMtlPlaneTextures->size(); ++i) {
+        for (size_t i = 0; i < mMtlPlaneTextures.size(); ++i) {
             SetDebugName(GetDevice(), mMtlPlaneTextures[i].Get(),
                          absl::StrFormat("Dawn_Plane_Texture[%zu]", i).c_str(), GetLabel());
         }
@@ -474,16 +474,16 @@
 id<MTLTexture> Texture::GetMTLTexture(Aspect aspect) const {
     switch (aspect) {
         case Aspect::Plane0:
-            DAWN_ASSERT(mMtlPlaneTextures->size() > 1);
+            DAWN_ASSERT(mMtlPlaneTextures.size() > 1);
             return mMtlPlaneTextures[0].Get();
         case Aspect::Plane1:
-            DAWN_ASSERT(mMtlPlaneTextures->size() > 1);
+            DAWN_ASSERT(mMtlPlaneTextures.size() > 1);
             return mMtlPlaneTextures[1].Get();
         case Aspect::Plane2:
-            DAWN_ASSERT(mMtlPlaneTextures->size() > 2);
+            DAWN_ASSERT(mMtlPlaneTextures.size() > 2);
             return mMtlPlaneTextures[2].Get();
         default:
-            DAWN_ASSERT(mMtlPlaneTextures->size() == 1);
+            DAWN_ASSERT(mMtlPlaneTextures.size() == 1);
             return mMtlPlaneTextures[0].Get();
     }
 }
diff --git a/src/dawn/native/metal/UtilsMetal.h b/src/dawn/native/metal/UtilsMetal.h
index ee41923..8051085 100644
--- a/src/dawn/native/metal/UtilsMetal.h
+++ b/src/dawn/native/metal/UtilsMetal.h
@@ -30,8 +30,8 @@
 
 #include <string>
 
+#include "absl/container/inlined_vector.h"
 #include "dawn/common/NSRef.h"
-#include "dawn/common/StackContainer.h"
 #include "dawn/native/dawn_platform.h"
 #include "dawn/native/metal/DeviceMTL.h"
 #include "dawn/native/metal/ShaderModuleMTL.h"
@@ -94,11 +94,11 @@
         Extent3D copyExtent;
     };
 
-    StackVector<CopyInfo, kNumCommonTextureBufferCopyRegions> copies;
+    absl::InlinedVector<CopyInfo, kNumCommonTextureBufferCopyRegions> copies;
 
-    auto begin() const { return copies->begin(); }
-    auto end() const { return copies->end(); }
-    void push_back(const CopyInfo& copyInfo) { copies->push_back(copyInfo); }
+    auto begin() const { return copies.begin(); }
+    auto end() const { return copies.end(); }
+    void push_back(const CopyInfo& copyInfo) { copies.push_back(copyInfo); }
 };
 
 TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
diff --git a/src/dawn/tests/BUILD.gn b/src/dawn/tests/BUILD.gn
index 9af0d8b..de09e9c 100644
--- a/src/dawn/tests/BUILD.gn
+++ b/src/dawn/tests/BUILD.gn
@@ -355,7 +355,6 @@
     "unittests/SerialMapTests.cpp",
     "unittests/SerialQueueTests.cpp",
     "unittests/SlabAllocatorTests.cpp",
-    "unittests/StackContainerTests.cpp",
     "unittests/SubresourceStorageTests.cpp",
     "unittests/SystemUtilsTests.cpp",
     "unittests/ToBackendTests.cpp",
diff --git a/src/dawn/tests/unittests/StackContainerTests.cpp b/src/dawn/tests/unittests/StackContainerTests.cpp
deleted file mode 100644
index f25e6fc..0000000
--- a/src/dawn/tests/unittests/StackContainerTests.cpp
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is a modified copy of Chromium's /src/base/containers/stack_container_unittest.cc
-
-#include <algorithm>
-#include <cstddef>
-#include <vector>
-
-#include "dawn/common/Ref.h"
-#include "dawn/common/RefCounted.h"
-#include "dawn/common/StackContainer.h"
-#include "gtest/gtest.h"
-#include "partition_alloc/pointers/raw_ptr.h"
-
-namespace dawn {
-namespace {
-
-class Placeholder : public RefCounted {
-  public:
-    explicit Placeholder(int* alive) : mAlive(alive) { ++*mAlive; }
-
-  private:
-    ~Placeholder() override { --*mAlive; }
-
-    const raw_ptr<int> mAlive;
-};
-
-TEST(StackContainer, Vector) {
-    const int stack_size = 3;
-    StackVector<int, stack_size> vect;
-    const int* stack_buffer = &vect.stack_data().stack_buffer()[0];
-
-    // The initial |stack_size| elements should appear in the stack buffer.
-    EXPECT_EQ(static_cast<size_t>(stack_size), vect.container().capacity());
-    for (int i = 0; i < stack_size; i++) {
-        vect.container().push_back(i);
-        EXPECT_EQ(stack_buffer, &vect.container()[0]);
-        EXPECT_TRUE(vect.stack_data().used_stack_buffer_);
-    }
-
-    // Adding more elements should push the array onto the heap.
-    for (int i = 0; i < stack_size; i++) {
-        vect.container().push_back(i + stack_size);
-        EXPECT_NE(stack_buffer, &vect.container()[0]);
-        EXPECT_FALSE(vect.stack_data().used_stack_buffer_);
-    }
-
-    // The array should still be in order.
-    for (int i = 0; i < stack_size * 2; i++) {
-        EXPECT_EQ(i, vect.container()[i]);
-    }
-
-    // Resize to smaller. Our STL implementation won't reallocate in this case,
-    // otherwise it might use our stack buffer. We reserve right after the resize
-    // to guarantee it isn't using the stack buffer, even though it doesn't have
-    // much data.
-    vect.container().resize(stack_size);
-    vect.container().reserve(stack_size * 2);
-    EXPECT_FALSE(vect.stack_data().used_stack_buffer_);
-
-    // Copying the small vector to another should use the same allocator and use
-    // the now-unused stack buffer. GENERALLY CALLERS SHOULD NOT DO THIS since
-    // they have to get the template types just right and it can cause errors.
-    std::vector<int, StackAllocator<int, stack_size>> other(vect.container());
-    EXPECT_EQ(stack_buffer, &other.front());
-    EXPECT_TRUE(vect.stack_data().used_stack_buffer_);
-    for (int i = 0; i < stack_size; i++) {
-        EXPECT_EQ(i, other[i]);
-    }
-}
-
-TEST(StackContainer, VectorDoubleDelete) {
-    // Regression testing for double-delete.
-    typedef StackVector<Ref<Placeholder>, 2> Vector;
-    Vector vect;
-
-    int alive = 0;
-    Ref<Placeholder> placeholder = AcquireRef(new Placeholder(&alive));
-    EXPECT_EQ(alive, 1);
-
-    vect->push_back(placeholder);
-    EXPECT_EQ(alive, 1);
-
-    Placeholder* placeholder_unref = placeholder.Get();
-    placeholder = nullptr;
-    EXPECT_EQ(alive, 1);
-
-    auto itr = std::find(vect->begin(), vect->end(), placeholder_unref);
-    EXPECT_EQ(itr->Get(), placeholder_unref);
-    vect->erase(itr);
-    EXPECT_EQ(alive, 0);
-
-    // Shouldn't crash at exit.
-}
-
-template <size_t alignment>
-class AlignedData {
-  public:
-    AlignedData() { memset(data_, 0, alignment); }
-    ~AlignedData() = default;
-    AlignedData(const AlignedData&) = default;
-    AlignedData& operator=(const AlignedData&) = default;
-    alignas(alignment) char data_[alignment];
-};
-
-#define EXPECT_ALIGNED(ptr, align) EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
-
-TEST(StackContainer, BufferAlignment) {
-    StackVector<wchar_t, 16> text;
-    text->push_back(L'A');
-    EXPECT_ALIGNED(&text[0], alignof(wchar_t));
-
-    StackVector<double, 1> doubles;
-    doubles->push_back(0.0);
-    EXPECT_ALIGNED(&doubles[0], alignof(double));
-
-    StackVector<AlignedData<16>, 1> aligned16;
-    aligned16->push_back(AlignedData<16>());
-    EXPECT_ALIGNED(&aligned16[0], 16);
-
-#if !DAWN_COMPILER_IS(GCC) || defined(__x86_64__) || defined(__i386__)
-    // It seems that non-X86 gcc doesn't respect greater than 16 byte alignment.
-    // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33721 for details.
-    // TODO(sbc): Re-enable this if GCC starts respecting higher alignments.
-    StackVector<AlignedData<256>, 1> aligned256;
-    aligned256->push_back(AlignedData<256>());
-    EXPECT_ALIGNED(&aligned256[0], 256);
-#endif
-}
-
-}  // anonymous namespace
-
-template class StackVector<int, 2>;
-template class StackVector<Ref<Placeholder>, 2>;
-
-namespace {
-
-template <typename T, size_t size>
-void CheckStackVectorElements(const StackVector<T, size>& vec, std::initializer_list<T> expected) {
-    auto expected_it = expected.begin();
-    EXPECT_EQ(vec->size(), expected.size());
-    for (T t : vec) {
-        EXPECT_NE(expected.end(), expected_it);
-        EXPECT_EQ(*expected_it, t);
-        ++expected_it;
-    }
-    EXPECT_EQ(expected.end(), expected_it);
-}
-
-TEST(StackContainer, Iteration) {
-    StackVector<int, 3> vect;
-    vect->push_back(7);
-    vect->push_back(11);
-
-    CheckStackVectorElements(vect, {7, 11});
-    for (int& i : vect) {
-        ++i;
-    }
-    CheckStackVectorElements(vect, {8, 12});
-    vect->push_back(13);
-    CheckStackVectorElements(vect, {8, 12, 13});
-    vect->resize(5);
-    CheckStackVectorElements(vect, {8, 12, 13, 0, 0});
-    vect->resize(1);
-    CheckStackVectorElements(vect, {8});
-}
-
-}  // anonymous namespace
-}  // namespace dawn
diff --git a/third_party/gn/abseil-cpp/BUILD.gn b/third_party/gn/abseil-cpp/BUILD.gn
index 4bb1c23..baea578 100644
--- a/third_party/gn/abseil-cpp/BUILD.gn
+++ b/third_party/gn/abseil-cpp/BUILD.gn
@@ -213,6 +213,15 @@
   ]
 }
 
+absl_source_set("nullability") {
+  sources = [ "${dawn_abseil_dir}/absl/base/internal/nullability_impl.h" ]
+  public = [ "${dawn_abseil_dir}/absl/base/nullability.h" ]
+  deps = [
+    ":core_headers",
+    ":type_traits",
+  ]
+}
+
 absl_source_set("prefetch") {
   public = [
     "${dawn_abseil_dir}/absl/base/internal/prefetch.h",
@@ -372,6 +381,30 @@
   ]
 }
 
+absl_source_set("inlined_vector") {
+  public = [ "${dawn_abseil_dir}/absl/container/inlined_vector.h" ]
+  deps = [
+    ":algorithm",
+    ":core_headers",
+    ":inlined_vector_internal",
+    ":memory",
+    ":throw_delegate",
+    ":type_traits",
+  ]
+}
+
+absl_source_set("inlined_vector_internal") {
+  public = [ "${dawn_abseil_dir}/absl/container/internal/inlined_vector.h" ]
+  deps = [
+    ":compressed_tuple",
+    ":config",
+    ":core_headers",
+    ":memory",
+    ":span",
+    ":type_traits",
+  ]
+}
+
 absl_source_set("raw_hash_map") {
   public = [ "${dawn_abseil_dir}/absl/container/internal/raw_hash_map.h" ]
   deps = [
@@ -961,6 +994,18 @@
   ]
 }
 
+absl_source_set("span") {
+  public = [ "${dawn_abseil_dir}/absl/types/span.h" ]
+  sources = [ "${dawn_abseil_dir}/absl/types/internal/span.h" ]
+  public_deps = [
+    ":algorithm",
+    ":core_headers",
+    ":nullability",
+    ":throw_delegate",
+    ":type_traits",
+  ]
+}
+
 absl_source_set("variant") {
   sources = [ "${dawn_abseil_dir}/absl/types/internal/variant.h" ]
   public = [ "${dawn_abseil_dir}/absl/types/variant.h" ]