Implement SharedTextureMemory and SharedFence on Metal

Bug: dawn:1745
Change-Id: I150244f45e01c12a0d62264c0ea68ed11b0d7c19
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/142403
Kokoro: Kokoro <noreply+kokoro@google.com>
Reviewed-by: Loko Kung <lokokung@google.com>
Commit-Queue: Austin Eng <enga@chromium.org>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
diff --git a/dawn.json b/dawn.json
index e1fea45..1b00ca8 100644
--- a/dawn.json
+++ b/dawn.json
@@ -1473,7 +1473,7 @@
                 "name": "create texture",
                 "returns": "texture",
                 "args": [
-                    {"name": "descriptor", "type": "texture descriptor", "annotation": "const*"}
+                    {"name": "descriptor", "type": "texture descriptor", "annotation": "const*", "optional": true, "default": "nullptr"}
                 ]
             },
             {
diff --git a/src/dawn/native/BUILD.gn b/src/dawn/native/BUILD.gn
index c80aad9..109d0ad 100644
--- a/src/dawn/native/BUILD.gn
+++ b/src/dawn/native/BUILD.gn
@@ -612,6 +612,10 @@
       "metal/SamplerMTL.mm",
       "metal/ShaderModuleMTL.h",
       "metal/ShaderModuleMTL.mm",
+      "metal/SharedFenceMTL.h",
+      "metal/SharedFenceMTL.mm",
+      "metal/SharedTextureMemoryMTL.h",
+      "metal/SharedTextureMemoryMTL.mm",
       "metal/SwapChainMTL.h",
       "metal/SwapChainMTL.mm",
       "metal/TextureMTL.h",
diff --git a/src/dawn/native/CMakeLists.txt b/src/dawn/native/CMakeLists.txt
index 22969fe..568dd9e 100644
--- a/src/dawn/native/CMakeLists.txt
+++ b/src/dawn/native/CMakeLists.txt
@@ -462,6 +462,10 @@
         "metal/SamplerMTL.mm"
         "metal/ShaderModuleMTL.h"
         "metal/ShaderModuleMTL.mm"
+        "metal/SharedFenceMTL.h"
+        "metal/SharedFenceMTL.mm"
+        "metal/SharedTextureMemoryMTL.h"
+        "metal/SharedTextureMemoryMTL.mm"
         "metal/SwapChainMTL.h"
         "metal/SwapChainMTL.mm"
         "metal/TextureMTL.h"
diff --git a/src/dawn/native/Device.cpp b/src/dawn/native/Device.cpp
index 1a4d006..e89691d 100644
--- a/src/dawn/native/Device.cpp
+++ b/src/dawn/native/Device.cpp
@@ -394,7 +394,7 @@
     // can destroy the frontend cache.
 
     // clang-format off
-        static constexpr std::array<ObjectType, 18> kObjectTypeDependencyOrder = {
+        static constexpr std::array<ObjectType, 20> kObjectTypeDependencyOrder = {
             ObjectType::ComputePassEncoder,
             ObjectType::RenderPassEncoder,
             ObjectType::RenderBundleEncoder,
@@ -408,6 +408,8 @@
             ObjectType::BindGroup,
             ObjectType::BindGroupLayout,
             ObjectType::ShaderModule,
+            ObjectType::SharedTextureMemory,
+            ObjectType::SharedFence,
             ObjectType::ExternalTexture,
             ObjectType::Texture,  // Note that Textures own the TextureViews.
             ObjectType::QuerySet,
@@ -1362,8 +1364,12 @@
 SharedTextureMemoryBase* DeviceBase::APIImportSharedTextureMemory(
     const SharedTextureMemoryDescriptor* descriptor) {
     Ref<SharedTextureMemoryBase> result = nullptr;
-    if (ConsumedError(ImportSharedTextureMemoryImpl(descriptor), &result,
-                      "calling %s.ImportSharedTextureMemory(%s).", this, descriptor)) {
+    if (ConsumedError(
+            [&]() -> ResultOrError<Ref<SharedTextureMemoryBase>> {
+                DAWN_TRY(ValidateIsAlive());
+                return ImportSharedTextureMemoryImpl(descriptor);
+            }(),
+            &result, "calling %s.ImportSharedTextureMemory(%s).", this, descriptor)) {
         return SharedTextureMemoryBase::MakeError(this, descriptor);
     }
     return result.Detach();
@@ -1376,8 +1382,12 @@
 
 SharedFenceBase* DeviceBase::APIImportSharedFence(const SharedFenceDescriptor* descriptor) {
     Ref<SharedFenceBase> result = nullptr;
-    if (ConsumedError(ImportSharedFenceImpl(descriptor), &result,
-                      "calling %s.ImportSharedFence(%s).", this, descriptor)) {
+    if (ConsumedError(
+            [&]() -> ResultOrError<Ref<SharedFenceBase>> {
+                DAWN_TRY(ValidateIsAlive());
+                return ImportSharedFenceImpl(descriptor);
+            }(),
+            &result, "calling %s.ImportSharedFence(%s).", this, descriptor)) {
         return SharedFenceBase::MakeError(this, descriptor);
     }
     return result.Detach();
diff --git a/src/dawn/native/SharedFence.cpp b/src/dawn/native/SharedFence.cpp
index 38a9796..3674438 100644
--- a/src/dawn/native/SharedFence.cpp
+++ b/src/dawn/native/SharedFence.cpp
@@ -19,10 +19,22 @@
 
 namespace dawn::native {
 
+namespace {
+
+class ErrorSharedFence : public SharedFenceBase {
+  public:
+    ErrorSharedFence(DeviceBase* device, const SharedFenceDescriptor* descriptor)
+        : SharedFenceBase(device, descriptor, ObjectBase::kError) {}
+
+    MaybeError ExportInfoImpl(SharedFenceExportInfo* info) const override { UNREACHABLE(); }
+};
+
+}  // namespace
+
 // static
 SharedFenceBase* SharedFenceBase::MakeError(DeviceBase* device,
                                             const SharedFenceDescriptor* descriptor) {
-    return new SharedFenceBase(device, descriptor, ObjectBase::kError);
+    return new ErrorSharedFence(device, descriptor);
 }
 
 SharedFenceBase::SharedFenceBase(DeviceBase* device,
@@ -30,14 +42,26 @@
                                  ObjectBase::ErrorTag tag)
     : ApiObjectBase(device, tag, descriptor->label) {}
 
+SharedFenceBase::SharedFenceBase(DeviceBase* device, const char* label)
+    : ApiObjectBase(device, label) {}
+
 ObjectType SharedFenceBase::GetType() const {
     return ObjectType::SharedFence;
 }
 
 void SharedFenceBase::APIExportInfo(SharedFenceExportInfo* info) const {
-    DAWN_UNUSED(GetDevice()->ConsumedError(DAWN_UNIMPLEMENTED_ERROR("Not implemented")));
+    DAWN_UNUSED(GetDevice()->ConsumedError(ExportInfo(info)));
 }
 
 void SharedFenceBase::DestroyImpl() {}
 
+MaybeError SharedFenceBase::ExportInfo(SharedFenceExportInfo* info) const {
+    // Default the type to Undefined; it is overwritten with the actual type
+    // provided that no error occurs.
+    info->type = wgpu::SharedFenceType::Undefined;
+
+    DAWN_TRY(GetDevice()->ValidateObject(this));
+    return ExportInfoImpl(info);
+}
+
 }  // namespace dawn::native
diff --git a/src/dawn/native/SharedFence.h b/src/dawn/native/SharedFence.h
index 53b6734..d3a697b 100644
--- a/src/dawn/native/SharedFence.h
+++ b/src/dawn/native/SharedFence.h
@@ -15,6 +15,7 @@
 #ifndef SRC_DAWN_NATIVE_SHAREDFENCE_H_
 #define SRC_DAWN_NATIVE_SHAREDFENCE_H_
 
+#include "dawn/native/Error.h"
 #include "dawn/native/ObjectBase.h"
 
 namespace dawn::native {
@@ -30,12 +31,22 @@
 
     void APIExportInfo(SharedFenceExportInfo* info) const;
 
-  private:
-    void DestroyImpl() override;
-
+  protected:
+    SharedFenceBase(DeviceBase* device, const char* label);
     SharedFenceBase(DeviceBase* device,
                     const SharedFenceDescriptor* descriptor,
                     ObjectBase::ErrorTag tag);
+
+  private:
+    MaybeError ExportInfo(SharedFenceExportInfo* info) const;
+
+    void DestroyImpl() override;
+    virtual MaybeError ExportInfoImpl(SharedFenceExportInfo* info) const = 0;
+};
+
+struct FenceAndSignalValue {
+    Ref<SharedFenceBase> object;
+    uint64_t signaledValue;
 };
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/SharedTextureMemory.cpp b/src/dawn/native/SharedTextureMemory.cpp
index d044906..c41ef03 100644
--- a/src/dawn/native/SharedTextureMemory.cpp
+++ b/src/dawn/native/SharedTextureMemory.cpp
@@ -14,22 +14,68 @@
 
 #include "dawn/native/SharedTextureMemory.h"
 
+#include <utility>
+
+#include "dawn/native/ChainUtils_autogen.h"
 #include "dawn/native/Device.h"
+#include "dawn/native/SharedFence.h"
 #include "dawn/native/dawn_platform.h"
 
 namespace dawn::native {
 
+namespace {
+
+class ErrorSharedTextureMemory : public SharedTextureMemoryBase {
+  public:
+    ErrorSharedTextureMemory(DeviceBase* device, const SharedTextureMemoryDescriptor* descriptor)
+        : SharedTextureMemoryBase(device, descriptor, ObjectBase::kError) {}
+
+    ResultOrError<Ref<TextureBase>> CreateTextureImpl(
+        const TextureDescriptor* descriptor) override {
+        UNREACHABLE();
+    }
+    MaybeError BeginAccessImpl(TextureBase* texture,
+                               const BeginAccessDescriptor* descriptor) override {
+        UNREACHABLE();
+    }
+    ResultOrError<FenceAndSignalValue> EndAccessImpl(TextureBase* texture) override {
+        UNREACHABLE();
+    }
+};
+
+}  // namespace
+
 // static
 SharedTextureMemoryBase* SharedTextureMemoryBase::MakeError(
     DeviceBase* device,
     const SharedTextureMemoryDescriptor* descriptor) {
-    return new SharedTextureMemoryBase(device, descriptor, ObjectBase::kError);
+    return new ErrorSharedTextureMemory(device, descriptor);
 }
 
 SharedTextureMemoryBase::SharedTextureMemoryBase(DeviceBase* device,
                                                  const SharedTextureMemoryDescriptor* descriptor,
                                                  ObjectBase::ErrorTag tag)
-    : ApiObjectBase(device, tag, descriptor->label) {}
+    : ApiObjectBase(device, tag, descriptor->label),
+      mProperties{
+          nullptr,
+          wgpu::TextureUsage::None,
+          {0, 0, 0},
+          wgpu::TextureFormat::Undefined,
+      } {}
+
+SharedTextureMemoryBase::SharedTextureMemoryBase(DeviceBase* device,
+                                                 const char* label,
+                                                 const SharedTextureMemoryProperties& properties)
+    : ApiObjectBase(device, label), mProperties(properties) {
+    const Format& internalFormat = device->GetValidInternalFormat(properties.format);
+    if (!internalFormat.supportsStorageUsage) {
+        ASSERT(!(mProperties.usage & wgpu::TextureUsage::StorageBinding));
+    }
+    if (!internalFormat.isRenderable) {
+        ASSERT(!(mProperties.usage & wgpu::TextureUsage::RenderAttachment));
+    }
+    GetObjectTrackingList()->Track(this);
+}
 
 ObjectType SharedTextureMemoryBase::GetType() const {
     return ObjectType::SharedTextureMemory;
@@ -38,23 +84,220 @@
 void SharedTextureMemoryBase::DestroyImpl() {}
 
 void SharedTextureMemoryBase::APIGetProperties(SharedTextureMemoryProperties* properties) const {
-    DAWN_UNUSED(GetDevice()->ConsumedError(DAWN_UNIMPLEMENTED_ERROR("Not implemented")));
+    properties->usage = mProperties.usage;
+    properties->size = mProperties.size;
+    properties->format = mProperties.format;
+
+    if (GetDevice()->ConsumedError(ValidateSTypes(properties->nextInChain, {}),
+                                   "calling %s.GetProperties", this)) {
+        return;
+    }
 }
 
 TextureBase* SharedTextureMemoryBase::APICreateTexture(const TextureDescriptor* descriptor) {
-    DAWN_UNUSED(GetDevice()->ConsumedError(DAWN_UNIMPLEMENTED_ERROR("Not implemented")));
-    return TextureBase::MakeError(GetDevice(), descriptor);
+    Ref<TextureBase> result;
+
+    // Provide the defaults if no descriptor is provided.
+    TextureDescriptor defaultDescriptor;
+    if (descriptor == nullptr) {
+        defaultDescriptor = {};
+        defaultDescriptor.format = mProperties.format;
+        defaultDescriptor.size = mProperties.size;
+        defaultDescriptor.usage = mProperties.usage;
+        descriptor = &defaultDescriptor;
+    }
+
+    if (GetDevice()->ConsumedError(CreateTexture(descriptor), &result,
+                                   InternalErrorType::OutOfMemory, "calling %s.CreateTexture(%s).",
+                                   this, descriptor)) {
+        return TextureBase::MakeError(GetDevice(), descriptor);
+    }
+    return result.Detach();
+}
+
+ResultOrError<Ref<TextureBase>> SharedTextureMemoryBase::CreateTexture(
+    const TextureDescriptor* descriptor) {
+    DAWN_TRY(GetDevice()->ValidateIsAlive());
+    DAWN_TRY(GetDevice()->ValidateObject(this));
+
+    // Validate that there is one 2D, single-sampled subresource
+    DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+                    "Texture dimension (%s) is not %s.", descriptor->dimension,
+                    wgpu::TextureDimension::e2D);
+    DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
+                    descriptor->mipLevelCount);
+    DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1, "Array layer count (%u) is not 1.",
+                    descriptor->size.depthOrArrayLayers);
+    DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
+                    descriptor->sampleCount);
+
+    // Validate that the texture size exactly matches the shared texture memory's size.
+    DAWN_INVALID_IF(
+        (descriptor->size.width != mProperties.size.width) ||
+            (descriptor->size.height != mProperties.size.height) ||
+            (descriptor->size.depthOrArrayLayers != mProperties.size.depthOrArrayLayers),
+        "SharedTextureMemory size (%s) doesn't match descriptor size (%s).", &mProperties.size,
+        &descriptor->size);
+
+    // Validate that the texture format exactly matches the shared texture memory's format.
+    DAWN_INVALID_IF(descriptor->format != mProperties.format,
+                    "SharedTextureMemory format (%s) doesn't match descriptor format (%s).",
+                    mProperties.format, descriptor->format);
+
+    // Validate the rest of the texture descriptor, and require its usage to be a subset of the
+    // shared texture memory's usage.
+    DAWN_TRY(ValidateTextureDescriptor(GetDevice(), descriptor, AllowMultiPlanarTextureFormat::Yes,
+                                       mProperties.usage));
+
+    Ref<TextureBase> texture;
+    DAWN_TRY_ASSIGN(texture, CreateTextureImpl(descriptor));
+    // Access is started on memory.BeginAccess.
+    texture->SetHasAccess(false);
+    return texture;
+}
+
+void SharedTextureMemoryBase::PushAccessFences(TextureBase* texture,
+                                               const BeginAccessDescriptor* descriptor) {
+    PendingFenceList fences;
+    for (size_t i = 0; i < descriptor->fenceCount; ++i) {
+        fences->push_back({descriptor->fences[i], descriptor->signaledValues[i]});
+    }
+    mAccessScopes[texture].push(fences);
+}
+
+void SharedTextureMemoryBase::AcquireBeginFences(TextureBase* texture, PendingFenceList* fences) {
+    if (!mAccessScopes[texture].empty()) {
+        auto& current = mAccessScopes[texture].top();
+        *fences = current;
+        current->clear();
+    }
+}
+
+void SharedTextureMemoryBase::SetLastUsageSerial(ExecutionSerial lastUsageSerial) {
+    mLastUsageSerial = lastUsageSerial;
+}
+
+ExecutionSerial SharedTextureMemoryBase::GetLastUsageSerial() const {
+    return mLastUsageSerial;
+}
+
+void SharedTextureMemoryBase::PopAccessFences(TextureBase* texture, PendingFenceList* fences) {
+    if (!mAccessScopes[texture].empty()) {
+        *fences = mAccessScopes[texture].top();
+        mAccessScopes[texture].pop();
+    }
 }
 
 void SharedTextureMemoryBase::APIBeginAccess(TextureBase* texture,
                                              const BeginAccessDescriptor* descriptor) {
-    DAWN_UNUSED(GetDevice()->ConsumedError(DAWN_UNIMPLEMENTED_ERROR("Not implemented")));
+    DAWN_UNUSED(GetDevice()->ConsumedError(BeginAccess(texture, descriptor),
+                                           "calling %s.BeginAccess(%s).", this, texture));
+}
+
+MaybeError SharedTextureMemoryBase::BeginAccess(TextureBase* texture,
+                                                const BeginAccessDescriptor* descriptor) {
+    PushAccessFences(texture, descriptor);
+
+    DAWN_INVALID_IF(mCurrentAccess != nullptr,
+                    "Cannot begin access with %s on %s which is currently accessed by %s.", texture,
+                    this, mCurrentAccess.Get());
+    mCurrentAccess = texture;
+
+    DAWN_TRY(GetDevice()->ValidateIsAlive());
+    DAWN_TRY(GetDevice()->ValidateObject(texture));
+    for (size_t i = 0; i < descriptor->fenceCount; ++i) {
+        DAWN_TRY(GetDevice()->ValidateObject(descriptor->fences[i]));
+    }
+
+    Ref<SharedTextureMemoryBase> memory = texture->TryGetSharedTextureMemory();
+    DAWN_INVALID_IF(memory.Get() != this, "%s was created from %s and cannot be used with %s.",
+                    texture, memory.Get(), this);
+
+    DAWN_INVALID_IF(texture->GetFormat().IsMultiPlanar() && !descriptor->initialized,
+                    "BeginAccess on %s with multiplanar format (%s) must be initialized.", texture,
+                    texture->GetFormat().format);
+
+    DAWN_TRY(BeginAccessImpl(texture, descriptor));
+    if (!texture->IsError()) {
+        texture->SetHasAccess(true);
+        texture->SetIsSubresourceContentInitialized(descriptor->initialized,
+                                                    texture->GetAllSubresources());
+    }
+    return {};
 }
 
 void SharedTextureMemoryBase::APIEndAccess(TextureBase* texture, EndAccessState* state) {
-    DAWN_UNUSED(GetDevice()->ConsumedError(DAWN_UNIMPLEMENTED_ERROR("Not implemented")));
+    DAWN_UNUSED(GetDevice()->ConsumedError(EndAccess(texture, state), "calling %s.EndAccess(%s).",
+                                           this, texture));
 }
 
-void APISharedTextureMemoryEndAccessStateFreeMembers(WGPUSharedTextureMemoryEndAccessState state) {}
+MaybeError SharedTextureMemoryBase::EndAccess(TextureBase* texture, EndAccessState* state) {
+    PendingFenceList fenceList;
+    PopAccessFences(texture, &fenceList);
+
+    if (!texture->IsError()) {
+        texture->SetHasAccess(false);
+    }
+
+    // Call the error-generating part of the EndAccess implementation. This is separated out because
+    // writing the output state must happen regardless of whether or not EndAccessInternal
+    // succeeds.
+    MaybeError err;
+    {
+        ResultOrError<FenceAndSignalValue> result = EndAccessInternal(texture, state);
+        if (result.IsSuccess()) {
+            fenceList->push_back(result.AcquireSuccess());
+        } else {
+            err = result.AcquireError();
+        }
+    }
+
+    // Copy the fences to the output state.
+    if (size_t fenceCount = fenceList->size()) {
+        auto* fences = new SharedFenceBase*[fenceCount];
+        uint64_t* signaledValues = new uint64_t[fenceCount];
+        for (size_t i = 0; i < fenceCount; ++i) {
+            fences[i] = fenceList[i].object.Detach();
+            signaledValues[i] = fenceList[i].signaledValue;
+        }
+
+        state->fenceCount = fenceCount;
+        state->fences = fences;
+        state->signaledValues = signaledValues;
+    } else {
+        state->fenceCount = 0;
+        state->fences = nullptr;
+        state->signaledValues = nullptr;
+    }
+    state->initialized = texture->IsError() ||
+                         texture->IsSubresourceContentInitialized(texture->GetAllSubresources());
+    return err;
+}
+
+ResultOrError<FenceAndSignalValue> SharedTextureMemoryBase::EndAccessInternal(
+    TextureBase* texture,
+    EndAccessState* state) {
+    DAWN_INVALID_IF(mCurrentAccess != texture,
+                    "Cannot end access with %s on %s which is currently accessed by %s.", texture,
+                    this, mCurrentAccess.Get());
+    mCurrentAccess = nullptr;
+
+    DAWN_TRY(GetDevice()->ValidateObject(texture));
+
+    Ref<SharedTextureMemoryBase> memory = texture->TryGetSharedTextureMemory();
+    DAWN_INVALID_IF(memory.Get() != this, "%s was created from %s and cannot be used with %s.",
+                    texture, memory.Get(), this);
+
+    return EndAccessImpl(texture);
+}
+
+void APISharedTextureMemoryEndAccessStateFreeMembers(WGPUSharedTextureMemoryEndAccessState cState) {
+    auto* state = reinterpret_cast<SharedTextureMemoryBase::EndAccessState*>(&cState);
+    for (size_t i = 0; i < state->fenceCount; ++i) {
+        state->fences[i]->APIRelease();
+    }
+    delete[] state->fences;
+    delete[] state->signaledValues;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/SharedTextureMemory.h b/src/dawn/native/SharedTextureMemory.h
index 0c95ca0..0d09fc4 100644
--- a/src/dawn/native/SharedTextureMemory.h
+++ b/src/dawn/native/SharedTextureMemory.h
@@ -15,7 +15,16 @@
 #ifndef SRC_DAWN_NATIVE_SHAREDTEXTUREMEMORY_H_
 #define SRC_DAWN_NATIVE_SHAREDTEXTUREMEMORY_H_
 
+#include <map>
+#include <stack>
+
+#include "dawn/common/StackContainer.h"
+#include "dawn/common/WeakRefSupport.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/IntegerTypes.h"
 #include "dawn/native/ObjectBase.h"
+#include "dawn/native/SharedFence.h"
+#include "dawn/native/dawn_platform.h"
 
 namespace dawn::native {
 
@@ -25,10 +34,12 @@
 struct SharedTextureMemoryProperties;
 struct TextureDescriptor;
 
-class SharedTextureMemoryBase : public ApiObjectBase {
+class SharedTextureMemoryBase : public ApiObjectBase,
+                                public WeakRefSupport<SharedTextureMemoryBase> {
   public:
     using BeginAccessDescriptor = SharedTextureMemoryBeginAccessDescriptor;
     using EndAccessState = SharedTextureMemoryEndAccessState;
+    using PendingFenceList = StackVector<FenceAndSignalValue, 1>;
 
     static SharedTextureMemoryBase* MakeError(DeviceBase* device,
                                               const SharedTextureMemoryDescriptor* descriptor);
@@ -40,12 +51,56 @@
 
     ObjectType GetType() const override;
 
-  private:
-    void DestroyImpl() override;
+    // Acquire the begin fences for the current access scope on `texture`.
+    void AcquireBeginFences(TextureBase* texture, PendingFenceList* fences);
 
+    // Set the last usage serial. This indicates when the SharedFence exported
+    // from APIEndAccess will complete.
+    void SetLastUsageSerial(ExecutionSerial lastUsageSerial);
+    ExecutionSerial GetLastUsageSerial() const;
+
+  protected:
+    SharedTextureMemoryBase(DeviceBase* device,
+                            const char* label,
+                            const SharedTextureMemoryProperties& properties);
     SharedTextureMemoryBase(DeviceBase* device,
                             const SharedTextureMemoryDescriptor* descriptor,
                             ObjectBase::ErrorTag tag);
+
+    void DestroyImpl() override;
+
+    const SharedTextureMemoryProperties mProperties;
+
+    Ref<TextureBase> mCurrentAccess;
+
+  private:
+    ResultOrError<Ref<TextureBase>> CreateTexture(const TextureDescriptor* descriptor);
+    MaybeError BeginAccess(TextureBase* texture, const BeginAccessDescriptor* descriptor);
+    MaybeError EndAccess(TextureBase* texture, EndAccessState* state);
+    ResultOrError<FenceAndSignalValue> EndAccessInternal(TextureBase* texture,
+                                                         EndAccessState* state);
+
+    virtual ResultOrError<Ref<TextureBase>> CreateTextureImpl(
+        const TextureDescriptor* descriptor) = 0;
+
+    // BeginAccessImpl validates the operation is valid on the backend, and performs any
+    // backend specific operations. It does NOT need to acquire begin fences; that is done in the
+    // frontend in BeginAccess.
+    virtual MaybeError BeginAccessImpl(TextureBase* texture,
+                                       const BeginAccessDescriptor* descriptor) = 0;
+    // EndAccessImpl validates the operation is valid on the backend, and returns the end fence.
+    virtual ResultOrError<FenceAndSignalValue> EndAccessImpl(TextureBase* texture) = 0;
+
+    // Begin an access scope on `texture`, passing a list of fences that should be waited on
+    // before use.
+    void PushAccessFences(TextureBase* texture,
+                          const SharedTextureMemoryBeginAccessDescriptor* descriptor);
+    // End an access scope on `texture`, writing out any fences that have not yet been acquired.
+    void PopAccessFences(TextureBase* texture, PendingFenceList* fences);
+
+    // Map of texture -> stack of PendingFenceList.
+    std::map<Ref<TextureBase>, std::stack<PendingFenceList>> mAccessScopes;
+    ExecutionSerial mLastUsageSerial{0};
 };
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/Texture.cpp b/src/dawn/native/Texture.cpp
index 496b830..b603301 100644
--- a/src/dawn/native/Texture.cpp
+++ b/src/dawn/native/Texture.cpp
@@ -16,6 +16,7 @@
 
 #include <algorithm>
 #include <string>
+#include <utility>
 
 #include "dawn/common/Assert.h"
 #include "dawn/common/Constants.h"
@@ -28,6 +29,7 @@
 #include "dawn/native/ObjectType_autogen.h"
 #include "dawn/native/PassResourceUsage.h"
 #include "dawn/native/PhysicalDevice.h"
+#include "dawn/native/SharedTextureMemory.h"
 #include "dawn/native/ValidationUtils_autogen.h"
 
 namespace dawn::native {
@@ -295,7 +297,8 @@
 MaybeError ValidateTextureUsage(const DeviceBase* device,
                                 const TextureDescriptor* descriptor,
                                 wgpu::TextureUsage usage,
-                                const Format* format) {
+                                const Format* format,
+                                std::optional<wgpu::TextureUsage> allowedSharedTextureMemoryUsage) {
     DAWN_TRY(dawn::native::ValidateTextureUsage(usage));
 
     DAWN_INVALID_IF(usage == wgpu::TextureUsage::None, "The texture usage must not be 0.");
@@ -347,13 +350,21 @@
         // TODO(dawn:1704): Validate the constraints on the dimension, format, etc.
     }
 
-    // Only allows simple readonly texture usages.
-    constexpr wgpu::TextureUsage kValidMultiPlanarUsages =
-        wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc;
-    DAWN_INVALID_IF(format->IsMultiPlanar() && !IsSubset(usage, kValidMultiPlanarUsages),
-                    "The texture usage (%s) is incompatible with the multi-planar format (%s).",
-                    usage, format->format);
-
+    if (!allowedSharedTextureMemoryUsage) {
+        // Legacy path
+        // TODO(crbug.com/dawn/1795): Remove after migrating all old usages.
+        // Only allows simple readonly texture usages.
+        constexpr wgpu::TextureUsage kValidMultiPlanarUsages =
+            wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc;
+        DAWN_INVALID_IF(format->IsMultiPlanar() && !IsSubset(usage, kValidMultiPlanarUsages),
+                        "The texture usage (%s) is incompatible with the multi-planar format (%s).",
+                        usage, format->format);
+    } else {
+        DAWN_INVALID_IF(
+            !IsSubset(usage, *allowedSharedTextureMemoryUsage),
+            "The texture usage (%s) is not a subset of the shared texture memory usage (%s).",
+            usage, *allowedSharedTextureMemoryUsage);
+    }
     return {};
 }
 
@@ -406,9 +417,11 @@
 
 }  // anonymous namespace
 
-MaybeError ValidateTextureDescriptor(const DeviceBase* device,
-                                     const TextureDescriptor* descriptor,
-                                     AllowMultiPlanarTextureFormat allowMultiPlanar) {
+MaybeError ValidateTextureDescriptor(
+    const DeviceBase* device,
+    const TextureDescriptor* descriptor,
+    AllowMultiPlanarTextureFormat allowMultiPlanar,
+    std::optional<wgpu::TextureUsage> allowedSharedTextureMemoryUsage) {
     DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
                                  wgpu::SType::DawnTextureInternalUsageDescriptor));
 
@@ -444,7 +457,8 @@
         usage |= internalUsageDesc->internalUsage;
     }
 
-    DAWN_TRY(ValidateTextureUsage(device, descriptor, usage, format));
+    DAWN_TRY(ValidateTextureUsage(device, descriptor, usage, format,
+                                  std::move(allowedSharedTextureMemoryUsage)));
     DAWN_TRY(ValidateTextureDimension(descriptor->dimension));
     DAWN_TRY(ValidateSampleCount(descriptor, usage, format));
 
@@ -806,7 +820,11 @@
 
 MaybeError TextureBase::ValidateCanUseInSubmitNow() const {
     ASSERT(!IsError());
-    DAWN_INVALID_IF(mState.destroyed, "Destroyed texture %s used in a submit.", this);
+    if (DAWN_UNLIKELY(mState.destroyed || !mState.hasAccess)) {
+        DAWN_INVALID_IF(mState.destroyed, "Destroyed texture %s used in a submit.", this);
+        DAWN_INVALID_IF(!mState.hasAccess, "%s without current access to %s used in a submit.",
+                        this, mSharedTextureMemory.Promote().Get());
+    }
 
     return {};
 }
@@ -909,6 +927,10 @@
     return (GetUsage() & wgpu::TextureUsage::TextureBinding) != 0;
 }
 
+Ref<SharedTextureMemoryBase> TextureBase::TryGetSharedTextureMemory() {
+    return mSharedTextureMemory.Promote();
+}
+
 void TextureBase::APIDestroy() {
     Destroy();
 }
diff --git a/src/dawn/native/Texture.h b/src/dawn/native/Texture.h
index b12a997..21cbdf0 100644
--- a/src/dawn/native/Texture.h
+++ b/src/dawn/native/Texture.h
@@ -17,12 +17,14 @@
 
 #include <vector>
 
+#include "dawn/common/WeakRef.h"
 #include "dawn/common/ityp_array.h"
 #include "dawn/common/ityp_bitset.h"
 #include "dawn/native/Error.h"
 #include "dawn/native/Format.h"
 #include "dawn/native/Forward.h"
 #include "dawn/native/ObjectBase.h"
+#include "dawn/native/SharedTextureMemory.h"
 #include "dawn/native/Subresource.h"
 
 #include "dawn/native/dawn_platform.h"
@@ -37,7 +39,8 @@
 MaybeError ValidateTextureDescriptor(
     const DeviceBase* device,
     const TextureDescriptor* descriptor,
-    AllowMultiPlanarTextureFormat allowMultiPlanar = AllowMultiPlanarTextureFormat::No);
+    AllowMultiPlanarTextureFormat allowMultiPlanar = AllowMultiPlanarTextureFormat::No,
+    std::optional<wgpu::TextureUsage> allowedSharedTextureMemoryUsage = std::nullopt);
 MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
                                          const TextureBase* texture,
                                          const TextureViewDescriptor* descriptor);
@@ -115,6 +118,8 @@
 
     bool IsImplicitMSAARenderTextureViewSupported() const;
 
+    Ref<SharedTextureMemoryBase> TryGetSharedTextureMemory();
+
     // Dawn API
     TextureViewBase* APICreateView(const TextureViewDescriptor* descriptor = nullptr);
     void APIDestroy();
@@ -134,6 +139,9 @@
     void DestroyImpl() override;
     void AddInternalUsage(wgpu::TextureUsage usage);
 
+    // The shared texture memory the texture was created from. May be null.
+    WeakRef<SharedTextureMemoryBase> mSharedTextureMemory;
+
   private:
     struct TextureState {
         TextureState();
diff --git a/src/dawn/native/ToBackend.h b/src/dawn/native/ToBackend.h
index 664da57..2cb06ac 100644
--- a/src/dawn/native/ToBackend.h
+++ b/src/dawn/native/ToBackend.h
@@ -99,6 +99,16 @@
 };
 
 template <typename BackendTraits>
+struct ToBackendTraits<SharedFenceBase, BackendTraits> {
+    using BackendType = typename BackendTraits::SharedFenceType;
+};
+
+template <typename BackendTraits>
+struct ToBackendTraits<SharedTextureMemoryBase, BackendTraits> {
+    using BackendType = typename BackendTraits::SharedTextureMemoryType;
+};
+
+template <typename BackendTraits>
 struct ToBackendTraits<TextureBase, BackendTraits> {
     using BackendType = typename BackendTraits::TextureType;
 };
diff --git a/src/dawn/native/metal/BackendMTL.mm b/src/dawn/native/metal/BackendMTL.mm
index a80e825..e33914e 100644
--- a/src/dawn/native/metal/BackendMTL.mm
+++ b/src/dawn/native/metal/BackendMTL.mm
@@ -547,6 +547,11 @@
                 EnableFeature(Feature::ChromiumExperimentalSubgroups);
             }
         }
+
+        EnableFeature(Feature::SharedTextureMemoryIOSurface);
+        if (@available(macOS 10.14, iOS 12.0, *)) {
+            EnableFeature(Feature::SharedFenceMTLSharedEvent);
+        }
     }
 
     void InitializeVendorArchitectureImpl() override {
diff --git a/src/dawn/native/metal/DeviceMTL.h b/src/dawn/native/metal/DeviceMTL.h
index 30eaf0c..a93e27b 100644
--- a/src/dawn/native/metal/DeviceMTL.h
+++ b/src/dawn/native/metal/DeviceMTL.h
@@ -138,6 +138,11 @@
     ResultOrError<wgpu::TextureUsage> GetSupportedSurfaceUsageImpl(
         const Surface* surface) const override;
 
+    ResultOrError<Ref<SharedTextureMemoryBase>> ImportSharedTextureMemoryImpl(
+        const SharedTextureMemoryDescriptor* descriptor) override;
+    ResultOrError<Ref<SharedFenceBase>> ImportSharedFenceImpl(
+        const SharedFenceDescriptor* descriptor) override;
+
     void DestroyImpl() override;
 
     NSPRef<id<MTLDevice>> mMtlDevice;
diff --git a/src/dawn/native/metal/DeviceMTL.mm b/src/dawn/native/metal/DeviceMTL.mm
index 24db250..5039bfb 100644
--- a/src/dawn/native/metal/DeviceMTL.mm
+++ b/src/dawn/native/metal/DeviceMTL.mm
@@ -18,6 +18,7 @@
 #include "dawn/common/Platform.h"
 #include "dawn/native/Adapter.h"
 #include "dawn/native/BackendConnection.h"
+#include "dawn/native/ChainUtils_autogen.h"
 #include "dawn/native/Commands.h"
 #include "dawn/native/ErrorData.h"
 #include "dawn/native/metal/BindGroupLayoutMTL.h"
@@ -31,6 +32,8 @@
 #include "dawn/native/metal/RenderPipelineMTL.h"
 #include "dawn/native/metal/SamplerMTL.h"
 #include "dawn/native/metal/ShaderModuleMTL.h"
+#include "dawn/native/metal/SharedFenceMTL.h"
+#include "dawn/native/metal/SharedTextureMemoryMTL.h"
 #include "dawn/native/metal/SwapChainMTL.h"
 #include "dawn/native/metal/TextureMTL.h"
 #include "dawn/native/metal/UtilsMetal.h"
@@ -147,7 +150,7 @@
     if (mCommandQueue == nil) {
         return DAWN_INTERNAL_ERROR("Failed to allocate MTLCommandQueue.");
     }
-    if (@available(macOS 10.14, *)) {
+    if (@available(macOS 10.14, iOS 12.0, *)) {
         mMtlSharedEvent.Acquire([*mMtlDevice newSharedEvent]);
     }
 
@@ -248,6 +251,41 @@
     return usages;
 }
 
+ResultOrError<Ref<SharedTextureMemoryBase>> Device::ImportSharedTextureMemoryImpl(
+    const SharedTextureMemoryDescriptor* baseDescriptor) {
+    DAWN_TRY(ValidateSingleSType(baseDescriptor->nextInChain,
+                                 wgpu::SType::SharedTextureMemoryIOSurfaceDescriptor));
+
+    const SharedTextureMemoryIOSurfaceDescriptor* descriptor = nullptr;
+    FindInChain(baseDescriptor->nextInChain, &descriptor);
+
+    DAWN_INVALID_IF(descriptor == nullptr,
+                    "SharedTextureMemoryIOSurfaceDescriptor must be chained.");
+
+    DAWN_INVALID_IF(!HasFeature(Feature::SharedTextureMemoryIOSurface), "%s is not enabled.",
+                    wgpu::FeatureName::SharedTextureMemoryIOSurface);
+
+    return SharedTextureMemory::Create(this, baseDescriptor->label, descriptor);
+}
+
+ResultOrError<Ref<SharedFenceBase>> Device::ImportSharedFenceImpl(
+    const SharedFenceDescriptor* baseDescriptor) {
+    DAWN_TRY(ValidateSingleSType(baseDescriptor->nextInChain,
+                                 wgpu::SType::SharedFenceMTLSharedEventDescriptor));
+
+    const SharedFenceMTLSharedEventDescriptor* descriptor = nullptr;
+    FindInChain(baseDescriptor->nextInChain, &descriptor);
+
+    DAWN_INVALID_IF(descriptor == nullptr, "SharedFenceMTLSharedEventDescriptor must be chained.");
+
+    DAWN_INVALID_IF(!HasFeature(Feature::SharedFenceMTLSharedEvent), "%s is not enabled.",
+                    wgpu::FeatureName::SharedFenceMTLSharedEvent);
+    if (@available(macOS 10.14, iOS 12.0, *)) {
+        return SharedFence::Create(this, baseDescriptor->label, descriptor);
+    }
+    UNREACHABLE();
+}
+
 ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
     uint64_t frontendCompletedSerial{GetQueue()->GetCompletedCommandSerial()};
     // sometimes we increase the serials, in which case the completed serial in
diff --git a/src/dawn/native/metal/Forward.h b/src/dawn/native/metal/Forward.h
index 493e1fa..55a1bee 100644
--- a/src/dawn/native/metal/Forward.h
+++ b/src/dawn/native/metal/Forward.h
@@ -32,6 +32,8 @@
 class Queue;
 class RenderPipeline;
 class Sampler;
 class ShaderModule;
+class SharedFence;
+class SharedTextureMemory;
 class SwapChain;
 class Texture;
@@ -51,6 +53,8 @@
     using RenderPipelineType = RenderPipeline;
     using SamplerType = Sampler;
     using ShaderModuleType = ShaderModule;
+    using SharedFenceType = SharedFence;
+    using SharedTextureMemoryType = SharedTextureMemory;
     using SwapChainType = SwapChain;
     using TextureType = Texture;
     using TextureViewType = TextureView;
diff --git a/src/dawn/native/metal/SharedFenceMTL.h b/src/dawn/native/metal/SharedFenceMTL.h
new file mode 100644
index 0000000..79d3039
--- /dev/null
+++ b/src/dawn/native/metal/SharedFenceMTL.h
@@ -0,0 +1,50 @@
+// Copyright 2023 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_DAWN_NATIVE_METAL_SHAREDFENCEMTL_H_
+#define SRC_DAWN_NATIVE_METAL_SHAREDFENCEMTL_H_
+
+#include <os/availability.h>
+#include <vector>
+
+#include "dawn/common/NSRef.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/SharedFence.h"
+
+@protocol MTLSharedEvent;
+
+namespace dawn::native::metal {
+
+class Device;
+
+class API_AVAILABLE(macos(10.14), ios(12.0)) SharedFence final : public SharedFenceBase {
+  public:
+    static ResultOrError<Ref<SharedFence>> Create(
+        Device* device,
+        const char* label,
+        const SharedFenceMTLSharedEventDescriptor* descriptor);
+
+    id<MTLSharedEvent> GetMTLSharedEvent() const;
+
+  private:
+    SharedFence(Device* device, const char* label, id<MTLSharedEvent> sharedEvent);
+
+    MaybeError ExportInfoImpl(SharedFenceExportInfo* info) const override;
+
+    NSPRef<id<MTLSharedEvent>> mSharedEvent;
+};
+
+}  // namespace dawn::native::metal
+
+#endif  // SRC_DAWN_NATIVE_METAL_SHAREDFENCEMTL_H_
diff --git a/src/dawn/native/metal/SharedFenceMTL.mm b/src/dawn/native/metal/SharedFenceMTL.mm
new file mode 100644
index 0000000..205b9b4
--- /dev/null
+++ b/src/dawn/native/metal/SharedFenceMTL.mm
@@ -0,0 +1,54 @@
+// Copyright 2023 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/SharedFenceMTL.h"
+
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/metal/DeviceMTL.h"
+
+namespace dawn::native::metal {
+
+// static
+ResultOrError<Ref<SharedFence>> SharedFence::Create(
+    Device* device,
+    const char* label,
+    const SharedFenceMTLSharedEventDescriptor* descriptor) {
+    DAWN_INVALID_IF(descriptor->sharedEvent == nullptr, "MTLSharedEvent is missing.");
+    return AcquireRef(
+        new SharedFence(device, label, static_cast<id<MTLSharedEvent>>(descriptor->sharedEvent)));
+}
+
+SharedFence::SharedFence(Device* device, const char* label, id<MTLSharedEvent> sharedEvent)
+    : SharedFenceBase(device, label), mSharedEvent(sharedEvent) {}
+
+id<MTLSharedEvent> SharedFence::GetMTLSharedEvent() const {
+    return mSharedEvent.Get();
+}
+
+MaybeError SharedFence::ExportInfoImpl(SharedFenceExportInfo* info) const {
+    info->type = wgpu::SharedFenceType::MTLSharedEvent;
+
+    DAWN_TRY(
+        ValidateSingleSType(info->nextInChain, wgpu::SType::SharedFenceMTLSharedEventExportInfo));
+
+    SharedFenceMTLSharedEventExportInfo* exportInfo = nullptr;
+    FindInChain(info->nextInChain, &exportInfo);
+
+    if (exportInfo != nullptr) {
+        exportInfo->sharedEvent = mSharedEvent.Get();
+    }
+    return {};
+}
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/SharedTextureMemoryMTL.h b/src/dawn/native/metal/SharedTextureMemoryMTL.h
new file mode 100644
index 0000000..1c00515
--- /dev/null
+++ b/src/dawn/native/metal/SharedTextureMemoryMTL.h
@@ -0,0 +1,55 @@
+// Copyright 2023 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_DAWN_NATIVE_METAL_SHAREDTEXTUREMEMORYMTL_H_
+#define SRC_DAWN_NATIVE_METAL_SHAREDTEXTUREMEMORYMTL_H_
+
+#include <IOSurface/IOSurfaceRef.h>
+#include <vector>
+
+#include "dawn/common/CoreFoundationRef.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/SharedTextureMemory.h"
+
+namespace dawn::native::metal {
+
+class Device;
+class CommandRecordingContext;
+
+class SharedTextureMemory final : public SharedTextureMemoryBase {
+  public:
+    static ResultOrError<Ref<SharedTextureMemory>> Create(
+        Device* device,
+        const char* label,
+        const SharedTextureMemoryIOSurfaceDescriptor* descriptor);
+
+    IOSurfaceRef GetIOSurface() const;
+
+  private:
+    SharedTextureMemory(Device* device,
+                        const char* label,
+                        const SharedTextureMemoryProperties& properties,
+                        IOSurfaceRef ioSurface);
+    void DestroyImpl() override;
+
+    ResultOrError<Ref<TextureBase>> CreateTextureImpl(const TextureDescriptor* descriptor) override;
+    MaybeError BeginAccessImpl(TextureBase* texture, const BeginAccessDescriptor*) override;
+    ResultOrError<FenceAndSignalValue> EndAccessImpl(TextureBase* texture) override;
+
+    CFRef<IOSurfaceRef> mIOSurface;
+};
+
+}  // namespace dawn::native::metal
+
+#endif  // SRC_DAWN_NATIVE_METAL_SHAREDTEXTUREMEMORYMTL_H_
diff --git a/src/dawn/native/metal/SharedTextureMemoryMTL.mm b/src/dawn/native/metal/SharedTextureMemoryMTL.mm
new file mode 100644
index 0000000..81c6884
--- /dev/null
+++ b/src/dawn/native/metal/SharedTextureMemoryMTL.mm
@@ -0,0 +1,161 @@
+// Copyright 2023 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/SharedTextureMemoryMTL.h"
+
+#include <CoreVideo/CVPixelBuffer.h>
+
+#include "dawn/native/metal/CommandRecordingContext.h"
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/SharedFenceMTL.h"
+#include "dawn/native/metal/TextureMTL.h"
+
+namespace dawn::native::metal {
+
+namespace {
+ResultOrError<wgpu::TextureFormat> GetFormatEquivalentToIOSurfaceFormat(uint32_t format) {
+    switch (format) {
+        case kCVPixelFormatType_64RGBAHalf:
+            return wgpu::TextureFormat::RGBA16Float;
+        case kCVPixelFormatType_TwoComponent16Half:
+            return wgpu::TextureFormat::RG16Float;
+        case kCVPixelFormatType_OneComponent16Half:
+            return wgpu::TextureFormat::R16Float;
+        case kCVPixelFormatType_ARGB2101010LEPacked:
+            return wgpu::TextureFormat::RGB10A2Unorm;
+        case kCVPixelFormatType_32RGBA:
+            return wgpu::TextureFormat::RGBA8Unorm;
+        case kCVPixelFormatType_32BGRA:
+            return wgpu::TextureFormat::BGRA8Unorm;
+        case kCVPixelFormatType_TwoComponent8:
+            return wgpu::TextureFormat::RG8Unorm;
+        case kCVPixelFormatType_OneComponent8:
+            return wgpu::TextureFormat::R8Unorm;
+        case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange:
+            return wgpu::TextureFormat::R8BG8Biplanar420Unorm;
+        default:
+            return DAWN_VALIDATION_ERROR("Unsupported IOSurface format (%x).", format);
+    }
+}
+
+}  // anonymous namespace
+
+// static
+ResultOrError<Ref<SharedTextureMemory>> SharedTextureMemory::Create(
+    Device* device,
+    const char* label,
+    const SharedTextureMemoryIOSurfaceDescriptor* descriptor) {
+    DAWN_INVALID_IF(descriptor->ioSurface == nullptr, "IOSurface is missing.");
+
+    IOSurfaceRef ioSurface = static_cast<IOSurfaceRef>(descriptor->ioSurface);
+    wgpu::TextureFormat format;
+    DAWN_TRY_ASSIGN(format,
+                    GetFormatEquivalentToIOSurfaceFormat(IOSurfaceGetPixelFormat(ioSurface)));
+
+    const Format* internalFormat = nullptr;
+    DAWN_TRY_ASSIGN(internalFormat, device->GetInternalFormat(format));
+
+    size_t width = IOSurfaceGetWidth(ioSurface);
+    size_t height = IOSurfaceGetHeight(ioSurface);
+
+    const CombinedLimits& limits = device->GetLimits();
+
+    DAWN_INVALID_IF(width > limits.v1.maxTextureDimension2D,
+                    "IOSurface width (%u) exceeds maxTextureDimension2D (%u).", width,
+                    limits.v1.maxTextureDimension2D);
+    DAWN_INVALID_IF(height > limits.v1.maxTextureDimension2D,
+                    "IOSurface height (%u) exceeds maxTextureDimension2D (%u).", height,
+                    limits.v1.maxTextureDimension2D);
+
+    SharedTextureMemoryProperties properties;
+    if (internalFormat->IsMultiPlanar()) {
+        properties.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding;
+    } else {
+        properties.usage =
+            wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+            wgpu::TextureUsage::TextureBinding |
+            (internalFormat->supportsStorageUsage ? wgpu::TextureUsage::StorageBinding
+                                                  : wgpu::TextureUsage::None) |
+            (internalFormat->isRenderable ? wgpu::TextureUsage::RenderAttachment
+                                          : wgpu::TextureUsage::None);
+    }
+    properties.format = format;
+    properties.size = {static_cast<uint32_t>(width), static_cast<uint32_t>(height), 1};
+
+    return AcquireRef(new SharedTextureMemory(device, label, properties, ioSurface));
+}
+
+SharedTextureMemory::SharedTextureMemory(Device* device,
+                                         const char* label,
+                                         const SharedTextureMemoryProperties& properties,
+                                         IOSurfaceRef ioSurface)
+    : SharedTextureMemoryBase(device, label, properties), mIOSurface(ioSurface) {}
+
+void SharedTextureMemory::DestroyImpl() {
+    SharedTextureMemoryBase::DestroyImpl();
+    mIOSurface = nullptr;
+}
+
+IOSurfaceRef SharedTextureMemory::GetIOSurface() const {
+    return mIOSurface.Get();
+}
+
+ResultOrError<Ref<TextureBase>> SharedTextureMemory::CreateTextureImpl(
+    const TextureDescriptor* descriptor) {
+    return Texture::CreateFromSharedTextureMemory(this, descriptor);
+}
+
+MaybeError SharedTextureMemory::BeginAccessImpl(TextureBase* texture,
+                                                const BeginAccessDescriptor* descriptor) {
+    for (size_t i = 0; i < descriptor->fenceCount; ++i) {
+        SharedFenceBase* fence = descriptor->fences[i];
+
+        SharedFenceExportInfo exportInfo;
+        fence->APIExportInfo(&exportInfo);
+        switch (exportInfo.type) {
+            case wgpu::SharedFenceType::MTLSharedEvent:
+                DAWN_INVALID_IF(!GetDevice()->HasFeature(Feature::SharedFenceMTLSharedEvent),
+                                "Required feature (%s) for %s is missing.",
+                                wgpu::FeatureName::SharedFenceMTLSharedEvent,
+                                wgpu::SharedFenceType::MTLSharedEvent);
+                break;
+            default:
+                return DAWN_VALIDATION_ERROR("Unsupported fence type %s.", exportInfo.type);
+        }
+    }
+    return {};
+}
+
+ResultOrError<FenceAndSignalValue> SharedTextureMemory::EndAccessImpl(TextureBase* texture) {
+    DAWN_INVALID_IF(!GetDevice()->HasFeature(Feature::SharedFenceMTLSharedEvent),
+                    "Required feature (%s) is missing.",
+                    wgpu::FeatureName::SharedFenceMTLSharedEvent);
+
+    if (@available(macOS 10.14, iOS 12.0, *)) {
+        ExternalImageIOSurfaceEndAccessDescriptor oldEndAccessDesc;
+        ToBackend(GetDevice())->ExportLastSignaledEvent(&oldEndAccessDesc);
+
+        SharedFenceMTLSharedEventDescriptor newDesc;
+        newDesc.sharedEvent = oldEndAccessDesc.sharedEvent;
+
+        Ref<SharedFence> fence;
+        DAWN_TRY_ASSIGN(fence, SharedFence::Create(ToBackend(GetDevice()),
+                                                   "Internal MTLSharedEvent", &newDesc));
+
+        return FenceAndSignalValue{std::move(fence), static_cast<uint64_t>(GetLastUsageSerial())};
+    }
+    UNREACHABLE();
+}
+
+}  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/TextureMTL.h b/src/dawn/native/metal/TextureMTL.h
index cca2985..f389255 100644
--- a/src/dawn/native/metal/TextureMTL.h
+++ b/src/dawn/native/metal/TextureMTL.h
@@ -31,6 +31,7 @@
 class CommandRecordingContext;
 class Device;
 struct MTLSharedEventAndSignalValue;
+class SharedTextureMemory;
 
 MTLPixelFormat MetalPixelFormat(const DeviceBase* device, wgpu::TextureFormat format);
 MaybeError ValidateIOSurfaceCanBeWrapped(const DeviceBase* device,
@@ -45,6 +46,9 @@
         const ExternalImageDescriptor* descriptor,
         IOSurfaceRef ioSurface,
         std::vector<MTLSharedEventAndSignalValue> waitEvents);
+    static ResultOrError<Ref<Texture>> CreateFromSharedTextureMemory(
+        SharedTextureMemory* memory,
+        const TextureDescriptor* descriptor);
     static Ref<Texture> CreateWrapping(Device* device,
                                        const TextureDescriptor* descriptor,
                                        NSPRef<id<MTLTexture>> wrapped);
diff --git a/src/dawn/native/metal/TextureMTL.mm b/src/dawn/native/metal/TextureMTL.mm
index f2ff3b0..369fefa 100644
--- a/src/dawn/native/metal/TextureMTL.mm
+++ b/src/dawn/native/metal/TextureMTL.mm
@@ -21,6 +21,8 @@
 #include "dawn/native/EnumMaskIterator.h"
 #include "dawn/native/metal/BufferMTL.h"
 #include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/SharedFenceMTL.h"
+#include "dawn/native/metal/SharedTextureMemoryMTL.h"
 #include "dawn/native/metal/UtilsMetal.h"
 
 #include <CoreVideo/CVPixelBuffer.h>
@@ -741,6 +743,21 @@
 }
 
 // static
+ResultOrError<Ref<Texture>> Texture::CreateFromSharedTextureMemory(
+    SharedTextureMemory* memory,
+    const TextureDescriptor* descriptor) {
+    ExternalImageDescriptorIOSurface ioSurfaceImageDesc;
+    ioSurfaceImageDesc.isInitialized = false;  // Initialized state is set on memory.BeginAccess.
+
+    Device* device = ToBackend(memory->GetDevice());
+    Ref<Texture> texture = AcquireRef(new Texture(device, descriptor));
+    DAWN_TRY(texture->InitializeFromIOSurface(&ioSurfaceImageDesc, descriptor,
+                                              memory->GetIOSurface(), {}));
+    texture->mSharedTextureMemory = GetWeakRef(static_cast<SharedTextureMemoryBase*>(memory));
+    return texture;
+}
+
+// static
 Ref<Texture> Texture::CreateWrapping(Device* device,
                                      const TextureDescriptor* descriptor,
                                      NSPRef<id<MTLTexture>> wrapped) {
@@ -815,8 +832,15 @@
 }
 
 void Texture::SynchronizeTextureBeforeUse(CommandRecordingContext* commandContext) {
-    if (@available(macOS 10.14, *)) {
-        if (!mWaitEvents.empty()) {
+    if (@available(macOS 10.14, iOS 12.0, *)) {
+        SharedTextureMemoryBase::PendingFenceList fences;
+        Ref<SharedTextureMemoryBase> memory = TryGetSharedTextureMemory();
+        if (memory != nullptr) {
+            memory->AcquireBeginFences(this, &fences);
+            memory->SetLastUsageSerial(GetDevice()->GetPendingCommandSerial());
+        }
+
+        if (!mWaitEvents.empty() || !fences->empty()) {
             // There may be an open blit encoder from a copy command or writeBuffer.
             // Wait events are only allowed if there is no encoder open.
             commandContext->EndBlit();
@@ -828,6 +852,11 @@
             id<MTLSharedEvent> sharedEvent = static_cast<id<MTLSharedEvent>>(rawEvent);
             [commandBuffer encodeWaitForEvent:sharedEvent value:waitEvent.signaledValue];
         }
+
+        for (const auto& fence : fences) {
+            [commandBuffer encodeWaitForEvent:ToBackend(fence.object)->GetMTLSharedEvent()
+                                        value:fence.signaledValue];
+        }
     }
 }
 
diff --git a/src/dawn/tests/BUILD.gn b/src/dawn/tests/BUILD.gn
index 2c9f159..b2c5c2e 100644
--- a/src/dawn/tests/BUILD.gn
+++ b/src/dawn/tests/BUILD.gn
@@ -572,6 +572,8 @@
     "end2end/ShaderF16Tests.cpp",
     "end2end/ShaderTests.cpp",
     "end2end/ShaderValidationTests.cpp",
+    "end2end/SharedTextureMemoryTests.cpp",
+    "end2end/SharedTextureMemoryTests.h",
     "end2end/StorageTextureTests.cpp",
     "end2end/SubresourceRenderAttachmentTests.cpp",
     "end2end/Texture3DTests.cpp",
@@ -605,12 +607,21 @@
     sources += [ "end2end/D3D12CachingTests.cpp" ]
   }
 
-  if (dawn_enable_metal) {
-    sources += [
-      "end2end/IOSurfaceWrappingTests.cpp",
-      "end2end/VideoViewsTests_mac.cpp",
-    ]
+  if (is_mac || is_ios) {
+    if (dawn_enable_metal) {
+      sources += [
+        "end2end/IOSurfaceWrappingTests.cpp",
+        "end2end/VideoViewsTests_mac.cpp",
+      ]
+    }
+    sources += [ "end2end/SharedTextureMemoryTests_apple.mm" ]
+
     frameworks = [ "IOSurface.framework" ]
+
+    if (filter_include(configs, [ "//build/config/compiler:enable_arc" ]) !=
+        []) {
+      configs -= [ "//build/config/compiler:enable_arc" ]
+    }
   }
 
   if (dawn_supports_glfw_for_windowing) {
diff --git a/src/dawn/tests/DawnTest.h b/src/dawn/tests/DawnTest.h
index 4ba3f61..292c156 100644
--- a/src/dawn/tests/DawnTest.h
+++ b/src/dawn/tests/DawnTest.h
@@ -748,6 +748,12 @@
         DawnTestBase::PrintToStringParamName(#testName));                               \
     GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(testName)
 
+#define DAWN_INSTANTIATE_PREFIXED_TEST_P(prefix, testName, ...)                    \
+    INSTANTIATE_TEST_SUITE_P(                                                      \
+        prefix, testName,                                                          \
+        ::testing::ValuesIn(MakeParamGenerator<testName::ParamType>(__VA_ARGS__)), \
+        DawnTestBase::PrintToStringParamName(#testName))
+
 // Instantiate the test once for each backend provided in the first param list.
 // The test will be parameterized over the following param lists.
 // Use it like this:
@@ -784,8 +790,8 @@
         DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH)(DAWN_TEST_PARAM_STRUCT_DECL_STRUCT_FIELD,  \
                                                         __VA_ARGS__))                              \
     };                                                                                             \
-    std::ostream& operator<<(std::ostream& o,                                                      \
-                             const DAWN_PP_CONCATENATE(_Dawn_, StructName) & param) {              \
+    inline std::ostream& operator<<(std::ostream& o,                                               \
+                                    const DAWN_PP_CONCATENATE(_Dawn_, StructName) & param) {       \
         DAWN_PP_EXPAND(DAWN_PP_EXPAND(DAWN_PP_FOR_EACH)(DAWN_TEST_PARAM_STRUCT_PRINT_STRUCT_FIELD, \
                                                         __VA_ARGS__))                              \
         return o;                                                                                  \
@@ -796,7 +802,7 @@
             : AdapterTestParam(param),                                                             \
               DAWN_PP_CONCATENATE(_Dawn_, StructName){std::forward<Args>(args)...} {}              \
     };                                                                                             \
-    std::ostream& operator<<(std::ostream& o, const StructName& param) {                           \
+    inline std::ostream& operator<<(std::ostream& o, const StructName& param) {                    \
         o << static_cast<const AdapterTestParam&>(param);                                          \
         o << "; " << static_cast<const DAWN_PP_CONCATENATE(_Dawn_, StructName)&>(param);           \
         return o;                                                                                  \
diff --git a/src/dawn/tests/end2end/SharedTextureMemoryTests.cpp b/src/dawn/tests/end2end/SharedTextureMemoryTests.cpp
new file mode 100644
index 0000000..c87b450
--- /dev/null
+++ b/src/dawn/tests/end2end/SharedTextureMemoryTests.cpp
@@ -0,0 +1,1260 @@
+// Copyright 2023 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/tests/end2end/SharedTextureMemoryTests.h"
+
+#include "dawn/tests/MockCallback.h"
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace dawn {
+
+void SharedTextureMemoryNoFeatureTests::SetUp() {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    DawnTestWithParams<SharedTextureMemoryTestParams>::SetUp();
+}
+
+std::vector<wgpu::FeatureName> SharedTextureMemoryTests::GetRequiredFeatures() {
+    auto features = GetParam().mBackend->RequiredFeatures();
+    if (!SupportsFeatures(features)) {
+        return {};
+    }
+    if (SupportsFeatures({wgpu::FeatureName::TransientAttachments})) {
+        features.push_back(wgpu::FeatureName::TransientAttachments);
+    }
+    return features;
+}
+
+void SharedTextureMemoryTests::SetUp() {
+    DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+    DawnTestWithParams<SharedTextureMemoryTestParams>::SetUp();
+    DAWN_TEST_UNSUPPORTED_IF(!SupportsFeatures(GetParam().mBackend->RequiredFeatures()));
+}
+
+std::vector<wgpu::SharedTextureMemory> SharedTextureMemoryTestBackend::CreateSharedTextureMemories(
+    wgpu::Device& device) {
+    std::vector<wgpu::SharedTextureMemory> memories;
+    for (auto& memory : CreatePerDeviceSharedTextureMemories({device})) {
+        ASSERT(memory.size() == 1u);
+        memories.push_back(std::move(memory[0]));
+    }
+    return memories;
+}
+
+std::vector<std::vector<wgpu::SharedTextureMemory>>
+SharedTextureMemoryTestBackend::CreatePerDeviceSharedTextureMemoriesFilterByUsage(
+    const std::vector<wgpu::Device>& devices,
+    wgpu::TextureUsage requiredUsage) {
+    std::vector<std::vector<wgpu::SharedTextureMemory>> out;
+    for (auto& memories : CreatePerDeviceSharedTextureMemories(devices)) {
+        wgpu::SharedTextureMemoryProperties properties;
+        memories[0].GetProperties(&properties);
+
+        if ((properties.usage & requiredUsage) == requiredUsage) {
+            out.push_back(std::move(memories));
+        }
+    }
+    return out;
+}
+
+void SharedTextureMemoryTests::UseInRenderPass(wgpu::Device& deviceObj, wgpu::Texture& texture) {
+    wgpu::CommandEncoder encoder = deviceObj.CreateCommandEncoder();
+    utils::ComboRenderPassDescriptor passDescriptor({texture.CreateView()});
+    passDescriptor.cColorAttachments[0].loadOp = wgpu::LoadOp::Load;
+    passDescriptor.cColorAttachments[0].storeOp = wgpu::StoreOp::Store;
+
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+    pass.End();
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+    deviceObj.GetQueue().Submit(1, &commandBuffer);
+}
+
+void SharedTextureMemoryTests::UseInCopy(wgpu::Device& deviceObj, wgpu::Texture& texture) {
+    wgpu::CommandEncoder encoder = deviceObj.CreateCommandEncoder();
+    wgpu::ImageCopyTexture source;
+    source.texture = texture;
+
+    // Create a destination buffer, large enough for 1 texel of any format.
+    wgpu::BufferDescriptor bufferDesc;
+    bufferDesc.size = 128;
+    bufferDesc.usage = wgpu::BufferUsage::CopyDst;
+
+    wgpu::ImageCopyBuffer destination;
+    destination.buffer = deviceObj.CreateBuffer(&bufferDesc);
+
+    wgpu::Extent3D size = {1, 1, 1};
+    encoder.CopyTextureToBuffer(&source, &destination, &size);
+
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+    deviceObj.GetQueue().Submit(1, &commandBuffer);
+}
+
+// Make a command buffer that clears the texture to four different colors in each quadrant.
+wgpu::CommandBuffer SharedTextureMemoryTests::MakeFourColorsClearCommandBuffer(
+    wgpu::Device& deviceObj,
+    wgpu::Texture& texture) {
+    wgpu::ShaderModule module = utils::CreateShaderModule(deviceObj, R"(
+      struct VertexOut {
+          @builtin(position) position : vec4f,
+          @location(0) uv : vec2f,
+      }
+
+      struct FragmentIn {
+          @location(0) uv : vec2f,
+      }
+
+      @vertex fn vert_main(@builtin(vertex_index) VertexIndex : u32) -> VertexOut {
+          let pos = array(
+            vec2( 1.0,  1.0),
+            vec2( 1.0, -1.0),
+            vec2(-1.0, -1.0),
+            vec2( 1.0,  1.0),
+            vec2(-1.0, -1.0),
+            vec2(-1.0,  1.0),
+          );
+
+          let uv = array(
+            vec2(1.0, 0.0),
+            vec2(1.0, 1.0),
+            vec2(0.0, 1.0),
+            vec2(1.0, 0.0),
+            vec2(0.0, 1.0),
+            vec2(0.0, 0.0),
+          );
+          return VertexOut(vec4f(pos[VertexIndex], 0.0, 1.0), uv[VertexIndex]);
+      }
+
+      @fragment fn frag_main(in: FragmentIn) -> @location(0) vec4f {
+          if (in.uv.x < 0.5) {
+            if (in.uv.y < 0.5) {
+              return vec4f(0.0, 1.0, 0.0, 1.0);
+            } else {
+              return vec4f(1.0, 0.0, 0.0, 1.0);
+            }
+          } else {
+            if (in.uv.y < 0.5) {
+              return vec4f(0.0, 0.0, 1.0, 1.0);
+            } else {
+              return vec4f(1.0, 1.0, 0.0, 1.0);
+            }
+          }
+      }
+    )");
+
+    utils::ComboRenderPipelineDescriptor pipelineDesc;
+    pipelineDesc.vertex.module = module;
+    pipelineDesc.vertex.entryPoint = "vert_main";
+    pipelineDesc.cFragment.module = module;
+    pipelineDesc.cFragment.entryPoint = "frag_main";
+    pipelineDesc.cTargets[0].format = texture.GetFormat();
+
+    wgpu::RenderPipeline pipeline = deviceObj.CreateRenderPipeline(&pipelineDesc);
+
+    wgpu::CommandEncoder encoder = deviceObj.CreateCommandEncoder();
+    utils::ComboRenderPassDescriptor passDescriptor({texture.CreateView()});
+    passDescriptor.cColorAttachments[0].storeOp = wgpu::StoreOp::Store;
+
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+    pass.SetPipeline(pipeline);
+    pass.Draw(6);
+    pass.End();
+    return encoder.Finish();
+}
+
+// Make a command buffer that samples the contents of the input texture into an RGBA8Unorm texture.
+// Returns both the command buffer and the intermediate RGBA8Unorm color target so the caller can
+// read the result back (see CheckFourColors).
+// NOTE: despite the name, the fragment shader reads texels with textureLoad (no sampler), using
+// the framebuffer coordinate — so the color target is sized to match the input texture exactly.
+std::pair<wgpu::CommandBuffer, wgpu::Texture>
+SharedTextureMemoryTests::MakeCheckBySamplingCommandBuffer(wgpu::Device& deviceObj,
+                                                           wgpu::Texture& texture) {
+    // Fullscreen quad (two triangles); frag stage copies texel (x, y) of `texture` to (x, y)
+    // of the render target.
+    wgpu::ShaderModule module = utils::CreateShaderModule(deviceObj, R"(
+      @vertex fn vert_main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4f {
+          let pos = array(
+            vec2( 1.0,  1.0),
+            vec2( 1.0, -1.0),
+            vec2(-1.0, -1.0),
+            vec2( 1.0,  1.0),
+            vec2(-1.0, -1.0),
+            vec2(-1.0,  1.0),
+          );
+          return vec4f(pos[VertexIndex], 0.0, 1.0);
+      }
+
+      @group(0) @binding(0) var t: texture_2d<f32>;
+
+      @fragment fn frag_main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4f {
+        return textureLoad(t, vec2u(coord_in.xy), 0);
+      }
+    )");
+
+    // Intermediate RGBA8Unorm target. CopySrc is needed so EXPECT_TEXTURE_EQ can read it back.
+    wgpu::TextureDescriptor textureDesc = {};
+    textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+    textureDesc.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+    textureDesc.size = {texture.GetWidth(), texture.GetHeight(), texture.GetDepthOrArrayLayers()};
+    textureDesc.label = "intermediate check texture";
+
+    wgpu::Texture colorTarget = deviceObj.CreateTexture(&textureDesc);
+
+    utils::ComboRenderPipelineDescriptor pipelineDesc;
+    pipelineDesc.vertex.module = module;
+    pipelineDesc.vertex.entryPoint = "vert_main";
+    pipelineDesc.cFragment.module = module;
+    pipelineDesc.cFragment.entryPoint = "frag_main";
+    pipelineDesc.cTargets[0].format = colorTarget.GetFormat();
+
+    wgpu::RenderPipeline pipeline = deviceObj.CreateRenderPipeline(&pipelineDesc);
+
+    // Bind the input texture for textureLoad in the fragment stage.
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(deviceObj, pipeline.GetBindGroupLayout(0),
+                                                     {{0, texture.CreateView()}});
+
+    wgpu::CommandEncoder encoder = deviceObj.CreateCommandEncoder();
+    utils::ComboRenderPassDescriptor passDescriptor({colorTarget.CreateView()});
+    passDescriptor.cColorAttachments[0].storeOp = wgpu::StoreOp::Store;
+
+    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+    pass.SetPipeline(pipeline);
+    pass.SetBindGroup(0, bindGroup);
+    pass.Draw(6);
+    pass.End();
+    return {encoder.Finish(), colorTarget};
+}
+
+// Check that the contents of colorTarget are RGBA8Unorm texels that match those written by
+// MakeFourColorsClearCommandBuffer: green (top-left), red (bottom-left), blue (top-right),
+// yellow (bottom-right). `format` is the format of the ORIGINAL shared texture; formats with
+// fewer channels drop the missing components (a missing channel reads as 0 when loaded).
+void SharedTextureMemoryTests::CheckFourColors(wgpu::Device& deviceObj,
+                                               wgpu::TextureFormat format,
+                                               wgpu::Texture& colorTarget) {
+    // Probe a single texel at the center of each quadrant.
+    wgpu::Origin3D tl = {colorTarget.GetWidth() / 4, colorTarget.GetHeight() / 4};
+    wgpu::Origin3D bl = {colorTarget.GetWidth() / 4, 3 * colorTarget.GetHeight() / 4};
+    wgpu::Origin3D tr = {3 * colorTarget.GetWidth() / 4, colorTarget.GetHeight() / 4};
+    wgpu::Origin3D br = {3 * colorTarget.GetWidth() / 4, 3 * colorTarget.GetHeight() / 4};
+
+    switch (format) {
+        // Full RGBA formats preserve all four quadrant colors.
+        case wgpu::TextureFormat::RGBA8Unorm:
+        case wgpu::TextureFormat::BGRA8Unorm:
+        case wgpu::TextureFormat::RGB10A2Unorm:
+        case wgpu::TextureFormat::RGBA16Float:
+            EXPECT_TEXTURE_EQ(deviceObj, &utils::RGBA8::kGreen, colorTarget, tl, {1, 1});
+            EXPECT_TEXTURE_EQ(deviceObj, &utils::RGBA8::kRed, colorTarget, bl, {1, 1});
+            EXPECT_TEXTURE_EQ(deviceObj, &utils::RGBA8::kBlue, colorTarget, tr, {1, 1});
+            EXPECT_TEXTURE_EQ(deviceObj, &utils::RGBA8::kYellow, colorTarget, br, {1, 1});
+            break;
+        // RG formats: the blue quadrant (tr) loses its only channel and reads as black.
+        case wgpu::TextureFormat::RG16Float:
+        case wgpu::TextureFormat::RG8Unorm:
+            EXPECT_TEXTURE_EQ(deviceObj, &utils::RGBA8::kGreen, colorTarget, tl, {1, 1});
+            EXPECT_TEXTURE_EQ(deviceObj, &utils::RGBA8::kRed, colorTarget, bl, {1, 1});
+            EXPECT_TEXTURE_EQ(deviceObj, &utils::RGBA8::kBlack, colorTarget, tr, {1, 1});
+            EXPECT_TEXTURE_EQ(deviceObj, &utils::RGBA8::kYellow, colorTarget, br, {1, 1});
+            break;
+        // R-only formats: only the red component survives, so green (tl) and blue (tr)
+        // read black, and yellow (br) reads as red.
+        case wgpu::TextureFormat::R16Float:
+        case wgpu::TextureFormat::R8Unorm:
+            EXPECT_TEXTURE_EQ(deviceObj, &utils::RGBA8::kBlack, colorTarget, tl, {1, 1});
+            EXPECT_TEXTURE_EQ(deviceObj, &utils::RGBA8::kRed, colorTarget, bl, {1, 1});
+            EXPECT_TEXTURE_EQ(deviceObj, &utils::RGBA8::kBlack, colorTarget, tr, {1, 1});
+            EXPECT_TEXTURE_EQ(deviceObj, &utils::RGBA8::kRed, colorTarget, br, {1, 1});
+            break;
+        default:
+            UNREACHABLE();
+    }
+}
+
+// Allow tests to be uninstantiated since it's possible no backends are available.
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(SharedTextureMemoryNoFeatureTests);
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(SharedTextureMemoryTests);
+
+namespace {
+
+using testing::HasSubstr;
+using testing::MockCallback;
+
+// Strip const so tests can pre-set a read-only "output" member (e.g.
+// SharedTextureMemoryEndAccessState::initialized) and then verify the API overwrites it.
+// All uses in these tests are on non-const-originated locals, so the const_cast is well-defined.
+template <typename T>
+T& AsNonConst(const T& rhs) {
+    return const_cast<T&>(rhs);
+}
+
+// Test that creating shared texture memory without the required features is an error.
+// Using the memory thereafter produces errors.
+TEST_P(SharedTextureMemoryNoFeatureTests, CreationWithoutFeature) {
+    // Create external texture memories with an error filter.
+    // We should see a message that the feature is not enabled.
+    device.PushErrorScope(wgpu::ErrorFilter::Validation);
+    const auto& memories = GetParam().mBackend->CreateSharedTextureMemories(device);
+
+    MockCallback<WGPUErrorCallback> popErrorScopeCallback;
+    EXPECT_CALL(popErrorScopeCallback,
+                Call(WGPUErrorType_Validation, HasSubstr("is not enabled"), this));
+
+    device.PopErrorScope(popErrorScopeCallback.Callback(),
+                         popErrorScopeCallback.MakeUserdata(this));
+
+    // Each memory is an error object; creating a texture from it, and Begin/EndAccess on that
+    // texture, must all produce validation errors.
+    for (wgpu::SharedTextureMemory memory : memories) {
+        ASSERT_DEVICE_ERROR_MSG(wgpu::Texture texture = memory.CreateTexture(),
+                                HasSubstr("is invalid"));
+
+        wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+        beginDesc.initialized = true;
+
+        ASSERT_DEVICE_ERROR_MSG(memory.BeginAccess(texture, &beginDesc), HasSubstr("is invalid"));
+
+        wgpu::SharedTextureMemoryEndAccessState endState = {};
+        ASSERT_DEVICE_ERROR_MSG(memory.EndAccess(texture, &endState), HasSubstr("is invalid"));
+    }
+}
+
+// Test that it is an error to import a shared texture memory with no chained struct.
+// The backend-specific import info (e.g. IOSurface on Metal) must be passed via the chain.
+TEST_P(SharedTextureMemoryTests, ImportSharedTextureMemoryNoChain) {
+    wgpu::SharedTextureMemoryDescriptor desc;
+    ASSERT_DEVICE_ERROR_MSG(
+        wgpu::SharedTextureMemory memory = device.ImportSharedTextureMemory(&desc),
+        HasSubstr("chain"));
+}
+
+// Test that it is an error to import a shared fence with no chained struct.
+// Also test that ExportInfo reports an Undefined type for the error fence.
+TEST_P(SharedTextureMemoryTests, ImportSharedFenceNoChain) {
+    wgpu::SharedFenceDescriptor desc;
+    ASSERT_DEVICE_ERROR_MSG(wgpu::SharedFence fence = device.ImportSharedFence(&desc),
+                            HasSubstr("chain"));
+
+    wgpu::SharedFenceExportInfo exportInfo;
+    exportInfo.type = static_cast<wgpu::SharedFenceType>(1234);  // should be overwritten
+
+    // Expect that exporting the fence info writes Undefined, and generates an error.
+    ASSERT_DEVICE_ERROR(fence.ExportInfo(&exportInfo));
+    EXPECT_EQ(exportInfo.type, wgpu::SharedFenceType::Undefined);
+}
+
+// Test that it is an error to import a shared texture memory when the device is destroyed.
+// The error message mentions the (lost) device rather than the missing chained struct.
+TEST_P(SharedTextureMemoryTests, ImportSharedTextureMemoryDeviceDestroyed) {
+    device.Destroy();
+
+    wgpu::SharedTextureMemoryDescriptor desc;
+    ASSERT_DEVICE_ERROR_MSG(
+        wgpu::SharedTextureMemory memory = device.ImportSharedTextureMemory(&desc),
+        HasSubstr("lost"));
+}
+
+// Test that it is an error to import a shared fence when the device is destroyed.
+// Mirrors ImportSharedTextureMemoryDeviceDestroyed for the fence import path.
+TEST_P(SharedTextureMemoryTests, ImportSharedFenceDeviceDestroyed) {
+    device.Destroy();
+
+    wgpu::SharedFenceDescriptor desc;
+    ASSERT_DEVICE_ERROR_MSG(wgpu::SharedFence fence = device.ImportSharedFence(&desc),
+                            HasSubstr("lost"));
+}
+
+// Test calling GetProperties with an error memory. The properties are filled with 0/None/Undefined
+// so callers reading them after a failed import get deterministic values instead of garbage.
+TEST_P(SharedTextureMemoryTests, GetPropertiesErrorMemory) {
+    wgpu::SharedTextureMemoryDescriptor desc;
+    ASSERT_DEVICE_ERROR(wgpu::SharedTextureMemory memory = device.ImportSharedTextureMemory(&desc));
+
+    // GetProperties itself does not error; it writes zeroed-out values.
+    wgpu::SharedTextureMemoryProperties properties;
+    memory.GetProperties(&properties);
+
+    EXPECT_EQ(properties.usage, wgpu::TextureUsage::None);
+    EXPECT_EQ(properties.size.width, 0u);
+    EXPECT_EQ(properties.size.height, 0u);
+    EXPECT_EQ(properties.size.depthOrArrayLayers, 0u);
+    EXPECT_EQ(properties.format, wgpu::TextureFormat::Undefined);
+}
+
+// Test calling GetProperties with an invalid chained struct. An error is
+// generated, but the properties are still populated.
+TEST_P(SharedTextureMemoryTests, GetPropertiesInvalidChain) {
+    wgpu::SharedTextureMemory memory = GetParam().mBackend->CreateSharedTextureMemory(device);
+
+    // A bare ChainedStructOut has no recognized sType, so chaining it is invalid.
+    wgpu::ChainedStructOut otherStruct;
+    wgpu::SharedTextureMemoryProperties properties1;
+    properties1.nextInChain = &otherStruct;
+    ASSERT_DEVICE_ERROR(memory.GetProperties(&properties1));
+
+    // Query again without a chain and check the erroring call wrote the same values.
+    wgpu::SharedTextureMemoryProperties properties2;
+    memory.GetProperties(&properties2);
+
+    EXPECT_EQ(properties1.usage, properties2.usage);
+    EXPECT_EQ(properties1.size.width, properties2.size.width);
+    EXPECT_EQ(properties1.size.height, properties2.size.height);
+    EXPECT_EQ(properties1.size.depthOrArrayLayers, properties2.size.depthOrArrayLayers);
+    EXPECT_EQ(properties1.format, properties2.format);
+}
+
+// Test that texture usages must be a subset of the shared texture memory's usage.
+// Each single usage bit is tried in turn: allowed bits yield a valid texture with exactly
+// that usage; disallowed bits yield a validation error.
+TEST_P(SharedTextureMemoryTests, UsageValidation) {
+    for (wgpu::SharedTextureMemory memory :
+         GetParam().mBackend->CreateSharedTextureMemories(device)) {
+        wgpu::SharedTextureMemoryProperties properties;
+        memory.GetProperties(&properties);
+
+        // SharedTextureMemory should never support TransientAttachment.
+        ASSERT_EQ(properties.usage & wgpu::TextureUsage::TransientAttachment, 0);
+
+        wgpu::TextureDescriptor textureDesc = {};
+        textureDesc.format = properties.format;
+        textureDesc.size = properties.size;
+
+        for (wgpu::TextureUsage usage : {
+                 wgpu::TextureUsage::CopySrc,
+                 wgpu::TextureUsage::CopyDst,
+                 wgpu::TextureUsage::TextureBinding,
+                 wgpu::TextureUsage::StorageBinding,
+                 wgpu::TextureUsage::RenderAttachment,
+             }) {
+            textureDesc.usage = usage;
+
+            // `usage` is valid if it is in the shared texture memory properties.
+            if (usage & properties.usage) {
+                wgpu::Texture t = memory.CreateTexture(&textureDesc);
+                EXPECT_EQ(t.GetUsage(), usage);
+            } else {
+                ASSERT_DEVICE_ERROR(memory.CreateTexture(&textureDesc));
+            }
+        }
+    }
+}
+
+// Test that it is an error if the texture format doesn't match the shared texture memory.
+TEST_P(SharedTextureMemoryTests, FormatValidation) {
+    for (wgpu::SharedTextureMemory memory :
+         GetParam().mBackend->CreateSharedTextureMemories(device)) {
+        wgpu::SharedTextureMemoryProperties properties;
+        memory.GetProperties(&properties);
+
+        // Pick any format guaranteed to differ from the memory's actual format.
+        wgpu::TextureDescriptor textureDesc = {};
+        textureDesc.format = properties.format != wgpu::TextureFormat::RGBA8Unorm
+                                 ? wgpu::TextureFormat::RGBA8Unorm
+                                 : wgpu::TextureFormat::RGBA16Float;
+        textureDesc.size = properties.size;
+        textureDesc.usage = properties.usage;
+
+        ASSERT_DEVICE_ERROR_MSG(memory.CreateTexture(&textureDesc),
+                                HasSubstr("doesn't match descriptor format"));
+    }
+}
+
+// Test that it is an error if the texture size doesn't match the shared texture memory.
+// Width/height mismatches report a size mismatch; a depthOrArrayLayers != 1 is rejected
+// with its own "is not 1" message.
+TEST_P(SharedTextureMemoryTests, SizeValidation) {
+    for (wgpu::SharedTextureMemory memory :
+         GetParam().mBackend->CreateSharedTextureMemories(device)) {
+        wgpu::SharedTextureMemoryProperties properties;
+        memory.GetProperties(&properties);
+
+        wgpu::TextureDescriptor textureDesc = {};
+        textureDesc.format = properties.format;
+        textureDesc.usage = properties.usage;
+
+        textureDesc.size = {properties.size.width + 1, properties.size.height,
+                            properties.size.depthOrArrayLayers};
+        ASSERT_DEVICE_ERROR_MSG(memory.CreateTexture(&textureDesc),
+                                HasSubstr("doesn't match descriptor size"));
+
+        textureDesc.size = {properties.size.width, properties.size.height + 1,
+                            properties.size.depthOrArrayLayers};
+        ASSERT_DEVICE_ERROR_MSG(memory.CreateTexture(&textureDesc),
+                                HasSubstr("doesn't match descriptor size"));
+
+        textureDesc.size = {properties.size.width, properties.size.height,
+                            properties.size.depthOrArrayLayers + 1};
+        ASSERT_DEVICE_ERROR_MSG(memory.CreateTexture(&textureDesc), HasSubstr("is not 1"));
+    }
+}
+
+// Test that it is an error if the texture mip level count is not 1.
+TEST_P(SharedTextureMemoryTests, MipLevelValidation) {
+    for (wgpu::SharedTextureMemory memory :
+         GetParam().mBackend->CreateSharedTextureMemories(device)) {
+        wgpu::SharedTextureMemoryProperties properties;
+        memory.GetProperties(&properties);
+
+        wgpu::TextureDescriptor textureDesc = {};
+        textureDesc.format = properties.format;
+        textureDesc.usage = properties.usage;
+        textureDesc.size = properties.size;
+        textureDesc.mipLevelCount = 1u;
+
+        // mipLevelCount == 1 is the only accepted value.
+        memory.CreateTexture(&textureDesc);
+
+        textureDesc.mipLevelCount = 2u;
+        ASSERT_DEVICE_ERROR_MSG(memory.CreateTexture(&textureDesc), HasSubstr("(2) is not 1"));
+    }
+}
+
+// Test that it is an error if the texture sample count is not 1.
+TEST_P(SharedTextureMemoryTests, SampleCountValidation) {
+    for (wgpu::SharedTextureMemory memory :
+         GetParam().mBackend->CreateSharedTextureMemories(device)) {
+        wgpu::SharedTextureMemoryProperties properties;
+        memory.GetProperties(&properties);
+
+        wgpu::TextureDescriptor textureDesc = {};
+        textureDesc.format = properties.format;
+        textureDesc.usage = properties.usage;
+        textureDesc.size = properties.size;
+        textureDesc.sampleCount = 1u;
+
+        // Single-sampled textures are the only accepted configuration.
+        memory.CreateTexture(&textureDesc);
+
+        textureDesc.sampleCount = 4u;
+        ASSERT_DEVICE_ERROR_MSG(memory.CreateTexture(&textureDesc), HasSubstr("(4) is not 1"));
+    }
+}
+
+// Test that it is an error if the texture dimension is not 2D.
+// Both 1D and 3D are rejected with the same message.
+TEST_P(SharedTextureMemoryTests, DimensionValidation) {
+    for (wgpu::SharedTextureMemory memory :
+         GetParam().mBackend->CreateSharedTextureMemories(device)) {
+        wgpu::SharedTextureMemoryProperties properties;
+        memory.GetProperties(&properties);
+
+        wgpu::TextureDescriptor textureDesc = {};
+        textureDesc.format = properties.format;
+        textureDesc.usage = properties.usage;
+        textureDesc.size = properties.size;
+
+        textureDesc.dimension = wgpu::TextureDimension::e1D;
+        ASSERT_DEVICE_ERROR_MSG(memory.CreateTexture(&textureDesc),
+                                HasSubstr("is not TextureDimension::e2D"));
+
+        textureDesc.dimension = wgpu::TextureDimension::e3D;
+        ASSERT_DEVICE_ERROR_MSG(memory.CreateTexture(&textureDesc),
+                                HasSubstr("is not TextureDimension::e2D"));
+    }
+}
+
+// Test that it is an error to call BeginAccess twice in a row on the same texture and memory.
+TEST_P(SharedTextureMemoryTests, DoubleBeginAccess) {
+    wgpu::SharedTextureMemory memory = GetParam().mBackend->CreateSharedTextureMemory(device);
+    wgpu::Texture texture = memory.CreateTexture();
+
+    wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+    beginDesc.initialized = true;
+
+    // It should be an error to BeginAccess twice in a row.
+    memory.BeginAccess(texture, &beginDesc);
+    ASSERT_DEVICE_ERROR_MSG(memory.BeginAccess(texture, &beginDesc),
+                            HasSubstr("Cannot begin access with"));
+}
+
+// Test that it is an error to call BeginAccess twice in a row on two textures from the same memory.
+// Access is exclusive per memory, not per texture: while texture1's access is open, no other
+// texture from the same memory may begin access.
+TEST_P(SharedTextureMemoryTests, DoubleBeginAccessSeparateTextures) {
+    wgpu::SharedTextureMemory memory = GetParam().mBackend->CreateSharedTextureMemory(device);
+    wgpu::Texture texture1 = memory.CreateTexture();
+    wgpu::Texture texture2 = memory.CreateTexture();
+
+    wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+    beginDesc.initialized = true;
+
+    // It should be an error to BeginAccess twice in a row.
+    memory.BeginAccess(texture1, &beginDesc);
+    ASSERT_DEVICE_ERROR_MSG(memory.BeginAccess(texture2, &beginDesc),
+                            HasSubstr("Cannot begin access with"));
+}
+
+// Test that it is an error to call EndAccess twice in a row on the same memory.
+TEST_P(SharedTextureMemoryTests, DoubleEndAccess) {
+    wgpu::SharedTextureMemory memory = GetParam().mBackend->CreateSharedTextureMemory(device);
+    wgpu::Texture texture = memory.CreateTexture();
+
+    wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+    beginDesc.initialized = true;
+
+    memory.BeginAccess(texture, &beginDesc);
+
+    // First EndAccess closes the scope opened above.
+    wgpu::SharedTextureMemoryEndAccessState endState = {};
+    memory.EndAccess(texture, &endState);
+
+    // Invalid to end access a second time.
+    ASSERT_DEVICE_ERROR_MSG(memory.EndAccess(texture, &endState), HasSubstr("Cannot end access"));
+}
+
+// Test that it is an error to call EndAccess on a texture that was not the one BeginAccess was
+// called on.
+TEST_P(SharedTextureMemoryTests, BeginThenEndOnDifferentTexture) {
+    wgpu::SharedTextureMemory memory = GetParam().mBackend->CreateSharedTextureMemory(device);
+    wgpu::Texture texture1 = memory.CreateTexture();
+    wgpu::Texture texture2 = memory.CreateTexture();
+
+    wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+    beginDesc.initialized = true;
+
+    memory.BeginAccess(texture1, &beginDesc);
+
+    // texture2 never had a BeginAccess, so ending on it must fail.
+    wgpu::SharedTextureMemoryEndAccessState endState = {};
+    ASSERT_DEVICE_ERROR_MSG(memory.EndAccess(texture2, &endState), HasSubstr("Cannot end access"));
+}
+
+// Test that it is an error to call EndAccess without a preceding BeginAccess.
+TEST_P(SharedTextureMemoryTests, EndAccessWithoutBegin) {
+    wgpu::SharedTextureMemory memory = GetParam().mBackend->CreateSharedTextureMemory(device);
+    wgpu::Texture texture = memory.CreateTexture();
+
+    wgpu::SharedTextureMemoryEndAccessState endState = {};
+    ASSERT_DEVICE_ERROR_MSG(memory.EndAccess(texture, &endState), HasSubstr("Cannot end access"));
+}
+
+// Test that it is an error to use the texture on the queue without a preceding BeginAccess.
+// Exercises whichever operations the memory's usage flags allow: render pass, copy, or
+// queue WriteTexture.
+TEST_P(SharedTextureMemoryTests, UseWithoutBegin) {
+    DAWN_TEST_UNSUPPORTED_IF(HasToggleEnabled("skip_validation"));
+
+    wgpu::SharedTextureMemory memory = GetParam().mBackend->CreateSharedTextureMemory(device);
+
+    wgpu::SharedTextureMemoryProperties properties;
+    memory.GetProperties(&properties);
+
+    wgpu::Texture texture = memory.CreateTexture();
+
+    if (properties.usage & wgpu::TextureUsage::RenderAttachment) {
+        ASSERT_DEVICE_ERROR_MSG(UseInRenderPass(device, texture),
+                                HasSubstr("without current access"));
+    } else if (properties.format != wgpu::TextureFormat::R8BG8Biplanar420Unorm) {
+        // Multiplanar textures are excluded from the copy/write paths here.
+        if (properties.usage & wgpu::TextureUsage::CopySrc) {
+            ASSERT_DEVICE_ERROR_MSG(UseInCopy(device, texture),
+                                    HasSubstr("without current access"));
+        }
+        if (properties.usage & wgpu::TextureUsage::CopyDst) {
+            wgpu::Extent3D writeSize = {1, 1, 1};
+            wgpu::ImageCopyTexture dest = {};
+            dest.texture = texture;
+            wgpu::TextureDataLayout dataLayout = {};
+            // Contents don't matter; the write must fail validation before data is read.
+            uint64_t data[2];
+            ASSERT_DEVICE_ERROR_MSG(
+                device.GetQueue().WriteTexture(&dest, &data, sizeof(data), &dataLayout, &writeSize),
+                HasSubstr("without current access"));
+        }
+    }
+}
+
+// Fences are tracked by BeginAccess regardless of whether or not the operation
+// was successful. In error conditions, the same fences are returned in EndAccess, so that
+// the caller can free them (the implementation did not consume them), and the wait condition
+// isn't dropped on the floor entirely.
+// If there are invalid nested accesses, forwarding waits for the invalid accesses still occurs.
+// The mental model is that there is a stack of scopes per (memory, texture) pair.
+TEST_P(SharedTextureMemoryTests, AccessStack) {
+    const auto& memories = GetParam().mBackend->CreateSharedTextureMemories(device);
+    ASSERT_GT(memories.size(), 1u);
+
+    for (size_t i = 0; i < memories.size(); ++i) {
+        // Create multiple textures for use in the test.
+        // The test will use them to Begin/End access in nested and interleaved
+        // patterns to verify the access stack is tracked separately per-texture.
+        wgpu::Texture texture1 = memories[i].CreateTexture();
+        wgpu::Texture texture2 = memories[i].CreateTexture();
+        wgpu::Texture texture3 = memories[i].CreateTexture();
+        wgpu::Texture texture4 = memories[i].CreateTexture();
+        wgpu::Texture texture5 = memories[i].CreateTexture();
+
+        std::vector<wgpu::SharedFence> fences;
+        std::vector<uint64_t> signaledValues;
+
+        wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+        wgpu::SharedTextureMemoryEndAccessState endState;
+        beginDesc.initialized = true;
+
+        // Assert that the fences/signaled values returned by EndAccess are exactly the ones
+        // passed to the (failed) BeginAccess — i.e. the implementation did not consume them.
+        auto CheckFencesMatch = [&](const wgpu::SharedTextureMemoryBeginAccessDescriptor& begin,
+                                    const wgpu::SharedTextureMemoryEndAccessState& end) {
+            ASSERT_EQ(begin.fenceCount, end.fenceCount);
+            for (size_t j = 0; j < end.fenceCount; ++j) {
+                EXPECT_EQ(begin.fences[j].Get(), end.fences[j].Get());
+                EXPECT_EQ(begin.signaledValues[j], end.signaledValues[j]);
+            }
+        };
+
+        // Begin/EndAccess to generate multiple fences.
+        while (fences.size() < 7) {
+            endState = {};
+            memories[i].BeginAccess(texture1, &beginDesc);
+            memories[i].EndAccess(texture1, &endState);
+
+            ASSERT_GT(endState.fenceCount, 0u);
+            for (size_t j = 0; j < endState.fenceCount; ++j) {
+                fences.push_back(std::move(endState.fences[j]));
+                signaledValues.push_back(endState.signaledValues[j]);
+            }
+        }
+
+        // Begin access on memories[i], texture1 using the first fence.
+        // (`x = beginDesc = {}` resets the shared descriptor, then copies it, so each access
+        // tracks its own fence list for CheckFencesMatch below.)
+        auto ti1BeginDesc = beginDesc = {};
+        ti1BeginDesc.initialized = true;
+        ti1BeginDesc.fenceCount = 1;
+        ti1BeginDesc.fences = &fences[0];
+        ti1BeginDesc.signaledValues = &signaledValues[0];
+        memories[i].BeginAccess(texture1, &ti1BeginDesc);
+
+        // Begin access on memories[i], texture2 with no fences.
+        // Invalid: memories[i] already has an open access (texture1).
+        auto ti2BeginDesc = beginDesc = {};
+        ti2BeginDesc.fenceCount = 0;
+        ASSERT_DEVICE_ERROR(memories[i].BeginAccess(texture2, &ti2BeginDesc));
+
+        // Begin access on memories[i], texture3 with two fences. Also invalid (nested access).
+        auto ti3BeginDesc = beginDesc = {};
+        ti3BeginDesc.fenceCount = 2;
+        ti3BeginDesc.fences = &fences[1];
+        ti3BeginDesc.signaledValues = &signaledValues[1];
+        ASSERT_DEVICE_ERROR(memories[i].BeginAccess(texture3, &ti3BeginDesc));
+
+        auto tj3BeginDesc = beginDesc = {};
+        if (i + 1 < memories.size()) {
+            // Begin access on memories[i + 1], texture3 with one fence.
+            // Invalid: texture3 was not created from memories[i + 1].
+            tj3BeginDesc.fenceCount = 1;
+            tj3BeginDesc.fences = &fences[3];
+            tj3BeginDesc.signaledValues = &signaledValues[3];
+            ASSERT_DEVICE_ERROR(memories[i + 1].BeginAccess(texture3, &tj3BeginDesc));
+        }
+
+        // End access on memories[i], texture2.
+        // Expect the same fences from the BeginAccess operation.
+        ASSERT_DEVICE_ERROR(memories[i].EndAccess(texture2, &endState));
+        CheckFencesMatch(ti2BeginDesc, endState);
+
+        // End access on memories[i], texture1. The begin was valid.
+        // This should be valid too.
+        memories[i].EndAccess(texture1, &endState);
+
+        // Begin access on memories[i], texture4 with one fence.
+        // Valid again now that texture1's access has ended.
+        auto ti4BeginDesc = beginDesc = {};
+        ti4BeginDesc.initialized = true;
+        ti4BeginDesc.fenceCount = 1;
+        ti4BeginDesc.fences = &fences[4];
+        ti4BeginDesc.signaledValues = &signaledValues[4];
+        memories[i].BeginAccess(texture4, &ti4BeginDesc);
+
+        auto tj5BeginDesc = beginDesc = {};
+        if (i + 1 < memories.size()) {
+            // Begin access on memories[i + 1], texture5 with one fence.
+            // Invalid: texture5 belongs to memories[i], not memories[i + 1].
+            tj5BeginDesc.fenceCount = 1;
+            tj5BeginDesc.fences = &fences[6];
+            tj5BeginDesc.signaledValues = &signaledValues[6];
+            ASSERT_DEVICE_ERROR(memories[i + 1].BeginAccess(texture5, &tj5BeginDesc));
+
+            // End access on memories[i + 1], texture3.
+            ASSERT_DEVICE_ERROR(memories[i + 1].EndAccess(texture3, &endState));
+            CheckFencesMatch(tj3BeginDesc, endState);
+        }
+
+        // End access on memories[i], texture3.
+        ASSERT_DEVICE_ERROR(memories[i].EndAccess(texture3, &endState));
+        CheckFencesMatch(ti3BeginDesc, endState);
+
+        // EndAccess on memories[i], texture4.
+        memories[i].EndAccess(texture4, &endState);
+
+        if (i + 1 < memories.size()) {
+            // End access on memories[i + 1], texture5.
+            ASSERT_DEVICE_ERROR(memories[i + 1].EndAccess(texture5, &endState));
+            CheckFencesMatch(tj5BeginDesc, endState);
+        }
+    }
+}
+
+// Test that it is an error to call BeginAccess on a texture that wasn't created from the same
+// memory.
+TEST_P(SharedTextureMemoryTests, MismatchingMemory) {
+    const auto& memories = GetParam().mBackend->CreateSharedTextureMemories(device);
+    wgpu::SharedTextureMemory otherMemory = GetParam().mBackend->CreateSharedTextureMemory(device);
+    for (size_t i = 0; i < memories.size(); ++i) {
+        // texture belongs to memories[i], so otherMemory must reject it.
+        wgpu::Texture texture = memories[i].CreateTexture();
+
+        wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+        beginDesc.initialized = true;
+
+        ASSERT_DEVICE_ERROR_MSG(otherMemory.BeginAccess(texture, &beginDesc),
+                                HasSubstr("cannot be used with"));
+
+        // End access so the access scope is balanced.
+        wgpu::SharedTextureMemoryEndAccessState endState;
+        ASSERT_DEVICE_ERROR_MSG(otherMemory.EndAccess(texture, &endState),
+                                HasSubstr("cannot be used with"));
+    }
+}
+
+// Test that it is valid (does not crash) if the memory is dropped while a texture access has begun.
+// The texture must keep whatever state it needs alive after its memory is released.
+TEST_P(SharedTextureMemoryTests, TextureAccessOutlivesMemory) {
+    for (wgpu::SharedTextureMemory memory :
+         GetParam().mBackend->CreateSharedTextureMemories(device)) {
+        wgpu::SharedTextureMemoryProperties properties;
+        memory.GetProperties(&properties);
+
+        wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+        beginDesc.initialized = true;
+
+        // Begin access on a texture, and drop the memory.
+        wgpu::Texture texture = memory.CreateTexture();
+        memory.BeginAccess(texture, &beginDesc);
+        memory = nullptr;
+
+        // Use the texture on the GPU; it should not crash.
+        if (properties.usage & wgpu::TextureUsage::RenderAttachment) {
+            UseInRenderPass(device, texture);
+        } else if (properties.format != wgpu::TextureFormat::R8BG8Biplanar420Unorm) {
+            ASSERT(properties.usage & wgpu::TextureUsage::CopySrc);
+            UseInCopy(device, texture);
+        }
+    }
+}
+
+// Test that if the texture is uninitialized, it is cleared on first use.
+// Strategy: write four colors, then re-begin access claiming the texture is UNinitialized,
+// touch it on the GPU to trigger the lazy clear, and finally check it reads back as zero.
+TEST_P(SharedTextureMemoryTests, UninitializedTextureIsCleared) {
+    for (wgpu::SharedTextureMemory memory :
+         GetParam().mBackend->CreateSharedTextureMemories(device)) {
+        wgpu::SharedTextureMemoryProperties properties;
+        memory.GetProperties(&properties);
+
+        // Skipped for multiplanar formats because those must be initialized on import.
+        if (utils::IsMultiPlanarFormat(properties.format)) {
+            continue;
+        }
+
+        wgpu::Texture texture = memory.CreateTexture();
+
+        wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+        wgpu::SharedTextureMemoryEndAccessState endState = {};
+
+        // First fill the texture with data, so we can check that using it uninitialized
+        // makes it black.
+        {
+            wgpu::CommandBuffer commandBuffer = MakeFourColorsClearCommandBuffer(device, texture);
+
+            beginDesc.initialized = true;
+            memory.BeginAccess(texture, &beginDesc);
+            device.GetQueue().Submit(1, &commandBuffer);
+            memory.EndAccess(texture, &endState);
+        }
+
+        // Now, BeginAccess on the texture as uninitialized.
+        // Chain on the fences from the previous EndAccess so the writes are waited on.
+        beginDesc.fenceCount = endState.fenceCount;
+        beginDesc.fences = endState.fences;
+        beginDesc.signaledValues = endState.signaledValues;
+        beginDesc.initialized = false;
+        memory.BeginAccess(texture, &beginDesc);
+
+        // Use the texture on the GPU which should lazy clear it.
+        if (properties.usage & wgpu::TextureUsage::RenderAttachment) {
+            UseInRenderPass(device, texture);
+        } else {
+            ASSERT(properties.usage & wgpu::TextureUsage::CopySrc);
+            UseInCopy(device, texture);
+        }
+
+        AsNonConst(endState.initialized) = false;  // should be overwritten
+        memory.EndAccess(texture, &endState);
+        // The texture should be initialized now.
+        EXPECT_TRUE(endState.initialized);
+
+        // Begin access again - and check that the texture contents are zero.
+        {
+            auto [commandBuffer, colorTarget] = MakeCheckBySamplingCommandBuffer(device, texture);
+
+            beginDesc.fenceCount = endState.fenceCount;
+            beginDesc.fences = endState.fences;
+            beginDesc.signaledValues = endState.signaledValues;
+            beginDesc.initialized = endState.initialized;
+
+            memory.BeginAccess(texture, &beginDesc);
+            device.GetQueue().Submit(1, &commandBuffer);
+            memory.EndAccess(texture, &endState);
+
+            uint8_t alphaVal;
+            switch (properties.format) {
+                case wgpu::TextureFormat::RGBA8Unorm:
+                case wgpu::TextureFormat::BGRA8Unorm:
+                case wgpu::TextureFormat::RGB10A2Unorm:
+                case wgpu::TextureFormat::RGBA16Float:
+                    alphaVal = 0;
+                    break;
+                default:
+                    // The test checks by sampling. Formats that don't
+                    // have alpha return 1 for alpha when sampled in a shader.
+                    alphaVal = 255;
+                    break;
+            }
+            std::vector<utils::RGBA8> expected(texture.GetWidth() * texture.GetHeight(),
+                                               utils::RGBA8{0, 0, 0, alphaVal});
+            EXPECT_TEXTURE_EQ(device, expected.data(), colorTarget, {0, 0},
+                              {colorTarget.GetWidth(), colorTarget.GetHeight()})
+                << "format: " << static_cast<uint32_t>(properties.format);
+        }
+    }
+}
+
+// Test that if the texture is uninitialized, EndAccess writes the state out as uninitialized.
+TEST_P(SharedTextureMemoryTests, UninitializedOnEndAccess) {
+    for (wgpu::SharedTextureMemory memory :
+         GetParam().mBackend->CreateSharedTextureMemories(device)) {
+        wgpu::SharedTextureMemoryProperties properties;
+        memory.GetProperties(&properties);
+
+        // Test basic begin+end access exports the state as uninitialized
+        // if it starts as uninitialized. Skipped for multiplanar formats
+        // because those must be initialized on import.
+        if (!utils::IsMultiPlanarFormat(properties.format)) {
+            wgpu::Texture texture = memory.CreateTexture();
+
+            wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+            beginDesc.initialized = false;
+            memory.BeginAccess(texture, &beginDesc);
+
+            wgpu::SharedTextureMemoryEndAccessState endState = {};
+            AsNonConst(endState.initialized) = true;  // should be overwritten
+            memory.EndAccess(texture, &endState);
+            EXPECT_FALSE(endState.initialized);
+        }
+
+        // Test begin access as initialized, then uninitializing the texture
+        // exports the state as uninitialized on end access. Requires render
+        // attachment usage to uninitialize.
+        if (properties.usage & wgpu::TextureUsage::RenderAttachment) {
+            wgpu::Texture texture = memory.CreateTexture();
+
+            wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+            beginDesc.initialized = true;
+            memory.BeginAccess(texture, &beginDesc);
+
+            // A render pass with StoreOp::Discard leaves the attachment uninitialized.
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            utils::ComboRenderPassDescriptor passDescriptor({texture.CreateView()});
+            passDescriptor.cColorAttachments[0].storeOp = wgpu::StoreOp::Discard;
+
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+            pass.End();
+            wgpu::CommandBuffer commandBuffer = encoder.Finish();
+            device.GetQueue().Submit(1, &commandBuffer);
+
+            wgpu::SharedTextureMemoryEndAccessState endState = {};
+            AsNonConst(endState.initialized) = true;  // should be overwritten
+            memory.EndAccess(texture, &endState);
+            EXPECT_FALSE(endState.initialized);
+        }
+    }
+}
+
+// Test rendering to a texture memory on one device, then sampling it using another device.
+// Encode the commands after performing BeginAccess.
+TEST_P(SharedTextureMemoryTests, RenderThenSampleEncodeAfterBeginAccess) {
+    std::vector<wgpu::Device> devices = {device, CreateDevice()};
+
+    for (const auto& memories :
+         GetParam().mBackend->CreatePerDeviceSharedTextureMemoriesFilterByUsage(
+             devices, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding)) {
+        wgpu::Texture texture = memories[0].CreateTexture();
+
+        wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+        beginDesc.initialized = false;
+        memories[0].BeginAccess(texture, &beginDesc);
+
+        // Clear the texture
+        wgpu::CommandBuffer commandBuffer = MakeFourColorsClearCommandBuffer(devices[0], texture);
+        devices[0].GetQueue().Submit(1, &commandBuffer);
+
+        wgpu::SharedTextureMemoryEndAccessState endState = {};
+        memories[0].EndAccess(texture, &endState);
+
+        // Sample from the texture
+
+        // Import the fences signaled by the first device so the second device's
+        // access waits on the render work before sampling.
+        std::vector<wgpu::SharedFence> sharedFences(endState.fenceCount);
+        for (size_t i = 0; i < endState.fenceCount; ++i) {
+            sharedFences[i] = GetParam().mBackend->ImportFenceTo(devices[1], endState.fences[i]);
+        }
+        beginDesc.fenceCount = endState.fenceCount;
+        beginDesc.fences = sharedFences.data();
+        beginDesc.signaledValues = endState.signaledValues;
+        beginDesc.initialized = endState.initialized;
+
+        // Create a new texture on the second device's memory for the read.
+        texture = memories[1].CreateTexture();
+
+        memories[1].BeginAccess(texture, &beginDesc);
+
+        wgpu::Texture colorTarget;
+        std::tie(commandBuffer, colorTarget) =
+            MakeCheckBySamplingCommandBuffer(devices[1], texture);
+        devices[1].GetQueue().Submit(1, &commandBuffer);
+        memories[1].EndAccess(texture, &endState);
+
+        CheckFourColors(devices[1], texture.GetFormat(), colorTarget);
+    }
+}
+
+// Test rendering to a texture memory on one device, then sampling it using another device.
+// Encode the commands before performing BeginAccess (the access is only held during QueueSubmit).
+TEST_P(SharedTextureMemoryTests, RenderThenSampleEncodeBeforeBeginAccess) {
+    std::vector<wgpu::Device> devices = {device, CreateDevice()};
+    for (const auto& memories :
+         GetParam().mBackend->CreatePerDeviceSharedTextureMemoriesFilterByUsage(
+             devices, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding)) {
+        // Create two textures from each memory.
+        wgpu::Texture textures[] = {memories[0].CreateTexture(), memories[1].CreateTexture()};
+
+        // Make two command buffers, one that clears the texture, another that samples.
+        wgpu::CommandBuffer commandBuffer0 =
+            MakeFourColorsClearCommandBuffer(devices[0], textures[0]);
+        auto [commandBuffer1, colorTarget] =
+            MakeCheckBySamplingCommandBuffer(devices[1], textures[1]);
+
+        wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+        beginDesc.initialized = false;
+        memories[0].BeginAccess(textures[0], &beginDesc);
+
+        devices[0].GetQueue().Submit(1, &commandBuffer0);
+
+        wgpu::SharedTextureMemoryEndAccessState endState = {};
+        memories[0].EndAccess(textures[0], &endState);
+
+        // Import the fences onto the second device so its access waits on the render.
+        std::vector<wgpu::SharedFence> sharedFences(endState.fenceCount);
+        for (size_t i = 0; i < endState.fenceCount; ++i) {
+            sharedFences[i] = GetParam().mBackend->ImportFenceTo(devices[1], endState.fences[i]);
+        }
+        beginDesc.fenceCount = endState.fenceCount;
+        beginDesc.fences = sharedFences.data();
+        beginDesc.signaledValues = endState.signaledValues;
+        beginDesc.initialized = endState.initialized;
+
+        memories[1].BeginAccess(textures[1], &beginDesc);
+        devices[1].GetQueue().Submit(1, &commandBuffer1);
+        memories[1].EndAccess(textures[1], &endState);
+
+        CheckFourColors(devices[1], textures[1].GetFormat(), colorTarget);
+    }
+}
+
+// Test rendering to a texture memory on one device, then sampling it using another device.
+// Destroy the texture from the first device after submitting the commands, but before performing
+// EndAccess. The second device should still be able to wait on the first device and see the
+// results.
+TEST_P(SharedTextureMemoryTests, RenderThenTextureDestroyBeforeEndAccessThenSample) {
+    std::vector<wgpu::Device> devices = {device, CreateDevice()};
+    for (const auto& memories :
+         GetParam().mBackend->CreatePerDeviceSharedTextureMemoriesFilterByUsage(
+             devices, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding)) {
+        // Create two textures from each memory.
+        wgpu::Texture textures[] = {memories[0].CreateTexture(), memories[1].CreateTexture()};
+
+        // Make two command buffers, one that clears the texture, another that samples.
+        wgpu::CommandBuffer commandBuffer0 =
+            MakeFourColorsClearCommandBuffer(devices[0], textures[0]);
+        auto [commandBuffer1, colorTarget] =
+            MakeCheckBySamplingCommandBuffer(devices[1], textures[1]);
+
+        wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+        beginDesc.initialized = false;
+        memories[0].BeginAccess(textures[0], &beginDesc);
+
+        devices[0].GetQueue().Submit(1, &commandBuffer0);
+
+        // Destroy the texture before performing EndAccess.
+        // EndAccess should still produce fences for the submitted work.
+        textures[0].Destroy();
+
+        wgpu::SharedTextureMemoryEndAccessState endState = {};
+        memories[0].EndAccess(textures[0], &endState);
+
+        // Import the fences onto the second device so its access waits on the render.
+        std::vector<wgpu::SharedFence> sharedFences(endState.fenceCount);
+        for (size_t i = 0; i < endState.fenceCount; ++i) {
+            sharedFences[i] = GetParam().mBackend->ImportFenceTo(devices[1], endState.fences[i]);
+        }
+        beginDesc.fenceCount = endState.fenceCount;
+        beginDesc.fences = sharedFences.data();
+        beginDesc.signaledValues = endState.signaledValues;
+        beginDesc.initialized = endState.initialized;
+
+        memories[1].BeginAccess(textures[1], &beginDesc);
+        devices[1].GetQueue().Submit(1, &commandBuffer1);
+        memories[1].EndAccess(textures[1], &endState);
+
+        CheckFourColors(devices[1], textures[1].GetFormat(), colorTarget);
+    }
+}
+
+// Test rendering to a texture memory on one device, then sampling it using another device.
+// Destroy or lose the first device after submitting the commands, but before performing
+// EndAccess. The second device should still be able to wait on the first device and see the
+// results.
+// This tests both cases where the device is destroyed, and where the device is lost.
+TEST_P(SharedTextureMemoryTests, RenderThenLoseOrDestroyDeviceBeforeEndAccessThenSample) {
+    auto DoTest = [&](auto DestroyOrLoseDevice) {
+        // Fresh devices are created so destroying/losing them doesn't affect other tests.
+        std::vector<wgpu::Device> devices = {CreateDevice(), CreateDevice()};
+        auto perDeviceMemories =
+            GetParam().mBackend->CreatePerDeviceSharedTextureMemoriesFilterByUsage(
+                devices, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding);
+        DAWN_TEST_UNSUPPORTED_IF(perDeviceMemories.empty());
+
+        const auto& memories = perDeviceMemories[0];
+
+        // Create two textures from each memory.
+        wgpu::Texture textures[] = {memories[0].CreateTexture(), memories[1].CreateTexture()};
+
+        // Make two command buffers, one that clears the texture, another that samples.
+        wgpu::CommandBuffer commandBuffer0 =
+            MakeFourColorsClearCommandBuffer(devices[0], textures[0]);
+        auto [commandBuffer1, colorTarget] =
+            MakeCheckBySamplingCommandBuffer(devices[1], textures[1]);
+
+        wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+        beginDesc.initialized = false;
+        memories[0].BeginAccess(textures[0], &beginDesc);
+
+        devices[0].GetQueue().Submit(1, &commandBuffer0);
+
+        // Destroy or lose the device before performing EndAccess.
+        DestroyOrLoseDevice(devices[0]);
+
+        // EndAccess must still export fences even though the device is gone.
+        wgpu::SharedTextureMemoryEndAccessState endState = {};
+        memories[0].EndAccess(textures[0], &endState);
+        EXPECT_GT(endState.fenceCount, 0u);
+
+        std::vector<wgpu::SharedFence> sharedFences(endState.fenceCount);
+        for (size_t i = 0; i < endState.fenceCount; ++i) {
+            sharedFences[i] = GetParam().mBackend->ImportFenceTo(devices[1], endState.fences[i]);
+        }
+        beginDesc.fenceCount = endState.fenceCount;
+        beginDesc.fences = sharedFences.data();
+        beginDesc.signaledValues = endState.signaledValues;
+        beginDesc.initialized = endState.initialized;
+
+        memories[1].BeginAccess(textures[1], &beginDesc);
+        devices[1].GetQueue().Submit(1, &commandBuffer1);
+        memories[1].EndAccess(textures[1], &endState);
+
+        CheckFourColors(devices[1], textures[1].GetFormat(), colorTarget);
+    };
+
+    DoTest([](wgpu::Device d) { d.Destroy(); });
+
+    DoTest([this](wgpu::Device d) { LoseDeviceForTesting(d); });
+}
+
+// Test a shared texture memory created on separate devices but wrapping the same underlying data.
+// Write to the texture, then read from two separate devices concurrently, then write again.
+// Reads should happen strictly after the writes. The final write should wait for the reads.
+TEST_P(SharedTextureMemoryTests, SeparateDevicesWriteThenConcurrentReadThenWrite) {
+    std::vector<wgpu::Device> devices = {device, CreateDevice(), CreateDevice()};
+    for (const auto& memories :
+         GetParam().mBackend->CreatePerDeviceSharedTextureMemoriesFilterByUsage(
+             devices, wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding)) {
+        wgpu::SharedTextureMemoryProperties properties;
+        memories[0].GetProperties(&properties);
+
+        // Writable texture descriptor for the producer device.
+        wgpu::TextureDescriptor writeTextureDesc = {};
+        writeTextureDesc.format = properties.format;
+        writeTextureDesc.size = properties.size;
+        writeTextureDesc.usage =
+            wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding;
+        writeTextureDesc.label = "write texture";
+
+        // Read-only texture descriptor for the consumer devices.
+        wgpu::TextureDescriptor readTextureDesc = {};
+        readTextureDesc.format = properties.format;
+        readTextureDesc.size = properties.size;
+        readTextureDesc.usage = wgpu::TextureUsage::TextureBinding;
+        readTextureDesc.label = "read texture";
+
+        // Create three textures from each memory.
+        // The first one will be written to.
+        // The second two will be concurrently read after the write.
+        // Then the first one will be written to again.
+        wgpu::Texture textures[] = {memories[0].CreateTexture(&writeTextureDesc),
+                                    memories[1].CreateTexture(&readTextureDesc),
+                                    memories[2].CreateTexture(&readTextureDesc)};
+
+        // Build command buffers for the test.
+        wgpu::CommandBuffer writeCommandBuffer0 =
+            MakeFourColorsClearCommandBuffer(devices[0], textures[0]);
+
+        auto [checkCommandBuffer1, colorTarget1] =
+            MakeCheckBySamplingCommandBuffer(devices[1], textures[1]);
+
+        auto [checkCommandBuffer2, colorTarget2] =
+            MakeCheckBySamplingCommandBuffer(devices[2], textures[2]);
+
+        wgpu::CommandBuffer clearToGrayCommandBuffer0;
+        {
+            wgpu::CommandEncoder encoder = devices[0].CreateCommandEncoder();
+            utils::ComboRenderPassDescriptor passDescriptor({textures[0].CreateView()});
+            passDescriptor.cColorAttachments[0].storeOp = wgpu::StoreOp::Store;
+            passDescriptor.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+            passDescriptor.cColorAttachments[0].clearValue = {0.5, 0.5, 0.5, 1.0};
+
+            encoder.BeginRenderPass(&passDescriptor).End();
+            clearToGrayCommandBuffer0 = encoder.Finish();
+        }
+
+        // Begin access on texture 0
+        wgpu::SharedTextureMemoryBeginAccessDescriptor beginDesc = {};
+        beginDesc.initialized = false;
+        memories[0].BeginAccess(textures[0], &beginDesc);
+
+        // Write
+        devices[0].GetQueue().Submit(1, &writeCommandBuffer0);
+
+        // End access on texture 0
+        wgpu::SharedTextureMemoryEndAccessState endState = {};
+        memories[0].EndAccess(textures[0], &endState);
+        EXPECT_TRUE(endState.initialized);
+
+        // Import fences to devices[1] and begin access.
+        std::vector<wgpu::SharedFence> sharedFences(endState.fenceCount);
+        for (size_t i = 0; i < endState.fenceCount; ++i) {
+            sharedFences[i] = GetParam().mBackend->ImportFenceTo(devices[1], endState.fences[i]);
+        }
+        beginDesc.fenceCount = sharedFences.size();
+        beginDesc.fences = sharedFences.data();
+        beginDesc.signaledValues = endState.signaledValues;
+        beginDesc.initialized = true;
+        memories[1].BeginAccess(textures[1], &beginDesc);
+
+        // Import fences to devices[2] and begin access.
+        for (size_t i = 0; i < endState.fenceCount; ++i) {
+            sharedFences[i] = GetParam().mBackend->ImportFenceTo(devices[2], endState.fences[i]);
+        }
+        memories[2].BeginAccess(textures[2], &beginDesc);
+
+        // Check contents
+        devices[1].GetQueue().Submit(1, &checkCommandBuffer1);
+        devices[2].GetQueue().Submit(1, &checkCommandBuffer2);
+        CheckFourColors(devices[1], textures[1].GetFormat(), colorTarget1);
+        CheckFourColors(devices[2], textures[2].GetFormat(), colorTarget2);
+
+        // End access on texture 1
+        wgpu::SharedTextureMemoryEndAccessState endState1 = {};
+        memories[1].EndAccess(textures[1], &endState1);
+        EXPECT_TRUE(endState1.initialized);
+
+        // End access on texture 2
+        wgpu::SharedTextureMemoryEndAccessState endState2 = {};
+        memories[2].EndAccess(textures[2], &endState2);
+        EXPECT_TRUE(endState2.initialized);
+
+        // Import fences from both readers back to devices[0] so the final write
+        // waits on both concurrent reads.
+        sharedFences.resize(endState1.fenceCount + endState2.fenceCount);
+        std::vector<uint64_t> signaledValues(sharedFences.size());
+
+        for (size_t i = 0; i < endState1.fenceCount; ++i) {
+            sharedFences[i] = GetParam().mBackend->ImportFenceTo(devices[0], endState1.fences[i]);
+            signaledValues[i] = endState1.signaledValues[i];
+        }
+        for (size_t i = 0; i < endState2.fenceCount; ++i) {
+            sharedFences[i + endState1.fenceCount] =
+                GetParam().mBackend->ImportFenceTo(devices[0], endState2.fences[i]);
+            signaledValues[i + endState1.fenceCount] = endState2.signaledValues[i];
+        }
+
+        beginDesc.fenceCount = sharedFences.size();
+        beginDesc.fences = sharedFences.data();
+        beginDesc.signaledValues = signaledValues.data();
+        beginDesc.initialized = true;
+
+        // Begin access on texture 0
+        memories[0].BeginAccess(textures[0], &beginDesc);
+
+        // Submit a clear to gray.
+        devices[0].GetQueue().Submit(1, &clearToGrayCommandBuffer0);
+    }
+}
+
+}  // anonymous namespace
+}  // namespace dawn
diff --git a/src/dawn/tests/end2end/SharedTextureMemoryTests.h b/src/dawn/tests/end2end/SharedTextureMemoryTests.h
new file mode 100644
index 0000000..7601058
--- /dev/null
+++ b/src/dawn/tests/end2end/SharedTextureMemoryTests.h
@@ -0,0 +1,94 @@
+// Copyright 2023 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SRC_DAWN_TESTS_END2END_SHAREDTEXTUREMEMORYTESTS_H_
+#define SRC_DAWN_TESTS_END2END_SHAREDTEXTUREMEMORYTESTS_H_
+
+#include <gtest/gtest.h>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "dawn/tests/DawnTest.h"
+
+namespace dawn {
+
+class SharedTextureMemoryTestBackend {
+  public:
+    // The name used in gtest parameterization. Names of backends must be unique.
+    virtual std::string Name() const = 0;
+
+    // The required features for testing this backend.
+    virtual std::vector<wgpu::FeatureName> RequiredFeatures() const = 0;
+
+    // Create one basic shared texture memory. It should support most operations.
+    virtual wgpu::SharedTextureMemory CreateSharedTextureMemory(wgpu::Device& device) = 0;
+
+    // Create a variety of valid SharedTextureMemory for testing, one on each device.
+    // Backends should return all interesting types of shared texture memory here, including
+    // different sizes, formats, memory types, etc.
+    // The inner vector is a vector of the same memory imported to each device.
+    virtual std::vector<std::vector<wgpu::SharedTextureMemory>>
+    CreatePerDeviceSharedTextureMemories(const std::vector<wgpu::Device>& devices) = 0;
+
+    // Import `fence` which may have been created on some other device, onto `importingDevice`.
+    virtual wgpu::SharedFence ImportFenceTo(const wgpu::Device& importingDevice,
+                                            const wgpu::SharedFence& fence) = 0;
+
+    // Shorthand version of `CreatePerDeviceSharedTextureMemories` that creates memories on a single
+    // device.
+    std::vector<wgpu::SharedTextureMemory> CreateSharedTextureMemories(wgpu::Device& device);
+
+    // Wrapper around CreatePerDeviceSharedTextureMemories that filters the memories by
+    // usage to ensure they have `requiredUsage`.
+    std::vector<std::vector<wgpu::SharedTextureMemory>>
+    CreatePerDeviceSharedTextureMemoriesFilterByUsage(const std::vector<wgpu::Device>& devices,
+                                                      wgpu::TextureUsage requiredUsage);
+};
+
+// Printer used by gtest to name parameterized test instances.
+inline std::ostream& operator<<(std::ostream& o, SharedTextureMemoryTestBackend* backend) {
+    o << backend->Name();
+    return o;
+}
+
+using Backend = SharedTextureMemoryTestBackend*;
+DAWN_TEST_PARAM_STRUCT(SharedTextureMemoryTestParams, Backend);
+
+// Fixture which does NOT enable the backend's required features. Used to test
+// that operations fail without the features enabled.
+class SharedTextureMemoryNoFeatureTests : public DawnTestWithParams<SharedTextureMemoryTestParams> {
+  protected:
+    void SetUp() override;
+};
+
+// Fixture which enables the backend's required features.
+class SharedTextureMemoryTests : public DawnTestWithParams<SharedTextureMemoryTestParams> {
+  protected:
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override;
+
+    void SetUp() override;
+
+    void UseInRenderPass(wgpu::Device& deviceObj, wgpu::Texture& texture);
+    void UseInCopy(wgpu::Device& deviceObj, wgpu::Texture& texture);
+
+    wgpu::CommandBuffer MakeFourColorsClearCommandBuffer(wgpu::Device& deviceObj,
+                                                         wgpu::Texture& texture);
+    std::pair<wgpu::CommandBuffer, wgpu::Texture> MakeCheckBySamplingCommandBuffer(
+        wgpu::Device& deviceObj,
+        wgpu::Texture& texture);
+    void CheckFourColors(wgpu::Device& deviceObj,
+                         wgpu::TextureFormat format,
+                         wgpu::Texture& colorTarget);
+};
+
+}  // namespace dawn
+
+#endif  // SRC_DAWN_TESTS_END2END_SHAREDTEXTUREMEMORYTESTS_H_
diff --git a/src/dawn/tests/end2end/SharedTextureMemoryTests_apple.mm b/src/dawn/tests/end2end/SharedTextureMemoryTests_apple.mm
new file mode 100644
index 0000000..977782b
--- /dev/null
+++ b/src/dawn/tests/end2end/SharedTextureMemoryTests_apple.mm
@@ -0,0 +1,251 @@
+// Copyright 2023 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <CoreVideo/CVPixelBuffer.h>
+#include <IOSurface/IOSurface.h>
+
+#import <Metal/Metal.h>
+
+#include "dawn/common/CoreFoundationRef.h"
+#include "dawn/common/NSRef.h"
+#include "dawn/tests/end2end/SharedTextureMemoryTests.h"
+#include "dawn/webgpu_cpp.h"
+
+namespace dawn {
+namespace {
+
+// Adds a 32-bit integer value under `key` in `dictionary`. Used to build the
+// property dictionaries passed to IOSurfaceCreate.
+void AddIntegerValue(CFMutableDictionaryRef dictionary, const CFStringRef key, int32_t value) {
+    auto number = AcquireCFRef(CFNumberCreate(nullptr, kCFNumberSInt32Type, &value));
+    CFDictionaryAddValue(dictionary, key, number.Get());
+}
+
+// IOSurface-based test backend for the SharedTextureMemory tests.
+class Backend : public SharedTextureMemoryTestBackend {
+  public:
+    static Backend* GetInstance() {
+        static Backend b;
+        return &b;
+    }
+
+    std::string Name() const override { return "IOSurface"; }
+
+    std::vector<wgpu::FeatureName> RequiredFeatures() const override {
+        return {wgpu::FeatureName::SharedTextureMemoryIOSurface,
+                wgpu::FeatureName::SharedFenceMTLSharedEvent,
+                wgpu::FeatureName::DawnMultiPlanarFormats};
+    }
+
+    // Create one basic shared texture memory. It should support most operations.
+    wgpu::SharedTextureMemory CreateSharedTextureMemory(wgpu::Device& device) override {
+        auto dict = AcquireCFRef(CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
+                                                           &kCFTypeDictionaryKeyCallBacks,
+                                                           &kCFTypeDictionaryValueCallBacks));
+        AddIntegerValue(dict.Get(), kIOSurfaceWidth, 16);
+        AddIntegerValue(dict.Get(), kIOSurfaceHeight, 16);
+        AddIntegerValue(dict.Get(), kIOSurfacePixelFormat, kCVPixelFormatType_32RGBA);
+        AddIntegerValue(dict.Get(), kIOSurfaceBytesPerElement, 4);
+
+        // IOSurfaceCreate returns a +1 reference; hold it in a scoped ref so it
+        // is released after the import instead of leaking.
+        auto ioSurface = AcquireCFRef(IOSurfaceCreate(dict.Get()));
+
+        wgpu::SharedTextureMemoryIOSurfaceDescriptor ioSurfaceDesc;
+        ioSurfaceDesc.ioSurface = ioSurface.Get();
+
+        wgpu::SharedTextureMemoryDescriptor desc;
+        desc.nextInChain = &ioSurfaceDesc;
+
+        return device.ImportSharedTextureMemory(&desc);
+    }
+
+    // Create a variety of IOSurfaces (several pixel formats and sizes), each imported
+    // onto every device in `devices`.
+    std::vector<std::vector<wgpu::SharedTextureMemory>> CreatePerDeviceSharedTextureMemories(
+        const std::vector<wgpu::Device>& devices) override {
+        std::vector<std::vector<wgpu::SharedTextureMemory>> memories;
+        for (auto [format, bytesPerElement] : {
+                 std::make_pair(kCVPixelFormatType_64RGBAHalf, 8),
+                 std::make_pair(kCVPixelFormatType_TwoComponent16Half, 4),
+                 std::make_pair(kCVPixelFormatType_OneComponent16Half, 2),
+                 std::make_pair(kCVPixelFormatType_ARGB2101010LEPacked, 4),
+                 std::make_pair(kCVPixelFormatType_32RGBA, 4),
+                 std::make_pair(kCVPixelFormatType_32BGRA, 4),
+                 std::make_pair(kCVPixelFormatType_TwoComponent8, 2),
+                 std::make_pair(kCVPixelFormatType_OneComponent8, 1),
+                 std::make_pair(kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, 4),
+             }) {
+            for (uint32_t size : {4, 64}) {
+                auto dict = AcquireCFRef(CFDictionaryCreateMutable(
+                    kCFAllocatorDefault, 0, &kCFTypeDictionaryKeyCallBacks,
+                    &kCFTypeDictionaryValueCallBacks));
+                AddIntegerValue(dict.Get(), kIOSurfaceWidth, size);
+                AddIntegerValue(dict.Get(), kIOSurfaceHeight, size);
+                AddIntegerValue(dict.Get(), kIOSurfacePixelFormat, format);
+                AddIntegerValue(dict.Get(), kIOSurfaceBytesPerElement, bytesPerElement);
+
+                // Hold the +1 reference from IOSurfaceCreate so the surface is
+                // released after all imports below, avoiding a leak.
+                auto ioSurface = AcquireCFRef(IOSurfaceCreate(dict.Get()));
+
+                wgpu::SharedTextureMemoryIOSurfaceDescriptor ioSurfaceDesc;
+                ioSurfaceDesc.ioSurface = ioSurface.Get();
+
+                // Internally, the CV enums are defined as their fourcc values. Cast to that and use
+                // it as the label. The fourcc value is a four-character name that can be
+                // interpreted as a 32-bit integer enum ('ABGR', 'r011', etc.)
+                std::string label = std::string(reinterpret_cast<char*>(&format), 4) + " " +
+                                    std::to_string(size) + "x" + std::to_string(size);
+                wgpu::SharedTextureMemoryDescriptor desc;
+                desc.label = label.c_str();
+                desc.nextInChain = &ioSurfaceDesc;
+
+                std::vector<wgpu::SharedTextureMemory> perDeviceMemories;
+                for (auto& device : devices) {
+                    perDeviceMemories.push_back(device.ImportSharedTextureMemory(&desc));
+                }
+                memories.push_back(std::move(perDeviceMemories));
+            }
+        }
+
+        return memories;
+    }
+
+    // Export the MTLSharedEvent from `fence` and re-import it on `importingDevice`.
+    wgpu::SharedFence ImportFenceTo(const wgpu::Device& importingDevice,
+                                    const wgpu::SharedFence& fence) override {
+        wgpu::SharedFenceMTLSharedEventExportInfo sharedEventInfo;
+        wgpu::SharedFenceExportInfo exportInfo;
+        exportInfo.nextInChain = &sharedEventInfo;
+
+        fence.ExportInfo(&exportInfo);
+
+        wgpu::SharedFenceMTLSharedEventDescriptor sharedEventDesc;
+        sharedEventDesc.sharedEvent = sharedEventInfo.sharedEvent;
+
+        wgpu::SharedFenceDescriptor fenceDesc;
+        fenceDesc.nextInChain = &sharedEventDesc;
+
+        return importingDevice.ImportSharedFence(&fenceDesc);
+    }
+};
+
+// Test that a shared event can be imported, and then exported.
+TEST_P(SharedTextureMemoryTests, SharedFenceSuccessfulImportExport) {
+    // MTLSharedEvent is only available on macOS 10.14+ / iOS 12.0+.
+    if (@available(macOS 10.14, iOS 12.0, *)) {
+        auto mtlDevice = AcquireNSPRef(MTLCreateSystemDefaultDevice());
+        auto sharedEvent = AcquireNSPRef([*mtlDevice newSharedEvent]);
+
+        wgpu::SharedFenceMTLSharedEventDescriptor sharedEventDesc;
+        sharedEventDesc.sharedEvent = static_cast<void*>(*sharedEvent);
+
+        wgpu::SharedFenceDescriptor fenceDesc;
+        fenceDesc.nextInChain = &sharedEventDesc;
+
+        wgpu::SharedFence fence = device.ImportSharedFence(&fenceDesc);
+
+        // Release the Metal objects. They should be retained by the implementation.
+        mtlDevice = nil;
+        sharedEvent = nil;
+
+        wgpu::SharedFenceMTLSharedEventExportInfo sharedEventInfo;
+        wgpu::SharedFenceExportInfo exportInfo;
+        exportInfo.nextInChain = &sharedEventInfo;
+        fence.ExportInfo(&exportInfo);
+
+        // The exported event should be the same as the imported one.
+        EXPECT_EQ(sharedEventInfo.sharedEvent, sharedEventDesc.sharedEvent);
+        EXPECT_EQ(exportInfo.type, wgpu::SharedFenceType::MTLSharedEvent);
+    }
+}
+
+// Test that it is an error to import a shared fence when the MTLSharedEvent
+// feature is not enabled.
+TEST_P(SharedTextureMemoryNoFeatureTests, SharedFenceImportWithoutFeature) {
+    if (@available(macOS 10.14, iOS 12.0, *)) {
+        auto mtlDevice = AcquireNSPRef(MTLCreateSystemDefaultDevice());
+        auto sharedEvent = AcquireNSPRef([*mtlDevice newSharedEvent]);
+
+        wgpu::SharedFenceMTLSharedEventDescriptor sharedEventDesc;
+        sharedEventDesc.sharedEvent = static_cast<void*>(*sharedEvent);
+
+        wgpu::SharedFenceDescriptor fenceDesc;
+        fenceDesc.nextInChain = &sharedEventDesc;
+
+        ASSERT_DEVICE_ERROR_MSG(wgpu::SharedFence fence = device.ImportSharedFence(&fenceDesc),
+                                testing::HasSubstr("MTLSharedEvent is not enabled"));
+    }
+}
+
+// Test that it is an error to import a shared fence with a null MTLSharedEvent
+TEST_P(SharedTextureMemoryTests, SharedFenceImportMTLSharedEventMissing) {
+    if (@available(macOS 10.14, iOS 12.0, *)) {
+        wgpu::SharedFenceMTLSharedEventDescriptor sharedEventDesc;
+        sharedEventDesc.sharedEvent = nullptr;  // intentionally missing
+
+        wgpu::SharedFenceDescriptor fenceDesc;
+        fenceDesc.nextInChain = &sharedEventDesc;
+
+        ASSERT_DEVICE_ERROR_MSG(wgpu::SharedFence fence = device.ImportSharedFence(&fenceDesc),
+                                testing::HasSubstr("missing"));
+    }
+}
+
+// Test exporting info from a shared fence with no chained struct.
+// It should be valid and the fence type is exported.
+TEST_P(SharedTextureMemoryTests, SharedFenceExportInfoNoChainedStruct) {
+    if (@available(macOS 10.14, iOS 12.0, *)) {
+        auto mtlDevice = AcquireNSPRef(MTLCreateSystemDefaultDevice());
+        auto sharedEvent = AcquireNSPRef([*mtlDevice newSharedEvent]);
+
+        wgpu::SharedFenceMTLSharedEventDescriptor sharedEventDesc;
+        sharedEventDesc.sharedEvent = static_cast<void*>(*sharedEvent);
+
+        wgpu::SharedFenceDescriptor fenceDesc;
+        fenceDesc.nextInChain = &sharedEventDesc;
+
+        wgpu::SharedFence fence = device.ImportSharedFence(&fenceDesc);
+
+        // Test no chained struct.
+        wgpu::SharedFenceExportInfo exportInfo;
+        exportInfo.nextInChain = nullptr;
+
+        // Only the fence type should be written; no error expected.
+        fence.ExportInfo(&exportInfo);
+        EXPECT_EQ(exportInfo.type, wgpu::SharedFenceType::MTLSharedEvent);
+    }
+}
+
+// Test exporting info from a shared fence with an invalid chained struct.
+// It should not be valid, but the fence type should still be exported.
+TEST_P(SharedTextureMemoryTests, SharedFenceExportInfoInvalidChainedStruct) {
+    if (@available(macOS 10.14, iOS 12.0, *)) {
+        auto mtlDevice = AcquireNSPRef(MTLCreateSystemDefaultDevice());
+        auto sharedEvent = AcquireNSPRef([*mtlDevice newSharedEvent]);
+
+        wgpu::SharedFenceMTLSharedEventDescriptor sharedEventDesc;
+        sharedEventDesc.sharedEvent = static_cast<void*>(*sharedEvent);
+
+        wgpu::SharedFenceDescriptor fenceDesc;
+        fenceDesc.nextInChain = &sharedEventDesc;
+
+        wgpu::SharedFence fence = device.ImportSharedFence(&fenceDesc);
+
+        // A bare ChainedStructOut has no sType recognized by ExportInfo.
+        wgpu::ChainedStructOut otherStruct;
+        wgpu::SharedFenceExportInfo exportInfo;
+        exportInfo.nextInChain = &otherStruct;
+
+        ASSERT_DEVICE_ERROR(fence.ExportInfo(&exportInfo));
+        EXPECT_EQ(exportInfo.type, wgpu::SharedFenceType::MTLSharedEvent);
+    }
+}
+
+// Instantiate both fixtures on the Metal backend with the IOSurface test backend.
+DAWN_INSTANTIATE_PREFIXED_TEST_P(Metal,
+                                 SharedTextureMemoryNoFeatureTests,
+                                 {MetalBackend()},
+                                 {Backend::GetInstance()});
+
+DAWN_INSTANTIATE_PREFIXED_TEST_P(Metal,
+                                 SharedTextureMemoryTests,
+                                 {MetalBackend()},
+                                 {Backend::GetInstance()});
+
+}  // anonymous namespace
+}  // namespace dawn
diff --git a/src/dawn/utils/TextureUtils.cpp b/src/dawn/utils/TextureUtils.cpp
index 9d1c12b..8f84b9c 100644
--- a/src/dawn/utils/TextureUtils.cpp
+++ b/src/dawn/utils/TextureUtils.cpp
@@ -146,6 +146,15 @@
     }
 }
 
+// Returns true if |textureFormat| is a multi-planar format.
+// Currently only R8BG8Biplanar420Unorm is recognized.
+bool IsMultiPlanarFormat(wgpu::TextureFormat textureFormat) {
+    switch (textureFormat) {
+        case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+            return true;
+        default:
+            return false;
+    }
+}
+
 bool TextureFormatSupportsMultisampling(wgpu::TextureFormat textureFormat) {
     if (IsBCTextureFormat(textureFormat) || IsETC2TextureFormat(textureFormat) ||
         IsASTCTextureFormat(textureFormat)) {
diff --git a/src/dawn/utils/TextureUtils.h b/src/dawn/utils/TextureUtils.h
index 5638fb1..be634a3 100644
--- a/src/dawn/utils/TextureUtils.h
+++ b/src/dawn/utils/TextureUtils.h
@@ -231,6 +231,8 @@
 bool IsStencilOnlyFormat(wgpu::TextureFormat textureFormat);
 bool IsDepthOrStencilFormat(wgpu::TextureFormat textureFormat);
 
+bool IsMultiPlanarFormat(wgpu::TextureFormat textureFormat);
+
 bool TextureFormatSupportsMultisampling(wgpu::TextureFormat textureFormat);
 bool TextureFormatSupportsResolveTarget(wgpu::TextureFormat textureFormat);