Validate depth attachment size against the first plane size

D3D11 requires the depth attachment size to match the size of the
color attachment texture's first plane. Other backends don't have
this requirement and can work with a bigger depth buffer. So change
Dawn's validation code to validate the depth attachment size against
the first plane size instead of the subsampled plane size.
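
As a minimal sketch of the case this change allows (using the webgpu_cpp
API and the ComboRenderPassDescriptor helper from the tests; the 640x480
NV12 texture and the nv12Texture/device names are only illustrative):

    // View of the subsampled chroma plane (320x240) of a 640x480 NV12 texture.
    wgpu::TextureViewDescriptor chromaViewDesc;
    chromaViewDesc.aspect = wgpu::TextureAspect::Plane1Only;
    wgpu::TextureView chromaView = nv12Texture.CreateView(&chromaViewDesc);

    // The depth attachment must be sized to plane 0 (640x480), even though the
    // render size is the subsampled 320x240 chroma plane.
    wgpu::TextureDescriptor depthDesc;
    depthDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
    depthDesc.usage = wgpu::TextureUsage::RenderAttachment;
    depthDesc.size = {640, 480, 1};
    wgpu::Texture depthTexture = device.CreateTexture(&depthDesc);

    // Valid on all backends after this change; a 320x240 depth texture
    // (matching the subsampled plane) is an error.
    utils::ComboRenderPassDescriptor renderPass({chromaView},
                                                depthTexture.CreateView());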

Bug: chromium:324422644
Change-Id: I1b556adce4646d45622b92e88a28d59d3736ddbc
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/176020
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Reviewed-by: Austin Eng <enga@chromium.org>
Kokoro: Kokoro <noreply+kokoro@google.com>
Commit-Queue: Peng Huang <penghuang@chromium.org>
diff --git a/src/dawn/native/CommandEncoder.cpp b/src/dawn/native/CommandEncoder.cpp
index 76d836b..3560534 100644
--- a/src/dawn/native/CommandEncoder.cpp
+++ b/src/dawn/native/CommandEncoder.cpp
@@ -134,14 +134,61 @@
             implicitPrefixStr = "implicit ";
         }
 
-        Extent3D attachmentSize = attachment->GetSingleSubresourceVirtualSize();
-
+        Extent3D renderSize = attachment->GetSingleSubresourceVirtualSize();
+        Extent3D attachmentValidationSize = renderSize;
+        if (attachment->GetTexture()->GetFormat().IsMultiPlanar()) {
+            // For multi-planar textures, D3D requires the depth stencil buffer size to be equal
+            // to the size of plane 0 of the color attachment texture
+            // (`attachmentValidationSize`). Vulkan, Metal and GL require a buffer size equal to
+            // or bigger than the render size. To make all dawn backends work, dawn requires the
+            // depth attachment's size to be equal to the `attachmentValidationSize`.
+            // Vulkan:
+            // https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkFramebufferCreateInfo.html#VUID-VkFramebufferCreateInfo-flags-04533
+            // OpenGLES3.0 (https://www.khronos.org/registry/OpenGL/specs/es/3.0/es_spec_3.0.pdf
+            // section 4.4.4.2) allows attachments to have unequal sizes.
+            attachmentValidationSize =
+                attachment->GetTexture()->GetMipLevelSingleSubresourceVirtualSize(
+                    attachment->GetBaseMipLevel(), Aspect::Plane0);
+        }
         if (HasAttachment()) {
-            DAWN_INVALID_IF(attachmentSize.width != mWidth || attachmentSize.height != mHeight,
-                            "The %s %s size (width: %u, height: %u) does not match the size of the "
-                            "other attachments (width: %u, height: %u).",
-                            attachmentTypeStr, attachment, attachmentSize.width,
-                            attachmentSize.height, mWidth, mHeight);
+            switch (attachmentType) {
+                case AttachmentType::ColorAttachment:
+                case AttachmentType::StorageAttachment: {
+                    DAWN_INVALID_IF(
+                        renderSize.width != mRenderWidth || renderSize.height != mRenderHeight,
+                        "The %s %s size (width: %u, height: %u) does not match the size of the "
+                        "other attachments (width: %u, height: %u).",
+                        attachmentTypeStr, attachment, renderSize.width, renderSize.height,
+                        mRenderWidth, mRenderHeight);
+                    break;
+                }
+                case AttachmentType::ResolveTarget: {
+                    // TODO(chromium:324422644): support using multi-planar texture as resolve
+                    // target.
+                    DAWN_INVALID_IF(attachment->GetTexture()->GetFormat().IsMultiPlanar(),
+                                    "The resolve target %s is from a multi-planar texture, which "
+                                    "is not supported by dawn yet.",
+                                    attachment);
+                    DAWN_INVALID_IF(
+                        renderSize.width != mRenderWidth || renderSize.height != mRenderHeight,
+                        "The resolve target %s size (width: %u, height: %u) does not match the "
+                        "size of the other attachments (width: %u, height: %u).",
+                        attachment, renderSize.width, renderSize.height, mRenderWidth,
+                        mRenderHeight);
+                    break;
+                }
+                case AttachmentType::DepthStencilAttachment: {
+                    DAWN_INVALID_IF(
+                        attachmentValidationSize.width != mAttachmentValidationWidth ||
+                            attachmentValidationSize.height != mAttachmentValidationHeight,
+                        "The depth stencil attachment %s size (width: %u, height: %u) does not "
+                        "match the size of the other attachments' base plane (width: %u, height: "
+                        "%u).",
+                        attachment, attachmentValidationSize.width, attachmentValidationSize.height,
+                        mAttachmentValidationWidth, mAttachmentValidationHeight);
+                    break;
+                }
+            }
 
             // Skip the sampleCount validation for resolve target
             DAWN_INVALID_IF(attachmentType != AttachmentType::ResolveTarget &&
@@ -151,12 +198,16 @@
                             attachmentTypeStr, attachment, implicitPrefixStr,
                             attachment->GetTexture()->GetSampleCount(), mSampleCount);
         } else {
-            mWidth = attachmentSize.width;
-            mHeight = attachmentSize.height;
+            mRenderWidth = renderSize.width;
+            mRenderHeight = renderSize.height;
+            mAttachmentValidationWidth = attachmentValidationSize.width;
+            mAttachmentValidationHeight = attachmentValidationSize.height;
             mSampleCount = mImplicitSampleCount > 1 ? mImplicitSampleCount
                                                     : attachment->GetTexture()->GetSampleCount();
-            DAWN_ASSERT(mWidth != 0);
-            DAWN_ASSERT(mHeight != 0);
+            DAWN_ASSERT(mRenderWidth != 0);
+            DAWN_ASSERT(mRenderHeight != 0);
+            DAWN_ASSERT(mAttachmentValidationWidth != 0);
+            DAWN_ASSERT(mAttachmentValidationHeight != 0);
             DAWN_ASSERT(mSampleCount != 0);
         }
 
@@ -186,13 +237,13 @@
     bool HasAttachment() const { return mRecords->size() != 0; }
 
     bool IsValidState() const {
-        return ((mWidth > 0) && (mHeight > 0) && (mSampleCount > 0) &&
+        return ((mRenderWidth > 0) && (mRenderHeight > 0) && (mSampleCount > 0) &&
                 (mImplicitSampleCount == 0 || mImplicitSampleCount == mSampleCount));
     }
 
-    uint32_t GetWidth() const { return mWidth; }
+    uint32_t GetRenderWidth() const { return mRenderWidth; }
 
-    uint32_t GetHeight() const { return mHeight; }
+    uint32_t GetRenderHeight() const { return mRenderHeight; }
 
     uint32_t GetSampleCount() const { return mSampleCount; }
 
@@ -204,12 +255,15 @@
 
   private:
     // The attachment's width, height and sample count.
-    uint32_t mWidth = 0;
-    uint32_t mHeight = 0;
+    uint32_t mRenderWidth = 0;
+    uint32_t mRenderHeight = 0;
     uint32_t mSampleCount = 0;
     // The implicit multisample count used by MSAA render to single sampled.
     uint32_t mImplicitSampleCount = 0;
 
+    uint32_t mAttachmentValidationWidth = 0;
+    uint32_t mAttachmentValidationHeight = 0;
+
     // The records of the attachments that were validated in render pass.
     StackVector<RecordedAttachment, kMaxColorAttachments> mRecords;
 };
@@ -1238,8 +1292,8 @@
                 }
             }
 
-            cmd->width = validationState.GetWidth();
-            cmd->height = validationState.GetHeight();
+            cmd->width = validationState.GetRenderWidth();
+            cmd->height = validationState.GetRenderHeight();
 
             cmd->occlusionQuerySet = descriptor->occlusionQuerySet;
 
@@ -1293,8 +1347,8 @@
     if (success) {
         Ref<RenderPassEncoder> passEncoder = RenderPassEncoder::Create(
             device, descriptor, this, &mEncodingContext, std::move(usageTracker),
-            std::move(attachmentState), validationState.GetWidth(), validationState.GetHeight(),
-            depthReadOnly, stencilReadOnly, passEndCallback);
+            std::move(attachmentState), validationState.GetRenderWidth(),
+            validationState.GetRenderHeight(), depthReadOnly, stencilReadOnly, passEndCallback);
 
         mEncodingContext.EnterPass(passEncoder.Get());
 
diff --git a/src/dawn/tests/end2end/VideoViewsTests.cpp b/src/dawn/tests/end2end/VideoViewsTests.cpp
index 8f3932a..a31b897 100644
--- a/src/dawn/tests/end2end/VideoViewsTests.cpp
+++ b/src/dawn/tests/end2end/VideoViewsTests.cpp
@@ -27,6 +27,8 @@
 
 #include "dawn/tests/end2end/VideoViewsTests.h"
 
+#include <sstream>
+#include <string>
 #include <utility>
 #include <vector>
 
@@ -1410,121 +1412,197 @@
         return requiredFeatures;
     }
 
+    template <typename T>
+    wgpu::Texture CreatePlaneTextureWithData(int planeIndex, bool hasAlpha) {
+        auto kSubsampleFactor = planeIndex == kYUVAChromaPlaneIndex ? 2 : 1;
+        wgpu::Extent3D size = {kYUVAImageDataWidthInTexels / kSubsampleFactor,
+                               kYUVAImageDataHeightInTexels / kSubsampleFactor, 1};
+
+        // Create source texture with plane format
+        wgpu::TextureDescriptor planeTextureDesc;
+        planeTextureDesc.size = size;
+        planeTextureDesc.format = GetPlaneFormat(planeIndex);
+        planeTextureDesc.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding;
+        wgpu::Texture planeTexture = device.CreateTexture(&planeTextureDesc);
+
+        // Copy plane (Y/UV/A) data to the plane source texture.
+        std::vector<T> planeSrcData = VideoViewsTestsBase::GetTestTextureDataWithPlaneIndex<T>(
+            planeIndex, kTextureBytesPerRowAlignment,
+            kYUVAImageDataHeightInTexels / kSubsampleFactor, false, hasAlpha);
+        wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(planeTexture);
+        wgpu::TextureDataLayout textureDataLayout =
+            utils::CreateTextureDataLayout(0, kTextureBytesPerRowAlignment);
+        wgpu::Queue queue = device.GetQueue();
+        queue.WriteTexture(&imageCopyTexture, planeSrcData.data(), planeSrcData.size() * sizeof(T),
+                           &textureDataLayout, &size);
+
+        return planeTexture;
+    }
+
     // Tests for rendering to a multiplanar video texture through its views. It creates R/RG source
     // textures with data which are then read into luma and chroma texture views. Since multiplanar
     // textures don't support copy operations yet, the test renders from the luma/chroma texture
     // views into other R/RG textures, which are then compared against the expected data.
     template <typename T>
-    void RenderToMultiplanarVideoTexture() {
-        // Create plane texture initialized with data.
-        auto CreatePlaneTexWithData = [this](int planeIndex, bool hasAlpha) -> wgpu::Texture {
-            auto kSubsampleFactor = planeIndex == kYUVAChromaPlaneIndex ? 2 : 1;
-            wgpu::Extent3D size = {kYUVAImageDataWidthInTexels / kSubsampleFactor,
-                                   kYUVAImageDataHeightInTexels / kSubsampleFactor, 1};
-
-            // Create source texture with plane format
-            wgpu::TextureDescriptor planeTextureDesc;
-            planeTextureDesc.size = size;
-            planeTextureDesc.format = GetPlaneFormat(planeIndex);
-            planeTextureDesc.usage =
-                wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding;
-            wgpu::Texture planeTexture = device.CreateTexture(&planeTextureDesc);
-
-            // Copy plane (Y/UV/A) data to the plane source texture.
-            std::vector<T> planeSrcData = VideoViewsTestsBase::GetTestTextureDataWithPlaneIndex<T>(
-                planeIndex, kTextureBytesPerRowAlignment,
-                kYUVAImageDataHeightInTexels / kSubsampleFactor, false, hasAlpha);
-            wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(planeTexture);
-            wgpu::TextureDataLayout textureDataLayout =
-                utils::CreateTextureDataLayout(0, kTextureBytesPerRowAlignment);
-            wgpu::Queue queue = device.GetQueue();
-            queue.WriteTexture(&imageCopyTexture, planeSrcData.data(),
-                               planeSrcData.size() * sizeof(T), &textureDataLayout, &size);
-
-            return planeTexture;
-        };
-
+    void RenderToMultiplanarVideoTextures(size_t numOfTextures, bool hasDepth) {
         const bool hasAlpha = NumPlanes(GetFormat()) > 2;
 
         // Create source texture with plane 0 format i.e. R8/R16Unorm.
-        wgpu::Texture plane0Texture = CreatePlaneTexWithData(kYUVALumaPlaneIndex, hasAlpha);
+        wgpu::Texture plane0Texture = CreatePlaneTextureWithData<T>(kYUVALumaPlaneIndex, hasAlpha);
         ASSERT_NE(plane0Texture.Get(), nullptr);
         // Create source texture with plane 1 format i.e. RG8/RG16Unorm.
-        wgpu::Texture plane1Texture = CreatePlaneTexWithData(kYUVAChromaPlaneIndex, hasAlpha);
+        wgpu::Texture plane1Texture =
+            CreatePlaneTextureWithData<T>(kYUVAChromaPlaneIndex, hasAlpha);
         ASSERT_NE(plane1Texture.Get(), nullptr);
         wgpu::Texture plane2Texture;
         if (hasAlpha) {
             // Create source texture with plane 2 format i.e. R8.
-            plane2Texture = CreatePlaneTexWithData(kYUVAAlphaPlaneIndex, hasAlpha);
+            plane2Texture = CreatePlaneTextureWithData<T>(kYUVAAlphaPlaneIndex, hasAlpha);
             ASSERT_NE(plane2Texture.Get(), nullptr);
         }
 
         // TODO(dawn:1337): Allow creating uninitialized texture for rendering.
-        // Create a video texture to be rendered into with multiplanar format.
-        auto destVideoTexture = mBackend->CreateVideoTextureForTest(
-            GetFormat(), wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment,
-            /*isCheckerboard*/ false,
-            /*initialized*/ true);
-        ASSERT_NE(destVideoTexture.get(), nullptr);
-        if (!destVideoTexture->CanWrapAsWGPUTexture()) {
-            mBackend->DestroyVideoTextureForTest(std::move(destVideoTexture));
-            GTEST_SKIP() << "Skipped because not supported.";
+        // Create video textures with a multi-planar format to be rendered into.
+        std::vector<std::unique_ptr<VideoViewsTestBackend::PlatformTexture>> destVideoTextures(
+            numOfTextures);
+        std::vector<wgpu::Texture> destVideoWGPUTextures;
+        for (auto& destVideoTexture : destVideoTextures) {
+            destVideoTexture = mBackend->CreateVideoTextureForTest(
+                GetFormat(),
+                wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment,
+                /*isCheckerboard*/ false,
+                /*initialized*/ true);
+            ASSERT_NE(destVideoTexture.get(), nullptr);
+            if (!destVideoTexture->CanWrapAsWGPUTexture()) {
+                mBackend->DestroyVideoTextureForTest(std::move(destVideoTexture));
+                GTEST_SKIP() << "Skipped because not supported.";
+            }
+            destVideoWGPUTextures.push_back(destVideoTexture->wgpuTexture);
         }
-        auto destVideoWGPUTexture = destVideoTexture->wgpuTexture;
 
-        // Perform plane operations for texting by creating render passes and comparing textures.
-        auto PerformPlaneOperations = [this](int planeIndex, wgpu::Texture destVideoWGPUTexture,
-                                             wgpu::Texture planeTextureWithData, bool hasAlpha) {
+        wgpu::Texture depthTexture;
+        wgpu::TextureView depthTextureView;
+        if (hasDepth) {
+            // Create a full-size depth texture and use it to render all planes.
+            wgpu::Extent3D size = {kYUVAImageDataWidthInTexels, kYUVAImageDataHeightInTexels, 1};
+            wgpu::TextureDescriptor desc;
+            desc.size = size;
+            desc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+            desc.usage = wgpu::TextureUsage::RenderAttachment;
+            depthTexture = device.CreateTexture(&desc);
+            depthTextureView = depthTexture.CreateView();
+        }
+
+        // Perform plane operations for testing by creating render passes and comparing textures.
+        auto PerformPlaneOperations = [this, destVideoWGPUTextures, hasDepth, depthTextureView](
+                                          int planeIndex, wgpu::Texture planeTextureWithData,
+                                          bool hasAlpha) {
             auto kSubsampleFactor = planeIndex == kYUVAChromaPlaneIndex ? 2 : 1;
+            auto vsModule = GetTestVertexShaderModule();
 
-            utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
-            renderPipelineDescriptor.vertex.module = GetTestVertexShaderModule();
-            renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
-                @group(0) @binding(0) var sampler0 : sampler;
-                @group(0) @binding(1) var texture : texture_2d<f32>;
+            std::string outputStruct;
+            {
+                std::ostringstream result;
+                result << "struct Output {" << std::endl;
+                for (size_t i = 0; i < destVideoWGPUTextures.size(); ++i) {
+                    result << "    @location(" << i << ") color" << i << " : vec4f," << std::endl;
+                }
+                result << "};" << std::endl;
+                outputStruct = std::move(result).str();
+            }
 
-                @fragment
-                fn main(@location(0) texCoord : vec2f) -> @location(0) vec4f {
-                    return textureSample(texture, sampler0, texCoord);
-                })");
-            renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
-            renderPipelineDescriptor.cTargets[0].format = GetPlaneFormat(planeIndex);
-            wgpu::RenderPipeline renderPipeline =
-                device.CreateRenderPipeline(&renderPipelineDescriptor);
-            wgpu::Sampler sampler = device.CreateSampler();
+            // Build the fragment shader body: declare `output`, write outputColor to every color
+            // target, and return it.
+            std::string returnOutput;
+            {
+                std::ostringstream result;
+                result << "    var output : Output;" << std::endl;
+                for (size_t i = 0; i < destVideoWGPUTextures.size(); ++i) {
+                    result << "    output.color" << i << " = outputColor;" << std::endl;
+                }
+                result << "    return output;" << std::endl;
+                returnOutput = std::move(result).str();
+            }
+
+            std::ostringstream fsSource;
+            fsSource << "@group(0) @binding(0) var sampler0 : sampler;" << std::endl;
+            fsSource << "@group(0) @binding(1) var texture : texture_2d<f32>;" << std::endl;
+            fsSource << outputStruct << std::endl;
+            fsSource << "@fragment" << std::endl;
+            fsSource << "fn main(@location(0) texCoord : vec2f) -> Output {" << std::endl;
+            fsSource << "    let outputColor = textureSample(texture, sampler0, texCoord);"
+                     << std::endl;
+            fsSource << returnOutput << std::endl;
+            fsSource << "}" << std::endl;
+
+            auto fsModule = utils::CreateShaderModule(device, std::move(fsSource).str());
 
             // Create plane texture view from the multiplanar video texture.
             wgpu::TextureViewDescriptor planeViewDesc;
             planeViewDesc.format = GetPlaneFormat(planeIndex);
             planeViewDesc.aspect = GetPlaneAspect(planeIndex);
-            wgpu::TextureView planeTextureView = destVideoWGPUTexture.CreateView(&planeViewDesc);
+            std::vector<wgpu::TextureView> planeTextureViews;
+            for (auto& destVideoWGPUTexture : destVideoWGPUTextures) {
+                planeTextureViews.push_back(destVideoWGPUTexture.CreateView(&planeViewDesc));
+            }
 
-            // Render pass operations for reading the source data from planeTexture view into
-            // planeTextureView created from the multiplanar video texture.
+            wgpu::Sampler sampler = device.CreateSampler();
+
             wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            utils::ComboRenderPassDescriptor renderPass({planeTextureView});
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            pass.SetPipeline(renderPipeline);
-            pass.SetBindGroup(
-                0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
-                                        {{0, sampler}, {1, planeTextureWithData.CreateView()}}));
-            pass.Draw(6);
-            pass.End();
+            {
+                utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+                renderPipelineDescriptor.vertex.module = vsModule;
+                renderPipelineDescriptor.cFragment.module = fsModule;
+                renderPipelineDescriptor.cFragment.targetCount = destVideoWGPUTextures.size();
+                renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+                for (size_t i = 0; i < destVideoWGPUTextures.size(); ++i) {
+                    renderPipelineDescriptor.cTargets[i].format = GetPlaneFormat(planeIndex);
+                }
+                if (hasDepth) {
+                    renderPipelineDescriptor.EnableDepthStencil(
+                        wgpu::TextureFormat::Depth24PlusStencil8);
+                }
+                wgpu::RenderPipeline renderPipeline =
+                    device.CreateRenderPipeline(&renderPipelineDescriptor);
 
-            // Another render pass for reading the planeTextureView into a texture of the plane's
-            // format (i.e. R8/R16Unorm for Y and RG8/RG16Unorm for UV). This is needed as
-            // multiplanar textures do not support copy operations.
-            utils::BasicRenderPass basicRenderPass = utils::CreateBasicRenderPass(
-                device, kYUVAImageDataWidthInTexels / kSubsampleFactor,
-                kYUVAImageDataHeightInTexels / kSubsampleFactor, GetPlaneFormat(planeIndex));
-            wgpu::RenderPassEncoder secondPass =
-                encoder.BeginRenderPass(&basicRenderPass.renderPassInfo);
-            secondPass.SetPipeline(renderPipeline);
-            secondPass.SetBindGroup(
-                0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
-                                        {{0, sampler}, {1, planeTextureView}}));
-            secondPass.Draw(6);
-            secondPass.End();
+                // Render pass operations for reading the source data from the plane texture into
+                // the planeTextureViews created from the multiplanar video textures.
+                utils::ComboRenderPassDescriptor renderPass(planeTextureViews, depthTextureView);
+                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+                pass.SetPipeline(renderPipeline);
+                pass.SetBindGroup(0, utils::MakeBindGroup(
+                                         device, renderPipeline.GetBindGroupLayout(0),
+                                         {{0, sampler}, {1, planeTextureWithData.CreateView()}}));
+                pass.Draw(6);
+                pass.End();
+            }
+
+            std::vector<wgpu::Texture> resultTextures;
+
+            for (auto& planeTextureView : planeTextureViews) {
+                utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+                renderPipelineDescriptor.vertex.module = vsModule;
+                renderPipelineDescriptor.cFragment.module = fsModule;
+                renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+                renderPipelineDescriptor.cTargets[0].format = GetPlaneFormat(planeIndex);
+                wgpu::RenderPipeline renderPipeline =
+                    device.CreateRenderPipeline(&renderPipelineDescriptor);
+                // Another render pass for reading the planeTextureView into a texture of the
+                // plane's format (i.e. R8/R16Unorm for Y and RG8/RG16Unorm for UV). This is needed
+                // as multiplanar textures do not support copy operations.
+                utils::BasicRenderPass basicRenderPass = utils::CreateBasicRenderPass(
+                    device, kYUVAImageDataWidthInTexels / kSubsampleFactor,
+                    kYUVAImageDataHeightInTexels / kSubsampleFactor, GetPlaneFormat(planeIndex));
+                wgpu::RenderPassEncoder secondPass =
+                    encoder.BeginRenderPass(&basicRenderPass.renderPassInfo);
+                secondPass.SetPipeline(renderPipeline);
+                secondPass.SetBindGroup(
+                    0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                            {{0, sampler}, {1, planeTextureView}}));
+                secondPass.Draw(6);
+                secondPass.End();
+
+                resultTextures.push_back(basicRenderPass.color);
+            }
 
             // Submit all commands for the encoder.
             wgpu::CommandBuffer commands = encoder.Finish();
@@ -1533,24 +1611,27 @@
             std::vector<T> expectedData = VideoViewsTestsBase::GetTestTextureDataWithPlaneIndex<T>(
                 planeIndex, kYUVAImageDataWidthInTexels * sizeof(T),
                 kYUVAImageDataHeightInTexels / kSubsampleFactor, false, hasAlpha);
-            EXPECT_TEXTURE_EQ(expectedData.data(), basicRenderPass.color, {0, 0},
-                              {kYUVAImageDataWidthInTexels / kSubsampleFactor,
-                               kYUVAImageDataHeightInTexels / kSubsampleFactor},
-                              GetPlaneFormat(planeIndex));
+
+            for (auto& resultTexture : resultTextures) {
+                EXPECT_TEXTURE_EQ(expectedData.data(), resultTexture, {0, 0},
+                                  {kYUVAImageDataWidthInTexels / kSubsampleFactor,
+                                   kYUVAImageDataHeightInTexels / kSubsampleFactor},
+                                  GetPlaneFormat(planeIndex));
+            }
         };
 
         // Perform operations for the Y plane.
-        PerformPlaneOperations(kYUVALumaPlaneIndex, destVideoWGPUTexture, plane0Texture, hasAlpha);
+        PerformPlaneOperations(kYUVALumaPlaneIndex, plane0Texture, hasAlpha);
         // Perform operations for the UV plane.
-        PerformPlaneOperations(kYUVAChromaPlaneIndex, destVideoWGPUTexture, plane1Texture,
-                               hasAlpha);
+        PerformPlaneOperations(kYUVAChromaPlaneIndex, plane1Texture, hasAlpha);
         if (hasAlpha) {
-            // Perform operations for the UV plane.
-            PerformPlaneOperations(kYUVAAlphaPlaneIndex, destVideoWGPUTexture, plane2Texture,
-                                   hasAlpha);
+            // Perform operations for the alpha plane.
+            PerformPlaneOperations(kYUVAAlphaPlaneIndex, plane2Texture, hasAlpha);
         }
 
-        mBackend->DestroyVideoTextureForTest(std::move(destVideoTexture));
+        for (auto& destVideoTexture : destVideoTextures) {
+            mBackend->DestroyVideoTextureForTest(std::move(destVideoTexture));
+        }
     }
 
     // Tests for rendering to a chroma texture view from a luma texture view, both of which created
@@ -1733,15 +1814,15 @@
     chromaViewDesc.aspect = wgpu::TextureAspect::Plane1Only;
     wgpu::TextureView chromaTextureView = platformTexture->wgpuTexture.CreateView(&chromaViewDesc);
 
-    // Create an RGBA texture with same size as chroma texture view.
-    wgpu::TextureDescriptor desc;
-    desc.format = wgpu::TextureFormat::RGBA8Unorm;
-    desc.dimension = wgpu::TextureDimension::e2D;
-    desc.usage = wgpu::TextureUsage::RenderAttachment;
-    desc.size = {kYUVAImageDataWidthInTexels / 2, kYUVAImageDataHeightInTexels / 2, 1};
-    wgpu::Texture rgbaTexture = device.CreateTexture(&desc);
-
     {
+        // Create an RGBA texture with same size as chroma texture view.
+        wgpu::TextureDescriptor desc;
+        desc.format = wgpu::TextureFormat::RGBA8Unorm;
+        desc.dimension = wgpu::TextureDimension::e2D;
+        desc.usage = wgpu::TextureUsage::RenderAttachment;
+        desc.size = {kYUVAImageDataWidthInTexels / 2, kYUVAImageDataHeightInTexels / 2, 1};
+        wgpu::Texture rgbaTexture = device.CreateTexture(&desc);
+
         // Render pass operations passing color attachments of same size (control case).
         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
         utils::ComboRenderPassDescriptor renderPass({chromaTextureView, rgbaTexture.CreateView()});
@@ -1759,6 +1840,104 @@
         ASSERT_DEVICE_ERROR(encoder.Finish());
     }
 
+    {
+        // Create a multisampled R8Unorm texture with the same size as the luma texture view.
+        wgpu::TextureDescriptor desc;
+        desc.format = wgpu::TextureFormat::R8Unorm;
+        desc.dimension = wgpu::TextureDimension::e2D;
+        desc.usage = wgpu::TextureUsage::RenderAttachment;
+        desc.size = {kYUVAImageDataWidthInTexels, kYUVAImageDataHeightInTexels, 1};
+        desc.sampleCount = 4;
+        wgpu::Texture rgbaTexture = device.CreateTexture(&desc);
+
+        // Render pass operations passing the luma texture view as resolve target
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        utils::ComboRenderPassDescriptor renderPass({rgbaTexture.CreateView()});
+        renderPass.cColorAttachments[0].resolveTarget = lumaTextureView;
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.End();
+
+        // TODO(chromium:324422644): support using multi-planar texture as resolve target.
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    {
+        // Create a multisampled R8Unorm texture with the same size as the chroma texture view.
+        wgpu::TextureDescriptor desc;
+        desc.format = wgpu::TextureFormat::R8Unorm;
+        desc.dimension = wgpu::TextureDimension::e2D;
+        desc.usage = wgpu::TextureUsage::RenderAttachment;
+        desc.size = {kYUVAImageDataWidthInTexels / 2, kYUVAImageDataHeightInTexels / 2, 1};
+        desc.sampleCount = 4;
+        wgpu::Texture rgbaTexture = device.CreateTexture(&desc);
+
+        // Render pass operations passing the chroma texture view as resolve target
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        utils::ComboRenderPassDescriptor renderPass({rgbaTexture.CreateView()});
+        renderPass.cColorAttachments[0].resolveTarget = chromaTextureView;
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.End();
+
+        // TODO(chromium:324422644): support using multi-planar texture as resolve target.
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    {
+        // Create a depth texture with the same size as the luma texture view. It should be
+        // possible to use it with both the luma plane and the subsampled chroma plane.
+        wgpu::TextureDescriptor desc;
+        desc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        desc.dimension = wgpu::TextureDimension::e2D;
+        desc.usage = wgpu::TextureUsage::RenderAttachment;
+        desc.size = {kYUVAImageDataWidthInTexels, kYUVAImageDataHeightInTexels, 1};
+        wgpu::Texture depthTexture = device.CreateTexture(&desc);
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            utils::ComboRenderPassDescriptor renderPass({lumaTextureView},
+                                                        depthTexture.CreateView());
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.End();
+            encoder.Finish();
+        }
+
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            utils::ComboRenderPassDescriptor renderPass({chromaTextureView},
+                                                        depthTexture.CreateView());
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.End();
+            encoder.Finish();
+        }
+    }
+
+    {
+        // Create a depth texture with the same size as the chroma texture view. Using it with
+        // either plane should produce an error, since a depth texture matching the size of
+        // plane 0 (the luma plane) is expected.
+        wgpu::TextureDescriptor desc;
+        desc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        desc.dimension = wgpu::TextureDimension::e2D;
+        desc.usage = wgpu::TextureUsage::RenderAttachment;
+        desc.size = {kYUVAImageDataWidthInTexels / 2, kYUVAImageDataHeightInTexels / 2, 1};
+        wgpu::Texture depthTexture = device.CreateTexture(&desc);
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            utils::ComboRenderPassDescriptor renderPass({lumaTextureView},
+                                                        depthTexture.CreateView());
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            utils::ComboRenderPassDescriptor renderPass({chromaTextureView},
+                                                        depthTexture.CreateView());
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
     mBackend->DestroyVideoTextureForTest(std::move(platformTexture));
 }
 
@@ -1766,9 +1945,42 @@
 TEST_P(VideoViewsRenderTargetTests, RenderToMultiplanarVideoTexture) {
     if (GetFormat() == wgpu::TextureFormat::R8BG8Biplanar420Unorm ||
         GetFormat() == wgpu::TextureFormat::R8BG8A8Triplanar420Unorm) {
-        RenderToMultiplanarVideoTexture<uint8_t>();
+        RenderToMultiplanarVideoTextures<uint8_t>(/*numOfTextures=*/1, /*hasDepth=*/false);
     } else if (GetFormat() == wgpu::TextureFormat::R10X6BG10X6Biplanar420Unorm) {
-        RenderToMultiplanarVideoTexture<uint16_t>();
+        RenderToMultiplanarVideoTextures<uint16_t>(/*numOfTextures=*/1, /*hasDepth=*/false);
+    } else {
+        DAWN_UNREACHABLE();
+    }
+}
+
+TEST_P(VideoViewsRenderTargetTests, RenderToMultiplanarVideoTextureWithDepth) {
+    if (GetFormat() == wgpu::TextureFormat::R8BG8Biplanar420Unorm ||
+        GetFormat() == wgpu::TextureFormat::R8BG8A8Triplanar420Unorm) {
+        RenderToMultiplanarVideoTextures<uint8_t>(/*numOfTextures=*/1, /*hasDepth=*/true);
+    } else if (GetFormat() == wgpu::TextureFormat::R10X6BG10X6Biplanar420Unorm) {
+        RenderToMultiplanarVideoTextures<uint16_t>(/*numOfTextures=*/1, /*hasDepth=*/true);
+    } else {
+        DAWN_UNREACHABLE();
+    }
+}
+
+TEST_P(VideoViewsRenderTargetTests, RenderToThreeMultiplanarVideoTexture) {
+    if (GetFormat() == wgpu::TextureFormat::R8BG8Biplanar420Unorm ||
+        GetFormat() == wgpu::TextureFormat::R8BG8A8Triplanar420Unorm) {
+        RenderToMultiplanarVideoTextures<uint8_t>(/*numOfTextures=*/3, /*hasDepth=*/false);
+    } else if (GetFormat() == wgpu::TextureFormat::R10X6BG10X6Biplanar420Unorm) {
+        RenderToMultiplanarVideoTextures<uint16_t>(/*numOfTextures=*/3, /*hasDepth=*/false);
+    } else {
+        DAWN_UNREACHABLE();
+    }
+}
+
+TEST_P(VideoViewsRenderTargetTests, RenderToThreeMultiplanarVideoTextureWithDepth) {
+    if (GetFormat() == wgpu::TextureFormat::R8BG8Biplanar420Unorm ||
+        GetFormat() == wgpu::TextureFormat::R8BG8A8Triplanar420Unorm) {
+        RenderToMultiplanarVideoTextures<uint8_t>(/*numOfTextures=*/3, /*hasDepth=*/true);
+    } else if (GetFormat() == wgpu::TextureFormat::R10X6BG10X6Biplanar420Unorm) {
+        RenderToMultiplanarVideoTextures<uint16_t>(/*numOfTextures=*/3, /*hasDepth=*/true);
     } else {
         DAWN_UNREACHABLE();
     }