Make the Vulkan backend support implicit barriers.

With this commit the Vulkan backend completely ignores the explicit
barrier commands passed from the frontend, and generates its own
pipeline barriers.

Right now it encodes each barrier individually, just before the
corresponding resource is used. This is inefficient but will be
optimized in a later change.

This commit also makes the frontend command buffer validation perform
the per-pass usage checks needed for implicit barriers (even though
they are currently redundant with the checks for explicit barriers),
because that tracking pre-computes per-pass usage information that
the Vulkan backend consumes.

Tests for usage validation inside passes will be added once the concept
of transition is removed from the API.
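
As a reference, the per-pass rule the new validation enforces is that
a resource may be used within a pass either with any combination of
read-only usages or with a single (writable) usage. Below is a minimal
sketch of that check for buffers, mirroring the logic in
PassResourceUsageTracker::AreUsagesValid. It is illustrative only and
not part of this patch: IsPassBufferUsageValid is a hypothetical name,
while kReadOnlyBufferUsages and nxt::HasZeroOrOneBits are the existing
helpers the validation uses.

    // Sketch: a combined per-pass buffer usage is valid when it is
    // entirely read-only, or when at most one (write) bit is set.
    bool IsPassBufferUsageValid(nxt::BufferUsageBit usage) {
        bool readOnly = (usage & kReadOnlyBufferUsages) == usage;
        bool singleUse = nxt::HasZeroOrOneBits(usage);
        return readOnly || singleUse;
    }
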
diff --git a/src/backend/CMakeLists.txt b/src/backend/CMakeLists.txt
index 20faf92..3916af3 100644
--- a/src/backend/CMakeLists.txt
+++ b/src/backend/CMakeLists.txt
@@ -364,6 +364,7 @@
     ${BACKEND_DIR}/InputState.h
     ${BACKEND_DIR}/RenderPipeline.cpp
     ${BACKEND_DIR}/RenderPipeline.h
+    ${BACKEND_DIR}/PassResourceUsage.h
     ${BACKEND_DIR}/PerStage.cpp
     ${BACKEND_DIR}/PerStage.h
     ${BACKEND_DIR}/Pipeline.cpp
diff --git a/src/backend/CommandBuffer.cpp b/src/backend/CommandBuffer.cpp
index 9bb6b40..02ff715 100644
--- a/src/backend/CommandBuffer.cpp
+++ b/src/backend/CommandBuffer.cpp
@@ -122,6 +122,142 @@
             return true;
         }
 
+        enum class PassType {
+            Render,
+            Compute,
+        };
+
+        // Helper class to encapsulate the logic of tracking per-resource usage during the
+        // validation of command buffer passes. It is used both to know if there are validation
+        // errors, and to get a list of resources used per pass for backends that need the
+        // information.
+        class PassResourceUsageTracker {
+          public:
+            void BufferUsedAs(BufferBase* buffer, nxt::BufferUsageBit usage) {
+                // std::map's operator[] will create the key and return 0 if the key didn't exist
+                // before.
+                nxt::BufferUsageBit& storedUsage = mBufferUsages[buffer];
+
+                if (usage == nxt::BufferUsageBit::Storage &&
+                    storedUsage & nxt::BufferUsageBit::Storage) {
+                    mStorageUsedMultipleTimes = true;
+                }
+
+                storedUsage |= usage;
+            }
+
+            void TextureUsedAs(TextureBase* texture, nxt::TextureUsageBit usage) {
+                // std::map's operator[] will create the key and return 0 if the key didn't exist
+                // before.
+                nxt::TextureUsageBit& storedUsage = mTextureUsages[texture];
+
+                if (usage == nxt::TextureUsageBit::Storage &&
+                    storedUsage & nxt::TextureUsageBit::Storage) {
+                    mStorageUsedMultipleTimes = true;
+                }
+
+                storedUsage |= usage;
+            }
+
+            // Performs the per-pass usage validation checks
+            bool AreUsagesValid(PassType pass) const {
+                // Storage resources cannot be used twice in the same compute pass
+                if (pass == PassType::Compute && mStorageUsedMultipleTimes) {
+                    return false;
+                }
+
+                // Buffers can only be used as single-write or multiple read.
+                for (auto& it : mBufferUsages) {
+                    BufferBase* buffer = it.first;
+                    nxt::BufferUsageBit usage = it.second;
+
+                    if (usage & ~buffer->GetAllowedUsage()) {
+                        return false;
+                    }
+
+                    bool readOnly = (usage & kReadOnlyBufferUsages) == usage;
+                    bool singleUse = nxt::HasZeroOrOneBits(usage);
+
+                    if (!readOnly && !singleUse) {
+                        return false;
+                    }
+                }
+
+                // Textures can only be used as single-write or multiple read.
+                // TODO(cwallez@chromium.org): implement per-subresource tracking
+                for (auto& it : mTextureUsages) {
+                    TextureBase* texture = it.first;
+                    nxt::TextureUsageBit usage = it.second;
+
+                    if (usage & ~texture->GetAllowedUsage()) {
+                        return false;
+                    }
+
+                    // For textures the only read-only usage in a pass is Sampled, so checking the
+                    // usage constraint simplifies to checking that at most one usage bit is set.
+                    if (!nxt::HasZeroOrOneBits(it.second)) {
+                        return false;
+                    }
+                }
+
+                return true;
+            }
+
+            // Returns the per-pass usage for backends that target APIs with explicit barriers.
+            PassResourceUsage AcquireResourceUsage() {
+                PassResourceUsage result;
+                result.buffers.reserve(mBufferUsages.size());
+                result.bufferUsages.reserve(mBufferUsages.size());
+                result.textures.reserve(mTextureUsages.size());
+                result.textureUsages.reserve(mTextureUsages.size());
+
+                for (auto& it : mBufferUsages) {
+                    result.buffers.push_back(it.first);
+                    result.bufferUsages.push_back(it.second);
+                }
+
+                for (auto& it : mTextureUsages) {
+                    result.textures.push_back(it.first);
+                    result.textureUsages.push_back(it.second);
+                }
+
+                return result;
+            }
+
+          private:
+            std::map<BufferBase*, nxt::BufferUsageBit> mBufferUsages;
+            std::map<TextureBase*, nxt::TextureUsageBit> mTextureUsages;
+            bool mStorageUsedMultipleTimes = false;
+        };
+
+        void TrackBindGroupResourceUsage(BindGroupBase* group, PassResourceUsageTracker* tracker) {
+            const auto& layoutInfo = group->GetLayout()->GetBindingInfo();
+
+            for (uint32_t i : IterateBitSet(layoutInfo.mask)) {
+                nxt::BindingType type = layoutInfo.types[i];
+
+                switch (type) {
+                    case nxt::BindingType::UniformBuffer: {
+                        BufferBase* buffer = group->GetBindingAsBufferView(i)->GetBuffer();
+                        tracker->BufferUsedAs(buffer, nxt::BufferUsageBit::Uniform);
+                    } break;
+
+                    case nxt::BindingType::StorageBuffer: {
+                        BufferBase* buffer = group->GetBindingAsBufferView(i)->GetBuffer();
+                        tracker->BufferUsedAs(buffer, nxt::BufferUsageBit::Storage);
+                    } break;
+
+                    case nxt::BindingType::SampledTexture: {
+                        TextureBase* texture = group->GetBindingAsTextureView(i)->GetTexture();
+                        tracker->TextureUsedAs(texture, nxt::TextureUsageBit::Sampled);
+                    } break;
+
+                    case nxt::BindingType::Sampler:
+                        break;
+                }
+            }
+        }
+
     }  // namespace
 
     // CommandBuffer
@@ -171,6 +307,12 @@
         return std::move(mIterator);
     }
 
+    std::vector<PassResourceUsage> CommandBufferBuilder::AcquirePassResourceUsage() {
+        ASSERT(!mWerePassUsagesAcquired);
+        mWerePassUsagesAcquired = true;
+        return std::move(mPassResourceUsages);
+    }
+
     CommandBufferBase* CommandBufferBuilder::GetResultImpl() {
         MoveToIterator();
         return mDevice->CreateCommandBuffer(this);
@@ -283,11 +425,19 @@
     }
 
     bool CommandBufferBuilder::ValidateComputePass() {
+        PassResourceUsageTracker usageTracker;
+
         Command type;
         while (mIterator.NextCommandId(&type)) {
             switch (type) {
                 case Command::EndComputePass: {
                     mIterator.NextCommand<EndComputePassCmd>();
+
+                    if (!usageTracker.AreUsagesValid(PassType::Compute)) {
+                        return false;
+                    }
+                    mPassResourceUsages.push_back(usageTracker.AcquireResourceUsage());
+
                     mState->EndPass();
                     return true;
                 } break;
@@ -322,6 +472,8 @@
 
                 case Command::SetBindGroup: {
                     SetBindGroupCmd* cmd = mIterator.NextCommand<SetBindGroupCmd>();
+
+                    TrackBindGroupResourceUsage(cmd->group.Get(), &usageTracker);
                     if (!mState->SetBindGroup(cmd->index, cmd->group.Get())) {
                         return false;
                     }
@@ -342,11 +494,30 @@
             return false;
         }
 
+        PassResourceUsageTracker usageTracker;
+
+        // Track usage of the render pass attachments
+        for (uint32_t i : IterateBitSet(renderPass->GetColorAttachmentMask())) {
+            TextureBase* texture = renderPass->GetColorAttachment(i).view->GetTexture();
+            usageTracker.TextureUsedAs(texture, nxt::TextureUsageBit::OutputAttachment);
+        }
+
+        if (renderPass->HasDepthStencilAttachment()) {
+            TextureBase* texture = renderPass->GetDepthStencilAttachment().view->GetTexture();
+            usageTracker.TextureUsedAs(texture, nxt::TextureUsageBit::OutputAttachment);
+        }
+
         Command type;
         while (mIterator.NextCommandId(&type)) {
             switch (type) {
                 case Command::EndRenderPass: {
                     mIterator.NextCommand<EndRenderPassCmd>();
+
+                    if (!usageTracker.AreUsagesValid(PassType::Render)) {
+                        return false;
+                    }
+                    mPassResourceUsages.push_back(usageTracker.AcquireResourceUsage());
+
                     mState->EndPass();
                     return true;
                 } break;
@@ -408,6 +579,8 @@
 
                 case Command::SetBindGroup: {
                     SetBindGroupCmd* cmd = mIterator.NextCommand<SetBindGroupCmd>();
+
+                    TrackBindGroupResourceUsage(cmd->group.Get(), &usageTracker);
                     if (!mState->SetBindGroup(cmd->index, cmd->group.Get())) {
                         return false;
                     }
@@ -415,6 +588,8 @@
 
                 case Command::SetIndexBuffer: {
                     SetIndexBufferCmd* cmd = mIterator.NextCommand<SetIndexBufferCmd>();
+
+                    usageTracker.BufferUsedAs(cmd->buffer.Get(), nxt::BufferUsageBit::Index);
                     if (!mState->SetIndexBuffer(cmd->buffer.Get())) {
                         return false;
                     }
@@ -426,6 +601,7 @@
                     mIterator.NextData<uint32_t>(cmd->count);
 
                     for (uint32_t i = 0; i < cmd->count; ++i) {
+                        usageTracker.BufferUsedAs(buffers[i].Get(), nxt::BufferUsageBit::Vertex);
                         mState->SetVertexBuffer(cmd->startSlot + i, buffers[i].Get());
                     }
                 } break;
diff --git a/src/backend/CommandBuffer.h b/src/backend/CommandBuffer.h
index cf4fec5..0525daf 100644
--- a/src/backend/CommandBuffer.h
+++ b/src/backend/CommandBuffer.h
@@ -19,6 +19,7 @@
 
 #include "backend/Builder.h"
 #include "backend/CommandAllocator.h"
+#include "backend/PassResourceUsage.h"
 #include "backend/RefCounted.h"
 
 #include <memory>
@@ -59,6 +60,7 @@
         bool ValidateGetResult();
 
         CommandIterator AcquireCommands();
+        std::vector<PassResourceUsage> AcquirePassResourceUsage();
 
         // NXT API
         void BeginComputePass();
@@ -144,6 +146,9 @@
         CommandIterator mIterator;
         bool mWasMovedToIterator = false;
         bool mWereCommandsAcquired = false;
+        bool mWerePassUsagesAcquired = false;
+
+        std::vector<PassResourceUsage> mPassResourceUsages;
     };
 
 }  // namespace backend
diff --git a/src/backend/CommandBufferStateTracker.cpp b/src/backend/CommandBufferStateTracker.cpp
index 7ae73aa6..f67c60c 100644
--- a/src/backend/CommandBufferStateTracker.cpp
+++ b/src/backend/CommandBufferStateTracker.cpp
@@ -28,6 +28,7 @@
 #include "common/BitSetIterator.h"
 
 namespace backend {
+
     CommandBufferStateTracker::CommandBufferStateTracker(CommandBufferBuilder* mBuilder)
         : mBuilder(mBuilder) {
     }
diff --git a/src/backend/CommandBufferStateTracker.h b/src/backend/CommandBufferStateTracker.h
index 3f784f4..f03cc53 100644
--- a/src/backend/CommandBufferStateTracker.h
+++ b/src/backend/CommandBufferStateTracker.h
@@ -24,6 +24,7 @@
 #include <set>
 
 namespace backend {
+
     class CommandBufferStateTracker {
       public:
         explicit CommandBufferStateTracker(CommandBufferBuilder* builder);
@@ -97,6 +98,7 @@
         std::map<TextureBase*, nxt::TextureUsageBit> mMostRecentTextureUsages;
         std::set<TextureBase*> mTexturesAttached;
     };
+
 }  // namespace backend
 
 #endif  // BACKEND_COMMANDBUFFERSTATETRACKER_H
diff --git a/src/backend/PassResourceUsage.h b/src/backend/PassResourceUsage.h
new file mode 100644
index 0000000..c1d59d4
--- /dev/null
+++ b/src/backend/PassResourceUsage.h
@@ -0,0 +1,40 @@
+// Copyright 2018 The NXT Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef BACKEND_PASSRESOURCEUSAGE_H
+#define BACKEND_PASSRESOURCEUSAGE_H
+
+#include "nxt/nxtcpp.h"
+
+#include <vector>
+
+namespace backend {
+
+    class BufferBase;
+    class TextureBase;
+
+    // Which resources are used by a pass and how they are used. The command buffer validation
+    // pre-computes this information so that backends with explicit barriers don't have to
+    // re-compute it.
+    struct PassResourceUsage {
+        std::vector<BufferBase*> buffers;
+        std::vector<nxt::BufferUsageBit> bufferUsages;
+
+        std::vector<TextureBase*> textures;
+        std::vector<nxt::TextureUsageBit> textureUsages;
+    };
+
+}  // namespace backend
+
+#endif  // BACKEND_PASSRESOURCEUSAGE_H
diff --git a/src/backend/SwapChain.cpp b/src/backend/SwapChain.cpp
index 75ccc5b..a2a5752 100644
--- a/src/backend/SwapChain.cpp
+++ b/src/backend/SwapChain.cpp
@@ -82,6 +82,8 @@
             return;
         }
 
+        OnBeforePresent(texture);
+
         mImplementation.Present(mImplementation.userData);
     }
 
diff --git a/src/backend/SwapChain.h b/src/backend/SwapChain.h
index 0110af4..391d459 100644
--- a/src/backend/SwapChain.h
+++ b/src/backend/SwapChain.h
@@ -42,6 +42,7 @@
       protected:
         const nxtSwapChainImplementation& GetImplementation();
         virtual TextureBase* GetNextTextureImpl(TextureBuilder* builder) = 0;
+        virtual void OnBeforePresent(TextureBase* texture) = 0;
 
       private:
         DeviceBase* mDevice = nullptr;
diff --git a/src/backend/Texture.h b/src/backend/Texture.h
index 0d6c01f..24aa2fb 100644
--- a/src/backend/Texture.h
+++ b/src/backend/Texture.h
@@ -28,6 +28,14 @@
     bool TextureFormatHasStencil(nxt::TextureFormat format);
     bool TextureFormatHasDepthOrStencil(nxt::TextureFormat format);
 
+    static constexpr nxt::TextureUsageBit kReadOnlyTextureUsages =
+        nxt::TextureUsageBit::TransferSrc | nxt::TextureUsageBit::Sampled |
+        nxt::TextureUsageBit::Present;
+
+    static constexpr nxt::TextureUsageBit kWritableTextureUsages =
+        nxt::TextureUsageBit::TransferDst | nxt::TextureUsageBit::Storage |
+        nxt::TextureUsageBit::OutputAttachment;
+
     class TextureBase : public RefCounted {
       public:
         TextureBase(TextureBuilder* builder);
diff --git a/src/backend/d3d12/DeviceD3D12.cpp b/src/backend/d3d12/DeviceD3D12.cpp
index 4c8f2b5..58f5077 100644
--- a/src/backend/d3d12/DeviceD3D12.cpp
+++ b/src/backend/d3d12/DeviceD3D12.cpp
@@ -51,7 +51,12 @@
 
     nxtSwapChainImplementation CreateNativeSwapChainImpl(nxtDevice device, HWND window) {
         Device* backendDevice = reinterpret_cast<Device*>(device);
-        return CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, window));
+
+        nxtSwapChainImplementation impl;
+        impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, window));
+        impl.textureUsage = NXT_TEXTURE_USAGE_BIT_PRESENT;
+
+        return impl;
     }
 
     nxtTextureFormat GetNativeSwapChainPreferredFormat(
diff --git a/src/backend/d3d12/SwapChainD3D12.cpp b/src/backend/d3d12/SwapChainD3D12.cpp
index 8658add..11a7fd3 100644
--- a/src/backend/d3d12/SwapChainD3D12.cpp
+++ b/src/backend/d3d12/SwapChainD3D12.cpp
@@ -44,4 +44,7 @@
         return new Texture(builder, nativeTexture);
     }
 
+    void SwapChain::OnBeforePresent(TextureBase*) {
+    }
+
 }}  // namespace backend::d3d12
diff --git a/src/backend/d3d12/SwapChainD3D12.h b/src/backend/d3d12/SwapChainD3D12.h
index fa39bf8..33e039f 100644
--- a/src/backend/d3d12/SwapChainD3D12.h
+++ b/src/backend/d3d12/SwapChainD3D12.h
@@ -26,6 +26,7 @@
 
       protected:
         TextureBase* GetNextTextureImpl(TextureBuilder* builder) override;
+        void OnBeforePresent(TextureBase* texture) override;
     };
 
 }}  // namespace backend::d3d12
diff --git a/src/backend/metal/SwapChainMTL.h b/src/backend/metal/SwapChainMTL.h
index 55d0a03..a602524 100644
--- a/src/backend/metal/SwapChainMTL.h
+++ b/src/backend/metal/SwapChainMTL.h
@@ -28,6 +28,7 @@
 
       protected:
         TextureBase* GetNextTextureImpl(TextureBuilder* builder) override;
+        void OnBeforePresent(TextureBase* texture) override;
     };
 
 }}  // namespace backend::metal
diff --git a/src/backend/metal/SwapChainMTL.mm b/src/backend/metal/SwapChainMTL.mm
index 352f077..5d1bd28 100644
--- a/src/backend/metal/SwapChainMTL.mm
+++ b/src/backend/metal/SwapChainMTL.mm
@@ -44,4 +44,7 @@
         return new Texture(builder, nativeTexture);
     }
 
+    void SwapChain::OnBeforePresent(TextureBase*) {
+    }
+
 }}  // namespace backend::metal
diff --git a/src/backend/null/NullBackend.cpp b/src/backend/null/NullBackend.cpp
index b2330db..99f3581 100644
--- a/src/backend/null/NullBackend.cpp
+++ b/src/backend/null/NullBackend.cpp
@@ -251,4 +251,8 @@
     TextureBase* SwapChain::GetNextTextureImpl(TextureBuilder* builder) {
         return GetDevice()->CreateTexture(builder);
     }
+
+    void SwapChain::OnBeforePresent(TextureBase*) {
+    }
+
 }}  // namespace backend::null
diff --git a/src/backend/null/NullBackend.h b/src/backend/null/NullBackend.h
index abf5202..6b9e4a3 100644
--- a/src/backend/null/NullBackend.h
+++ b/src/backend/null/NullBackend.h
@@ -184,6 +184,7 @@
 
       protected:
         TextureBase* GetNextTextureImpl(TextureBuilder* builder) override;
+        void OnBeforePresent(TextureBase*) override;
     };
 
 }}  // namespace backend::null
diff --git a/src/backend/opengl/SwapChainGL.cpp b/src/backend/opengl/SwapChainGL.cpp
index 46aba36..4bd4964 100644
--- a/src/backend/opengl/SwapChainGL.cpp
+++ b/src/backend/opengl/SwapChainGL.cpp
@@ -41,4 +41,7 @@
         return new Texture(builder, nativeTexture);
     }
 
+    void SwapChain::OnBeforePresent(TextureBase*) {
+    }
+
 }}  // namespace backend::opengl
diff --git a/src/backend/opengl/SwapChainGL.h b/src/backend/opengl/SwapChainGL.h
index 03cf06e..6e7e08d 100644
--- a/src/backend/opengl/SwapChainGL.h
+++ b/src/backend/opengl/SwapChainGL.h
@@ -30,6 +30,7 @@
 
       protected:
         TextureBase* GetNextTextureImpl(TextureBuilder* builder) override;
+        void OnBeforePresent(TextureBase* texture) override;
     };
 
 }}  // namespace backend::opengl
diff --git a/src/backend/vulkan/BufferVk.cpp b/src/backend/vulkan/BufferVk.cpp
index b181cd6..9c47f16 100644
--- a/src/backend/vulkan/BufferVk.cpp
+++ b/src/backend/vulkan/BufferVk.cpp
@@ -162,17 +162,29 @@
         return mHandle;
     }
 
-    void Buffer::RecordBarrier(VkCommandBuffer commands,
-                               nxt::BufferUsageBit currentUsage,
-                               nxt::BufferUsageBit targetUsage) const {
-        VkPipelineStageFlags srcStages = VulkanPipelineStage(currentUsage);
-        VkPipelineStageFlags dstStages = VulkanPipelineStage(targetUsage);
+    void Buffer::TransitionUsageNow(VkCommandBuffer commands, nxt::BufferUsageBit usage) {
+        bool lastIncludesTarget = (mLastUsage & usage) == usage;
+        bool lastReadOnly = (mLastUsage & kReadOnlyBufferUsages) == mLastUsage;
+
+        // We can skip transitions to already current read-only usages.
+        if (lastIncludesTarget && lastReadOnly) {
+            return;
+        }
+
+        // Special-case the initial transition: there is nothing to synchronize against yet, and
+        // Vulkan disallows pipeline barriers with an empty stage mask.
+        if (mLastUsage == nxt::BufferUsageBit::None) {
+            mLastUsage = usage;
+            return;
+        }
+
+        VkPipelineStageFlags srcStages = VulkanPipelineStage(mLastUsage);
+        VkPipelineStageFlags dstStages = VulkanPipelineStage(usage);
 
         VkBufferMemoryBarrier barrier;
         barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
         barrier.pNext = nullptr;
-        barrier.srcAccessMask = VulkanAccessFlags(currentUsage);
-        barrier.dstAccessMask = VulkanAccessFlags(targetUsage);
+        barrier.srcAccessMask = VulkanAccessFlags(mLastUsage);
+        barrier.dstAccessMask = VulkanAccessFlags(usage);
         barrier.srcQueueFamilyIndex = 0;
         barrier.dstQueueFamilyIndex = 0;
         barrier.buffer = mHandle;
@@ -182,26 +194,43 @@
         ToBackend(GetDevice())
             ->fn.CmdPipelineBarrier(commands, srcStages, dstStages, 0, 0, nullptr, 1, &barrier, 0,
                                     nullptr);
+
+        mLastUsage = usage;
     }
 
     void Buffer::SetSubDataImpl(uint32_t start, uint32_t count, const uint8_t* data) {
-        BufferUploader* uploader = ToBackend(GetDevice())->GetBufferUploader();
+        Device* device = ToBackend(GetDevice());
+
+        VkCommandBuffer commands = device->GetPendingCommandBuffer();
+        TransitionUsageNow(commands, nxt::BufferUsageBit::TransferDst);
+
+        BufferUploader* uploader = device->GetBufferUploader();
         uploader->BufferSubData(mHandle, start, count, data);
     }
 
     void Buffer::MapReadAsyncImpl(uint32_t serial, uint32_t start, uint32_t /*count*/) {
+        Device* device = ToBackend(GetDevice());
+
+        VkCommandBuffer commands = device->GetPendingCommandBuffer();
+        TransitionUsageNow(commands, nxt::BufferUsageBit::MapRead);
+
         uint8_t* memory = mMemoryAllocation.GetMappedPointer();
         ASSERT(memory != nullptr);
 
-        MapRequestTracker* tracker = ToBackend(GetDevice())->GetMapRequestTracker();
+        MapRequestTracker* tracker = device->GetMapRequestTracker();
         tracker->Track(this, serial, memory + start, false);
     }
 
     void Buffer::MapWriteAsyncImpl(uint32_t serial, uint32_t start, uint32_t /*count*/) {
+        Device* device = ToBackend(GetDevice());
+
+        VkCommandBuffer commands = device->GetPendingCommandBuffer();
+        TransitionUsageNow(commands, nxt::BufferUsageBit::MapWrite);
+
         uint8_t* memory = mMemoryAllocation.GetMappedPointer();
         ASSERT(memory != nullptr);
 
-        MapRequestTracker* tracker = ToBackend(GetDevice())->GetMapRequestTracker();
+        MapRequestTracker* tracker = device->GetMapRequestTracker();
         tracker->Track(this, serial, memory + start, true);
     }
 
@@ -209,12 +238,11 @@
         // No need to do anything, we keep CPU-visible memory mapped at all time.
     }
 
-    void Buffer::TransitionUsageImpl(nxt::BufferUsageBit currentUsage,
-                                     nxt::BufferUsageBit targetUsage) {
-        VkCommandBuffer commands = ToBackend(GetDevice())->GetPendingCommandBuffer();
-        RecordBarrier(commands, currentUsage, targetUsage);
+    void Buffer::TransitionUsageImpl(nxt::BufferUsageBit, nxt::BufferUsageBit) {
     }
 
+    // MapRequestTracker
+
     MapRequestTracker::MapRequestTracker(Device* device) : mDevice(device) {
     }
 
diff --git a/src/backend/vulkan/BufferVk.h b/src/backend/vulkan/BufferVk.h
index c00015d..ed873ec 100644
--- a/src/backend/vulkan/BufferVk.h
+++ b/src/backend/vulkan/BufferVk.h
@@ -35,9 +35,10 @@
 
         VkBuffer GetHandle() const;
 
-        void RecordBarrier(VkCommandBuffer commands,
-                           nxt::BufferUsageBit currentUsage,
-                           nxt::BufferUsageBit targetUsage) const;
+        // Transitions the buffer to be used as `usage`, recording any necessary barrier in
+        // `commands`.
+        // TODO(cwallez@chromium.org): coalesce barriers and do them early when possible.
+        void TransitionUsageNow(VkCommandBuffer commands, nxt::BufferUsageBit usage);
 
       private:
         void SetSubDataImpl(uint32_t start, uint32_t count, const uint8_t* data) override;
@@ -49,6 +50,8 @@
 
         VkBuffer mHandle = VK_NULL_HANDLE;
         DeviceMemoryAllocation mMemoryAllocation;
+
+        nxt::BufferUsageBit mLastUsage = nxt::BufferUsageBit::None;
     };
 
     using BufferView = BufferViewBase;
diff --git a/src/backend/vulkan/CommandBufferVk.cpp b/src/backend/vulkan/CommandBufferVk.cpp
index 2d4ad99..2fcc2ac 100644
--- a/src/backend/vulkan/CommandBufferVk.cpp
+++ b/src/backend/vulkan/CommandBufferVk.cpp
@@ -111,7 +111,9 @@
     }  // anonymous namespace
 
     CommandBuffer::CommandBuffer(CommandBufferBuilder* builder)
-        : CommandBufferBase(builder), mCommands(builder->AcquireCommands()) {
+        : CommandBufferBase(builder),
+          mCommands(builder->AcquireCommands()),
+          mPassResourceUsages(builder->AcquirePassResourceUsage()) {
     }
 
     CommandBuffer::~CommandBuffer() {
@@ -121,6 +123,20 @@
     void CommandBuffer::RecordCommands(VkCommandBuffer commands) {
         Device* device = ToBackend(GetDevice());
 
+        // Records the necessary barriers for the resource usage pre-computed by the frontend
+        auto TransitionForPass = [](VkCommandBuffer commands, const PassResourceUsage& usages) {
+            for (size_t i = 0; i < usages.buffers.size(); ++i) {
+                Buffer* buffer = ToBackend(usages.buffers[i]);
+                buffer->TransitionUsageNow(commands, usages.bufferUsages[i]);
+            }
+            for (size_t i = 0; i < usages.textures.size(); ++i) {
+                Texture* texture = ToBackend(usages.textures[i]);
+                texture->TransitionUsageNow(commands, usages.textureUsages[i]);
+            }
+        };
+
+        size_t nextPassNumber = 0;
+
         Command type;
         while (mCommands.NextCommandId(&type)) {
             switch (type) {
@@ -129,6 +145,11 @@
                     auto& src = copy->source;
                     auto& dst = copy->destination;
 
+                    ToBackend(src.buffer)
+                        ->TransitionUsageNow(commands, nxt::BufferUsageBit::TransferSrc);
+                    ToBackend(dst.buffer)
+                        ->TransitionUsageNow(commands, nxt::BufferUsageBit::TransferDst);
+
                     VkBufferCopy region;
                     region.srcOffset = src.offset;
                     region.dstOffset = dst.offset;
@@ -144,8 +165,14 @@
                     auto& src = copy->source;
                     auto& dst = copy->destination;
 
+                    ToBackend(src.buffer)
+                        ->TransitionUsageNow(commands, nxt::BufferUsageBit::TransferSrc);
+                    ToBackend(dst.texture)
+                        ->TransitionUsageNow(commands, nxt::TextureUsageBit::TransferDst);
+
                     VkBuffer srcBuffer = ToBackend(src.buffer)->GetHandle();
                     VkImage dstImage = ToBackend(dst.texture)->GetHandle();
+
                     VkBufferImageCopy region =
                         ComputeBufferImageCopyRegion(copy->rowPitch, src, dst);
 
@@ -161,8 +188,14 @@
                     auto& src = copy->source;
                     auto& dst = copy->destination;
 
+                    ToBackend(src.texture)
+                        ->TransitionUsageNow(commands, nxt::TextureUsageBit::TransferSrc);
+                    ToBackend(dst.buffer)
+                        ->TransitionUsageNow(commands, nxt::BufferUsageBit::TransferDst);
+
                     VkImage srcImage = ToBackend(src.texture)->GetHandle();
                     VkBuffer dstBuffer = ToBackend(dst.buffer)->GetHandle();
+
                     VkBufferImageCopy region =
                         ComputeBufferImageCopyRegion(copy->rowPitch, dst, src);
 
@@ -173,12 +206,20 @@
 
                 case Command::BeginRenderPass: {
                     BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
+
+                    TransitionForPass(commands, mPassResourceUsages[nextPassNumber]);
                     RecordRenderPass(commands, ToBackend(cmd->info.Get()));
+
+                    nextPassNumber++;
                 } break;
 
                 case Command::BeginComputePass: {
                     mCommands.NextCommand<BeginComputePassCmd>();
+
+                    TransitionForPass(commands, mPassResourceUsages[nextPassNumber]);
                     RecordComputePass(commands);
+
+                    nextPassNumber++;
                 } break;
 
                 case Command::TransitionBufferUsage: {
@@ -186,7 +227,6 @@
                         mCommands.NextCommand<TransitionBufferUsageCmd>();
 
                     Buffer* buffer = ToBackend(cmd->buffer.Get());
-                    buffer->RecordBarrier(commands, buffer->GetUsage(), cmd->usage);
                     buffer->UpdateUsageInternal(cmd->usage);
                 } break;
 
@@ -195,7 +235,6 @@
                         mCommands.NextCommand<TransitionTextureUsageCmd>();
 
                     Texture* texture = ToBackend(cmd->texture.Get());
-                    texture->RecordBarrier(commands, texture->GetUsage(), cmd->usage);
                     texture->UpdateUsageInternal(cmd->usage);
                 } break;
 
@@ -250,31 +289,6 @@
                                          RenderPassDescriptor* renderPass) {
         Device* device = ToBackend(GetDevice());
 
-        // NXT has an implicit transition to color attachment on render passes.
-        // Transition the attachments now before we start the render pass.
-        {
-            for (uint32_t i : IterateBitSet(renderPass->GetColorAttachmentMask())) {
-                Texture* attachment =
-                    ToBackend(renderPass->GetColorAttachment(i).view->GetTexture());
-
-                if (!(attachment->GetUsage() & nxt::TextureUsageBit::OutputAttachment)) {
-                    attachment->RecordBarrier(commands, attachment->GetUsage(),
-                                              nxt::TextureUsageBit::OutputAttachment);
-                    attachment->UpdateUsageInternal(nxt::TextureUsageBit::OutputAttachment);
-                }
-            }
-            if (renderPass->HasDepthStencilAttachment()) {
-                Texture* attachment =
-                    ToBackend(renderPass->GetDepthStencilAttachment().view->GetTexture());
-
-                if (!(attachment->GetUsage() & nxt::TextureUsageBit::OutputAttachment)) {
-                    attachment->RecordBarrier(commands, attachment->GetUsage(),
-                                              nxt::TextureUsageBit::OutputAttachment);
-                    attachment->UpdateUsageInternal(nxt::TextureUsageBit::OutputAttachment);
-                }
-            }
-        }
-
         renderPass->RecordBeginRenderPass(commands);
 
         // Set the default value for the dynamic state
diff --git a/src/backend/vulkan/CommandBufferVk.h b/src/backend/vulkan/CommandBufferVk.h
index e7089b2..d6d9a1e 100644
--- a/src/backend/vulkan/CommandBufferVk.h
+++ b/src/backend/vulkan/CommandBufferVk.h
@@ -35,6 +35,7 @@
         void RecordRenderPass(VkCommandBuffer commands, RenderPassDescriptor* renderPass);
 
         CommandIterator mCommands;
+        std::vector<PassResourceUsage> mPassResourceUsages;
     };
 
 }}  // namespace backend::vulkan
diff --git a/src/backend/vulkan/DeviceVk.cpp b/src/backend/vulkan/DeviceVk.cpp
index 05e839b..41faf27 100644
--- a/src/backend/vulkan/DeviceVk.cpp
+++ b/src/backend/vulkan/DeviceVk.cpp
@@ -69,8 +69,14 @@
 
     nxtSwapChainImplementation CreateNativeSwapChainImpl(nxtDevice device, VkSurfaceKHR surface) {
         Device* backendDevice = reinterpret_cast<Device*>(device);
-        return CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, surface));
+
+        nxtSwapChainImplementation impl;
+        impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, surface));
+        impl.textureUsage = NXT_TEXTURE_USAGE_BIT_PRESENT;
+
+        return impl;
     }
+
     nxtTextureFormat GetNativeSwapChainPreferredFormat(
         const nxtSwapChainImplementation* swapChain) {
         NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
diff --git a/src/backend/vulkan/NativeSwapChainImplVk.cpp b/src/backend/vulkan/NativeSwapChainImplVk.cpp
index d623012..bb23afc 100644
--- a/src/backend/vulkan/NativeSwapChainImplVk.cpp
+++ b/src/backend/vulkan/NativeSwapChainImplVk.cpp
@@ -178,13 +178,12 @@
     }
 
     nxtSwapChainError NativeSwapChainImpl::Present() {
-        // Since we're going to do a queue operations we need to flush pending commands such as
-        // layout transitions of the swapchain images to the PRESENT layout.
-        mDevice->SubmitPendingCommands();
+        // This assumes that the image has already been transitioned to the PRESENT layout and
+        // that writes to it have been made available.
 
         // Assuming that the present queue is the same as the graphics queue, the proper
-        // synchronization has already been done by the usage transition to present so we don't
-        // need to wait on any semaphores.
+        // synchronization has already been done on the queue so we don't need to wait on any
+        // semaphores.
         VkPresentInfoKHR presentInfo;
         presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
         presentInfo.pNext = nullptr;
diff --git a/src/backend/vulkan/SwapChainVk.cpp b/src/backend/vulkan/SwapChainVk.cpp
index 91d0baf..f944294 100644
--- a/src/backend/vulkan/SwapChainVk.cpp
+++ b/src/backend/vulkan/SwapChainVk.cpp
@@ -23,6 +23,9 @@
         const auto& im = GetImplementation();
         nxtWSIContextVulkan wsiContext = {};
         im.Init(im.userData, &wsiContext);
+
+        ASSERT(im.textureUsage != NXT_TEXTURE_USAGE_BIT_NONE);
+        mTextureUsage = static_cast<nxt::TextureUsageBit>(im.textureUsage);
     }
 
     SwapChain::~SwapChain() {
@@ -32,6 +35,7 @@
         const auto& im = GetImplementation();
         nxtSwapChainNextTexture next = {};
         nxtSwapChainError error = im.GetNextTexture(im.userData, &next);
+
         if (error) {
             GetDevice()->HandleError(error);
             return nullptr;
@@ -41,4 +45,15 @@
         return new Texture(builder, nativeTexture);
     }
 
+    void SwapChain::OnBeforePresent(TextureBase* texture) {
+        Device* device = ToBackend(GetDevice());
+
+        // Record the pipeline barrier necessary for the texture to be used with the usage
+        // requested by the swapchain implementation.
+        VkCommandBuffer commands = device->GetPendingCommandBuffer();
+        ToBackend(texture)->TransitionUsageNow(commands, mTextureUsage);
+
+        device->SubmitPendingCommands();
+    }
+
 }}  // namespace backend::vulkan
diff --git a/src/backend/vulkan/SwapChainVk.h b/src/backend/vulkan/SwapChainVk.h
index 7e0a8ac..4095d61 100644
--- a/src/backend/vulkan/SwapChainVk.h
+++ b/src/backend/vulkan/SwapChainVk.h
@@ -28,6 +28,10 @@
 
       protected:
         TextureBase* GetNextTextureImpl(TextureBuilder* builder) override;
+        void OnBeforePresent(TextureBase* texture) override;
+
+      private:
+        nxt::TextureUsageBit mTextureUsage;
     };
 
 }}  // namespace backend::vulkan
diff --git a/src/backend/vulkan/TextureVk.cpp b/src/backend/vulkan/TextureVk.cpp
index 9d52f5c..4b46670 100644
--- a/src/backend/vulkan/TextureVk.cpp
+++ b/src/backend/vulkan/TextureVk.cpp
@@ -276,12 +276,6 @@
                                        mMemoryAllocation.GetMemoryOffset()) != VK_SUCCESS) {
             ASSERT(false);
         }
-
-        // Vulkan requires images to be transitioned to their first usage. Do the transition if the
-        // texture has an initial usage.
-        if (GetUsage() != nxt::TextureUsageBit::None) {
-            TransitionUsageImpl(nxt::TextureUsageBit::None, GetUsage());
-        }
     }
 
     Texture::Texture(TextureBuilder* builder, VkImage nativeImage)
@@ -312,22 +306,25 @@
         return VulkanAspectMask(GetFormat());
     }
 
-    // Helper function to add a texture barrier to a command buffer. This is inefficient because we
-    // should be coalescing barriers as much as possible.
-    void Texture::RecordBarrier(VkCommandBuffer commands,
-                                nxt::TextureUsageBit currentUsage,
-                                nxt::TextureUsageBit targetUsage) const {
+    void Texture::TransitionUsageNow(VkCommandBuffer commands, nxt::TextureUsageBit usage) {
+        // Avoid encoding a barrier when it isn't needed.
+        bool lastReadOnly = (mLastUsage & kReadOnlyTextureUsages) == mLastUsage;
+        if (lastReadOnly && mLastUsage == usage) {
+            return;
+        }
+
         nxt::TextureFormat format = GetFormat();
-        VkPipelineStageFlags srcStages = VulkanPipelineStage(currentUsage, format);
-        VkPipelineStageFlags dstStages = VulkanPipelineStage(targetUsage, format);
+
+        VkPipelineStageFlags srcStages = VulkanPipelineStage(mLastUsage, format);
+        VkPipelineStageFlags dstStages = VulkanPipelineStage(usage, format);
 
         VkImageMemoryBarrier barrier;
         barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
         barrier.pNext = nullptr;
-        barrier.srcAccessMask = VulkanAccessFlags(currentUsage, format);
-        barrier.dstAccessMask = VulkanAccessFlags(targetUsage, format);
-        barrier.oldLayout = VulkanImageLayout(currentUsage, format);
-        barrier.newLayout = VulkanImageLayout(targetUsage, format);
+        barrier.srcAccessMask = VulkanAccessFlags(mLastUsage, format);
+        barrier.dstAccessMask = VulkanAccessFlags(usage, format);
+        barrier.oldLayout = VulkanImageLayout(mLastUsage, format);
+        barrier.newLayout = VulkanImageLayout(usage, format);
         barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
         barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
         barrier.image = mHandle;
@@ -342,12 +339,11 @@
         ToBackend(GetDevice())
             ->fn.CmdPipelineBarrier(commands, srcStages, dstStages, 0, 0, nullptr, 0, nullptr, 1,
                                     &barrier);
+
+        mLastUsage = usage;
     }
 
-    void Texture::TransitionUsageImpl(nxt::TextureUsageBit currentUsage,
-                                      nxt::TextureUsageBit targetUsage) {
-        VkCommandBuffer commands = ToBackend(GetDevice())->GetPendingCommandBuffer();
-        RecordBarrier(commands, currentUsage, targetUsage);
+    void Texture::TransitionUsageImpl(nxt::TextureUsageBit, nxt::TextureUsageBit) {
     }
 
     TextureView::TextureView(TextureViewBuilder* builder) : TextureViewBase(builder) {
diff --git a/src/backend/vulkan/TextureVk.h b/src/backend/vulkan/TextureVk.h
index 0a202a8..69ca429 100644
--- a/src/backend/vulkan/TextureVk.h
+++ b/src/backend/vulkan/TextureVk.h
@@ -34,9 +34,10 @@
         VkImage GetHandle() const;
         VkImageAspectFlags GetVkAspectMask() const;
 
-        void RecordBarrier(VkCommandBuffer commands,
-                           nxt::TextureUsageBit currentUsage,
-                           nxt::TextureUsageBit targetUsage) const;
+        // Transitions the texture to be used as `usage`, recording any necessary barrier in
+        // `commands`.
+        // TODO(cwallez@chromium.org): coalesce barriers and do them early when possible.
+        void TransitionUsageNow(VkCommandBuffer commands, nxt::TextureUsageBit usage);
 
       private:
         void TransitionUsageImpl(nxt::TextureUsageBit currentUsage,
@@ -44,6 +45,10 @@
 
         VkImage mHandle = VK_NULL_HANDLE;
         DeviceMemoryAllocation mMemoryAllocation;
+
+        // A usage of None will make sure the texture is transitioned before its first use, as
+        // required by the Vulkan spec.
+        nxt::TextureUsageBit mLastUsage = nxt::TextureUsageBit::None;
     };
 
     class TextureView : public TextureViewBase {