Rename `BufferBase::GetUsage` and `BufferBase::GetUsageExternalOnly`

This patch renames `BufferBase::GetUsage` to `GetInternalUsage` and
`BufferBase::GetUsageExternalOnly` to `GetUsage`, matching the
functions of the same names on `TextureBase`. After this change
`GetUsage` returns only the usages requested through the WebGPU API,
while `GetInternalUsage` returns the union of those usages and the
ones Dawn adds for internal state tracking.
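
A minimal sketch of the new contract (the `buffer` pointer here is
hypothetical; `DAWN_ASSERT` and `IsSubset` are existing Dawn helpers):

    // GetUsage() now reports only the usages the WebGPU client asked for.
    wgpu::BufferUsage external = buffer->GetUsage();
    // GetInternalUsage() is a superset that also carries usages Dawn adds
    // internally, e.g. kInternalStorageBuffer.
    wgpu::BufferUsage internal = buffer->GetInternalUsage();
    DAWN_ASSERT(IsSubset(external, internal));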

Bug: chromium:350497225
Change-Id: I2c0e116a5d326fe7af1e79b0a897353314828935
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/202274
Reviewed-by: Loko Kung <lokokung@google.com>
Commit-Queue: Jiawei Shao <jiawei.shao@intel.com>
Reviewed-by: Austin Eng <enga@chromium.org>
diff --git a/src/dawn/native/BindGroup.cpp b/src/dawn/native/BindGroup.cpp
index d52be23..b397bf4 100644
--- a/src/dawn/native/BindGroup.cpp
+++ b/src/dawn/native/BindGroup.cpp
@@ -115,9 +115,9 @@
                     "Offset (%u) of %s does not satisfy the minimum %s alignment (%u).",
                     entry.offset, entry.buffer, layout.type, requiredBindingAlignment);
 
-    DAWN_INVALID_IF(!(entry.buffer->GetUsage() & requiredUsage),
+    DAWN_INVALID_IF(!(entry.buffer->GetInternalUsage() & requiredUsage),
                     "Binding usage (%s) of %s doesn't match expected usage (%s).",
-                    entry.buffer->GetUsageExternalOnly(), entry.buffer, requiredUsage);
+                    entry.buffer->GetUsage(), entry.buffer, requiredUsage);
 
     DAWN_INVALID_IF(bindingSize < layout.minBindingSize,
                     "Binding size (%u) of %s is smaller than the minimum binding size (%u).",
diff --git a/src/dawn/native/BlitTextureToBuffer.cpp b/src/dawn/native/BlitTextureToBuffer.cpp
index f49666b..a1a8c7f 100644
--- a/src/dawn/native/BlitTextureToBuffer.cpp
+++ b/src/dawn/native/BlitTextureToBuffer.cpp
@@ -1069,7 +1069,7 @@
     const bool needsTempForOOBU32Write =
         shaderBindingOffset + shaderBindingSize > dst.buffer->GetSize();
     const bool needsTempForStorageUsage =
-        !(dst.buffer->GetUsage() & (kInternalStorageBuffer | wgpu::BufferUsage::Storage));
+        !(dst.buffer->GetInternalUsage() & (kInternalStorageBuffer | wgpu::BufferUsage::Storage));
     const bool useIntermediateCopyBuffer = needsTempForOOBU32Write || needsTempForStorageUsage;
     // Offset to copy to original buffer. Only relevant if useIntermediateCopyBuffer is true.
     uint64_t offsetInOriginalBuf = 0;
diff --git a/src/dawn/native/Buffer.cpp b/src/dawn/native/Buffer.cpp
index 87123e1..33bc88f 100644
--- a/src/dawn/native/Buffer.cpp
+++ b/src/dawn/native/Buffer.cpp
@@ -534,12 +534,12 @@
     return mAllocatedSize;
 }
 
-wgpu::BufferUsage BufferBase::GetUsage() const {
+wgpu::BufferUsage BufferBase::GetInternalUsage() const {
     DAWN_ASSERT(!IsError());
     return mInternalUsage;
 }
 
-wgpu::BufferUsage BufferBase::GetUsageExternalOnly() const {
+wgpu::BufferUsage BufferBase::GetUsage() const {
     DAWN_ASSERT(!IsError());
     return mUsage;
 }
@@ -1134,7 +1134,7 @@
     dump->AddScalar(name.c_str(), MemoryDump::kNameSize, MemoryDump::kUnitsBytes,
                     GetAllocatedSize());
     dump->AddString(name.c_str(), "label", GetLabel());
-    dump->AddString(name.c_str(), "usage", absl::StrFormat("%s", GetUsage()));
+    dump->AddString(name.c_str(), "usage", absl::StrFormat("%s", GetInternalUsage()));
 }
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/Buffer.h b/src/dawn/native/Buffer.h
index 066f554..d5aa0b6 100644
--- a/src/dawn/native/Buffer.h
+++ b/src/dawn/native/Buffer.h
@@ -88,13 +88,11 @@
     uint64_t GetAllocatedSize() const;
     ExecutionSerial GetLastUsageSerial() const;
 
-    // |GetUsageExternalOnly| returns the usage with which the buffer was created using the
-    // base WebGPU API. Additional usages may be added for internal state tracking. |GetUsage|
-    // returns the union of base usage and the usages added internally.
-    // TODO(chromium:350497225): Rename |GetUsage| to |GetInternalUsage| to align with the same
-    // function in `TextureBase`.
+    // |GetUsage| returns the usage with which the buffer was created using the base WebGPU API.
+    // Additional usages may be added for internal state tracking. |GetInternalUsage| returns the
+    // union of base usage and the usages added internally.
+    wgpu::BufferUsage GetInternalUsage() const;
     wgpu::BufferUsage GetUsage() const;
-    wgpu::BufferUsage GetUsageExternalOnly() const;
 
     MaybeError MapAtCreation();
     void CallbackOnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
diff --git a/src/dawn/native/CommandValidation.cpp b/src/dawn/native/CommandValidation.cpp
index 1b9989f..7bd537a 100644
--- a/src/dawn/native/CommandValidation.cpp
+++ b/src/dawn/native/CommandValidation.cpp
@@ -650,14 +650,15 @@
 
 MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage) {
     DAWN_ASSERT(wgpu::HasZeroOrOneBits(usage));
-    DAWN_INVALID_IF(!(buffer->GetUsageExternalOnly() & usage), "%s usage (%s) doesn't include %s.",
-                    buffer, buffer->GetUsageExternalOnly(), usage);
+    DAWN_INVALID_IF(!(buffer->GetUsage() & usage), "%s usage (%s) doesn't include %s.", buffer,
+                    buffer->GetUsage(), usage);
     return {};
 }
 
 MaybeError ValidateCanUseAsInternal(const BufferBase* buffer, wgpu::BufferUsage usage) {
-    DAWN_INVALID_IF(!(buffer->GetUsage() & usage), "%s internal usage (%s) doesn't include %s.",
-                    buffer, buffer->GetUsage(), usage);
+    DAWN_INVALID_IF(!(buffer->GetInternalUsage() & usage),
+                    "%s internal usage (%s) doesn't include %s.", buffer,
+                    buffer->GetInternalUsage(), usage);
     return {};
 }
 
diff --git a/src/dawn/native/ComputePassEncoder.cpp b/src/dawn/native/ComputePassEncoder.cpp
index 0d0eed8..ad81066 100644
--- a/src/dawn/native/ComputePassEncoder.cpp
+++ b/src/dawn/native/ComputePassEncoder.cpp
@@ -322,7 +322,7 @@
     Ref<BufferBase> validatedIndirectBuffer = scratchBuffer.GetBuffer();
 
     Ref<BindGroupBase> validationBindGroup;
-    DAWN_ASSERT(indirectBuffer->GetUsage() & kInternalStorageBuffer);
+    DAWN_ASSERT(indirectBuffer->GetInternalUsage() & kInternalStorageBuffer);
     DAWN_TRY_ASSIGN(validationBindGroup,
                     utils::MakeBindGroup(device, layout,
                                          {
diff --git a/src/dawn/native/d3d11/BufferD3D11.cpp b/src/dawn/native/d3d11/BufferD3D11.cpp
index 7c5f2ad..fb1a532 100644
--- a/src/dawn/native/d3d11/BufferD3D11.cpp
+++ b/src/dawn/native/d3d11/BufferD3D11.cpp
@@ -243,7 +243,7 @@
     }
 
     MaybeError InitializeInternal() override {
-        DAWN_ASSERT(IsStaging(GetUsage()));
+        DAWN_ASSERT(IsStaging(GetInternalUsage()));
 
         D3D11_BUFFER_DESC bufferDescriptor;
         bufferDescriptor.ByteWidth = mAllocatedSize;
@@ -271,7 +271,7 @@
 
     MaybeError MapInternal(const ScopedCommandRecordingContext* commandContext,
                            wgpu::MapMode) override {
-        DAWN_ASSERT(IsMappable(GetUsage()));
+        DAWN_ASSERT(IsMappable(GetInternalUsage()));
         DAWN_ASSERT(!mMappedData);
 
         // Always map buffer with D3D11_MAP_READ_WRITE even for mapping wgpu::MapMode::Read, because
@@ -381,10 +381,10 @@
     uint64_t size = std::max(GetSize(), uint64_t(4u));
     // The validation layer requires:
     // ByteWidth must be 12 or larger to be used with D3D11_RESOURCE_MISC_DRAWINDIRECT_ARGS.
-    if (GetUsage() & wgpu::BufferUsage::Indirect) {
+    if (GetInternalUsage() & wgpu::BufferUsage::Indirect) {
         size = std::max(size, uint64_t(12u));
     }
-    size_t alignment = D3D11BufferSizeAlignment(GetUsage());
+    size_t alignment = D3D11BufferSizeAlignment(GetInternalUsage());
     // Check for overflow, bufferDescriptor.ByteWidth is a UINT.
     if (size > std::numeric_limits<UINT>::max() - alignment) {
         // Alignment would overflow.
@@ -494,7 +494,7 @@
 }
 
 void Buffer::UnmapImpl() {
-    DAWN_ASSERT(IsMappable(GetUsage()));
+    DAWN_ASSERT(IsMappable(GetInternalUsage()));
     mMapReadySerial = kMaxExecutionSerial;
     if (mMappedData) {
         auto commandContext = ToBackend(GetDevice()->GetQueue())
@@ -826,11 +826,11 @@
 }
 
 MaybeError GPUUsableBuffer::InitializeInternal() {
-    DAWN_ASSERT(!IsStaging(GetUsage()));
+    DAWN_ASSERT(!IsStaging(GetInternalUsage()));
 
     mStorages = {};
 
-    wgpu::BufferUsage usagesToHandle = GetUsage();
+    wgpu::BufferUsage usagesToHandle = GetInternalUsage();
 
     // We need to create a separate storage for uniform usage, because D3D11 doesn't allow constant
     // buffer to be used for other purposes.
@@ -916,7 +916,7 @@
             // Need to exclude GPU-writable usages because a CPU-writable buffer is not
             // GPU-writable in D3D11.
             auto nonUniformUsage =
-                GetUsage() & ~(kD3D11GPUWriteUsages | wgpu::BufferUsage::Uniform);
+                GetInternalUsage() & ~(kD3D11GPUWriteUsages | wgpu::BufferUsage::Uniform);
             bufferDescriptor.Usage = D3D11_USAGE_DYNAMIC;
             bufferDescriptor.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
             bufferDescriptor.BindFlags = D3D11BufferBindFlags(nonUniformUsage);
@@ -931,7 +931,7 @@
         case StorageType::GPUWritableNonConstantBuffer: {
             // Need to exclude mapping usages.
             const auto nonUniformUsage =
-                GetUsage() & ~(kMappableBufferUsages | wgpu::BufferUsage::Uniform);
+                GetInternalUsage() & ~(kMappableBufferUsages | wgpu::BufferUsage::Uniform);
             bufferDescriptor.Usage = D3D11_USAGE_DEFAULT;
             bufferDescriptor.CPUAccessFlags = 0;
             bufferDescriptor.BindFlags = D3D11BufferBindFlags(nonUniformUsage);
@@ -967,7 +967,7 @@
         return mStorages[StorageType::GPUWritableNonConstantBuffer].Get();
     }
 
-    if (GetUsage() & wgpu::BufferUsage::Uniform) {
+    if (GetInternalUsage() & wgpu::BufferUsage::Uniform) {
         return GetOrCreateStorage(StorageType::GPUCopyDstConstantBuffer);
     }
 
@@ -1038,7 +1038,7 @@
     dstStorage->SetRevision(dstStorage->GetRevision() + 1);
     mLastUpdatedStorage = dstStorage;
 
-    if (dstStorage->IsGPUWritable() && IsMappable(GetUsage())) {
+    if (dstStorage->IsGPUWritable() && IsMappable(GetInternalUsage())) {
         // If this buffer is mappable and the last updated storage is GPU writable, we need to
         // update the staging storage when the command buffer is flushed.
         // This makes sure the staging storage will contain the up-to-date GPU-modified data.
@@ -1048,7 +1048,7 @@
 
 MaybeError GPUUsableBuffer::SyncGPUWritesToStaging(
     const ScopedCommandRecordingContext* commandContext) {
-    DAWN_ASSERT(IsMappable(GetUsage()));
+    DAWN_ASSERT(IsMappable(GetInternalUsage()));
 
     // Only sync staging storage. Later other CPU writable storages can be updated by
     // copying from staging storage with Map(MAP_WRITE_DISCARD) which won't stall the CPU.
diff --git a/src/dawn/native/d3d12/BufferD3D12.cpp b/src/dawn/native/d3d12/BufferD3D12.cpp
index 8a6a1a0..2da4fe9 100644
--- a/src/dawn/native/d3d12/BufferD3D12.cpp
+++ b/src/dawn/native/d3d12/BufferD3D12.cpp
@@ -157,7 +157,7 @@
 MaybeError Buffer::Initialize(bool mappedAtCreation) {
     // Allocate at least 4 bytes so clamped accesses are always in bounds.
     uint64_t size = std::max(GetSize(), uint64_t(4u));
-    size_t alignment = D3D12BufferSizeAlignment(GetUsage());
+    size_t alignment = D3D12BufferSizeAlignment(GetInternalUsage());
     if (size > std::numeric_limits<uint64_t>::max() - alignment) {
         // Alignment would overflow.
         return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
@@ -177,9 +177,9 @@
     resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
     // Add CopyDst for non-mappable buffer initialization with mappedAtCreation
     // and robust resource initialization.
-    resourceDescriptor.Flags = D3D12ResourceFlags(GetUsage() | wgpu::BufferUsage::CopyDst);
+    resourceDescriptor.Flags = D3D12ResourceFlags(GetInternalUsage() | wgpu::BufferUsage::CopyDst);
 
-    auto heapType = D3D12HeapType(GetUsage());
+    auto heapType = D3D12HeapType(GetInternalUsage());
     mLastState = D3D12_RESOURCE_STATE_COMMON;
 
     switch (heapType) {
@@ -260,7 +260,7 @@
     resourceDescriptor.SampleDesc.Quality = 0;
     resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
     resourceDescriptor.Flags =
-        D3D12ResourceFlags(GetUsage()) | D3D12_RESOURCE_FLAG_ALLOW_CROSS_ADAPTER;
+        D3D12ResourceFlags(GetInternalUsage()) | D3D12_RESOURCE_FLAG_ALLOW_CROSS_ADAPTER;
 
     D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
         device->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
@@ -420,7 +420,7 @@
     // staging buffer, and copied from the staging buffer to the GPU memory of the current
     // buffer in the unmap() call.
     // TODO(enga): Handle CPU-visible memory on UMA
-    return (GetUsage() & wgpu::BufferUsage::MapWrite) != 0;
+    return (GetInternalUsage() & wgpu::BufferUsage::MapWrite) != 0;
 }
 
 MaybeError Buffer::MapInternal(bool isWrite, size_t offset, size_t size, const char* contextInfo) {
@@ -454,7 +454,7 @@
     // We will use a staging buffer for MapRead buffers instead so we just clear the staging
     // buffer and initialize the original buffer by copying the staging buffer to the original
     // buffer the first time Unmap() is called.
-    DAWN_ASSERT((GetUsage() & wgpu::BufferUsage::MapWrite) != 0);
+    DAWN_ASSERT((GetInternalUsage() & wgpu::BufferUsage::MapWrite) != 0);
 
     // The buffers with mappedAtCreation == true will be initialized in
     // BufferBase::MapAtCreation().
@@ -624,7 +624,7 @@
 
     // The state of buffers on the UPLOAD heap must always be GENERIC_READ and cannot be
     // changed, so we can only clear such a buffer through buffer mapping.
-    if (D3D12HeapType(GetUsage()) == D3D12_HEAP_TYPE_UPLOAD) {
+    if (D3D12HeapType(GetInternalUsage()) == D3D12_HEAP_TYPE_UPLOAD) {
         DAWN_TRY(MapInternal(true, static_cast<size_t>(offset), static_cast<size_t>(size),
                              "D3D12 map at clear buffer"));
         memset(mMappedData, clearValue, size);
diff --git a/src/dawn/native/metal/BufferMTL.mm b/src/dawn/native/metal/BufferMTL.mm
index 2403e90..addc8d0 100644
--- a/src/dawn/native/metal/BufferMTL.mm
+++ b/src/dawn/native/metal/BufferMTL.mm
@@ -71,7 +71,7 @@
 
 MaybeError Buffer::Initialize(bool mappedAtCreation) {
     MTLResourceOptions storageMode;
-    if (GetUsage() & kMappableBufferUsages) {
+    if (GetInternalUsage() & kMappableBufferUsages) {
         storageMode = MTLResourceStorageModeShared;
     } else {
         storageMode = MTLResourceStorageModePrivate;
@@ -86,7 +86,7 @@
     // Metal validation layer requires the size of uniform buffer and storage buffer to be no
     // less than the size of the buffer block defined in shader, and the overall size of the
     // buffer must be aligned to the largest alignment of its members.
-    if (GetUsage() &
+    if (GetInternalUsage() &
         (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
         DAWN_ASSERT(IsAligned(kMinUniformOrStorageBufferAlignment, alignment));
         alignment = kMinUniformOrStorageBufferAlignment;
@@ -96,7 +96,7 @@
     // 0-sized vertex buffer bindings are allowed, so we always need an additional 4 bytes
     // after the end.
     NSUInteger extraBytes = 0u;
-    if ((GetUsage() & wgpu::BufferUsage::Vertex) != 0) {
+    if ((GetInternalUsage() & wgpu::BufferUsage::Vertex) != 0) {
         extraBytes = 4u;
     }
 
@@ -189,7 +189,7 @@
 
 bool Buffer::IsCPUWritableAtCreation() const {
     // TODO(enga): Handle CPU-visible memory on UMA
-    return GetUsage() & kMappableBufferUsages;
+    return GetInternalUsage() & kMappableBufferUsages;
 }
 
 MaybeError Buffer::MapAtCreationImpl() {
diff --git a/src/dawn/native/null/DeviceNull.cpp b/src/dawn/native/null/DeviceNull.cpp
index d8a1e2f..ccd4281 100644
--- a/src/dawn/native/null/DeviceNull.cpp
+++ b/src/dawn/native/null/DeviceNull.cpp
@@ -363,7 +363,7 @@
 bool Buffer::IsCPUWritableAtCreation() const {
     // Only return true for mappable buffers so we can test cases that need / don't need a
     // staging buffer.
-    return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
+    return (GetInternalUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
 }
 
 MaybeError Buffer::MapAtCreationImpl() {
diff --git a/src/dawn/native/vulkan/BufferVk.cpp b/src/dawn/native/vulkan/BufferVk.cpp
index 46bd395..958cfd1 100644
--- a/src/dawn/native/vulkan/BufferVk.cpp
+++ b/src/dawn/native/vulkan/BufferVk.cpp
@@ -177,7 +177,7 @@
     constexpr size_t kAlignment = 4u;
 
     uint32_t extraBytes = 0u;
-    if (GetUsage() & (wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index)) {
+    if (GetInternalUsage() & (wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index)) {
         // vkCmdBindIndexBuffer and vkCmdBindVertexBuffers are invalid if the offset
         // is equal to the whole buffer size. Allocate at least one more byte so it
         // is valid to call SetVertex/IndexBuffer with a zero-sized range at the end
@@ -219,7 +219,7 @@
     createInfo.size = mAllocatedSize;
     // Add CopyDst for non-mappable buffer initialization with mappedAtCreation
     // and robust resource initialization.
-    createInfo.usage = VulkanBufferUsage(GetUsage() | wgpu::BufferUsage::CopyDst);
+    createInfo.usage = VulkanBufferUsage(GetInternalUsage() | wgpu::BufferUsage::CopyDst);
     createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
     createInfo.queueFamilyIndexCount = 0;
     createInfo.pQueueFamilyIndices = 0;
@@ -234,9 +234,9 @@
     device->fn.GetBufferMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
 
     MemoryKind requestKind = MemoryKind::Linear;
-    if (GetUsage() & wgpu::BufferUsage::MapRead) {
+    if (GetInternalUsage() & wgpu::BufferUsage::MapRead) {
         requestKind = MemoryKind::LinearReadMappable;
-    } else if (GetUsage() & wgpu::BufferUsage::MapWrite) {
+    } else if (GetInternalUsage() & wgpu::BufferUsage::MapWrite) {
         requestKind = MemoryKind::LinearWriteMappable;
     }
     DAWN_TRY_ASSIGN(mMemoryAllocation,
@@ -298,7 +298,7 @@
     createInfo.pNext = &externalMemoryCreateInfo;
     createInfo.flags = 0;
     createInfo.size = mAllocatedSize;
-    createInfo.usage = VulkanBufferUsage(GetUsage());
+    createInfo.usage = VulkanBufferUsage(GetInternalUsage());
     createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
     createInfo.queueFamilyIndexCount = 0;
     createInfo.pQueueFamilyIndices = 0;
@@ -331,9 +331,9 @@
     }
 
     MemoryKind requestKind;
-    if (GetUsage() & wgpu::BufferUsage::MapRead) {
+    if (GetInternalUsage() & wgpu::BufferUsage::MapRead) {
         requestKind = MemoryKind::LinearReadMappable;
-    } else if (GetUsage() & wgpu::BufferUsage::MapWrite) {
+    } else if (GetInternalUsage() & wgpu::BufferUsage::MapWrite) {
         requestKind = MemoryKind::LinearWriteMappable;
     } else {
         requestKind = MemoryKind::Linear;
@@ -420,7 +420,7 @@
         MarkUsedInPendingCommands();
     }
 
-    if (!isMapUsage && (GetUsage() & kMappableBufferUsages)) {
+    if (!isMapUsage && (GetInternalUsage() & kMappableBufferUsages)) {
         // The buffer is mappable and the requested usage is not map usage, we need to add it
         // into mappableBuffersForEagerTransition, so the buffer can be transitioned back to map
         // usages at the end of the submit.
@@ -447,7 +447,7 @@
         if (usage & kReadOnlyShaderBufferUsages) {
             // Pre-emptively transition to all read-only shader buffer usages if one is used to
             // avoid unnecessary barriers later.
-            usage |= GetUsage() & kReadOnlyShaderBufferUsages;
+            usage |= GetInternalUsage() & kReadOnlyShaderBufferUsages;
         }
 
         mReadUsage |= usage;
@@ -561,7 +561,7 @@
     Device* device = ToBackend(GetDevice());
 
     const bool isInUse = GetLastUsageSerial() > device->GetQueue()->GetCompletedCommandSerial();
-    const bool isMappable = GetUsage() & kMappableBufferUsages;
+    const bool isMappable = GetInternalUsage() & kMappableBufferUsages;
     // Check whether the buffer has pending writes on the GPU. Even if the write workload has
     // finished, the write may still need a barrier to make the write available.
     const bool hasPendingWrites = !IsSubset(mLastWriteUsage, wgpu::BufferUsage::MapWrite);
@@ -745,7 +745,7 @@
 
     size_t originalBufferCount = buffers.size();
     for (const Ref<Buffer>& buffer : buffers) {
-        wgpu::BufferUsage mapUsage = buffer->GetUsage() & kMappableBufferUsages;
+        wgpu::BufferUsage mapUsage = buffer->GetInternalUsage() & kMappableBufferUsages;
         DAWN_ASSERT(mapUsage == wgpu::BufferUsage::MapRead ||
                     mapUsage == wgpu::BufferUsage::MapWrite);
         VkBufferMemoryBarrier barrier;
diff --git a/src/dawn/tests/unittests/native/mocks/BufferMock.cpp b/src/dawn/tests/unittests/native/mocks/BufferMock.cpp
index 545d027..26a7f91 100644
--- a/src/dawn/tests/unittests/native/mocks/BufferMock.cpp
+++ b/src/dawn/tests/unittests/native/mocks/BufferMock.cpp
@@ -46,7 +46,8 @@
     ON_CALL(*this, DestroyImpl).WillByDefault([this] { this->BufferBase::DestroyImpl(); });
     ON_CALL(*this, GetMappedPointer).WillByDefault(Return(mBackingData.get()));
     ON_CALL(*this, IsCPUWritableAtCreation).WillByDefault([this] {
-        return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
+        return (GetInternalUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) !=
+               0;
     });
 }