// Copyright 2017 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

Corentin Wallezd37523f2018-07-24 13:53:51 +020015#include "dawn_native/Buffer.h"
Corentin Wallezf07e3bd2017-04-20 14:38:20 -040016
Austin Enge3fd0262021-01-05 07:40:48 +000017#include "common/Alloc.h"
Corentin Wallezfd589f32017-07-10 13:46:05 -040018#include "common/Assert.h"
Jiawei Shaoc11a1912020-07-28 01:58:50 +000019#include "dawn_native/Commands.h"
Corentin Wallezd37523f2018-07-24 13:53:51 +020020#include "dawn_native/Device.h"
Bryan Bernhart67a73bd2019-02-15 21:18:40 +000021#include "dawn_native/DynamicUploader.h"
Natasha Lee0ecc48e2020-01-15 19:02:13 +000022#include "dawn_native/ErrorData.h"
Corentin Wallez47a33412020-06-02 09:24:39 +000023#include "dawn_native/Queue.h"
Corentin Wallez82b65732018-08-22 15:37:29 +020024#include "dawn_native/ValidationUtils_autogen.h"
Corentin Wallezf07e3bd2017-04-20 14:38:20 -040025
Corentin Wallezf07e3bd2017-04-20 14:38:20 -040026#include <cstdio>
Stephen White68d97ad2019-07-23 17:04:34 +000027#include <cstring>
Corentin Wallezc1400f02017-11-24 13:59:42 -050028#include <utility>
Corentin Wallezf07e3bd2017-04-20 14:38:20 -040029
Corentin Wallez49a65d02018-07-24 16:45:45 +020030namespace dawn_native {
Corentin Wallezf07e3bd2017-04-20 14:38:20 -040031
Corentin Walleza594f8f2019-02-13 13:09:18 +000032 namespace {
Natasha Lee51af1b42020-10-12 22:32:33 +000033 struct MapRequestTask : QueueBase::TaskInFlight {
34 MapRequestTask(Ref<BufferBase> buffer, MapRequestID id)
35 : buffer(std::move(buffer)), id(id) {
36 }
37 void Finish() override {
38 buffer->OnMapRequestCompleted(id);
39 }
40 ~MapRequestTask() override = default;
41
42 private:
43 Ref<BufferBase> buffer;
44 MapRequestID id;
45 };
Corentin Walleza594f8f2019-02-13 13:09:18 +000046
Rafael Cintronc64242d2020-04-06 18:20:02 +000047 class ErrorBuffer final : public BufferBase {
Corentin Walleza594f8f2019-02-13 13:09:18 +000048 public:
Corentin Wallezb2ea1912020-07-07 11:21:51 +000049 ErrorBuffer(DeviceBase* device, const BufferDescriptor* descriptor)
50 : BufferBase(device, descriptor, ObjectBase::kError) {
51 if (descriptor->mappedAtCreation) {
52 // Check that the size can be used to allocate an mFakeMappedData. A malloc(0)
53 // is invalid, and on 32bit systems we should avoid a narrowing conversion that
54 // would make size = 1 << 32 + 1 allocate one byte.
55 bool isValidSize =
56 descriptor->size != 0 &&
57 descriptor->size < uint64_t(std::numeric_limits<size_t>::max());
Corentin Walleza594f8f2019-02-13 13:09:18 +000058
Corentin Wallezb2ea1912020-07-07 11:21:51 +000059 if (isValidSize) {
Austin Enge3fd0262021-01-05 07:40:48 +000060 mFakeMappedData =
61 std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(descriptor->size));
Corentin Wallezb2ea1912020-07-07 11:21:51 +000062 }
63 }
Austin Eng740995c2019-05-15 18:55:22 +000064 }
65
Austin Eng9cd21f12019-06-05 18:35:31 +000066 void ClearMappedData() {
67 mFakeMappedData.reset();
68 }
69
Corentin Walleza594f8f2019-02-13 13:09:18 +000070 private:
Jiawei Shao1c4a7f72020-09-01 08:08:57 +000071 bool IsCPUWritableAtCreation() const override {
Austin Eng9cd21f12019-06-05 18:35:31 +000072 UNREACHABLE();
Austin Eng9cd21f12019-06-05 18:35:31 +000073 }
74
Corentin Wallezb2ea1912020-07-07 11:21:51 +000075 MaybeError MapAtCreationImpl() override {
Austin Eng740995c2019-05-15 18:55:22 +000076 UNREACHABLE();
Austin Eng740995c2019-05-15 18:55:22 +000077 }
78
Corentin Wallez0d52f802020-07-14 12:30:14 +000079 MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override {
80 UNREACHABLE();
Corentin Wallez0d52f802020-07-14 12:30:14 +000081 }
Natasha Lee949f1e42020-05-19 01:29:32 +000082 void* GetMappedPointerImpl() override {
83 return mFakeMappedData.get();
84 }
Corentin Walleza594f8f2019-02-13 13:09:18 +000085 void UnmapImpl() override {
Austin Eng9cd21f12019-06-05 18:35:31 +000086 UNREACHABLE();
Corentin Walleza594f8f2019-02-13 13:09:18 +000087 }
Natasha Lee718e1db2019-03-11 17:05:22 +000088 void DestroyImpl() override {
89 UNREACHABLE();
90 }
Austin Eng740995c2019-05-15 18:55:22 +000091
92 std::unique_ptr<uint8_t[]> mFakeMappedData;
Corentin Walleza594f8f2019-02-13 13:09:18 +000093 };
94
95 } // anonymous namespace
96
Corentin Wallez82b65732018-08-22 15:37:29 +020097 MaybeError ValidateBufferDescriptor(DeviceBase*, const BufferDescriptor* descriptor) {
Corentin Wallez6fee61c2018-09-10 16:17:24 +020098 if (descriptor->nextInChain != nullptr) {
99 return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
100 }
101
Corentin Wallez9e9e29f2019-08-27 08:21:39 +0000102 DAWN_TRY(ValidateBufferUsage(descriptor->usage));
Corentin Wallez82b65732018-08-22 15:37:29 +0200103
Corentin Wallez1f6c8c42019-10-23 11:57:41 +0000104 wgpu::BufferUsage usage = descriptor->usage;
Corentin Wallez82b65732018-08-22 15:37:29 +0200105
Corentin Wallez1f6c8c42019-10-23 11:57:41 +0000106 const wgpu::BufferUsage kMapWriteAllowedUsages =
107 wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
108 if (usage & wgpu::BufferUsage::MapWrite && (usage & kMapWriteAllowedUsages) != usage) {
Corentin Wallezec053552019-07-08 10:05:46 +0000109 return DAWN_VALIDATION_ERROR("Only CopySrc is allowed with MapWrite");
Corentin Wallez82b65732018-08-22 15:37:29 +0200110 }
111
Corentin Wallez1f6c8c42019-10-23 11:57:41 +0000112 const wgpu::BufferUsage kMapReadAllowedUsages =
113 wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
114 if (usage & wgpu::BufferUsage::MapRead && (usage & kMapReadAllowedUsages) != usage) {
Corentin Wallezec053552019-07-08 10:05:46 +0000115 return DAWN_VALIDATION_ERROR("Only CopyDst is allowed with MapRead");
Corentin Wallez82b65732018-08-22 15:37:29 +0200116 }
117
Corentin Wallezb2ea1912020-07-07 11:21:51 +0000118 if (descriptor->mappedAtCreation && descriptor->size % 4 != 0) {
119 return DAWN_VALIDATION_ERROR("size must be aligned to 4 when mappedAtCreation is true");
120 }
121
Corentin Wallez82b65732018-08-22 15:37:29 +0200122 return {};
123 }
124
Corentin Wallezf07e3bd2017-04-20 14:38:20 -0400125 // Buffer
126
Corentin Wallez82b65732018-08-22 15:37:29 +0200127 BufferBase::BufferBase(DeviceBase* device, const BufferDescriptor* descriptor)
Austin Eng446ab442019-02-13 21:26:48 +0000128 : ObjectBase(device),
129 mSize(descriptor->size),
130 mUsage(descriptor->usage),
131 mState(BufferState::Unmapped) {
Yunchao He64cfaea2019-11-07 07:09:07 +0000132 // Add readonly storage usage if the buffer has a storage usage. The validation rules in
Austin Eng4b0b7a52019-11-21 22:09:41 +0000133 // ValidatePassResourceUsage will make sure we don't use both at the same
Yunchao He64cfaea2019-11-07 07:09:07 +0000134 // time.
135 if (mUsage & wgpu::BufferUsage::Storage) {
Jiawei Shaoe89b4872020-04-21 00:48:10 +0000136 mUsage |= kReadOnlyStorageBuffer;
Yunchao He64cfaea2019-11-07 07:09:07 +0000137 }
Hao Li6f833b72021-01-14 03:26:08 +0000138
139 // TODO(hao.x.li@intel.com): This is just a workaround to make QueryResolve buffer pass the
140 // binding group validation when used as an internal resource. Instead the buffer made with
141 // QueryResolve usage would implicitly get StorageInternal usage which is only compatible
142 // with StorageBufferInternal binding type in BGL, not StorageBuffer binding type.
143 if (mUsage & wgpu::BufferUsage::QueryResolve) {
144 mUsage |= wgpu::BufferUsage::Storage;
145 }
Corentin Wallezf07e3bd2017-04-20 14:38:20 -0400146 }
147
Corentin Wallezb2ea1912020-07-07 11:21:51 +0000148 BufferBase::BufferBase(DeviceBase* device,
149 const BufferDescriptor* descriptor,
150 ObjectBase::ErrorTag tag)
151 : ObjectBase(device, tag), mSize(descriptor->size), mState(BufferState::Unmapped) {
152 if (descriptor->mappedAtCreation) {
153 mState = BufferState::MappedAtCreation;
Corentin Wallezf6e70442020-07-17 18:50:37 +0000154 mMapOffset = 0;
155 mMapSize = mSize;
Corentin Wallezb2ea1912020-07-07 11:21:51 +0000156 }
Corentin Walleza594f8f2019-02-13 13:09:18 +0000157 }
158
Corentin Wallezb1c19ee2017-06-09 10:51:29 -0400159 BufferBase::~BufferBase() {
Austin Eng446ab442019-02-13 21:26:48 +0000160 if (mState == BufferState::Mapped) {
Corentin Walleza594f8f2019-02-13 13:09:18 +0000161 ASSERT(!IsError());
Corentin Wallez53cdbea2020-09-28 14:14:44 +0000162 CallMapCallback(mLastMapID, WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
Corentin Wallezb1c19ee2017-06-09 10:51:29 -0400163 }
164 }
165
Corentin Walleza594f8f2019-02-13 13:09:18 +0000166 // static
Corentin Wallezb2ea1912020-07-07 11:21:51 +0000167 BufferBase* BufferBase::MakeError(DeviceBase* device, const BufferDescriptor* descriptor) {
168 return new ErrorBuffer(device, descriptor);
Austin Eng740995c2019-05-15 18:55:22 +0000169 }
170
Austin Eng9cd21f12019-06-05 18:35:31 +0000171 uint64_t BufferBase::GetSize() const {
Corentin Walleza594f8f2019-02-13 13:09:18 +0000172 ASSERT(!IsError());
Corentin Wallezfbecc282017-11-23 10:32:51 -0800173 return mSize;
Corentin Wallezf07e3bd2017-04-20 14:38:20 -0400174 }
175
Corentin Wallez1f6c8c42019-10-23 11:57:41 +0000176 wgpu::BufferUsage BufferBase::GetUsage() const {
Corentin Walleza594f8f2019-02-13 13:09:18 +0000177 ASSERT(!IsError());
Corentin Wallez62c77432018-08-22 15:08:02 +0200178 return mUsage;
Corentin Wallezf07e3bd2017-04-20 14:38:20 -0400179 }
180
Corentin Wallezb2ea1912020-07-07 11:21:51 +0000181 MaybeError BufferBase::MapAtCreation() {
Jiawei Shao1c4a7f72020-09-01 08:08:57 +0000182 DAWN_TRY(MapAtCreationInternal());
183
Jiawei Shao1c4a7f72020-09-01 08:08:57 +0000184 DeviceBase* device = GetDevice();
Jiawei Shao88001352020-09-02 00:21:08 +0000185 if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
Jiawei Shao1c4a7f72020-09-01 08:08:57 +0000186 memset(GetMappedRange(0, mSize), uint8_t(0u), mSize);
187 SetIsDataInitialized();
188 device->IncrementLazyClearCountForTesting();
189 } else if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
190 memset(GetMappedRange(0, mSize), uint8_t(1u), mSize);
191 }
192
193 return {};
194 }
195
196 MaybeError BufferBase::MapAtCreationInternal() {
Austin Eng740995c2019-05-15 18:55:22 +0000197 ASSERT(!IsError());
Corentin Wallez2008d152020-06-30 11:05:44 +0000198 mState = BufferState::MappedAtCreation;
Corentin Wallezf6e70442020-07-17 18:50:37 +0000199 mMapOffset = 0;
200 mMapSize = mSize;
Corentin Wallez2008d152020-06-30 11:05:44 +0000201
Corentin Wallez91904cd2020-06-30 12:21:54 +0000202 // 0-sized buffers are not supposed to be written to, Return back any non-null pointer.
203 // Handle 0-sized buffers first so we don't try to map them in the backend.
204 if (mSize == 0) {
Corentin Wallez91904cd2020-06-30 12:21:54 +0000205 return {};
206 }
207
Corentin Wallez13f46502020-06-11 11:07:05 +0000208 // Mappable buffers don't use a staging buffer and are just as if mapped through MapAsync.
Jiawei Shao1c4a7f72020-09-01 08:08:57 +0000209 if (IsCPUWritableAtCreation()) {
Corentin Wallezb2ea1912020-07-07 11:21:51 +0000210 DAWN_TRY(MapAtCreationImpl());
Jiawei Shao1c4a7f72020-09-01 08:08:57 +0000211 } else {
212 // If any of these fail, the buffer will be deleted and replaced with an
213 // error buffer.
214 // TODO(enga): Suballocate and reuse memory from a larger staging buffer so we don't
215 // create many small buffers.
216 DAWN_TRY_ASSIGN(mStagingBuffer, GetDevice()->CreateStagingBuffer(GetSize()));
Austin Eng740995c2019-05-15 18:55:22 +0000217 }
218
Austin Eng740995c2019-05-15 18:55:22 +0000219 return {};
220 }
221
Corentin Wallez47a33412020-06-02 09:24:39 +0000222 MaybeError BufferBase::ValidateCanUseOnQueueNow() const {
Corentin Walleza594f8f2019-02-13 13:09:18 +0000223 ASSERT(!IsError());
224
Austin Eng446ab442019-02-13 21:26:48 +0000225 switch (mState) {
226 case BufferState::Destroyed:
227 return DAWN_VALIDATION_ERROR("Destroyed buffer used in a submit");
228 case BufferState::Mapped:
Corentin Wallez13f46502020-06-11 11:07:05 +0000229 case BufferState::MappedAtCreation:
Austin Eng446ab442019-02-13 21:26:48 +0000230 return DAWN_VALIDATION_ERROR("Buffer used in a submit while mapped");
231 case BufferState::Unmapped:
232 return {};
Corentin Wallez679ff4e2018-11-07 10:02:43 +0000233 }
Corentin Wallez679ff4e2018-11-07 10:02:43 +0000234 }
235
Corentin Wallez53cdbea2020-09-28 14:14:44 +0000236 void BufferBase::CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
Corentin Wallez0d52f802020-07-14 12:30:14 +0000237 ASSERT(!IsError());
Corentin Wallez53cdbea2020-09-28 14:14:44 +0000238 if (mMapCallback != nullptr && mapID == mLastMapID) {
Corentin Wallez0d52f802020-07-14 12:30:14 +0000239 // Tag the callback as fired before firing it, otherwise it could fire a second time if
240 // for example buffer.Unmap() is called inside the application-provided callback.
241 WGPUBufferMapCallback callback = mMapCallback;
242 mMapCallback = nullptr;
243
244 if (GetDevice()->IsLost()) {
245 callback(WGPUBufferMapAsyncStatus_DeviceLost, mMapUserdata);
246 } else {
247 callback(status, mMapUserdata);
248 }
249 }
250 }
251
Corentin Wallez0d52f802020-07-14 12:30:14 +0000252 void BufferBase::MapAsync(wgpu::MapMode mode,
253 size_t offset,
254 size_t size,
255 WGPUBufferMapCallback callback,
256 void* userdata) {
257 // Handle the defaulting of size required by WebGPU, even if in webgpu_cpp.h it is not
258 // possible to default the function argument (because there is the callback later in the
259 // argument list)
260 if (size == 0 && offset < mSize) {
261 size = mSize - offset;
262 }
263
264 WGPUBufferMapAsyncStatus status;
265 if (GetDevice()->ConsumedError(ValidateMapAsync(mode, offset, size, &status))) {
266 if (callback) {
267 callback(status, userdata);
268 }
269 return;
270 }
271 ASSERT(!IsError());
272
Corentin Wallez53cdbea2020-09-28 14:14:44 +0000273 mLastMapID++;
Corentin Wallez0d52f802020-07-14 12:30:14 +0000274 mMapMode = mode;
275 mMapOffset = offset;
Corentin Wallezf6e70442020-07-17 18:50:37 +0000276 mMapSize = size;
Corentin Wallez0d52f802020-07-14 12:30:14 +0000277 mMapCallback = callback;
278 mMapUserdata = userdata;
279 mState = BufferState::Mapped;
280
281 if (GetDevice()->ConsumedError(MapAsyncImpl(mode, offset, size))) {
Corentin Wallez53cdbea2020-09-28 14:14:44 +0000282 CallMapCallback(mLastMapID, WGPUBufferMapAsyncStatus_DeviceLost);
Corentin Wallez0d52f802020-07-14 12:30:14 +0000283 return;
284 }
Natasha Lee51af1b42020-10-12 22:32:33 +0000285 std::unique_ptr<MapRequestTask> request =
286 std::make_unique<MapRequestTask>(this, mLastMapID);
287 GetDevice()->GetDefaultQueue()->TrackTask(std::move(request),
288 GetDevice()->GetPendingCommandSerial());
Corentin Wallezb1c19ee2017-06-09 10:51:29 -0400289 }
290
Corentin Wallezf6e70442020-07-17 18:50:37 +0000291 void* BufferBase::GetMappedRange(size_t offset, size_t size) {
292 return GetMappedRangeInternal(true, offset, size);
Corentin Wallez1325ab12020-06-30 11:51:14 +0000293 }
294
Corentin Wallezf6e70442020-07-17 18:50:37 +0000295 const void* BufferBase::GetConstMappedRange(size_t offset, size_t size) {
296 return GetMappedRangeInternal(false, offset, size);
Corentin Wallezdbf805f2020-07-06 18:08:10 +0000297 }
298
Corentin Wallezf6e70442020-07-17 18:50:37 +0000299 void* BufferBase::GetMappedRangeInternal(bool writable, size_t offset, size_t size) {
300 if (!CanGetMappedRange(writable, offset, size)) {
Corentin Wallez1325ab12020-06-30 11:51:14 +0000301 return nullptr;
302 }
Corentin Wallezdbf805f2020-07-06 18:08:10 +0000303
Corentin Wallez1325ab12020-06-30 11:51:14 +0000304 if (mStagingBuffer != nullptr) {
Corentin Wallezf6e70442020-07-17 18:50:37 +0000305 return static_cast<uint8_t*>(mStagingBuffer->GetMappedPointer()) + offset;
Corentin Wallez1325ab12020-06-30 11:51:14 +0000306 }
Corentin Wallezdbf805f2020-07-06 18:08:10 +0000307 if (mSize == 0) {
308 return reinterpret_cast<uint8_t*>(intptr_t(0xCAFED00D));
309 }
Austin Enge3fd0262021-01-05 07:40:48 +0000310 uint8_t* start = static_cast<uint8_t*>(GetMappedPointerImpl());
311 return start == nullptr ? nullptr : start + offset;
Corentin Wallez1325ab12020-06-30 11:51:14 +0000312 }
313
Austin Eng446ab442019-02-13 21:26:48 +0000314 void BufferBase::Destroy() {
Austin Eng9cd21f12019-06-05 18:35:31 +0000315 if (IsError()) {
316 // It is an error to call Destroy() on an ErrorBuffer, but we still need to reclaim the
317 // fake mapped staging data.
Corentin Wallezb2ea1912020-07-07 11:21:51 +0000318 static_cast<ErrorBuffer*>(this)->ClearMappedData();
319 mState = BufferState::Destroyed;
Austin Eng9cd21f12019-06-05 18:35:31 +0000320 }
Austin Eng446ab442019-02-13 21:26:48 +0000321 if (GetDevice()->ConsumedError(ValidateDestroy())) {
322 return;
323 }
324 ASSERT(!IsError());
325
326 if (mState == BufferState::Mapped) {
Jiawei Shaoed2b4652020-09-27 02:00:52 +0000327 UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
Corentin Wallez13f46502020-06-11 11:07:05 +0000328 } else if (mState == BufferState::MappedAtCreation) {
329 if (mStagingBuffer != nullptr) {
330 mStagingBuffer.reset();
Corentin Wallez2008d152020-06-30 11:05:44 +0000331 } else if (mSize != 0) {
Jiawei Shao1c4a7f72020-09-01 08:08:57 +0000332 ASSERT(IsCPUWritableAtCreation());
Jiawei Shaoed2b4652020-09-27 02:00:52 +0000333 UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
Austin Eng9cd21f12019-06-05 18:35:31 +0000334 }
Austin Eng446ab442019-02-13 21:26:48 +0000335 }
Corentin Wallez13f46502020-06-11 11:07:05 +0000336
Natasha Lee20b0c332019-04-01 19:49:04 +0000337 DestroyInternal();
Austin Eng446ab442019-02-13 21:26:48 +0000338 }
339
Austin Eng9cd21f12019-06-05 18:35:31 +0000340 MaybeError BufferBase::CopyFromStagingBuffer() {
341 ASSERT(mStagingBuffer);
Corentin Wallez45aed832020-06-05 15:44:03 +0000342 if (GetSize() == 0) {
343 return {};
344 }
345
Austin Eng9cd21f12019-06-05 18:35:31 +0000346 DAWN_TRY(GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0, GetSize()));
347
Bryan Bernhart450e2122019-09-18 22:06:41 +0000348 DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
Austin Eng9cd21f12019-06-05 18:35:31 +0000349 uploader->ReleaseStagingBuffer(std::move(mStagingBuffer));
350
351 return {};
352 }
353
Corentin Wallezb1c19ee2017-06-09 10:51:29 -0400354 void BufferBase::Unmap() {
Jiawei Shaoed2b4652020-09-27 02:00:52 +0000355 UnmapInternal(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback);
356 }
357
358 void BufferBase::UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus) {
Austin Eng740995c2019-05-15 18:55:22 +0000359 if (IsError()) {
360 // It is an error to call Unmap() on an ErrorBuffer, but we still need to reclaim the
361 // fake mapped staging data.
Corentin Wallezb2ea1912020-07-07 11:21:51 +0000362 static_cast<ErrorBuffer*>(this)->ClearMappedData();
363 mState = BufferState::Unmapped;
Austin Eng740995c2019-05-15 18:55:22 +0000364 }
Corentin Wallez9e4518b2018-10-15 12:54:30 +0000365 if (GetDevice()->ConsumedError(ValidateUnmap())) {
Corentin Wallezb1c19ee2017-06-09 10:51:29 -0400366 return;
367 }
Corentin Walleza594f8f2019-02-13 13:09:18 +0000368 ASSERT(!IsError());
Corentin Wallezb1c19ee2017-06-09 10:51:29 -0400369
Corentin Wallez13f46502020-06-11 11:07:05 +0000370 if (mState == BufferState::Mapped) {
Austin Eng9cd21f12019-06-05 18:35:31 +0000371 // A map request can only be called once, so this will fire only if the request wasn't
372 // completed before the Unmap.
373 // Callbacks are not fired if there is no callback registered, so this is correct for
Corentin Wallezf7123d72020-08-20 14:22:29 +0000374 // mappedAtCreation = true.
Corentin Wallez53cdbea2020-09-28 14:14:44 +0000375 CallMapCallback(mLastMapID, callbackStatus);
Austin Eng9cd21f12019-06-05 18:35:31 +0000376 UnmapImpl();
Corentin Wallez13f46502020-06-11 11:07:05 +0000377
Corentin Wallez2088cde2020-08-12 19:32:25 +0000378 mMapCallback = nullptr;
Corentin Wallez13f46502020-06-11 11:07:05 +0000379 mMapUserdata = 0;
380
381 } else if (mState == BufferState::MappedAtCreation) {
382 if (mStagingBuffer != nullptr) {
383 GetDevice()->ConsumedError(CopyFromStagingBuffer());
Corentin Wallez2008d152020-06-30 11:05:44 +0000384 } else if (mSize != 0) {
Jiawei Shao1c4a7f72020-09-01 08:08:57 +0000385 ASSERT(IsCPUWritableAtCreation());
Corentin Wallez2008d152020-06-30 11:05:44 +0000386 UnmapImpl();
Corentin Wallez13f46502020-06-11 11:07:05 +0000387 }
Austin Eng9cd21f12019-06-05 18:35:31 +0000388 }
Corentin Wallez13f46502020-06-11 11:07:05 +0000389
Austin Eng446ab442019-02-13 21:26:48 +0000390 mState = BufferState::Unmapped;
Corentin Wallezb1c19ee2017-06-09 10:51:29 -0400391 }
392
Natasha Lee74f50542020-01-28 22:18:58 +0000393 MaybeError BufferBase::ValidateMap(wgpu::BufferUsage requiredUsage,
394 WGPUBufferMapAsyncStatus* status) const {
395 *status = WGPUBufferMapAsyncStatus_DeviceLost;
Natasha Lee0ecc48e2020-01-15 19:02:13 +0000396 DAWN_TRY(GetDevice()->ValidateIsAlive());
Natasha Lee74f50542020-01-28 22:18:58 +0000397
398 *status = WGPUBufferMapAsyncStatus_Error;
Corentin Walleza594f8f2019-02-13 13:09:18 +0000399 DAWN_TRY(GetDevice()->ValidateObject(this));
400
Austin Eng9cd21f12019-06-05 18:35:31 +0000401 switch (mState) {
402 case BufferState::Mapped:
Corentin Wallez13f46502020-06-11 11:07:05 +0000403 case BufferState::MappedAtCreation:
Corentin Wallez0d52f802020-07-14 12:30:14 +0000404 return DAWN_VALIDATION_ERROR("Buffer is already mapped");
Austin Eng9cd21f12019-06-05 18:35:31 +0000405 case BufferState::Destroyed:
406 return DAWN_VALIDATION_ERROR("Buffer is destroyed");
407 case BufferState::Unmapped:
408 break;
Corentin Wallezf07e3bd2017-04-20 14:38:20 -0400409 }
Corentin Wallezf07e3bd2017-04-20 14:38:20 -0400410
Corentin Wallez62c77432018-08-22 15:08:02 +0200411 if (!(mUsage & requiredUsage)) {
Corentin Wallez6fee61c2018-09-10 16:17:24 +0200412 return DAWN_VALIDATION_ERROR("Buffer needs the correct map usage bit");
Corentin Wallezf07e3bd2017-04-20 14:38:20 -0400413 }
Corentin Wallezd8c068f2018-07-09 15:15:07 +0200414
Natasha Lee74f50542020-01-28 22:18:58 +0000415 *status = WGPUBufferMapAsyncStatus_Success;
Corentin Wallez79149582018-07-18 21:20:07 +0200416 return {};
417 }
418
Corentin Wallez0d52f802020-07-14 12:30:14 +0000419 MaybeError BufferBase::ValidateMapAsync(wgpu::MapMode mode,
420 size_t offset,
421 size_t size,
422 WGPUBufferMapAsyncStatus* status) const {
423 *status = WGPUBufferMapAsyncStatus_DeviceLost;
424 DAWN_TRY(GetDevice()->ValidateIsAlive());
425
426 *status = WGPUBufferMapAsyncStatus_Error;
427 DAWN_TRY(GetDevice()->ValidateObject(this));
428
Corentin Wallez41711342020-10-07 17:19:53 +0000429 if (offset % 8 != 0) {
430 return DAWN_VALIDATION_ERROR("offset must be a multiple of 8");
Corentin Wallez0d52f802020-07-14 12:30:14 +0000431 }
432
433 if (size % 4 != 0) {
434 return DAWN_VALIDATION_ERROR("size must be a multiple of 4");
435 }
436
437 if (uint64_t(offset) > mSize || uint64_t(size) > mSize - uint64_t(offset)) {
438 return DAWN_VALIDATION_ERROR("size + offset must fit in the buffer");
439 }
440
441 switch (mState) {
442 case BufferState::Mapped:
443 case BufferState::MappedAtCreation:
444 return DAWN_VALIDATION_ERROR("Buffer is already mapped");
445 case BufferState::Destroyed:
446 return DAWN_VALIDATION_ERROR("Buffer is destroyed");
447 case BufferState::Unmapped:
448 break;
449 }
450
451 bool isReadMode = mode & wgpu::MapMode::Read;
452 bool isWriteMode = mode & wgpu::MapMode::Write;
453 if (!(isReadMode ^ isWriteMode)) {
454 return DAWN_VALIDATION_ERROR("Exactly one of Read or Write mode must be set");
455 }
456
457 if (mode & wgpu::MapMode::Read) {
458 if (!(mUsage & wgpu::BufferUsage::MapRead)) {
459 return DAWN_VALIDATION_ERROR("The buffer must have the MapRead usage");
460 }
461 } else {
462 ASSERT(mode & wgpu::MapMode::Write);
463
464 if (!(mUsage & wgpu::BufferUsage::MapWrite)) {
465 return DAWN_VALIDATION_ERROR("The buffer must have the MapWrite usage");
466 }
467 }
468
469 *status = WGPUBufferMapAsyncStatus_Success;
470 return {};
471 }
472
Corentin Wallezf6e70442020-07-17 18:50:37 +0000473 bool BufferBase::CanGetMappedRange(bool writable, size_t offset, size_t size) const {
Corentin Wallez41711342020-10-07 17:19:53 +0000474 if (offset % 8 != 0 || size % 4 != 0) {
475 return false;
476 }
477
Corentin Wallezf6e70442020-07-17 18:50:37 +0000478 if (size > mMapSize || offset < mMapOffset) {
479 return false;
480 }
481
482 size_t offsetInMappedRange = offset - mMapOffset;
483 if (offsetInMappedRange > mMapSize - size) {
484 return false;
485 }
486
Corentin Wallezdbf805f2020-07-06 18:08:10 +0000487 // Note that:
488 //
489 // - We don't check that the device is alive because the application can ask for the
490 // mapped pointer before it knows, and even Dawn knows, that the device was lost, and
491 // still needs to work properly.
492 // - We don't check that the object is alive because we need to return mapped pointers
493 // for error buffers too.
Corentin Wallez1325ab12020-06-30 11:51:14 +0000494
495 switch (mState) {
496 // Writeable Buffer::GetMappedRange is always allowed when mapped at creation.
497 case BufferState::MappedAtCreation:
Corentin Wallezdbf805f2020-07-06 18:08:10 +0000498 return true;
Corentin Wallez1325ab12020-06-30 11:51:14 +0000499
500 case BufferState::Mapped:
Corentin Wallezce78ce22020-08-22 11:08:34 +0000501 ASSERT(bool(mMapMode & wgpu::MapMode::Read) ^
502 bool(mMapMode & wgpu::MapMode::Write));
503 return !writable || (mMapMode & wgpu::MapMode::Write);
Corentin Wallez1325ab12020-06-30 11:51:14 +0000504
505 case BufferState::Unmapped:
506 case BufferState::Destroyed:
Corentin Wallezdbf805f2020-07-06 18:08:10 +0000507 return false;
Corentin Wallez1325ab12020-06-30 11:51:14 +0000508 }
509 }
510
Corentin Wallez79149582018-07-18 21:20:07 +0200511 MaybeError BufferBase::ValidateUnmap() const {
Natasha Lee0ecc48e2020-01-15 19:02:13 +0000512 DAWN_TRY(GetDevice()->ValidateIsAlive());
Corentin Walleza594f8f2019-02-13 13:09:18 +0000513 DAWN_TRY(GetDevice()->ValidateObject(this));
514
Austin Eng446ab442019-02-13 21:26:48 +0000515 switch (mState) {
Austin Eng446ab442019-02-13 21:26:48 +0000516 case BufferState::Mapped:
Corentin Wallez13f46502020-06-11 11:07:05 +0000517 case BufferState::MappedAtCreation:
Corentin Wallezf7123d72020-08-20 14:22:29 +0000518 // A buffer may be in the Mapped state if it was created with mappedAtCreation
Austin Eng9cd21f12019-06-05 18:35:31 +0000519 // even if it did not have a mappable usage.
520 return {};
521 case BufferState::Unmapped:
Corentin Wallezc1cce0c2020-10-07 15:14:03 +0000522 return DAWN_VALIDATION_ERROR("Buffer is unmapped");
Austin Eng446ab442019-02-13 21:26:48 +0000523 case BufferState::Destroyed:
524 return DAWN_VALIDATION_ERROR("Buffer is destroyed");
525 }
526 }
Corentin Wallez79149582018-07-18 21:20:07 +0200527
Austin Eng446ab442019-02-13 21:26:48 +0000528 MaybeError BufferBase::ValidateDestroy() const {
529 DAWN_TRY(GetDevice()->ValidateObject(this));
Corentin Wallez79149582018-07-18 21:20:07 +0200530 return {};
Corentin Wallezf07e3bd2017-04-20 14:38:20 -0400531 }
532
Natasha Lee20b0c332019-04-01 19:49:04 +0000533 void BufferBase::DestroyInternal() {
534 if (mState != BufferState::Destroyed) {
535 DestroyImpl();
536 }
537 mState = BufferState::Destroyed;
538 }
539
Corentin Wallez53cdbea2020-09-28 14:14:44 +0000540 void BufferBase::OnMapRequestCompleted(MapRequestID mapID) {
541 CallMapCallback(mapID, WGPUBufferMapAsyncStatus_Success);
Natasha Lee949f1e42020-05-19 01:29:32 +0000542 }
543
Jiawei Shao80f927d2020-07-06 08:24:30 +0000544 bool BufferBase::IsDataInitialized() const {
545 return mIsDataInitialized;
546 }
547
548 void BufferBase::SetIsDataInitialized() {
549 mIsDataInitialized = true;
550 }
551
552 bool BufferBase::IsFullBufferRange(uint64_t offset, uint64_t size) const {
553 return offset == 0 && size == GetSize();
554 }
Corentin Wallez49a65d02018-07-24 16:45:45 +0200555} // namespace dawn_native