[dawn][node] Update node to use the newer C++ entry points.

Replaces the AsyncTask RAII helper with an AsyncContext<T> that also owns the
promise associated with each async operation, moves the device lost and
uncaptured error handlers onto the wgpu::DeviceDescriptor, and switches buffer
mapping, async pipeline creation, onSubmittedWorkDone, getCompilationInfo, and
popErrorScope over to the callback-mode based C++ entry points.

Bug: 42241461, 346289774, 346356636, 347509737
Change-Id: I145e73116b0abac2e2f99c5b44d80630ed14ae97
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/194521
Commit-Queue: Loko Kung <lokokung@google.com>
Reviewed-by: Austin Eng <enga@chromium.org>
diff --git a/src/dawn/node/binding/AsyncRunner.h b/src/dawn/node/binding/AsyncRunner.h
index eb72e8f..5da1e83 100644
--- a/src/dawn/node/binding/AsyncRunner.h
+++ b/src/dawn/node/binding/AsyncRunner.h
@@ -71,25 +71,33 @@
};
-// AsyncTask is a RAII helper for calling AsyncRunner::Begin() on construction, and
-// AsyncRunner::End() on destruction.
-class AsyncTask {
+// AsyncContext is a RAII helper that calls AsyncRunner::Begin() on construction and
+// AsyncRunner::End() on destruction. It also encapsulates the promise generally
+// associated with an async task.
+template <typename T>
+class AsyncContext {
public:
- inline AsyncTask(AsyncTask&&) = default;
-
// Constructor.
// Calls AsyncRunner::Begin()
- explicit inline AsyncTask(Napi::Env env, std::shared_ptr<AsyncRunner> runner)
- : runner_(std::move(runner)) {
+ inline AsyncContext(Napi::Env env,
+ const interop::PromiseInfo& info,
+ std::shared_ptr<AsyncRunner> runner)
+ : env(env), promise(env, info), runner_(runner) {
runner_->Begin(env);
}
// Destructor.
// Calls AsyncRunner::End()
- inline ~AsyncTask() { runner_->End(); }
+ inline ~AsyncContext() { runner_->End(); }
+
+ // Note these are public to allow access from the callbacks that take ownership of this
+ // context.
+ Napi::Env env;
+ interop::Promise<T> promise;
private:
- AsyncTask(const AsyncTask&) = delete;
- AsyncTask& operator=(const AsyncTask&) = delete;
+ AsyncContext(const AsyncContext&) = delete;
+ AsyncContext& operator=(const AsyncContext&) = delete;
+
std::shared_ptr<AsyncRunner> runner_;
};
diff --git a/src/dawn/node/binding/GPUAdapter.cpp b/src/dawn/node/binding/GPUAdapter.cpp
index a48f2fc..01b04af 100644
--- a/src/dawn/node/binding/GPUAdapter.cpp
+++ b/src/dawn/node/binding/GPUAdapter.cpp
@@ -132,6 +132,40 @@
return adapterProperties.compatibilityMode;
}
+namespace {
+// Returns a string representation of the wgpu::ErrorType
+const char* str(wgpu::ErrorType ty) {
+ switch (ty) {
+ case wgpu::ErrorType::NoError:
+ return "no error";
+ case wgpu::ErrorType::Validation:
+ return "validation";
+ case wgpu::ErrorType::OutOfMemory:
+ return "out of memory";
+ case wgpu::ErrorType::Internal:
+ return "internal";
+ case wgpu::ErrorType::DeviceLost:
+ return "device lost";
+ case wgpu::ErrorType::Unknown:
+ default:
+ return "unknown";
+ }
+}
+
+// There's something broken with Node when attempting to write more than 65536 bytes to cout.
+// Split the string up into writes of 4k chunks.
+// Likely related: https://github.com/nodejs/node/issues/12921
+void chunkedWrite(const char* msg) {
+ while (true) {
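+ // %.4096s writes at most 4096 bytes; printf returns the number of bytes written,
+ // which advances msg until the string is exhausted (n == 0) or an error occurs.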
+ auto n = printf("%.4096s", msg);
+ if (n <= 0) {
+ break;
+ }
+ msg += n;
+ }
+}
+} // namespace
+
interop::Promise<interop::Interface<interop::GPUDevice>> GPUAdapter::requestDevice(
Napi::Env env,
interop::GPUDeviceDescriptor descriptor) {
@@ -182,6 +216,37 @@
desc.requiredFeatures = requiredFeatures.data();
desc.requiredLimits = &limits;
+ // Set the device callbacks.
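+ // The device lost context is heap-allocated and owned by the callback below: with
+ // AllowSpontaneous the lost event can fire at any time, long after requestDevice() returns.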
+ using DeviceLostContext = AsyncContext<interop::Interface<interop::GPUDeviceLostInfo>>;
+ auto device_lost_ctx = new DeviceLostContext(env, PROMISE_INFO, async_);
+ auto device_lost_promise = device_lost_ctx->promise;
+ desc.SetDeviceLostCallback(
+ wgpu::CallbackMode::AllowSpontaneous,
+ [](const wgpu::Device&, wgpu::DeviceLostReason reason, const char* message,
+ DeviceLostContext* device_lost_ctx) {
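+ // Re-take ownership of the raw context pointer so it is destroyed (ending its
+ // AsyncRunner task) once this callback has run.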
+ std::unique_ptr<DeviceLostContext> ctx(device_lost_ctx);
+ auto r = interop::GPUDeviceLostReason::kDestroyed;
+ switch (reason) {
+ case wgpu::DeviceLostReason::Destroyed:
+ case wgpu::DeviceLostReason::InstanceDropped:
+ r = interop::GPUDeviceLostReason::kDestroyed;
+ break;
+ case wgpu::DeviceLostReason::FailedCreation:
+ case wgpu::DeviceLostReason::Unknown:
+ r = interop::GPUDeviceLostReason::kUnknown;
+ break;
+ }
+ if (ctx->promise.GetState() == interop::PromiseState::Pending) {
+ ctx->promise.Resolve(
+ interop::GPUDeviceLostInfo::Create<GPUDeviceLostInfo>(ctx->env, r, message));
+ }
+ },
+ device_lost_ctx);
+ desc.SetUncapturedErrorCallback([](const wgpu::Device&, ErrorType type, const char* message) {
+ printf("%s:\n", str(type));
+ chunkedWrite(message);
+ });
+
// Propagate enabled/disabled dawn features
TogglesLoader togglesLoader(flags_);
DawnTogglesDescriptor deviceTogglesDesc = togglesLoader.GetDescriptor();
@@ -193,9 +258,10 @@
return promise;
}
- auto gpu_device = std::make_unique<GPUDevice>(env, desc, wgpu_device, async_);
+ auto gpu_device =
+ std::make_unique<GPUDevice>(env, desc, wgpu_device, device_lost_promise, async_);
if (!valid_) {
- gpu_device->ForceLoss(wgpu::DeviceLostReason::FailedCreation,
+ gpu_device->ForceLoss(wgpu::DeviceLostReason::Unknown,
"Device was marked as lost due to a stale adapter.");
}
valid_ = false;
diff --git a/src/dawn/node/binding/GPUBuffer.cpp b/src/dawn/node/binding/GPUBuffer.cpp
index f5965a1..ec598b8 100644
--- a/src/dawn/node/binding/GPUBuffer.cpp
+++ b/src/dawn/node/binding/GPUBuffer.cpp
@@ -69,56 +69,42 @@
return promise;
}
- pending_map_.emplace(env, PROMISE_INFO);
uint64_t rangeSize = size.has_value() ? size.value().value : (desc_.size - offset);
- struct Context {
- Napi::Env env;
- GPUBuffer* self;
- AsyncTask task;
- interop::Promise<void> promise;
- };
- auto ctx = new Context{env, this, AsyncTask(env, async_), *pending_map_};
+ auto ctx = std::make_unique<AsyncContext<void>>(env, PROMISE_INFO, async_);
+ pending_map_.emplace(ctx->promise);
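+ // The callback takes ownership of the context via the move capture; when it is
+ // destroyed after the callback runs, AsyncRunner::End() is called for this task.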
buffer_.MapAsync(
- mode, offset, rangeSize,
- [](WGPUBufferMapAsyncStatus status, void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
-
+ mode, offset, rangeSize, wgpu::CallbackMode::AllowProcessEvents,
+ [ctx = std::move(ctx), this](wgpu::MapAsyncStatus status, char const*) {
// The promise may already have been resolved with an AbortError if there was an early
// destroy() or early unmap().
- if (c->promise.GetState() != interop::PromiseState::Pending) {
- assert(c->promise.GetState() == interop::PromiseState::Rejected);
+ if (ctx->promise.GetState() != interop::PromiseState::Pending) {
+ assert(ctx->promise.GetState() == interop::PromiseState::Rejected);
return;
}
switch (status) {
- case WGPUBufferMapAsyncStatus_Force32:
- UNREACHABLE("WGPUBufferMapAsyncStatus_Force32");
+ case wgpu::MapAsyncStatus::Success:
+ ctx->promise.Resolve();
+ mapped_ = true;
break;
- case WGPUBufferMapAsyncStatus_Success:
- c->promise.Resolve();
- c->self->mapped_ = true;
+ case wgpu::MapAsyncStatus::InstanceDropped:
+ case wgpu::MapAsyncStatus::Aborted:
+ async_->Reject(ctx->env, ctx->promise, Errors::AbortError(ctx->env));
break;
- case WGPUBufferMapAsyncStatus_DestroyedBeforeCallback:
- case WGPUBufferMapAsyncStatus_DeviceLost:
- case WGPUBufferMapAsyncStatus_InstanceDropped:
- case WGPUBufferMapAsyncStatus_MappingAlreadyPending:
- case WGPUBufferMapAsyncStatus_OffsetOutOfRange:
- case WGPUBufferMapAsyncStatus_SizeOutOfRange:
- case WGPUBufferMapAsyncStatus_Unknown:
- case WGPUBufferMapAsyncStatus_UnmappedBeforeCallback:
- case WGPUBufferMapAsyncStatus_ValidationError:
- c->self->async_->Reject(c->env, c->promise, Errors::OperationError(c->env));
+ case wgpu::MapAsyncStatus::Error:
+ case wgpu::MapAsyncStatus::Unknown:
+ default:
+ async_->Reject(ctx->env, ctx->promise, Errors::OperationError(ctx->env));
break;
}
// This captured promise is the currently pending mapping, reset it so we can start new
// mappings.
- assert(*c->self->pending_map_ == c->promise);
- c->self->pending_map_.reset();
- },
- ctx);
+ assert(*pending_map_ == ctx->promise);
+ pending_map_.reset();
+ });
return pending_map_.value();
}
diff --git a/src/dawn/node/binding/GPUDevice.cpp b/src/dawn/node/binding/GPUDevice.cpp
index 9776fc8..f0b2a6a 100644
--- a/src/dawn/node/binding/GPUDevice.cpp
+++ b/src/dawn/node/binding/GPUDevice.cpp
@@ -28,6 +28,7 @@
#include "src/dawn/node/binding/GPUDevice.h"
#include <memory>
+#include <type_traits>
#include <utility>
#include <vector>
@@ -71,24 +72,6 @@
}
}
-// Returns a string representation of the WGPUErrorType
-const char* str(WGPUErrorType ty) {
- switch (ty) {
- case WGPUErrorType_NoError:
- return "no error";
- case WGPUErrorType_Validation:
- return "validation";
- case WGPUErrorType_OutOfMemory:
- return "out of memory";
- case WGPUErrorType_Unknown:
- return "unknown";
- case WGPUErrorType_DeviceLost:
- return "device lost";
- default:
- return "unknown";
- }
-}
-
// There's something broken with Node when attempting to write more than 65536 bytes to cout.
// Split the string up into writes of 4k chunks.
// Likely related: https://github.com/nodejs/node/issues/12921
@@ -102,18 +85,6 @@
}
}
-class DeviceLostInfo : public interop::GPUDeviceLostInfo {
- public:
- DeviceLostInfo(interop::GPUDeviceLostReason reason, std::string message)
- : reason_(reason), message_(message) {}
- interop::GPUDeviceLostReason getReason(Napi::Env env) override { return reason_; }
- std::string getMessage(Napi::Env) override { return message_; }
-
- private:
- interop::GPUDeviceLostReason reason_;
- std::string message_;
-};
-
class OOMError : public interop::GPUOutOfMemoryError {
public:
explicit OOMError(std::string message) : message_(std::move(message)) {}
@@ -147,16 +118,31 @@
} // namespace
////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUDeviceLostInfo
+////////////////////////////////////////////////////////////////////////////////
+GPUDeviceLostInfo::GPUDeviceLostInfo(interop::GPUDeviceLostReason reason, std::string message)
+ : reason_(reason), message_(message) {}
+
+interop::GPUDeviceLostReason GPUDeviceLostInfo::getReason(Napi::Env env) {
+ return reason_;
+}
+
+std::string GPUDeviceLostInfo::getMessage(Napi::Env) {
+ return message_;
+}
+
+////////////////////////////////////////////////////////////////////////////////
// wgpu::bindings::GPUDevice
////////////////////////////////////////////////////////////////////////////////
GPUDevice::GPUDevice(Napi::Env env,
const wgpu::DeviceDescriptor& desc,
wgpu::Device device,
+ interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> lost_promise,
std::shared_ptr<AsyncRunner> async)
: env_(env),
device_(device),
async_(async),
- lost_promise_(env, PROMISE_INFO),
+ lost_promise_(lost_promise),
label_(desc.label ? desc.label : "") {
device_.SetLoggingCallback(
[](WGPULoggingType type, char const* message, void* userdata) {
@@ -164,38 +150,6 @@
chunkedWrite(message);
},
nullptr);
- device_.SetUncapturedErrorCallback(
- [](WGPUErrorType type, char const* message, void* userdata) {
- printf("%s:\n", str(type));
- chunkedWrite(message);
- },
- nullptr);
-
- device_.SetDeviceLostCallback(
- [](WGPUDeviceLostReason reason, char const* message, void* userdata) {
- auto r = interop::GPUDeviceLostReason::kDestroyed;
- switch (reason) {
- case WGPUDeviceLostReason_Force32:
- // This case never happens with wgpu::Device::SetDeviceCallback, and is specific to
- // wgpu::DeviceDescriptor::deviceLostCallback.
- case WGPUDeviceLostReason_FailedCreation:
- UNREACHABLE("WGPUDeviceLostReason_Force32|FailedAtCreation");
- break;
- case WGPUDeviceLostReason_Destroyed:
- case WGPUDeviceLostReason_InstanceDropped:
- r = interop::GPUDeviceLostReason::kDestroyed;
- break;
- case WGPUDeviceLostReason_Unknown:
- r = interop::GPUDeviceLostReason::kUnknown;
- break;
- }
- auto* self = static_cast<GPUDevice*>(userdata);
- if (self->lost_promise_.GetState() == interop::PromiseState::Pending) {
- self->lost_promise_.Resolve(
- interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(self->env_, r, message));
- }
- },
- this);
}
GPUDevice::~GPUDevice() {
@@ -212,7 +166,7 @@
void GPUDevice::ForceLoss(wgpu::DeviceLostReason reason, const char* message) {
if (lost_promise_.GetState() == interop::PromiseState::Pending) {
- lost_promise_.Resolve(interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(
+ lost_promise_.Resolve(interop::GPUDeviceLostInfo::Create<GPUDeviceLostInfo>(
env_, interop::GPUDeviceLostReason::kUnknown, message));
}
device_.ForceLoss(reason, message);
@@ -242,7 +196,7 @@
void GPUDevice::destroy(Napi::Env env) {
if (lost_promise_.GetState() == interop::PromiseState::Pending) {
- lost_promise_.Resolve(interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(
+ lost_promise_.Resolve(interop::GPUDeviceLostInfo::Create<GPUDeviceLostInfo>(
env_, interop::GPUDeviceLostReason::kDestroyed, "device was destroyed"));
}
device_.Destroy();
@@ -423,8 +377,6 @@
interop::Promise<interop::Interface<interop::GPUComputePipeline>>
GPUDevice::createComputePipelineAsync(Napi::Env env,
interop::GPUComputePipelineDescriptor descriptor) {
- using Promise = interop::Promise<interop::Interface<interop::GPUComputePipeline>>;
-
Converter conv(env, device_);
wgpu::ComputePipelineDescriptor desc{};
@@ -432,33 +384,24 @@
return {env, interop::kUnusedPromise};
}
- struct Context {
- Napi::Env env;
- Promise promise;
- AsyncTask task;
- std::string label;
- };
- auto ctx = new Context{env, Promise(env, PROMISE_INFO), AsyncTask(env, async_),
- desc.label ? desc.label : ""};
+ auto ctx = std::make_unique<AsyncContext<interop::Interface<interop::GPUComputePipeline>>>(
+ env, PROMISE_INFO, async_);
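+ // Copy the promise to return before the context is moved into the callback.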
auto promise = ctx->promise;
device_.CreateComputePipelineAsync(
- &desc,
- [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline, char const* message,
- void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
-
+ &desc, wgpu::CallbackMode::AllowProcessEvents,
+ [ctx = std::move(ctx), label = std::string(desc.label ? desc.label : "")](
+ wgpu::CreatePipelineAsyncStatus status, wgpu::ComputePipeline pipeline, char const*) {
switch (status) {
- case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
- c->promise.Resolve(interop::GPUComputePipeline::Create<GPUComputePipeline>(
- c->env, pipeline, c->label));
+ case wgpu::CreatePipelineAsyncStatus::Success:
+ ctx->promise.Resolve(interop::GPUComputePipeline::Create<GPUComputePipeline>(
+ ctx->env, pipeline, label));
break;
default:
- c->promise.Reject(Errors::GPUPipelineError(c->env));
+ ctx->promise.Reject(Errors::GPUPipelineError(ctx->env));
break;
}
- },
- ctx);
+ });
return promise;
}
@@ -466,8 +409,6 @@
interop::Promise<interop::Interface<interop::GPURenderPipeline>>
GPUDevice::createRenderPipelineAsync(Napi::Env env,
interop::GPURenderPipelineDescriptor descriptor) {
- using Promise = interop::Promise<interop::Interface<interop::GPURenderPipeline>>;
-
Converter conv(env, device_);
wgpu::RenderPipelineDescriptor desc{};
@@ -475,33 +416,24 @@
return {env, interop::kUnusedPromise};
}
- struct Context {
- Napi::Env env;
- Promise promise;
- AsyncTask task;
- std::string label;
- };
- auto ctx = new Context{env, Promise(env, PROMISE_INFO), AsyncTask(env, async_),
- desc.label ? desc.label : ""};
+ auto ctx = std::make_unique<AsyncContext<interop::Interface<interop::GPURenderPipeline>>>(
+ env, PROMISE_INFO, async_);
auto promise = ctx->promise;
device_.CreateRenderPipelineAsync(
- &desc,
- [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline pipeline, char const* message,
- void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
-
+ &desc, wgpu::CallbackMode::AllowProcessEvents,
+ [ctx = std::move(ctx), label = std::string(desc.label ? desc.label : "")](
+ wgpu::CreatePipelineAsyncStatus status, wgpu::RenderPipeline pipeline, char const*) {
switch (status) {
- case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
- c->promise.Resolve(interop::GPURenderPipeline::Create<GPURenderPipeline>(
- c->env, pipeline, c->label));
+ case wgpu::CreatePipelineAsyncStatus::Success:
+ ctx->promise.Resolve(interop::GPURenderPipeline::Create<GPURenderPipeline>(
+ ctx->env, pipeline, label));
break;
default:
- c->promise.Reject(Errors::GPUPipelineError(c->env));
+ ctx->promise.Reject(Errors::GPUPipelineError(ctx->env));
break;
}
- },
- ctx);
+ });
return promise;
}
@@ -576,51 +508,50 @@
interop::Promise<std::optional<interop::Interface<interop::GPUError>>> GPUDevice::popErrorScope(
Napi::Env env) {
- using Promise = interop::Promise<std::optional<interop::Interface<interop::GPUError>>>;
- struct Context {
- Napi::Env env;
- Promise promise;
- AsyncTask task;
- };
- auto* ctx = new Context{env, Promise(env, PROMISE_INFO), AsyncTask(env, async_)};
+ auto ctx = std::make_unique<AsyncContext<std::optional<interop::Interface<interop::GPUError>>>>(
+ env, PROMISE_INFO, async_);
auto promise = ctx->promise;
device_.PopErrorScope(
- [](WGPUErrorType type, char const* message, void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
- auto env = c->env;
+ wgpu::CallbackMode::AllowProcessEvents,
+ [ctx = std::move(ctx)](wgpu::PopErrorScopeStatus, wgpu::ErrorType type,
+ char const* message) {
+ auto env = ctx->env;
switch (type) {
- case WGPUErrorType::WGPUErrorType_NoError:
- c->promise.Resolve({});
+ case wgpu::ErrorType::NoError:
+ ctx->promise.Resolve({});
break;
- case WGPUErrorType::WGPUErrorType_OutOfMemory: {
+ case wgpu::ErrorType::OutOfMemory: {
interop::Interface<interop::GPUError> err{
interop::GPUOutOfMemoryError::Create<OOMError>(env, message)};
- c->promise.Resolve(err);
+ ctx->promise.Resolve(err);
break;
}
- case WGPUErrorType::WGPUErrorType_Validation: {
+ case wgpu::ErrorType::Validation: {
interop::Interface<interop::GPUError> err{
interop::GPUValidationError::Create<ValidationError>(env, message)};
- c->promise.Resolve(err);
+ ctx->promise.Resolve(err);
break;
}
- case WGPUErrorType::WGPUErrorType_Internal: {
+ case wgpu::ErrorType::Internal: {
interop::Interface<interop::GPUError> err{
interop::GPUInternalError::Create<InternalError>(env, message)};
- c->promise.Resolve(err);
+ ctx->promise.Resolve(err);
break;
}
- case WGPUErrorType::WGPUErrorType_Unknown:
- case WGPUErrorType::WGPUErrorType_DeviceLost:
- c->promise.Reject(Errors::OperationError(env, message));
+ case wgpu::ErrorType::Unknown:
+ case wgpu::ErrorType::DeviceLost:
+ ctx->promise.Reject(Errors::OperationError(env, message));
break;
default:
- c->promise.Reject("unhandled error type (" + std::to_string(type) + ")");
+ ctx->promise.Reject(
+ "unhandled error type (" +
+ std::to_string(
+ static_cast<std::underlying_type<wgpu::ErrorType>::type>(type)) +
+ ")");
break;
}
- },
- ctx);
+ });
return promise;
}
diff --git a/src/dawn/node/binding/GPUDevice.h b/src/dawn/node/binding/GPUDevice.h
index d39d489..7fbd5f3 100644
--- a/src/dawn/node/binding/GPUDevice.h
+++ b/src/dawn/node/binding/GPUDevice.h
@@ -37,12 +37,26 @@
#include "src/dawn/node/interop/WebGPU.h"
namespace wgpu::binding {
+// GPUDeviceLostInfo implements interop::GPUDeviceLostInfo, holding the lost reason and message.
+class GPUDeviceLostInfo final : public interop::GPUDeviceLostInfo {
+ public:
+ GPUDeviceLostInfo(interop::GPUDeviceLostReason reason, std::string message);
+
+ interop::GPUDeviceLostReason getReason(Napi::Env env) override;
+ std::string getMessage(Napi::Env) override;
+
+ private:
+ interop::GPUDeviceLostReason reason_;
+ std::string message_;
+};
+
// GPUDevice is an implementation of interop::GPUDevice that wraps a wgpu::Device.
class GPUDevice final : public interop::GPUDevice {
public:
GPUDevice(Napi::Env env,
const wgpu::DeviceDescriptor& desc,
wgpu::Device device,
+ interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> lost_promise,
std::shared_ptr<AsyncRunner> async);
~GPUDevice();
diff --git a/src/dawn/node/binding/GPUQueue.cpp b/src/dawn/node/binding/GPUQueue.cpp
index 382deff..d6f796a 100644
--- a/src/dawn/node/binding/GPUQueue.cpp
+++ b/src/dawn/node/binding/GPUQueue.cpp
@@ -60,24 +60,17 @@
}
interop::Promise<void> GPUQueue::onSubmittedWorkDone(Napi::Env env) {
- struct Context {
- Napi::Env env;
- interop::Promise<void> promise;
- AsyncTask task;
- };
- auto ctx = new Context{env, interop::Promise<void>(env, PROMISE_INFO), AsyncTask(env, async_)};
+ auto ctx = std::make_unique<AsyncContext<void>>(env, PROMISE_INFO, async_);
auto promise = ctx->promise;
- queue_.OnSubmittedWorkDone(
- [](WGPUQueueWorkDoneStatus status, void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
- if (status != WGPUQueueWorkDoneStatus::WGPUQueueWorkDoneStatus_Success) {
- Napi::Error::New(c->env, "onSubmittedWorkDone() failed")
- .ThrowAsJavaScriptException();
- }
- c->promise.Resolve();
- },
- ctx);
+ queue_.OnSubmittedWorkDone(wgpu::CallbackMode::AllowProcessEvents,
+ [ctx = std::move(ctx)](wgpu::QueueWorkDoneStatus status) {
+ if (status != wgpu::QueueWorkDoneStatus::Success) {
+ Napi::Error::New(ctx->env, "onSubmittedWorkDone() failed")
+ .ThrowAsJavaScriptException();
+ }
+ ctx->promise.Resolve();
+ });
return promise;
}
diff --git a/src/dawn/node/binding/GPUShaderModule.cpp b/src/dawn/node/binding/GPUShaderModule.cpp
index 2b34693..aef597a 100644
--- a/src/dawn/node/binding/GPUShaderModule.cpp
+++ b/src/dawn/node/binding/GPUShaderModule.cpp
@@ -89,32 +89,24 @@
}
};
- using Promise = interop::Promise<interop::Interface<interop::GPUCompilationInfo>>;
-
- struct Context {
- Napi::Env env;
- Promise promise;
- AsyncTask task;
- };
- auto ctx = new Context{env, Promise(env, PROMISE_INFO), AsyncTask(env, async_)};
+ auto ctx = std::make_unique<AsyncContext<interop::Interface<interop::GPUCompilationInfo>>>(
+ env, PROMISE_INFO, async_);
auto promise = ctx->promise;
shader_.GetCompilationInfo(
- [](WGPUCompilationInfoRequestStatus status, WGPUCompilationInfo const* compilationInfo,
- void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
-
+ wgpu::CallbackMode::AllowProcessEvents,
+ [ctx = std::move(ctx)](wgpu::CompilationInfoRequestStatus status,
+ wgpu::CompilationInfo const* compilationInfo) {
Messages messages(compilationInfo->messageCount);
for (uint32_t i = 0; i < compilationInfo->messageCount; i++) {
auto& msg = compilationInfo->messages[i];
messages[i] =
- interop::GPUCompilationMessage::Create<GPUCompilationMessage>(c->env, msg);
+ interop::GPUCompilationMessage::Create<GPUCompilationMessage>(ctx->env, msg);
}
- c->promise.Resolve(interop::GPUCompilationInfo::Create<GPUCompilationInfo>(
- c->env, c->env, std::move(messages)));
- },
- ctx);
+ ctx->promise.Resolve(interop::GPUCompilationInfo::Create<GPUCompilationInfo>(
+ ctx->env, ctx->env, std::move(messages)));
+ });
return promise;
}