Dawn change for half_moon
Change-Id: Ibef01e7d4003341e81025db50d15edc3102fa83e
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/13480
Reviewed-by: Zhenyao Mo <zmo@google.com>
Commit-Queue: Zhenyao Mo <zmo@google.com>
diff --git a/src/common/Math.cpp b/src/common/Math.cpp
index a8823e5..c7ff48b 100644
--- a/src/common/Math.cpp
+++ b/src/common/Math.cpp
@@ -74,40 +74,6 @@
#endif
}
-bool IsPowerOfTwo(uint64_t n) {
- ASSERT(n != 0);
- return (n & (n - 1)) == 0;
-}
-
-bool IsPtrAligned(const void* ptr, size_t alignment) {
- ASSERT(IsPowerOfTwo(alignment));
- ASSERT(alignment != 0);
- return (reinterpret_cast<size_t>(ptr) & (alignment - 1)) == 0;
-}
-
-void* AlignVoidPtr(void* ptr, size_t alignment) {
- ASSERT(IsPowerOfTwo(alignment));
- ASSERT(alignment != 0);
- return reinterpret_cast<void*>((reinterpret_cast<size_t>(ptr) + (alignment - 1)) &
- ~(alignment - 1));
-}
-
-bool IsAligned(uint32_t value, size_t alignment) {
- ASSERT(alignment <= UINT32_MAX);
- ASSERT(IsPowerOfTwo(alignment));
- ASSERT(alignment != 0);
- uint32_t alignment32 = static_cast<uint32_t>(alignment);
- return (value & (alignment32 - 1)) == 0;
-}
-
-uint32_t Align(uint32_t value, size_t alignment) {
- ASSERT(alignment <= UINT32_MAX);
- ASSERT(IsPowerOfTwo(alignment));
- ASSERT(alignment != 0);
- uint32_t alignment32 = static_cast<uint32_t>(alignment);
- return (value + (alignment32 - 1)) & ~(alignment32 - 1);
-}
-
uint16_t Float32ToFloat16(float fp32) {
uint32_t fp32i = BitCast<uint32_t>(fp32);
uint32_t sign16 = (fp32i & 0x80000000) >> 16;
diff --git a/src/common/Math.h b/src/common/Math.h
index ac40dd9..b9e0b9f 100644
--- a/src/common/Math.h
+++ b/src/common/Math.h
@@ -22,17 +22,55 @@
#include <limits>
#include <type_traits>
+#include "common/Assert.h"
+
// The following are not valid for 0
uint32_t ScanForward(uint32_t bits);
uint32_t Log2(uint32_t value);
uint32_t Log2(uint64_t value);
-bool IsPowerOfTwo(uint64_t n);
uint64_t NextPowerOfTwo(uint64_t n);
-bool IsPtrAligned(const void* ptr, size_t alignment);
-void* AlignVoidPtr(void* ptr, size_t alignment);
-bool IsAligned(uint32_t value, size_t alignment);
-uint32_t Align(uint32_t value, size_t alignment);
+
+// bool IsPowerOfTwo(size_t n);
+
+inline bool IsPowerOfTwo(uint64_t n) {
+ ASSERT(n != 0);
+ return (n & (n - 1)) == 0;
+}
+
+inline bool IsPtrAligned(const void* ptr, size_t alignment) {
+ ASSERT(IsPowerOfTwo(alignment));
+ ASSERT(alignment != 0);
+ return (reinterpret_cast<size_t>(ptr) & (alignment - 1)) == 0;
+}
+
+inline void* AlignVoidPtr(void* ptr, size_t alignment) {
+ ASSERT(IsPowerOfTwo(alignment));
+ ASSERT(alignment != 0);
+ return reinterpret_cast<void*>((reinterpret_cast<size_t>(ptr) + (alignment - 1)) &
+ ~(alignment - 1));
+}
+
+inline bool IsAligned(uint32_t value, size_t alignment) {
+ ASSERT(alignment <= UINT32_MAX);
+ ASSERT(IsPowerOfTwo(alignment));
+ ASSERT(alignment != 0);
+ uint32_t alignment32 = static_cast<uint32_t>(alignment);
+ return (value & (alignment32 - 1)) == 0;
+}
+
+inline uint32_t Align(uint32_t value, size_t alignment) {
+ ASSERT(alignment <= UINT32_MAX);
+ ASSERT(IsPowerOfTwo(alignment));
+ ASSERT(alignment != 0);
+ uint32_t alignment32 = static_cast<uint32_t>(alignment);
+ return (value + (alignment32 - 1)) & ~(alignment32 - 1);
+}
+
+// bool IsPtrAligned(const void* ptr, size_t alignment);
+// void* AlignVoidPtr(void* ptr, size_t alignment);
+// bool IsAligned(uint32_t value, size_t alignment);
+// uint32_t Align(uint32_t value, size_t alignment);
template <typename T>
T* AlignPtr(T* ptr, size_t alignment) {
diff --git a/src/dawn_native/BindGroup.cpp b/src/dawn_native/BindGroup.cpp
index 9e85be1..ff9d695 100644
--- a/src/dawn_native/BindGroup.cpp
+++ b/src/dawn_native/BindGroup.cpp
@@ -17,10 +17,8 @@
#include "common/Assert.h"
#include "common/Math.h"
#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/Buffer.h"
+
#include "dawn_native/Device.h"
-#include "dawn_native/Sampler.h"
-#include "dawn_native/Texture.h"
namespace dawn_native {
@@ -221,37 +219,4 @@
return new BindGroupBase(device, ObjectBase::kError);
}
- BindGroupLayoutBase* BindGroupBase::GetLayout() {
- ASSERT(!IsError());
- return mLayout.Get();
- }
-
- BufferBinding BindGroupBase::GetBindingAsBufferBinding(size_t binding) {
- ASSERT(!IsError());
- ASSERT(binding < kMaxBindingsPerGroup);
- ASSERT(mLayout->GetBindingInfo().mask[binding]);
- ASSERT(mLayout->GetBindingInfo().types[binding] == wgpu::BindingType::UniformBuffer ||
- mLayout->GetBindingInfo().types[binding] == wgpu::BindingType::StorageBuffer ||
- mLayout->GetBindingInfo().types[binding] ==
- wgpu::BindingType::ReadonlyStorageBuffer);
- BufferBase* buffer = static_cast<BufferBase*>(mBindings[binding].Get());
- return {buffer, mOffsets[binding], mSizes[binding]};
- }
-
- SamplerBase* BindGroupBase::GetBindingAsSampler(size_t binding) {
- ASSERT(!IsError());
- ASSERT(binding < kMaxBindingsPerGroup);
- ASSERT(mLayout->GetBindingInfo().mask[binding]);
- ASSERT(mLayout->GetBindingInfo().types[binding] == wgpu::BindingType::Sampler);
- return static_cast<SamplerBase*>(mBindings[binding].Get());
- }
-
- TextureViewBase* BindGroupBase::GetBindingAsTextureView(size_t binding) {
- ASSERT(!IsError());
- ASSERT(binding < kMaxBindingsPerGroup);
- ASSERT(mLayout->GetBindingInfo().mask[binding]);
- ASSERT(mLayout->GetBindingInfo().types[binding] == wgpu::BindingType::SampledTexture);
- return static_cast<TextureViewBase*>(mBindings[binding].Get());
- }
-
} // namespace dawn_native
diff --git a/src/dawn_native/BindGroup.h b/src/dawn_native/BindGroup.h
index fae804d..9fb603a 100644
--- a/src/dawn_native/BindGroup.h
+++ b/src/dawn_native/BindGroup.h
@@ -21,6 +21,10 @@
#include "dawn_native/Forward.h"
#include "dawn_native/ObjectBase.h"
+#include "dawn_native/Buffer.h"
+#include "dawn_native/Sampler.h"
+#include "dawn_native/Texture.h"
+
#include "dawn_native/dawn_platform.h"
#include <array>
@@ -44,10 +48,36 @@
static BindGroupBase* MakeError(DeviceBase* device);
- BindGroupLayoutBase* GetLayout();
- BufferBinding GetBindingAsBufferBinding(size_t binding);
- SamplerBase* GetBindingAsSampler(size_t binding);
- TextureViewBase* GetBindingAsTextureView(size_t binding);
+ inline BindGroupLayoutBase* GetLayout() {
+ ASSERT(!IsError());
+ return mLayout.Get();
+ }
+
+ inline BufferBinding GetBindingAsBufferBinding(size_t binding) {
+ ASSERT(!IsError());
+ ASSERT(binding < kMaxBindingsPerGroup);
+ ASSERT(mLayout->GetBindingInfo().mask[binding]);
+ ASSERT(mLayout->GetBindingInfo().types[binding] == wgpu::BindingType::UniformBuffer ||
+ mLayout->GetBindingInfo().types[binding] == wgpu::BindingType::StorageBuffer);
+ BufferBase* buffer = static_cast<BufferBase*>(mBindings[binding].Get());
+ return {buffer, mOffsets[binding], mSizes[binding]};
+ }
+
+ inline SamplerBase* GetBindingAsSampler(size_t binding) {
+ ASSERT(!IsError());
+ ASSERT(binding < kMaxBindingsPerGroup);
+ ASSERT(mLayout->GetBindingInfo().mask[binding]);
+ ASSERT(mLayout->GetBindingInfo().types[binding] == wgpu::BindingType::Sampler);
+ return static_cast<SamplerBase*>(mBindings[binding].Get());
+ }
+
+ inline TextureViewBase* GetBindingAsTextureView(size_t binding) {
+ ASSERT(!IsError());
+ ASSERT(binding < kMaxBindingsPerGroup);
+ ASSERT(mLayout->GetBindingInfo().mask[binding]);
+ ASSERT(mLayout->GetBindingInfo().types[binding] == wgpu::BindingType::SampledTexture);
+ return static_cast<TextureViewBase*>(mBindings[binding].Get());
+ }
private:
BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag);
diff --git a/src/dawn_native/BindGroupLayout.cpp b/src/dawn_native/BindGroupLayout.cpp
index 3d0ecd2..209463d 100644
--- a/src/dawn_native/BindGroupLayout.cpp
+++ b/src/dawn_native/BindGroupLayout.cpp
@@ -198,16 +198,5 @@
return a->mBindingInfo == b->mBindingInfo;
}
- uint32_t BindGroupLayoutBase::GetDynamicBufferCount() const {
- return mDynamicStorageBufferCount + mDynamicUniformBufferCount;
- }
-
- uint32_t BindGroupLayoutBase::GetDynamicUniformBufferCount() const {
- return mDynamicUniformBufferCount;
- }
-
- uint32_t BindGroupLayoutBase::GetDynamicStorageBufferCount() const {
- return mDynamicStorageBufferCount;
- }
} // namespace dawn_native
diff --git a/src/dawn_native/BindGroupLayout.h b/src/dawn_native/BindGroupLayout.h
index 4c0dd7a..5d68d40 100644
--- a/src/dawn_native/BindGroupLayout.h
+++ b/src/dawn_native/BindGroupLayout.h
@@ -56,9 +56,15 @@
bool operator()(const BindGroupLayoutBase* a, const BindGroupLayoutBase* b) const;
};
- uint32_t GetDynamicBufferCount() const;
- uint32_t GetDynamicUniformBufferCount() const;
- uint32_t GetDynamicStorageBufferCount() const;
+ inline uint32_t GetDynamicBufferCount() const {
+ return mDynamicStorageBufferCount + mDynamicUniformBufferCount;
+ }
+ inline uint32_t GetDynamicUniformBufferCount() const {
+ return mDynamicUniformBufferCount;
+ }
+ inline uint32_t GetDynamicStorageBufferCount() const {
+ return mDynamicStorageBufferCount;
+ }
private:
BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
diff --git a/src/dawn_native/Buffer.cpp b/src/dawn_native/Buffer.cpp
index a44e3c2..e522ece 100644
--- a/src/dawn_native/Buffer.cpp
+++ b/src/dawn_native/Buffer.cpp
@@ -147,15 +147,6 @@
return ErrorBuffer::MakeMapped(device, size, mappedPointer);
}
- uint64_t BufferBase::GetSize() const {
- ASSERT(!IsError());
- return mSize;
- }
-
- wgpu::BufferUsage BufferBase::GetUsage() const {
- ASSERT(!IsError());
- return mUsage;
- }
MaybeError BufferBase::MapAtCreation(uint8_t** mappedPointer) {
ASSERT(!IsError());
diff --git a/src/dawn_native/Buffer.h b/src/dawn_native/Buffer.h
index 054e555..9c4100a 100644
--- a/src/dawn_native/Buffer.h
+++ b/src/dawn_native/Buffer.h
@@ -54,8 +54,15 @@
uint64_t size,
uint8_t** mappedPointer);
- uint64_t GetSize() const;
- wgpu::BufferUsage GetUsage() const;
+ inline uint64_t GetSize() const {
+ ASSERT(!IsError());
+ return mSize;
+ }
+
+ inline wgpu::BufferUsage GetUsage() const {
+ ASSERT(!IsError());
+ return mUsage;
+ }
MaybeError MapAtCreation(uint8_t** mappedPointer);
diff --git a/src/dawn_native/CommandAllocator.cpp b/src/dawn_native/CommandAllocator.cpp
index 990c1c5..94d9b18 100644
--- a/src/dawn_native/CommandAllocator.cpp
+++ b/src/dawn_native/CommandAllocator.cpp
@@ -23,9 +23,6 @@
namespace dawn_native {
- constexpr uint32_t EndOfBlock = UINT_MAX; // std::numeric_limits<uint32_t>::max();
- constexpr uint32_t AdditionalData = UINT_MAX - 1; // std::numeric_limits<uint32_t>::max() - 1;
-
// TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator
CommandIterator::CommandIterator() : mEndOfBlock(EndOfBlock) {
@@ -93,50 +90,6 @@
mDataWasDestroyed = true;
}
- bool CommandIterator::IsEmpty() const {
- return mBlocks[0].block == reinterpret_cast<const uint8_t*>(&mEndOfBlock);
- }
-
- bool CommandIterator::NextCommandId(uint32_t* commandId) {
- uint8_t* idPtr = AlignPtr(mCurrentPtr, alignof(uint32_t));
- ASSERT(idPtr + sizeof(uint32_t) <=
- mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
-
- uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);
-
- if (id == EndOfBlock) {
- mCurrentBlock++;
- if (mCurrentBlock >= mBlocks.size()) {
- Reset();
- *commandId = EndOfBlock;
- return false;
- }
- mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block, alignof(uint32_t));
- return NextCommandId(commandId);
- }
-
- mCurrentPtr = idPtr + sizeof(uint32_t);
- *commandId = id;
- return true;
- }
-
- void* CommandIterator::NextCommand(size_t commandSize, size_t commandAlignment) {
- uint8_t* commandPtr = AlignPtr(mCurrentPtr, commandAlignment);
- ASSERT(commandPtr + sizeof(commandSize) <=
- mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
-
- mCurrentPtr = commandPtr + commandSize;
- return commandPtr;
- }
-
- void* CommandIterator::NextData(size_t dataSize, size_t dataAlignment) {
- uint32_t id;
- bool hasId = NextCommandId(&id);
- ASSERT(hasId);
- ASSERT(id == AdditionalData);
-
- return NextCommand(dataSize, dataAlignment);
- }
// Potential TODO(cwallez@chromium.org):
// - Host the size and pointer to next block in the block itself to avoid having an allocation
@@ -168,46 +121,12 @@
return std::move(mBlocks);
}
- uint8_t* CommandAllocator::Allocate(uint32_t commandId,
- size_t commandSize,
- size_t commandAlignment) {
- ASSERT(mCurrentPtr != nullptr);
- ASSERT(mEndPtr != nullptr);
- ASSERT(commandId != EndOfBlock);
-
- // It should always be possible to allocate one id, for EndOfBlock tagging,
- ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
- ASSERT(mEndPtr >= mCurrentPtr);
- ASSERT(static_cast<size_t>(mEndPtr - mCurrentPtr) >= sizeof(uint32_t));
-
- // The memory after the ID will contain the following:
- // - the current ID
- // - padding to align the command, maximum kMaxSupportedAlignment
- // - the command of size commandSize
- // - padding to align the next ID, maximum alignof(uint32_t)
- // - the next ID of size sizeof(uint32_t)
- //
- // To avoid checking for overflows at every step of the computations we compute an upper
- // bound of the space that will be needed in addition to the command data.
+ uint8_t* CommandAllocator::AllocateAtEnd(uint32_t commandId,
+ size_t commandSize,
+ size_t commandAlignment) {
static constexpr size_t kWorstCaseAdditionalSize =
sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);
- // This can't overflow because by construction mCurrentPtr always has space for the next ID.
- size_t remainingSize = static_cast<size_t>(mEndPtr - mCurrentPtr);
-
- // The good case were we have enough space for the command data and upper bound of the
- // extra required space.
- if ((remainingSize >= kWorstCaseAdditionalSize) &&
- (remainingSize - kWorstCaseAdditionalSize >= commandSize)) {
- uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
- *idAlloc = commandId;
-
- uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
- mCurrentPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));
-
- return commandAlloc;
- }
-
// When there is not enough space, we signal the EndOfBlock, so that the iterator knows to
// move to the next one. EndOfBlock on the last block means the end of the commands.
uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
@@ -228,10 +147,6 @@
return Allocate(commandId, commandSize, commandAlignment);
}
- uint8_t* CommandAllocator::AllocateData(size_t commandSize, size_t commandAlignment) {
- return Allocate(AdditionalData, commandSize, commandAlignment);
- }
-
bool CommandAllocator::GetNewBlock(size_t minimumSize) {
// Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize).
mLastAllocationSize =
diff --git a/src/dawn_native/CommandAllocator.h b/src/dawn_native/CommandAllocator.h
index 504ba7a..1a16533 100644
--- a/src/dawn_native/CommandAllocator.h
+++ b/src/dawn_native/CommandAllocator.h
@@ -15,12 +15,18 @@
#ifndef DAWNNATIVE_COMMAND_ALLOCATOR_H_
#define DAWNNATIVE_COMMAND_ALLOCATOR_H_
+#include "common/Assert.h"
+#include "common/Math.h"
+
#include <cstddef>
#include <cstdint>
#include <vector>
namespace dawn_native {
+ constexpr uint32_t EndOfBlock = UINT_MAX; // std::numeric_limits<uint32_t>::max();
+ constexpr uint32_t AdditionalData = UINT_MAX - 1; // std::numeric_limits<uint32_t>::max() - 1;
+
// Allocation for command buffers should be fast. To avoid doing an allocation per command
// or to avoid copying commands when reallocing, we use a linear allocator in a growing set
// of large memory blocks. We also use this to have the format to be (u32 commandId, command),
@@ -89,11 +95,42 @@
void DataWasDestroyed();
private:
- bool IsEmpty() const;
-
- bool NextCommandId(uint32_t* commandId);
- void* NextCommand(size_t commandSize, size_t commandAlignment);
- void* NextData(size_t dataSize, size_t dataAlignment);
+ inline bool IsEmpty() const {
+ return mBlocks[0].block == reinterpret_cast<const uint8_t*>(&mEndOfBlock);
+ }
+ inline bool NextCommandId(uint32_t* commandId) {
+ uint8_t* idPtr = AlignPtr(mCurrentPtr, alignof(uint32_t));
+ ASSERT(idPtr + sizeof(uint32_t) <=
+ mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
+ uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);
+ if (DAWN_LIKELY(id != EndOfBlock)) {
+ mCurrentPtr = idPtr + sizeof(uint32_t);
+ *commandId = id;
+ return true;
+ }
+ mCurrentBlock++;
+ if (mCurrentBlock >= mBlocks.size()) {
+ Reset();
+ *commandId = EndOfBlock;
+ return false;
+ }
+ mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block, alignof(uint32_t));
+ return NextCommandId(commandId);
+ }
+ inline void* NextCommand(size_t commandSize, size_t commandAlignment) {
+ uint8_t* commandPtr = AlignPtr(mCurrentPtr, commandAlignment);
+ ASSERT(commandPtr + sizeof(commandSize) <=
+ mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
+ mCurrentPtr = commandPtr + commandSize;
+ return commandPtr;
+ }
+ inline void* NextData(size_t dataSize, size_t dataAlignment) {
+ uint32_t id;
+ bool hasId = NextCommandId(&id);
+ ASSERT(hasId);
+ ASSERT(id == AdditionalData);
+ return NextCommand(dataSize, dataAlignment);
+ }
CommandBlocks mBlocks;
uint8_t* mCurrentPtr = nullptr;
@@ -143,8 +180,45 @@
friend CommandIterator;
CommandBlocks&& AcquireBlocks();
- uint8_t* Allocate(uint32_t commandId, size_t commandSize, size_t commandAlignment);
- uint8_t* AllocateData(size_t dataSize, size_t dataAlignment);
+ inline uint8_t* Allocate(uint32_t commandId, size_t commandSize, size_t commandAlignment) {
+ ASSERT(mCurrentPtr != nullptr);
+ ASSERT(mEndPtr != nullptr);
+ ASSERT(commandId != EndOfBlock);
+ // It should always be possible to allocate one id, for EndOfBlock tagging,
+ ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
+ ASSERT(mEndPtr >= mCurrentPtr);
+ ASSERT(static_cast<size_t>(mEndPtr - mCurrentPtr) >= sizeof(uint32_t));
+ // The memory after the ID will contain the following:
+ // - the current ID
+ // - padding to align the command, maximum kMaxSupportedAlignment
+ // - the command of size commandSize
+ // - padding to align the next ID, maximum alignof(uint32_t)
+ // - the next ID of size sizeof(uint32_t)
+ //
+ // To avoid checking for overflows at every step of the computations we compute an upper
+ // bound of the space that will be needed in addition to the command data.
+ static constexpr size_t kWorstCaseAdditionalSize =
+ sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);
+ // This can't overflow because by construction mCurrentPtr always has space for the next
+ // ID.
+ size_t remainingSize = static_cast<size_t>(mEndPtr - mCurrentPtr);
+        // The good case where we have enough space for the command data and upper bound of the
+ // extra required space.
+ if (DAWN_LIKELY((remainingSize >= kWorstCaseAdditionalSize) &&
+ (remainingSize - kWorstCaseAdditionalSize >= commandSize))) {
+ uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
+ *idAlloc = commandId;
+ uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
+ mCurrentPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));
+ return commandAlloc;
+ }
+ return AllocateAtEnd(commandId, commandSize, commandAlignment);
+ }
+ uint8_t* AllocateAtEnd(uint32_t commandId, size_t commandSize, size_t commandAlignment);
+ inline uint8_t* AllocateData(size_t commandSize, size_t commandAlignment) {
+ return Allocate(AdditionalData, commandSize, commandAlignment);
+ }
+
bool GetNewBlock(size_t minimumSize);
CommandBlocks mBlocks;
diff --git a/src/dawn_native/Device.cpp b/src/dawn_native/Device.cpp
index bbb83bd..0cfe0de 100644
--- a/src/dawn_native/Device.cpp
+++ b/src/dawn_native/Device.cpp
@@ -81,7 +81,6 @@
}
DeviceBase::~DeviceBase() {
- // Devices must explicitly free the uploader
ASSERT(mDynamicUploader == nullptr);
ASSERT(mDeferredCreateBufferMappedAsyncResults.empty());
@@ -141,21 +140,6 @@
return mCurrentErrorScope.Get();
}
- MaybeError DeviceBase::ValidateObject(const ObjectBase* object) const {
- ASSERT(object != nullptr);
- if (DAWN_UNLIKELY(object->GetDevice() != this)) {
- return DAWN_VALIDATION_ERROR("Object from a different device.");
- }
- if (DAWN_UNLIKELY(object->IsError())) {
- return DAWN_VALIDATION_ERROR("Object is an error.");
- }
- return {};
- }
-
- AdapterBase* DeviceBase::GetAdapter() const {
- return mAdapter;
- }
-
dawn_platform::Platform* DeviceBase::GetPlatform() const {
return GetAdapter()->GetInstance()->GetPlatform();
}
diff --git a/src/dawn_native/Device.h b/src/dawn_native/Device.h
index af6ca98..6c3462c 100644
--- a/src/dawn_native/Device.h
+++ b/src/dawn_native/Device.h
@@ -66,9 +66,19 @@
return false;
}
- MaybeError ValidateObject(const ObjectBase* object) const;
+ inline AdapterBase* GetAdapter() const {
+ return mAdapter;
+ }
+ inline MaybeError ValidateObject(const ObjectBase* object) const {
+ if (DAWN_UNLIKELY(object->GetDevice() != this)) {
+ return DAWN_VALIDATION_ERROR("Object from a different device.");
+ }
+ if (DAWN_UNLIKELY(object->IsError())) {
+ return DAWN_VALIDATION_ERROR("Object is an error.");
+ }
+ return {};
+ }
- AdapterBase* GetAdapter() const;
dawn_platform::Platform* GetPlatform() const;
ErrorScopeTracker* GetErrorScopeTracker() const;
diff --git a/src/dawn_native/ObjectBase.cpp b/src/dawn_native/ObjectBase.cpp
index 3e40af4..7a0ff92 100644
--- a/src/dawn_native/ObjectBase.cpp
+++ b/src/dawn_native/ObjectBase.cpp
@@ -25,12 +25,5 @@
ObjectBase::~ObjectBase() {
}
- DeviceBase* ObjectBase::GetDevice() const {
- return mDevice;
- }
-
- bool ObjectBase::IsError() const {
- return mIsError;
- }
} // namespace dawn_native
diff --git a/src/dawn_native/ObjectBase.h b/src/dawn_native/ObjectBase.h
index 02dd7ec..aaa0894 100644
--- a/src/dawn_native/ObjectBase.h
+++ b/src/dawn_native/ObjectBase.h
@@ -30,8 +30,12 @@
ObjectBase(DeviceBase* device, ErrorTag tag);
virtual ~ObjectBase();
- DeviceBase* GetDevice() const;
- bool IsError() const;
+ inline DeviceBase* GetDevice() const {
+ return mDevice;
+ }
+ inline bool IsError() const {
+ return mIsError;
+ }
private:
DeviceBase* mDevice;
diff --git a/src/dawn_native/RefCounted.cpp b/src/dawn_native/RefCounted.cpp
index 5ec8050..28033c5 100644
--- a/src/dawn_native/RefCounted.cpp
+++ b/src/dawn_native/RefCounted.cpp
@@ -24,22 +24,4 @@
RefCounted::~RefCounted() {
}
- uint64_t RefCounted::GetRefCount() const {
- return mRefCount;
- }
-
- void RefCounted::Reference() {
- ASSERT(mRefCount != 0);
- mRefCount++;
- }
-
- void RefCounted::Release() {
- ASSERT(mRefCount != 0);
-
- mRefCount--;
- if (mRefCount == 0) {
- delete this;
- }
- }
-
} // namespace dawn_native
diff --git a/src/dawn_native/RefCounted.h b/src/dawn_native/RefCounted.h
index 89b0666..4751504 100644
--- a/src/dawn_native/RefCounted.h
+++ b/src/dawn_native/RefCounted.h
@@ -18,6 +18,8 @@
#include <atomic>
#include <cstdint>
+#include "common/Assert.h"
+
namespace dawn_native {
class RefCounted {
@@ -25,11 +27,20 @@
RefCounted();
virtual ~RefCounted();
- uint64_t GetRefCount() const;
-
- // Dawn API
- void Reference();
- void Release();
+ inline uint64_t GetRefCount() const {
+ return mRefCount;
+ }
+ inline void Reference() {
+ ASSERT(mRefCount != 0);
+ mRefCount++;
+ }
+ inline void Release() {
+ ASSERT(mRefCount != 0);
+ mRefCount--;
+ if (mRefCount == 0) {
+ delete this;
+ }
+ }
protected:
std::atomic_uint64_t mRefCount = {1};
diff --git a/src/dawn_native/vulkan/DeviceVk.cpp b/src/dawn_native/vulkan/DeviceVk.cpp
index 7e00edd..af4db1d 100644
--- a/src/dawn_native/vulkan/DeviceVk.cpp
+++ b/src/dawn_native/vulkan/DeviceVk.cpp
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <stdio.h>
+
#include "dawn_native/vulkan/DeviceVk.h"
#include "common/Platform.h"
@@ -630,14 +632,35 @@
return {};
}
+ MaybeError Device::ImportImageWaitFDs(Texture* texture,
+ std::vector<int> waitHandles) {
+ //fprintf(stderr, "ImportImageWaitFDs %p\n", texture);
+
+ std::vector<VkSemaphore> waitSemaphores;
+ waitSemaphores.reserve(waitHandles.size());
+ for (const ExternalSemaphoreHandle& handle : waitHandles) {
+ VkSemaphore semaphore = VK_NULL_HANDLE;
+ DAWN_TRY_ASSIGN(semaphore, mExternalSemaphoreService->ImportSemaphore(handle));
+ waitSemaphores.push_back(semaphore);
+ }
+
+ DAWN_TRY(texture->SyncFromExternal(std::move(waitSemaphores)));
+
+ return {};
+ }
+
MaybeError Device::SignalAndExportExternalTexture(Texture* texture,
- ExternalSemaphoreHandle* outHandle) {
+ ExternalSemaphoreHandle* outHandle,
+ bool destroy) {
+ //fprintf(stderr, "SignalAndExportExternalTexture %p destroy=%d\n", texture, destroy);
+
DAWN_TRY(ValidateObject(texture));
VkSemaphore outSignalSemaphore;
- DAWN_TRY(texture->SignalAndDestroy(&outSignalSemaphore));
+ outSignalSemaphore = mExternalSemaphoreService->CreateExportableSemaphore().AcquireSuccess();
+ DAWN_TRY(texture->Signal(&outSignalSemaphore, destroy));
- // This has to happen right after SignalAndDestroy, since the semaphore will be
+ // This has to happen right after Signal, since the semaphore will be
// deleted when the fenced deleter runs after the queue submission
DAWN_TRY_ASSIGN(*outHandle, mExternalSemaphoreService->ExportSemaphore(outSignalSemaphore));
@@ -645,6 +668,7 @@
}
TextureBase* Device::CreateTextureWrappingVulkanImage(
+ Texture* texture,
const ExternalImageDescriptor* descriptor,
ExternalMemoryHandle memoryHandle,
const std::vector<ExternalSemaphoreHandle>& waitHandles) {
@@ -659,30 +683,64 @@
return nullptr;
}
- VkSemaphore signalSemaphore = VK_NULL_HANDLE;
- VkDeviceMemory allocation = VK_NULL_HANDLE;
std::vector<VkSemaphore> waitSemaphores;
waitSemaphores.reserve(waitHandles.size());
- // Cleanup in case of a failure, the image creation doesn't acquire the external objects
- // if a failure happems.
- Texture* result = nullptr;
- if (ConsumedError(ImportExternalImage(descriptor, memoryHandle, waitHandles,
- &signalSemaphore, &allocation, &waitSemaphores)) ||
- ConsumedError(Texture::CreateFromExternal(this, descriptor, textureDescriptor,
- signalSemaphore, allocation, waitSemaphores),
- &result)) {
- // Clear the signal semaphore
- fn.DestroySemaphore(GetVkDevice(), signalSemaphore, nullptr);
-
- // Clear image memory
- fn.FreeMemory(GetVkDevice(), allocation, nullptr);
-
- // Clear any wait semaphores we were able to import
- for (VkSemaphore semaphore : waitSemaphores) {
- fn.DestroySemaphore(GetVkDevice(), semaphore, nullptr);
+ if (texture) {
+ //fprintf(stderr, "Sync existing texture %p\n", texture);
+ // Re-use existing texture
+ for (const ExternalSemaphoreHandle& handle : waitHandles) {
+ VkSemaphore semaphore = VK_NULL_HANDLE;
+ semaphore = mExternalSemaphoreService->ImportSemaphore(handle).AcquireSuccess();
+ waitSemaphores.push_back(semaphore);
}
- return nullptr;
+
+ //signalSemaphore = mExternalSemaphoreService->CreateExportableSemaphore().AcquireSuccess();
+ ConsumedError(texture->SyncFromExternal(waitSemaphores));
+ return texture;
+ } else {
+
+ //fprintf(stderr, "Create texture ");
+
+ VkSemaphore signalSemaphore = VK_NULL_HANDLE;
+ VkDeviceMemory allocation = VK_NULL_HANDLE;
+
+ // Cleanup in case of a failure, the image creation doesn't acquire the external objects
+        // if a failure happens.
+ Texture* result = nullptr;
+ if (ConsumedError(ImportExternalImage(descriptor, memoryHandle, waitHandles,
+ &signalSemaphore, &allocation, &waitSemaphores)) ||
+ ConsumedError(Texture::CreateFromExternal(this, descriptor, textureDescriptor,
+ signalSemaphore, allocation, waitSemaphores),
+ &result)) {
+ // Clear the signal semaphore
+ fn.DestroySemaphore(GetVkDevice(), signalSemaphore, nullptr);
+
+ // Clear image memory
+ fn.FreeMemory(GetVkDevice(), allocation, nullptr);
+
+ // Clear any wait semaphores we were able to import
+ for (VkSemaphore semaphore : waitSemaphores) {
+ fn.DestroySemaphore(GetVkDevice(), semaphore, nullptr);
+ }
+ return nullptr;
+ }
+ //fprintf(stderr, " %p\n", result);
+ return result;
+ }
+ }
+
+ TextureBase* Device::CreateTextureWrappingVulkanImage(
+ const ExternalImageDescriptor* descriptor,
+ VkImage image,
+ const std::vector<VkSemaphore>& waitSemaphores) {
+ Texture* result = nullptr;
+ const TextureDescriptor* textureDescriptor =
+ reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
+
+ if (ConsumedError(Texture::CreateFromVkImage(this, descriptor, textureDescriptor,
+ image, waitSemaphores),
+ &result)) {
}
return result;
diff --git a/src/dawn_native/vulkan/DeviceVk.h b/src/dawn_native/vulkan/DeviceVk.h
index 1b6d93c..1ce199d 100644
--- a/src/dawn_native/vulkan/DeviceVk.h
+++ b/src/dawn_native/vulkan/DeviceVk.h
@@ -69,12 +69,22 @@
MaybeError SubmitPendingCommands();
TextureBase* CreateTextureWrappingVulkanImage(
+ Texture* texture,
const ExternalImageDescriptor* descriptor,
ExternalMemoryHandle memoryHandle,
const std::vector<ExternalSemaphoreHandle>& waitHandles);
+ TextureBase* CreateTextureWrappingVulkanImage(
+ const ExternalImageDescriptor* descriptor,
+ VkImage image,
+ const std::vector<VkSemaphore>& waitSemaphores);
+
+ MaybeError ImportImageWaitFDs(Texture* texture,
+ std::vector<int> waitFDs);
+
MaybeError SignalAndExportExternalTexture(Texture* texture,
- ExternalSemaphoreHandle* outHandle);
+ ExternalSemaphoreHandle* outHandle,
+ bool destroy);
// Dawn API
CommandBufferBase* CreateCommandBuffer(CommandEncoderBase* encoder,
diff --git a/src/dawn_native/vulkan/TextureVk.cpp b/src/dawn_native/vulkan/TextureVk.cpp
index a4eb35b..0199995 100644
--- a/src/dawn_native/vulkan/TextureVk.cpp
+++ b/src/dawn_native/vulkan/TextureVk.cpp
@@ -419,6 +419,19 @@
return texture.release();
}
+ ResultOrError<Texture*> Texture::CreateFromVkImage(
+ Device* device,
+ const ExternalImageDescriptor* descriptor,
+ const TextureDescriptor* textureDescriptor,
+ VkImage image,
+ std::vector<VkSemaphore> waitSemaphores) {
+ std::unique_ptr<Texture> texture =
+ std::make_unique<Texture>(device, textureDescriptor, image);
+ DAWN_TRY(texture->InitializeFromVkImage(
+ descriptor, image, std::move((waitSemaphores))));
+ return texture.release();
+ }
+
MaybeError Texture::InitializeAsInternalTexture() {
Device* device = ToBackend(GetDevice());
@@ -483,17 +496,41 @@
: TextureBase(device, descriptor, TextureState::OwnedExternal), mHandle(nativeImage) {
}
+ MaybeError Texture::SyncFromExternal(std::vector<VkSemaphore> waitSemaphores) {
+ //fprintf(stderr, "SyncFromExternal this=%p state%d\n", this, (int)mExternalState);
+
+ //if (mExternalState != ExternalState::Released)
+ // return {};
+ //mExternalState = ExternalState::InternalOnly;
+ //mSignalSemaphore = signalSemaphore;
+ mWaitRequirements = std::move(waitSemaphores);
+
+ //mRecordingContext.waitSemaphores.insert(mRecordingContext.waitSemaphores.end(),
+ // waitSemaphores.begin(), waitSemaphores.end());
+
+ Device* device = ToBackend(GetDevice());
+ TransitionUsageNow(device->GetPendingRecordingContext(), wgpu::TextureUsage::Sampled);
+
+ return {};
+ }
// Internally managed, but imported from external handle
MaybeError Texture::InitializeFromExternal(const ExternalImageDescriptor* descriptor,
VkSemaphore signalSemaphore,
VkDeviceMemory externalMemoryAllocation,
std::vector<VkSemaphore> waitSemaphores) {
+ //fprintf(stderr, "InitializeFromExternal this=%p state=%d\n", this, (int)mExternalState);
mExternalState = ExternalState::PendingAcquire;
Device* device = ToBackend(GetDevice());
+ VkExternalMemoryImageCreateInfoKHR externalInfo = {};
+ externalInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR;
+ // Only works for Linux/X11.
+ // Use VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT on Windows.
+ externalInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
+
VkImageCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
- createInfo.pNext = nullptr;
+ createInfo.pNext = &externalInfo;
createInfo.flags = VK_IMAGE_CREATE_ALIAS_BIT_KHR;
createInfo.imageType = VulkanImageType(GetDimension());
createInfo.format = VulkanImageFormat(GetFormat().format);
@@ -536,40 +573,61 @@
// Success, acquire all the external objects.
mExternalAllocation = externalMemoryAllocation;
- mSignalSemaphore = signalSemaphore;
+ //mSignalSemaphore = signalSemaphore;
mWaitRequirements = std::move(waitSemaphores);
return {};
}
- MaybeError Texture::SignalAndDestroy(VkSemaphore* outSignalSemaphore) {
+ MaybeError Texture::InitializeFromVkImage(const ExternalImageDescriptor* descriptor,
+ VkImage image,
+ std::vector<VkSemaphore> waitSemaphores) {
+ // Don't clear imported texture if already cleared
+ if (descriptor->isCleared) {
+ SetIsSubresourceContentInitialized(true, 0, 1, 0, 1);
+ }
+
+ //mExternalState = ExternalState::Acquired;
+ mExternalState = ExternalState::InternalOnly;
+ mWaitRequirements = std::move(waitSemaphores);
+
+ return {};
+ }
+
+ MaybeError Texture::Signal(VkSemaphore* outSignalSemaphore, bool destroy) {
Device* device = ToBackend(GetDevice());
+ //fprintf(stderr, "Signal this=%p state=%d destroy=%d\n", this, (int)mExternalState, destroy);
+
if (mExternalState == ExternalState::Released) {
return DAWN_VALIDATION_ERROR("Can't export signal semaphore from signaled texture");
}
- if (mExternalAllocation == VK_NULL_HANDLE) {
- return DAWN_VALIDATION_ERROR(
- "Can't export signal semaphore from destroyed / non-external texture");
+ //if (mExternalAllocation == VK_NULL_HANDLE) {
+ // return DAWN_VALIDATION_ERROR(
+ // "Can't export signal semaphore from destroyed / non-external texture");
+ //}
+
+ ASSERT(*outSignalSemaphore != VK_NULL_HANDLE);
+
+ if (destroy) {
+ // Release the texture
+ //mExternalState = ExternalState::PendingRelease;
+ TransitionUsageNow(device->GetPendingRecordingContext(), wgpu::TextureUsage::None);
}
- ASSERT(mSignalSemaphore != VK_NULL_HANDLE);
-
- // Release the texture
- mExternalState = ExternalState::PendingRelease;
- TransitionUsageNow(device->GetPendingRecordingContext(), wgpu::TextureUsage::None);
-
// Queue submit to signal we are done with the texture
- device->GetPendingRecordingContext()->signalSemaphores.push_back(mSignalSemaphore);
+ device->GetPendingRecordingContext()->signalSemaphores.push_back(*outSignalSemaphore);
DAWN_TRY(device->SubmitPendingCommands());
// Write out the signal semaphore
- *outSignalSemaphore = mSignalSemaphore;
- mSignalSemaphore = VK_NULL_HANDLE;
+ //*outSignalSemaphore = mSignalSemaphore;
+ //mSignalSemaphore = VK_NULL_HANDLE;
- // Destroy the texture so it can't be used again
- DestroyInternal();
+ if (destroy) {
+ // Destroy the texture so it can't be used again
+ DestroyInternal();
+ }
return {};
}
diff --git a/src/dawn_native/vulkan/TextureVk.h b/src/dawn_native/vulkan/TextureVk.h
index ecb2cda..cf8e9aa 100644
--- a/src/dawn_native/vulkan/TextureVk.h
+++ b/src/dawn_native/vulkan/TextureVk.h
@@ -50,6 +50,13 @@
VkDeviceMemory externalMemoryAllocation,
std::vector<VkSemaphore> waitSemaphores);
+ static ResultOrError<Texture*> CreateFromVkImage(
+ Device* device,
+ const ExternalImageDescriptor* descriptor,
+ const TextureDescriptor* textureDescriptor,
+ VkImage image,
+ std::vector<VkSemaphore> waitSemaphores);
+
Texture(Device* device, const TextureDescriptor* descriptor, VkImage nativeImage);
~Texture();
@@ -67,7 +74,8 @@
uint32_t baseArrayLayer,
uint32_t layerCount);
- MaybeError SignalAndDestroy(VkSemaphore* outSignalSemaphore);
+ MaybeError Signal(VkSemaphore* outSignalSemaphore, bool destroy);
+ MaybeError SyncFromExternal(std::vector<VkSemaphore> waitSemaphores);
private:
using TextureBase::TextureBase;
@@ -76,6 +84,9 @@
VkSemaphore signalSemaphore,
VkDeviceMemory externalMemoryAllocation,
std::vector<VkSemaphore> waitSemaphores);
+ MaybeError InitializeFromVkImage(const ExternalImageDescriptor* descriptor,
+ VkImage image,
+ std::vector<VkSemaphore> waitSemaphores);
void DestroyImpl() override;
MaybeError ClearTexture(CommandRecordingContext* recordingContext,
diff --git a/src/dawn_native/vulkan/VulkanBackend.cpp b/src/dawn_native/vulkan/VulkanBackend.cpp
index e13b965..3b36f0f 100644
--- a/src/dawn_native/vulkan/VulkanBackend.cpp
+++ b/src/dawn_native/vulkan/VulkanBackend.cpp
@@ -33,6 +33,16 @@
return backendDevice->GetVkInstance();
}
+ VkDevice GetDevice(WGPUDevice device) {
+ Device* backendDevice = reinterpret_cast<Device*>(device);
+ return backendDevice->GetVkDevice();
+ }
+
+ VkQueue GetQueue(WGPUDevice device) {
+ Device* backendDevice = reinterpret_cast<Device*>(device);
+ return backendDevice->GetQueue();
+ }
+
DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device,
const char* pName) {
Device* backendDevice = reinterpret_cast<Device*>(device);
@@ -61,16 +71,26 @@
#ifdef DAWN_PLATFORM_LINUX
WGPUTexture WrapVulkanImageOpaqueFD(WGPUDevice cDevice,
+ WGPUTexture cTexture,
const ExternalImageDescriptorOpaqueFD* descriptor) {
Device* device = reinterpret_cast<Device*>(cDevice);
+ Texture* texture = reinterpret_cast<Texture*>(cTexture);
- TextureBase* texture = device->CreateTextureWrappingVulkanImage(
- descriptor, descriptor->memoryFD, descriptor->waitFDs);
+ TextureBase* textureOut = device->CreateTextureWrappingVulkanImage(
+ texture, descriptor, descriptor->memoryFD, descriptor->waitFDs);
- return reinterpret_cast<WGPUTexture>(texture);
+ return reinterpret_cast<WGPUTexture>(textureOut);
}
- int ExportSignalSemaphoreOpaqueFD(WGPUDevice cDevice, WGPUTexture cTexture) {
+ void ImportImageWaitFDs(WGPUDevice cDevice, WGPUTexture cTexture, std::vector<int> waitFDs) {
+ Device* device = reinterpret_cast<Device*>(cDevice);
+ Texture* texture = reinterpret_cast<Texture*>(cTexture);
+
+ MaybeError result = device->ImportImageWaitFDs(texture, waitFDs);
+ (void)result;
+ }
+
+ int ExportSignalSemaphoreOpaqueFD(WGPUDevice cDevice, WGPUTexture cTexture, bool destroy) {
Device* device = reinterpret_cast<Device*>(cDevice);
Texture* texture = reinterpret_cast<Texture*>(cTexture);
@@ -79,12 +99,19 @@
}
ExternalSemaphoreHandle outHandle;
- if (device->ConsumedError(device->SignalAndExportExternalTexture(texture, &outHandle))) {
+ if (device->ConsumedError(device->SignalAndExportExternalTexture(texture, &outHandle, destroy))) {
return -1;
}
return outHandle;
}
+
+ WGPUTexture WrapVulkanImageVkImage(WGPUDevice cDevice,
+ const ExternalImageDescriptorVkImage* descriptor) {
+ Device* device = reinterpret_cast<Device*>(cDevice);
+ TextureBase* textureOut = device->CreateTextureWrappingVulkanImage(descriptor, descriptor->image, descriptor->waitSemaphores);
+ return reinterpret_cast<WGPUTexture>(textureOut);
+ }
#endif
}} // namespace dawn_native::vulkan
diff --git a/src/dawn_native/vulkan/VulkanFunctions.cpp b/src/dawn_native/vulkan/VulkanFunctions.cpp
index d3bbe67..5c5073a 100644
--- a/src/dawn_native/vulkan/VulkanFunctions.cpp
+++ b/src/dawn_native/vulkan/VulkanFunctions.cpp
@@ -14,9 +14,30 @@
#include "dawn_native/vulkan/VulkanFunctions.h"
+#include <pthread.h>
+
#include "common/DynamicLib.h"
#include "dawn_native/vulkan/VulkanInfo.h"
+namespace {
+
+pthread_mutex_t g_vk_queue_submit_lock;
+
+PFN_vkQueueSubmit g_native_vk_queue_submit_fn = nullptr;
+
+VkResult vkQueueSubmit_ThreadSafe(
+ VkQueue queue,
+ uint32_t submitCount,
+ const VkSubmitInfo* pSubmits,
+ VkFence fence) {
+ pthread_mutex_lock(&g_vk_queue_submit_lock);
+ VkResult r = g_native_vk_queue_submit_fn(queue, submitCount, pSubmits, fence);
+ pthread_mutex_unlock(&g_vk_queue_submit_lock);
+ return r;
+}
+
+}  // namespace
+
namespace dawn_native { namespace vulkan {
#define GET_GLOBAL_PROC(name) \
@@ -229,6 +250,11 @@
GET_DEVICE_PROC(MergePipelineCaches);
GET_DEVICE_PROC(QueueBindSparse);
GET_DEVICE_PROC(QueueSubmit);
+ {
+ // Hack to make using single queue on multiple threads safe.
+ g_native_vk_queue_submit_fn = QueueSubmit;
+ QueueSubmit = &vkQueueSubmit_ThreadSafe;
+ }
GET_DEVICE_PROC(QueueWaitIdle);
GET_DEVICE_PROC(ResetCommandBuffer);
GET_DEVICE_PROC(ResetCommandPool);
diff --git a/src/include/dawn_native/VulkanBackend.h b/src/include/dawn_native/VulkanBackend.h
index 77a4c4f..74f5d4d 100644
--- a/src/include/dawn_native/VulkanBackend.h
+++ b/src/include/dawn_native/VulkanBackend.h
@@ -33,6 +33,8 @@
};
DAWN_NATIVE_EXPORT VkInstance GetInstance(WGPUDevice device);
+ DAWN_NATIVE_EXPORT VkDevice GetDevice(WGPUDevice device);
+ DAWN_NATIVE_EXPORT VkQueue GetQueue(WGPUDevice device);
DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device, const char* pName);
@@ -56,12 +58,28 @@
// On failure, returns a nullptr
DAWN_NATIVE_EXPORT WGPUTexture
WrapVulkanImageOpaqueFD(WGPUDevice cDevice,
+ WGPUTexture cTexture,
const ExternalImageDescriptorOpaqueFD* descriptor);
+ DAWN_NATIVE_EXPORT
+ void ImportImageWaitFDs(WGPUDevice cDevice, WGPUTexture cTexture, std::vector<int> waitFDs);
+
// Exports a signal semaphore from a wrapped texture. This must be called on wrapped
// textures before they are destroyed. On failure, returns -1
DAWN_NATIVE_EXPORT int ExportSignalSemaphoreOpaqueFD(WGPUDevice cDevice,
- WGPUTexture cTexture);
+ WGPUTexture cTexture,
+ bool destroy);
+
+ // Descriptor for importing an existing VkImage (plus its wait semaphores) directly
+ struct ExternalImageDescriptorVkImage : ExternalImageDescriptor {
+ VkImage image;
+ std::vector<VkSemaphore> waitSemaphores;
+ };
+
+ DAWN_NATIVE_EXPORT WGPUTexture
+ WrapVulkanImageVkImage(WGPUDevice cDevice,
+ const ExternalImageDescriptorVkImage* descriptor);
+
#endif // __linux__
}} // namespace dawn_native::vulkan