Fix overflow in CommandAllocator
The allocator was essentially checking "currentPtr + commandSize <= endPtr",
and a large commandSize could make the pointer addition overflow, so the
comparison could succeed when it shouldn't have. This was caught through
flakiness of the LargeCommands allocator test.
Added tests that provoke an overflow in Allocate and check that nullptr is
returned.
BUG=
Change-Id: I8ae4dad5b33c9d2005027c4d45b110ee0c65dd9a
Reviewed-on: https://dawn-review.googlesource.com/c/2841
Commit-Queue: Corentin Wallez <cwallez@chromium.org>
Reviewed-by: Stephen White <senorblanco@chromium.org>
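
For context, a minimal standalone sketch of the problem and the fix follows
(this is not the Dawn code itself; the function names, parameters, and the
literal worst-case constant are illustrative only, assuming typical sizes for
uint32_t and the 8-byte maximum supported alignment):

    #include <cstddef>
    #include <cstdint>

    // Old-style check: forming currentPtr + commandSize can wrap around (and is
    // formally undefined behavior for an out-of-range pointer), so the comparison
    // may succeed even though the command does not fit in the block.
    bool FitsOldCheck(uint8_t* currentPtr, uint8_t* endPtr, size_t commandSize) {
        return currentPtr + commandSize <= endPtr;
    }

    // New-style check: never form an out-of-range pointer or an overflowed sum.
    // Compare against the space actually remaining in the block, reserving an
    // upper bound for the command ID and alignment bookkeeping.
    bool FitsNewCheck(uint8_t* currentPtr, uint8_t* endPtr, size_t commandSize) {
        const size_t kWorstCaseAdditionalSize = 20;  // illustrative stand-in
        size_t remainingSize = static_cast<size_t>(endPtr - currentPtr);
        return remainingSize >= kWorstCaseAdditionalSize &&
               remainingSize - kWorstCaseAdditionalSize >= commandSize;
    }

    // When a new block is needed, the requested size is commandSize plus the same
    // upper bound; an unsigned sum that wrapped around is necessarily <= commandSize,
    // which is how Allocate detects the overflow and returns nullptr.
    bool RequestedBlockSizeOverflows(size_t commandSize) {
        const size_t kWorstCaseAdditionalSize = 20;  // illustrative stand-in
        return commandSize + kWorstCaseAdditionalSize <= commandSize;
    }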
diff --git a/src/dawn_native/CommandAllocator.cpp b/src/dawn_native/CommandAllocator.cpp
index 3f90f6f..6a09c1c 100644
--- a/src/dawn_native/CommandAllocator.cpp
+++ b/src/dawn_native/CommandAllocator.cpp
@@ -177,32 +177,55 @@
// It should always be possible to allocate one id, for EndOfBlock tagging,
ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
- ASSERT(mCurrentPtr + sizeof(uint32_t) <= mEndPtr);
- uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
+ ASSERT(mEndPtr >= mCurrentPtr);
+ ASSERT(static_cast<size_t>(mEndPtr - mCurrentPtr) >= sizeof(uint32_t));
- uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
- uint8_t* nextPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));
+ // The memory after the ID will contain the following:
+ // - the current ID
+ // - padding to align the command, maximum kMaxSupportedAlignment
+ // - the command of size commandSize
+ // - padding to align the next ID, maximum alignof(uint32_t)
+ // - the next ID of size sizeof(uint32_t)
+ //
+ // To avoid checking for overflows at every step of the computations, we compute an upper
+ // bound of the space that will be needed in addition to the command data.
+ static constexpr size_t kWorstCaseAdditionalSize =
+ sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);
- // When there is not enough space, we signal the EndOfBlock, so that the iterator nows to
- // move to the next one. EndOfBlock on the last block means the end of the commands.
- if (nextPtr + sizeof(uint32_t) > mEndPtr) {
- // Even if we are not able to get another block, the list of commands will be
- // well-formed and iterable as this block will be that last one.
- *idAlloc = EndOfBlock;
+ // This can't overflow because by construction mCurrentPtr always has space for the next ID.
+ size_t remainingSize = static_cast<size_t>(mEndPtr - mCurrentPtr);
- // Make sure we have space for current allocation, plus end of block and alignment
- // padding for the first id.
- ASSERT(nextPtr > mCurrentPtr);
- if (!GetNewBlock(static_cast<size_t>(nextPtr - mCurrentPtr) + sizeof(uint32_t) +
- alignof(uint32_t))) {
- return nullptr;
- }
- return Allocate(commandId, commandSize, commandAlignment);
+ // The good case where we have enough space for the command data and the upper bound of
+ // the extra required space.
+ if ((remainingSize >= kWorstCaseAdditionalSize) &&
+ (remainingSize - kWorstCaseAdditionalSize >= commandSize)) {
+ uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
+ *idAlloc = commandId;
+
+ uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
+ mCurrentPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));
+
+ return commandAlloc;
}
- *idAlloc = commandId;
- mCurrentPtr = nextPtr;
- return commandAlloc;
+ // When there is not enough space, we signal the EndOfBlock, so that the iterator knows to
+ // move to the next one. EndOfBlock on the last block means the end of the commands.
+ uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
+ *idAlloc = EndOfBlock;
+
+ // We'll request a block that can contain at least the command ID, the command and an
+ // additional ID to contain the EndOfBlock tag.
+ size_t requestedBlockSize = commandSize + kWorstCaseAdditionalSize;
+
+ // The computation of the request could overflow.
+ if (DAWN_UNLIKELY(requestedBlockSize <= commandSize)) {
+ return nullptr;
+ }
+
+ if (DAWN_UNLIKELY(!GetNewBlock(requestedBlockSize))) {
+ return nullptr;
+ }
+ return Allocate(commandId, commandSize, commandAlignment);
}
uint8_t* CommandAllocator::AllocateData(size_t commandSize, size_t commandAlignment) {
@@ -215,7 +238,7 @@
std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));
uint8_t* block = reinterpret_cast<uint8_t*>(malloc(mLastAllocationSize));
- if (block == nullptr) {
+ if (DAWN_UNLIKELY(block == nullptr)) {
return false;
}
diff --git a/src/dawn_native/CommandAllocator.h b/src/dawn_native/CommandAllocator.h
index 06257c6..39446ce 100644
--- a/src/dawn_native/CommandAllocator.h
+++ b/src/dawn_native/CommandAllocator.h
@@ -34,9 +34,8 @@
//
// CommandIterator commands(allocator);
// CommandType type;
- // void* command;
- // while(commands.NextCommandId(&e)) {
- // switch(e) {
+ // while(commands.NextCommandId(&type)) {
+ // switch(type) {
// case CommandType::Draw:
// DrawCommand* draw = commands.NextCommand<DrawCommand>();
// // Do the draw
@@ -113,16 +112,22 @@
T* Allocate(E commandId) {
static_assert(sizeof(E) == sizeof(uint32_t), "");
static_assert(alignof(E) == alignof(uint32_t), "");
+ static_assert(alignof(T) <= kMaxSupportedAlignment, "");
return reinterpret_cast<T*>(
Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
}
template <typename T>
T* AllocateData(size_t count) {
+ static_assert(alignof(T) <= kMaxSupportedAlignment, "");
return reinterpret_cast<T*>(AllocateData(sizeof(T) * count, alignof(T)));
}
private:
+ // This is used for some internal computations and can be any power of two as long as code
+ // using the CommandAllocator passes the static_asserts.
+ static constexpr size_t kMaxSupportedAlignment = 8;
+
friend CommandIterator;
CommandBlocks&& AcquireBlocks();
@@ -134,7 +139,7 @@
size_t mLastAllocationSize = 2048;
// Pointers to the current range of allocation in the block. Guaranteed to allow for at
- // least one uint32_t is not nullptr, so that the special EndOfBlock command id can always
+ // least one uint32_t if not nullptr, so that the special EndOfBlock command id can always
// be written. Nullptr iff the blocks were moved out.
uint8_t* mCurrentPtr = nullptr;
uint8_t* mEndPtr = nullptr;
diff --git a/src/tests/unittests/CommandAllocatorTests.cpp b/src/tests/unittests/CommandAllocatorTests.cpp
index d870694..8e8b7a7 100644
--- a/src/tests/unittests/CommandAllocatorTests.cpp
+++ b/src/tests/unittests/CommandAllocatorTests.cpp
@@ -16,6 +16,8 @@
#include "dawn_native/CommandAllocator.h"
+#include <limits>
+
using namespace dawn_native;
// Definition of the command types used in the tests
@@ -362,3 +364,40 @@
iterator2.DataWasDestroyed();
}
}
+
+template <size_t A>
+struct alignas(A) AlignedStruct {
+ char dummy;
+};
+
+// Test for overflows in Allocate's computations, size 1 variant
+TEST(CommandAllocator, AllocationOverflow_1) {
+ CommandAllocator allocator;
+ AlignedStruct<1>* data =
+ allocator.AllocateData<AlignedStruct<1>>(std::numeric_limits<size_t>::max() / 1);
+ ASSERT_EQ(data, nullptr);
+}
+
+// Test for overflows in Allocate's computations, size 2 variant
+TEST(CommandAllocator, AllocationOverflow_2) {
+ CommandAllocator allocator;
+ AlignedStruct<2>* data =
+ allocator.AllocateData<AlignedStruct<2>>(std::numeric_limits<size_t>::max() / 2);
+ ASSERT_EQ(data, nullptr);
+}
+
+// Test for overflows in Allocate's computations, size 4 variant
+TEST(CommandAllocator, AllocationOverflow_4) {
+ CommandAllocator allocator;
+ AlignedStruct<4>* data =
+ allocator.AllocateData<AlignedStruct<4>>(std::numeric_limits<size_t>::max() / 4);
+ ASSERT_EQ(data, nullptr);
+}
+
+// Test for overflows in Allocate's computations, size 8 variant
+TEST(CommandAllocator, AllocationOverflow_8) {
+ CommandAllocator allocator;
+ AlignedStruct<8>* data =
+ allocator.AllocateData<AlignedStruct<8>>(std::numeric_limits<size_t>::max() / 8);
+ ASSERT_EQ(data, nullptr);
+}