[BRP] Un-rewrite raw_ptr used in MotionMark
This CL un-rewrites the |raw_ptr| members identified in tasak's investigation into MotionMark performance, which used his tool to count all raw_ptr calls.
https://docs.google.com/spreadsheets/d/1gZYdSd_rnmQVNS6rujxwNz0ujpYT2Ty3WEQ6GR8hwiI/edit
Bug: 335556942
Change-Id: Ibfb3ead3a991c31fb5231a1a544522ffb0a634ae
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/187760
Commit-Queue: Keishi Hattori <keishi@google.com>
Commit-Queue: Corentin Wallez <cwallez@chromium.org>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
diff --git a/src/dawn/common/LinkedList.h b/src/dawn/common/LinkedList.h
index 02ef59c..ff89d47 100644
--- a/src/dawn/common/LinkedList.h
+++ b/src/dawn/common/LinkedList.h
@@ -12,7 +12,7 @@
#define SRC_DAWN_COMMON_LINKEDLIST_H_
#include "dawn/common/Assert.h"
-#include "partition_alloc/pointers/raw_ptr.h"
+#include "partition_alloc/pointers/raw_ptr_exclusion.h"
namespace dawn {
@@ -172,8 +172,9 @@
private:
friend class LinkedList<T>;
- raw_ptr<LinkNode<T>> previous_;
- raw_ptr<LinkNode<T>> next_;
+ // RAW_PTR_EXCLUSION: Performance reasons (based on analysis of MotionMark).
+ RAW_PTR_EXCLUSION LinkNode<T>* previous_ = nullptr;
+ RAW_PTR_EXCLUSION LinkNode<T>* next_ = nullptr;
};
template <typename T>
@@ -234,8 +235,9 @@
LinkNode<T>* operator*() const { return current_; }
private:
- raw_ptr<LinkNode<T>> current_;
- raw_ptr<LinkNode<T>> next_;
+ // RAW_PTR_EXCLUSION: Performance reasons (based on analysis of MotionMark).
+ RAW_PTR_EXCLUSION LinkNode<T>* current_ = nullptr;
+ RAW_PTR_EXCLUSION LinkNode<T>* next_ = nullptr;
};
template <typename T>
diff --git a/src/dawn/common/MutexProtected.h b/src/dawn/common/MutexProtected.h
index 2de9b1e..c08e3a1 100644
--- a/src/dawn/common/MutexProtected.h
+++ b/src/dawn/common/MutexProtected.h
@@ -33,7 +33,7 @@
#include "dawn/common/Mutex.h"
#include "dawn/common/Ref.h"
-#include "partition_alloc/pointers/raw_ptr.h"
+#include "partition_alloc/pointers/raw_ptr_exclusion.h"
namespace dawn {
@@ -85,14 +85,15 @@
Guard& operator=(const Guard& other) = delete;
Guard& operator=(Guard&& other) = delete;
- auto* Get() const { return Traits::GetObj(mObj.get()); }
+ auto* Get() const { return Traits::GetObj(mObj); }
private:
using NonConstT = typename std::remove_const<T>::type;
friend class MutexProtected<NonConstT, Guard>;
typename Traits::LockType mLock;
- raw_ptr<T> mObj;
+ // RAW_PTR_EXCLUSION: Performance reasons (based on analysis of MotionMark).
+ RAW_PTR_EXCLUSION T* mObj = nullptr;
};
} // namespace detail
diff --git a/src/dawn/native/BindGroupTracker.h b/src/dawn/native/BindGroupTracker.h
index 2d43fa1..7b95af1 100644
--- a/src/dawn/native/BindGroupTracker.h
+++ b/src/dawn/native/BindGroupTracker.h
@@ -36,7 +36,7 @@
#include "dawn/native/BindGroup.h"
#include "dawn/native/Pipeline.h"
#include "dawn/native/PipelineLayout.h"
-#include "partition_alloc/pointers/raw_ptr.h"
+#include "partition_alloc/pointers/raw_ptr_exclusion.h"
namespace dawn::native {
@@ -126,8 +126,9 @@
// |mPipelineLayout| is the current pipeline layout set on the command buffer.
// |mLastAppliedPipelineLayout| is the last pipeline layout for which we applied changes
// to the bind group bindings.
- raw_ptr<PipelineLayoutBase> mPipelineLayout = nullptr;
- raw_ptr<PipelineLayoutBase> mLastAppliedPipelineLayout = nullptr;
+ // RAW_PTR_EXCLUSION: Performance reasons (based on analysis of MotionMark).
+ RAW_PTR_EXCLUSION PipelineLayoutBase* mPipelineLayout = nullptr;
+ RAW_PTR_EXCLUSION PipelineLayoutBase* mLastAppliedPipelineLayout = nullptr;
};
} // namespace dawn::native
diff --git a/src/dawn/native/CommandAllocator.cpp b/src/dawn/native/CommandAllocator.cpp
index 9b40585..f5d0cda 100644
--- a/src/dawn/native/CommandAllocator.cpp
+++ b/src/dawn/native/CommandAllocator.cpp
@@ -91,7 +91,7 @@
*commandId = detail::kEndOfBlock;
return false;
}
- mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block.get(), alignof(uint32_t));
+ mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block, alignof(uint32_t));
return NextCommandId(commandId);
}
@@ -106,7 +106,7 @@
mBlocks[0].size = sizeof(mEndOfBlock);
mBlocks[0].block = mCurrentPtr;
} else {
- mCurrentPtr = AlignPtr(mBlocks[0].block.get(), alignof(uint32_t));
+ mCurrentPtr = AlignPtr(mBlocks[0].block, alignof(uint32_t));
}
}
@@ -117,7 +117,7 @@
mCurrentPtr = reinterpret_cast<uint8_t*>(&mEndOfBlock);
for (BlockDef& block : mBlocks) {
- free(block.block.ExtractAsDangling());
+ free(block.block);
}
mBlocks.clear();
Reset();
@@ -173,7 +173,7 @@
void CommandAllocator::Reset() {
ResetPointers();
for (BlockDef& block : mBlocks) {
- free(block.block.ExtractAsDangling());
+ free(block.block);
}
mBlocks.clear();
mLastAllocationSize = kDefaultBaseAllocationSize;
@@ -186,8 +186,8 @@
CommandBlocks&& CommandAllocator::AcquireBlocks() {
DAWN_ASSERT(mCurrentPtr != nullptr && mEndPtr != nullptr);
DAWN_ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
- DAWN_ASSERT(mCurrentPtr.get() + sizeof(uint32_t) <= mEndPtr);
- *reinterpret_cast<uint32_t*>(mCurrentPtr.get()) = detail::kEndOfBlock;
+ DAWN_ASSERT(mCurrentPtr + sizeof(uint32_t) <= mEndPtr);
+ *reinterpret_cast<uint32_t*>(mCurrentPtr) = detail::kEndOfBlock;
mCurrentPtr = nullptr;
mEndPtr = nullptr;
@@ -199,7 +199,7 @@
size_t commandAlignment) {
// When there is not enough space, we signal the kEndOfBlock, so that the iterator knows
// to move to the next one. kEndOfBlock on the last block means the end of the commands.
- uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr.get());
+ uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
*idAlloc = detail::kEndOfBlock;
// We'll request a block that can contain at least the command ID, the command and an
diff --git a/src/dawn/native/CommandAllocator.h b/src/dawn/native/CommandAllocator.h
index 711c9c1..1d9ddf1 100644
--- a/src/dawn/native/CommandAllocator.h
+++ b/src/dawn/native/CommandAllocator.h
@@ -36,7 +36,7 @@
#include "dawn/common/Assert.h"
#include "dawn/common/Math.h"
#include "dawn/common/NonCopyable.h"
-#include "partition_alloc/pointers/raw_ptr.h"
+#include "partition_alloc/pointers/raw_ptr_exclusion.h"
namespace dawn::native {
@@ -71,7 +71,8 @@
// and CommandIterator
struct BlockDef {
size_t size;
- raw_ptr<uint8_t> block;
+ // RAW_PTR_EXCLUSION: Performance reasons (based on analysis of MotionMark).
+ RAW_PTR_EXCLUSION uint8_t* block = nullptr;
};
using CommandBlocks = std::vector<BlockDef>;
@@ -120,9 +121,9 @@
bool IsEmpty() const;
DAWN_FORCE_INLINE bool NextCommandId(uint32_t* commandId) {
- uint8_t* idPtr = AlignPtr(mCurrentPtr.get(), alignof(uint32_t));
+ uint8_t* idPtr = AlignPtr(mCurrentPtr, alignof(uint32_t));
DAWN_ASSERT(idPtr + sizeof(uint32_t) <=
- mBlocks[mCurrentBlock].block.get() + mBlocks[mCurrentBlock].size);
+ mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);
@@ -137,9 +138,9 @@
bool NextCommandIdInNewBlock(uint32_t* commandId);
DAWN_FORCE_INLINE void* NextCommand(size_t commandSize, size_t commandAlignment) {
- uint8_t* commandPtr = AlignPtr(mCurrentPtr.get(), commandAlignment);
+ uint8_t* commandPtr = AlignPtr(mCurrentPtr, commandAlignment);
DAWN_ASSERT(commandPtr + sizeof(commandSize) <=
- mBlocks[mCurrentBlock].block.get() + mBlocks[mCurrentBlock].size);
+ mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
mCurrentPtr = commandPtr + commandSize;
return commandPtr;
@@ -155,7 +156,8 @@
}
CommandBlocks mBlocks;
- raw_ptr<uint8_t> mCurrentPtr = nullptr;
+ // RAW_PTR_EXCLUSION: Performance reasons (based on analysis of MotionMark).
+ RAW_PTR_EXCLUSION uint8_t* mCurrentPtr = nullptr;
size_t mCurrentBlock = 0;
// Used to avoid a special case for empty iterators.
uint32_t mEndOfBlock = detail::kEndOfBlock;
@@ -245,11 +247,10 @@
// extra required space.
if ((remainingSize >= kWorstCaseAdditionalSize) &&
(remainingSize - kWorstCaseAdditionalSize >= commandSize)) {
- uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr.get());
+ uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
*idAlloc = commandId;
- uint8_t* commandAlloc =
- AlignPtr(mCurrentPtr.get() + sizeof(uint32_t), commandAlignment);
+ uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
mCurrentPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));
return commandAlloc;
@@ -278,8 +279,9 @@
// Pointers to the current range of allocation in the block. Guaranteed to allow for at
// least one uint32_t if not nullptr, so that the special kEndOfBlock command id can always
// be written. Nullptr iff the blocks were moved out.
- raw_ptr<uint8_t, AllowPtrArithmetic> mCurrentPtr = nullptr;
- raw_ptr<uint8_t, AllowPtrArithmetic> mEndPtr = nullptr;
+ // RAW_PTR_EXCLUSION: Performance reasons (based on analysis of MotionMark).
+ RAW_PTR_EXCLUSION uint8_t* mCurrentPtr = nullptr;
+ RAW_PTR_EXCLUSION uint8_t* mEndPtr = nullptr;
};
} // namespace dawn::native
diff --git a/src/dawn/native/CommandBufferStateTracker.cpp b/src/dawn/native/CommandBufferStateTracker.cpp
index 8f6ae91..07d6d7a 100644
--- a/src/dawn/native/CommandBufferStateTracker.cpp
+++ b/src/dawn/native/CommandBufferStateTracker.cpp
@@ -100,7 +100,7 @@
template <typename Return>
Return FindStorageBufferBindingAliasing(const PipelineLayoutBase* pipelineLayout,
- const PerBindGroup<raw_ptr<BindGroupBase>>& bindGroups,
+ const PerBindGroup<BindGroupBase*>& bindGroups,
const PerBindGroup<std::vector<uint32_t>>& dynamicOffsets) {
// If true, returns detailed validation error info. Otherwise simply returns if any binding
// aliasing is found.
diff --git a/src/dawn/native/CommandBufferStateTracker.h b/src/dawn/native/CommandBufferStateTracker.h
index 5f147dc..78232b4 100644
--- a/src/dawn/native/CommandBufferStateTracker.h
+++ b/src/dawn/native/CommandBufferStateTracker.h
@@ -36,7 +36,7 @@
#include "dawn/native/BindingInfo.h"
#include "dawn/native/Error.h"
#include "dawn/native/Forward.h"
-#include "partition_alloc/pointers/raw_ptr.h"
+#include "partition_alloc/pointers/raw_ptr_exclusion.h"
namespace dawn::native {
@@ -95,7 +95,8 @@
ValidationAspects mAspects;
- PerBindGroup<raw_ptr<BindGroupBase>> mBindgroups = {};
+ // RAW_PTR_EXCLUSION: Performance reasons (based on analysis of MotionMark).
+ RAW_PTR_EXCLUSION PerBindGroup<BindGroupBase*> mBindgroups = {};
PerBindGroup<std::vector<uint32_t>> mDynamicOffsets = {};
VertexBufferMask mVertexBuffersUsed;
@@ -106,9 +107,10 @@
uint64_t mIndexBufferSize = 0;
uint64_t mIndexBufferOffset = 0;
- raw_ptr<PipelineLayoutBase> mLastPipelineLayout = nullptr;
- raw_ptr<PipelineBase> mLastPipeline = nullptr;
- raw_ptr<const RequiredBufferSizes> mMinBufferSizes = nullptr;
+ // RAW_PTR_EXCLUSION: Performance reasons (based on analysis of MotionMark).
+ RAW_PTR_EXCLUSION PipelineLayoutBase* mLastPipelineLayout = nullptr;
+ RAW_PTR_EXCLUSION PipelineBase* mLastPipeline = nullptr;
+ RAW_PTR_EXCLUSION const RequiredBufferSizes* mMinBufferSizes = nullptr;
};
} // namespace dawn::native