[compiler-rt] Replace deprecated aligned_storage with aligned byte array (#94171)

`std::aligned_storage` has been deprecated since C++23; see the proposal paper
[P1413R3](https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2021/p1413r3.pdf).

This replaces the usages of `std::aligned_storage` within compiler-rt
with an aligned `std::byte` array.
I will provide patches for other subcomponents as well.
This commit is contained in:
Marc Auberer
2024-06-08 12:29:01 +02:00
committed by GitHub
parent 25506f4864
commit cac7821438
11 changed files with 50 additions and 71 deletions

View File

@@ -310,16 +310,14 @@ TEST(FunctionCallTrieTest, MergeInto) {
TEST(FunctionCallTrieTest, PlacementNewOnAlignedStorage) {
profilingFlags()->setDefaults();
typename std::aligned_storage<sizeof(FunctionCallTrie::Allocators),
alignof(FunctionCallTrie::Allocators)>::type
AllocatorsStorage;
alignas(FunctionCallTrie::Allocators)
std::byte AllocatorsStorage[sizeof(FunctionCallTrie::Allocators)];
new (&AllocatorsStorage)
FunctionCallTrie::Allocators(FunctionCallTrie::InitAllocators());
auto *A =
reinterpret_cast<FunctionCallTrie::Allocators *>(&AllocatorsStorage);
typename std::aligned_storage<sizeof(FunctionCallTrie),
alignof(FunctionCallTrie)>::type FCTStorage;
alignas(FunctionCallTrie) std::byte FCTStorage[sizeof(FunctionCallTrie)];
new (&FCTStorage) FunctionCallTrie(*A);
auto *T = reinterpret_cast<FunctionCallTrie *>(&FCTStorage);

View File

@@ -38,8 +38,8 @@ struct ExpectedProfilingFileHeader {
void ValidateFileHeaderBlock(XRayBuffer B) {
ASSERT_NE(static_cast<const void *>(B.Data), nullptr);
ASSERT_EQ(B.Size, sizeof(ExpectedProfilingFileHeader));
typename std::aligned_storage<sizeof(ExpectedProfilingFileHeader)>::type
FileHeaderStorage;
alignas(ExpectedProfilingFileHeader)
std::byte FileHeaderStorage[sizeof(ExpectedProfilingFileHeader)];
ExpectedProfilingFileHeader ExpectedHeader;
std::memcpy(&FileHeaderStorage, B.Data, B.Size);
auto &FileHeader =

View File

@@ -226,13 +226,11 @@ TEST(SegmentedArrayTest, SimulateStackBehaviour) {
TEST(SegmentedArrayTest, PlacementNewOnAlignedStorage) {
using AllocatorType = typename Array<ShadowStackEntry>::AllocatorType;
typename std::aligned_storage<sizeof(AllocatorType),
alignof(AllocatorType)>::type AllocatorStorage;
alignas(AllocatorType) std::byte AllocatorStorage[sizeof(AllocatorType)];
new (&AllocatorStorage) AllocatorType(1 << 10);
auto *A = reinterpret_cast<AllocatorType *>(&AllocatorStorage);
typename std::aligned_storage<sizeof(Array<ShadowStackEntry>),
alignof(Array<ShadowStackEntry>)>::type
ArrayStorage;
alignas(Array<ShadowStackEntry>)
std::byte ArrayStorage[sizeof(Array<ShadowStackEntry>)];
new (&ArrayStorage) Array<ShadowStackEntry>(*A);
auto *Data = reinterpret_cast<Array<ShadowStackEntry> *>(&ArrayStorage);

View File

@@ -69,8 +69,7 @@ namespace __xray {
std::string serialize(BufferQueue &Buffers, int32_t Version) {
std::string Serialized;
std::aligned_storage<sizeof(XRayFileHeader), alignof(XRayFileHeader)>::type
HeaderStorage;
alignas(XRayFileHeader) std::byte HeaderStorage[sizeof(XRayFileHeader)];
auto *Header = reinterpret_cast<XRayFileHeader *>(&HeaderStorage);
new (Header) XRayFileHeader();
Header->Version = Version;

View File

@@ -55,17 +55,12 @@ struct XRAY_TLS_ALIGNAS(64) ThreadLocalData {
BufferQueue::Buffer Buffer{};
BufferQueue *BQ = nullptr;
using LogWriterStorage =
typename std::aligned_storage<sizeof(FDRLogWriter),
alignof(FDRLogWriter)>::type;
LogWriterStorage LWStorage;
using LogWriterStorage = std::byte[sizeof(FDRLogWriter)];
alignas(FDRLogWriter) LogWriterStorage LWStorage;
FDRLogWriter *Writer = nullptr;
using ControllerStorage =
typename std::aligned_storage<sizeof(FDRController<>),
alignof(FDRController<>)>::type;
ControllerStorage CStorage;
using ControllerStorage = std::byte[sizeof(FDRController<>)];
alignas(FDRController<>) ControllerStorage CStorage;
FDRController<> *Controller = nullptr;
};
@@ -78,7 +73,7 @@ static_assert(std::is_trivially_destructible<ThreadLocalData>::value,
static pthread_key_t Key;
// Global BufferQueue.
static std::aligned_storage<sizeof(BufferQueue)>::type BufferQueueStorage;
static std::byte BufferQueueStorage[sizeof(BufferQueue)];
static BufferQueue *BQ = nullptr;
// Global thresholds for function durations.
@@ -129,8 +124,8 @@ static_assert(alignof(ThreadLocalData) >= 64,
"ThreadLocalData must be cache line aligned.");
#endif
static ThreadLocalData &getThreadLocalData() {
thread_local typename std::aligned_storage<
sizeof(ThreadLocalData), alignof(ThreadLocalData)>::type TLDStorage{};
alignas(ThreadLocalData) thread_local std::byte
TLDStorage[sizeof(ThreadLocalData)];
if (pthread_getspecific(Key) == NULL) {
new (reinterpret_cast<ThreadLocalData *>(&TLDStorage)) ThreadLocalData{};

View File

@@ -139,18 +139,14 @@ public:
// Use hosted aligned storage members to allow for trivial move and init.
// This also allows us to sidestep the potential-failing allocation issue.
typename std::aligned_storage<sizeof(NodeAllocatorType),
alignof(NodeAllocatorType)>::type
NodeAllocatorStorage;
typename std::aligned_storage<sizeof(RootAllocatorType),
alignof(RootAllocatorType)>::type
RootAllocatorStorage;
typename std::aligned_storage<sizeof(ShadowStackAllocatorType),
alignof(ShadowStackAllocatorType)>::type
ShadowStackAllocatorStorage;
typename std::aligned_storage<sizeof(NodeIdPairAllocatorType),
alignof(NodeIdPairAllocatorType)>::type
NodeIdPairAllocatorStorage;
alignas(NodeAllocatorType) std::byte
NodeAllocatorStorage[sizeof(NodeAllocatorType)];
alignas(RootAllocatorType) std::byte
RootAllocatorStorage[sizeof(RootAllocatorType)];
alignas(ShadowStackAllocatorType) std::byte
ShadowStackAllocatorStorage[sizeof(ShadowStackAllocatorType)];
alignas(NodeIdPairAllocatorType) std::byte
NodeIdPairAllocatorStorage[sizeof(NodeIdPairAllocatorType)];
NodeAllocatorType *NodeAllocator = nullptr;
RootAllocatorType *RootAllocator = nullptr;

View File

@@ -29,7 +29,7 @@ namespace {
SpinMutex GlobalMutex;
struct ThreadTrie {
tid_t TId;
typename std::aligned_storage<sizeof(FunctionCallTrie)>::type TrieStorage;
alignas(FunctionCallTrie) std::byte TrieStorage[sizeof(FunctionCallTrie)];
};
struct ProfileBuffer {
@@ -71,16 +71,13 @@ using ThreadDataAllocator = ThreadDataArray::AllocatorType;
// by the ThreadData array. This lets us host the buffers, allocators, and tries
// associated with a thread by moving the data into the array instead of
// attempting to copy the data to a separately backed set of tries.
static typename std::aligned_storage<
sizeof(BufferQueue), alignof(BufferQueue)>::type BufferQueueStorage;
alignas(BufferQueue) static std::byte BufferQueueStorage[sizeof(BufferQueue)];
static BufferQueue *BQ = nullptr;
static BufferQueue::Buffer Buffer;
static typename std::aligned_storage<sizeof(ThreadDataAllocator),
alignof(ThreadDataAllocator)>::type
ThreadDataAllocatorStorage;
static typename std::aligned_storage<sizeof(ThreadDataArray),
alignof(ThreadDataArray)>::type
ThreadDataArrayStorage;
alignas(ThreadDataAllocator) static std::byte
ThreadDataAllocatorStorage[sizeof(ThreadDataAllocator)];
alignas(ThreadDataArray) static std::byte
ThreadDataArrayStorage[sizeof(ThreadDataArray)];
static ThreadDataAllocator *TDAllocator = nullptr;
static ThreadDataArray *TDArray = nullptr;
@@ -91,10 +88,10 @@ using ProfileBufferArrayAllocator = typename ProfileBufferArray::AllocatorType;
// These need to be global aligned storage to avoid dynamic initialization. We
// need these to be aligned to allow us to placement new objects into the
// storage, and have pointers to those objects be appropriately aligned.
static typename std::aligned_storage<sizeof(ProfileBufferArray)>::type
ProfileBuffersStorage;
static typename std::aligned_storage<sizeof(ProfileBufferArrayAllocator)>::type
ProfileBufferArrayAllocatorStorage;
alignas(ProfileBufferArray) static std::byte
ProfileBuffersStorage[sizeof(ProfileBufferArray)];
alignas(ProfileBufferArrayAllocator) static std::byte
ProfileBufferArrayAllocatorStorage[sizeof(ProfileBufferArrayAllocator)];
static ProfileBufferArrayAllocator *ProfileBuffersAllocator = nullptr;
static ProfileBufferArray *ProfileBuffers = nullptr;
@@ -382,8 +379,8 @@ XRayBuffer nextBuffer(XRayBuffer B) XRAY_NEVER_INSTRUMENT {
return {nullptr, 0};
static pthread_once_t Once = PTHREAD_ONCE_INIT;
static typename std::aligned_storage<sizeof(XRayProfilingFileHeader)>::type
FileHeaderStorage;
alignas(XRayProfilingFileHeader) static std::byte
FileHeaderStorage[sizeof(XRayProfilingFileHeader)];
pthread_once(
&Once, +[]() XRAY_NEVER_INSTRUMENT {
new (&FileHeaderStorage) XRayProfilingFileHeader{};

View File

@@ -48,17 +48,14 @@ static pthread_key_t ProfilingKey;
// We use a global buffer queue, which gets initialized once at initialisation
// time, and gets reset when profiling is "done".
static std::aligned_storage<sizeof(BufferQueue), alignof(BufferQueue)>::type
BufferQueueStorage;
alignas(BufferQueue) static std::byte BufferQueueStorage[sizeof(BufferQueue)];
static BufferQueue *BQ = nullptr;
thread_local FunctionCallTrie::Allocators::Buffers ThreadBuffers;
thread_local std::aligned_storage<sizeof(FunctionCallTrie::Allocators),
alignof(FunctionCallTrie::Allocators)>::type
AllocatorsStorage;
thread_local std::aligned_storage<sizeof(FunctionCallTrie),
alignof(FunctionCallTrie)>::type
FunctionCallTrieStorage;
alignas(FunctionCallTrie::Allocators) thread_local std::byte
AllocatorsStorage[sizeof(FunctionCallTrie::Allocators)];
alignas(FunctionCallTrie) thread_local std::byte
FunctionCallTrieStorage[sizeof(FunctionCallTrie)];
thread_local ProfilingData TLD{{0}, {0}};
thread_local atomic_uint8_t ReentranceGuard{0};

View File

@@ -56,8 +56,7 @@ public:
// kCacheLineSize-multiple segments, minus the size of two pointers.
//
// - Request cacheline-multiple sized elements from the allocator.
static constexpr uint64_t AlignedElementStorageSize =
sizeof(typename std::aligned_storage<sizeof(T), alignof(T)>::type);
static constexpr uint64_t AlignedElementStorageSize = sizeof(T);
static constexpr uint64_t SegmentControlBlockSize = sizeof(Segment *) * 2;

View File

@@ -1,7 +1,7 @@
// RUN: %clangxx_tsan -O1 --std=c++11 %s -o %t && %run %t 2>&1 | FileCheck %s
// RUN: %clangxx_tsan -O1 --std=c++17 %s -o %t && %run %t 2>&1 | FileCheck %s
#include "custom_mutex.h"
#include <type_traits>
#include <cstddef>
// Test that the destruction events of a mutex are ignored when the
// annotations request this.
@@ -12,14 +12,14 @@
// has run.
int main() {
std::aligned_storage<sizeof(Mutex), alignof(Mutex)>::type mu1_store;
alignas(Mutex) std::byte mu1_store[sizeof(Mutex)];
Mutex* mu1 = reinterpret_cast<Mutex*>(&mu1_store);
new(&mu1_store) Mutex(false, __tsan_mutex_linker_init);
mu1->Lock();
mu1->~Mutex();
mu1->Unlock();
std::aligned_storage<sizeof(Mutex), alignof(Mutex)>::type mu2_store;
alignas(Mutex) std::byte mu2_store[sizeof(Mutex)];
Mutex* mu2 = reinterpret_cast<Mutex*>(&mu2_store);
new(&mu2_store) Mutex(false, 0, __tsan_mutex_not_static);
mu2->Lock();

View File

@@ -1,20 +1,20 @@
// RUN: %clangxx_tsan -O1 --std=c++11 %s -o %t && %deflake %run %t 2>&1 | FileCheck %s
// RUN: %clangxx_tsan -O1 --std=c++17 %s -o %t && %deflake %run %t 2>&1 | FileCheck %s
#include "custom_mutex.h"
#include <type_traits>
#include <cstddef>
// Test that we detect the destruction of an in-use mutex when the
// thread annotations don't otherwise disable the check.
int main() {
std::aligned_storage<sizeof(Mutex), alignof(Mutex)>::type mu1_store;
alignas(Mutex) std::byte mu1_store[sizeof(Mutex)];
Mutex* mu1 = reinterpret_cast<Mutex*>(&mu1_store);
new(&mu1_store) Mutex(false, 0);
mu1->Lock();
mu1->~Mutex();
mu1->Unlock();
std::aligned_storage<sizeof(Mutex), alignof(Mutex)>::type mu2_store;
alignas(Mutex) std::byte mu2_store[sizeof(Mutex)];
Mutex* mu2 = reinterpret_cast<Mutex*>(&mu2_store);
new(&mu2_store)
Mutex(false, __tsan_mutex_not_static, __tsan_mutex_not_static);