[scudo] Add ConditionVariable in SizeClassAllocator64 (#69031)

This may shorten the wait on `Region->MMLock` while trying to refill
the freelist. Instead of always waiting for the completion of
`populateFreeListAndPopBatch()` or `releaseToOSMaybe()`, `pushBlocks()`
now also refills the freelist. This increases the chance of an earlier
return from `popBatch()`.

The support of condition variable hasn't been done for all platforms.
Therefore, add another `popBatchWithCV()` and it can be configured in
the allocator configuration by setting `Primary::UseConditionVariable`
and the desired `ConditionVariableT`.

Reviewed By: cferris

Differential Revision: https://reviews.llvm.org/D156146
This commit is contained in:
ChiaHungDuan
2023-10-19 15:47:18 -07:00
committed by GitHub
parent ff21a90e51
commit ab17ecd107
11 changed files with 476 additions and 21 deletions

View File

@@ -62,6 +62,9 @@ set(SCUDO_HEADERS
bytemap.h
checksum.h
chunk.h
condition_variable.h
condition_variable_base.h
condition_variable_linux.h
combined.h
common.h
flags_parser.h
@@ -104,6 +107,7 @@ set(SCUDO_HEADERS
set(SCUDO_SOURCES
checksum.cpp
common.cpp
condition_variable_linux.cpp
crc32_hw.cpp
flags_parser.cpp
flags.cpp

View File

@@ -11,6 +11,7 @@
#include "combined.h"
#include "common.h"
#include "condition_variable.h"
#include "flags.h"
#include "primary32.h"
#include "primary64.h"
@@ -82,6 +83,14 @@ namespace scudo {
// // Defines the minimal & maximal release interval that can be set.
// static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
// static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
//
// // Use a condition variable to shorten the time spent waiting for the
// // freelist to be refilled. Note that this depends on each platform's
// // condition variable implementation, so performance may vary and a
// // benefit is not guaranteed.
// // Note that both variables have to be defined to enable it.
// static const bool UseConditionVariable = true;
// using ConditionVariableT = ConditionVariableLinux;
// };
// // Defines the type of Primary allocator to use.
// template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;

View File

@@ -0,0 +1,60 @@
//===-- condition_variable.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef SCUDO_CONDITION_VARIABLE_H_
#define SCUDO_CONDITION_VARIABLE_H_
#include "condition_variable_base.h"
#include "common.h"
#include "platform.h"
#include "condition_variable_linux.h"
namespace scudo {
// Fallback condition variable used when a config does not provide a platform
// implementation. `wait` never truly sleeps: it releases the mutex, burns a
// short, fixed number of spin iterations, and reacquires the mutex.
class ConditionVariableDummy
    : public ConditionVariableBase<ConditionVariableDummy> {
public:
  // Nothing to wake up because waiters never actually block.
  void notifyAllImpl(UNUSED HybridMutex &M) REQUIRES(M) {}

  // Emulate a bounded wait by spinning. The `volatile` accumulator keeps the
  // compiler from collapsing the loop into nothing.
  void waitImpl(UNUSED HybridMutex &M) REQUIRES(M) {
    M.unlock();

    constexpr u32 MaxSpinCount = 64;
    volatile u32 Sink = 0;
    u32 Iter = 0;
    while (Iter < MaxSpinCount) {
      const u32 Next = Sink + 1;
      Sink = Next;
      ++Iter;
    }

    M.lock();
  }
};
// Traits that report whether a `Config` opted into the condition variable.
// The primary template is selected when `Config::UseConditionVariable` is
// not declared.
template <typename Config, typename = const bool>
struct ConditionVariableState {
  static constexpr bool enabled() { return false; }
  // This is only used for compilation purpose so that we won't end up having
  // many conditional compilations. If you want to use `ConditionVariableDummy`,
  // define `ConditionVariableT` in your allocator configuration. See
  // allocator_config.h for more details.
  using ConditionVariableT = ConditionVariableDummy;
};

// Specialization selected when `Config::UseConditionVariable` exists:
// `decltype(Config::UseConditionVariable)` is `const bool`, which matches the
// default argument of the primary template's second parameter.
template <typename Config>
struct ConditionVariableState<Config, decltype(Config::UseConditionVariable)> {
  static constexpr bool enabled() { return true; }
  using ConditionVariableT = typename Config::ConditionVariableT;
};
} // namespace scudo
#endif // SCUDO_CONDITION_VARIABLE_H_

View File

@@ -0,0 +1,56 @@
//===-- condition_variable_base.h -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef SCUDO_CONDITION_VARIABLE_BASE_H_
#define SCUDO_CONDITION_VARIABLE_BASE_H_
#include "mutex.h"
#include "thread_annotations.h"
namespace scudo {
// CRTP base class for condition variables. `Derived` supplies the platform
// behavior via `notifyAllImpl()` / `waitImpl()`; this base adds a debug-only
// check that every call passes the one mutex the condition variable was
// bound to.
template <typename Derived> class ConditionVariableBase {
public:
  constexpr ConditionVariableBase() = default;

  // Record which mutex this condition variable is associated with. The
  // binding is only stored (and later verified) in SCUDO_DEBUG builds.
  void bindTestOnly(HybridMutex &Mutex) {
#if SCUDO_DEBUG
    boundMutex = &Mutex;
#else
    (void)Mutex;
#endif
  }

  // Wake all waiters. `M` must be held and, in debug builds, must be the
  // mutex given to bindTestOnly().
  void notifyAll(HybridMutex &M) REQUIRES(M) {
#if SCUDO_DEBUG
    CHECK_EQ(&M, boundMutex);
#endif
    getDerived()->notifyAllImpl(M);
  }

  // Wait on the condition variable. Same mutex requirements as notifyAll().
  void wait(HybridMutex &M) REQUIRES(M) {
#if SCUDO_DEBUG
    CHECK_EQ(&M, boundMutex);
#endif
    getDerived()->waitImpl(M);
  }

protected:
  Derived *getDerived() { return static_cast<Derived *>(this); }

#if SCUDO_DEBUG
  // Because thread-safety analysis doesn't support pointer aliasing, we are not
  // able to mark the proper annotations without false positive. Instead, we
  // pass the lock and do the same-lock check separately.
  HybridMutex *boundMutex = nullptr;
#endif
};
} // namespace scudo
#endif // SCUDO_CONDITION_VARIABLE_BASE_H_

View File

@@ -0,0 +1,52 @@
//===-- condition_variable_linux.cpp ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "platform.h"
#if SCUDO_LINUX
#include "condition_variable_linux.h"
#include "atomic_helpers.h"
#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
namespace scudo {
// Wake every thread currently blocked in waitImpl(). Called with `M` held.
void ConditionVariableLinux::notifyAllImpl(UNUSED HybridMutex &M) {
  const u32 V = atomic_load_relaxed(&Counter);
  // Bump the futex word so pending/racing FUTEX_WAITs on the old value fail.
  atomic_store_relaxed(&Counter, V + 1);

  // TODO(chiahungduan): Move the waiters from the futex waiting queue
  // `Counter` to futex waiting queue `M` so that the awoken threads won't be
  // blocked again due to locked `M` by current thread.
  // Skip the FUTEX_WAKE syscall when no waiter arrived since the previous
  // notify: `Counter` is only bumped by waitImpl() and by this function, so
  // `LastNotifyAll == V` means no new wait happened in between.
  if (LastNotifyAll != V) {
    syscall(SYS_futex, reinterpret_cast<uptr>(&Counter), FUTEX_WAKE_PRIVATE,
            INT_MAX, nullptr, nullptr, 0);
  }

  LastNotifyAll = V + 1;
}
// Block the caller until notified. `M` is released while sleeping and
// reacquired before returning.
void ConditionVariableLinux::waitImpl(HybridMutex &M) {
  // Publish the new generation first: a concurrent notifyAllImpl() will then
  // see a fresh waiter and issue the FUTEX_WAKE.
  const u32 V = atomic_load_relaxed(&Counter) + 1;
  atomic_store_relaxed(&Counter, V);

  // TODO: Use ScopedUnlock when it's supported.
  M.unlock();
  // FUTEX_WAIT only sleeps while `Counter` still equals `V`; a notification
  // landing between the unlock and the syscall changes `Counter`, so the
  // kernel returns immediately instead of losing the wakeup.
  syscall(SYS_futex, reinterpret_cast<uptr>(&Counter), FUTEX_WAIT_PRIVATE, V,
          nullptr, nullptr, 0);
  M.lock();
}
} // namespace scudo
#endif // SCUDO_LINUX

View File

@@ -0,0 +1,38 @@
//===-- condition_variable_linux.h ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef SCUDO_CONDITION_VARIABLE_LINUX_H_
#define SCUDO_CONDITION_VARIABLE_LINUX_H_
#include "platform.h"
#if SCUDO_LINUX
#include "atomic_helpers.h"
#include "condition_variable_base.h"
#include "thread_annotations.h"
namespace scudo {
// Futex-based condition variable; only compiled on Linux (see the
// SCUDO_LINUX guard above).
class ConditionVariableLinux
    : public ConditionVariableBase<ConditionVariableLinux> {
public:
  void notifyAllImpl(HybridMutex &M) REQUIRES(M);

  void waitImpl(HybridMutex &M) REQUIRES(M);

private:
  // Value of `Counter` recorded by the last notifyAllImpl(); lets it skip
  // the FUTEX_WAKE syscall when no new waiter has arrived since.
  u32 LastNotifyAll = 0;
  // Futex word; bumped by both waitImpl() and notifyAllImpl().
  atomic_u32 Counter = {};
};
} // namespace scudo
#endif // SCUDO_LINUX
#endif // SCUDO_CONDITION_VARIABLE_LINUX_H_

View File

@@ -22,6 +22,8 @@
#include "string_utils.h"
#include "thread_annotations.h"
#include "condition_variable.h"
namespace scudo {
// SizeClassAllocator64 is an allocator tuned for 64-bit address space.
@@ -48,6 +50,8 @@ template <typename Config> class SizeClassAllocator64 {
public:
typedef typename Config::Primary::CompactPtrT CompactPtrT;
typedef typename Config::Primary::SizeClassMap SizeClassMap;
typedef typename ConditionVariableState<
typename Config::Primary>::ConditionVariableT ConditionVariableT;
static const uptr CompactPtrScale = Config::Primary::CompactPtrScale;
static const uptr RegionSizeLog = Config::Primary::RegionSizeLog;
static const uptr GroupSizeLog = Config::Primary::GroupSizeLog;
@@ -70,6 +74,10 @@ public:
static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
static bool conditionVariableEnabled() {
return ConditionVariableState<typename Config::Primary>::enabled();
}
void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
@@ -124,6 +132,7 @@ public:
for (uptr I = 0; I < NumClasses; I++) {
RegionInfo *Region = getRegionInfo(I);
// The actual start of a region is offset by a random number of pages
// when PrimaryEnableRandomOffset is set.
Region->RegionBeg = (PrimaryBase + (I << RegionSizeLog)) +
@@ -145,6 +154,11 @@ public:
}
shuffle(RegionInfoArray, NumClasses, &Seed);
// The binding should be done after region shuffling so that it won't bind
// the FLLock from the wrong region.
for (uptr I = 0; I < NumClasses; I++)
getRegionInfo(I)->FLLockCV.bindTestOnly(getRegionInfo(I)->FLLock);
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
@@ -236,26 +250,26 @@ public:
bool ReportRegionExhausted = false;
TransferBatchT *B = nullptr;
while (true) {
// When two threads compete for `Region->MMLock`, we only want one of them
// does the populateFreeListAndPopBatch(). To avoid both of them doing
// that, always check the freelist before mapping new pages.
//
// TODO(chiahungduan): Use a condition variable so that we don't need to
// hold `Region->MMLock` here.
ScopedLock ML(Region->MMLock);
{
ScopedLock FL(Region->FLLock);
B = popBatchImpl(C, ClassId, Region);
if (LIKELY(B))
return B;
}
if (conditionVariableEnabled()) {
B = popBatchWithCV(C, ClassId, Region, ReportRegionExhausted);
} else {
while (true) {
// When two threads compete for `Region->MMLock`, we only want one of
// them to call populateFreeListAndPopBatch(). To avoid both of them
// doing that, always check the freelist before mapping new pages.
ScopedLock ML(Region->MMLock);
{
ScopedLock FL(Region->FLLock);
if ((B = popBatchImpl(C, ClassId, Region)))
break;
}
const bool RegionIsExhausted = Region->Exhausted;
if (!RegionIsExhausted)
B = populateFreeListAndPopBatch(C, ClassId, Region);
ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
break;
const bool RegionIsExhausted = Region->Exhausted;
if (!RegionIsExhausted)
B = populateFreeListAndPopBatch(C, ClassId, Region);
ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
break;
}
}
if (UNLIKELY(ReportRegionExhausted)) {
@@ -280,6 +294,8 @@ public:
if (ClassId == SizeClassMap::BatchClassId) {
ScopedLock L(Region->FLLock);
pushBatchClassBlocks(Region, Array, Size);
if (conditionVariableEnabled())
Region->FLLockCV.notifyAll(Region->FLLock);
return;
}
@@ -306,6 +322,8 @@ public:
{
ScopedLock L(Region->FLLock);
pushBlocksImpl(C, ClassId, Region, Array, Size, SameGroup);
if (conditionVariableEnabled())
Region->FLLockCV.notifyAll(Region->FLLock);
}
}
@@ -538,6 +556,7 @@ private:
struct UnpaddedRegionInfo {
// Mutex for operations on freelist
HybridMutex FLLock;
ConditionVariableT FLLockCV GUARDED_BY(FLLock);
// Mutex for memmap operations
HybridMutex MMLock ACQUIRED_BEFORE(FLLock);
// `RegionBeg` is initialized before thread creation and won't be changed.
@@ -549,6 +568,7 @@ private:
uptr TryReleaseThreshold GUARDED_BY(MMLock) = 0;
ReleaseToOsInfo ReleaseInfo GUARDED_BY(MMLock) = {};
bool Exhausted GUARDED_BY(MMLock) = false;
bool isPopulatingFreeList GUARDED_BY(FLLock) = false;
};
struct RegionInfo : UnpaddedRegionInfo {
char Padding[SCUDO_CACHE_LINE_SIZE -
@@ -831,6 +851,76 @@ private:
InsertBlocks(Cur, Array + Size - Count, Count);
}
// Condition-variable variant of the popBatch slow path: at most one thread
// at a time populates the freelist (guarded by `isPopulatingFreeList`) while
// the others sleep on `FLLockCV` instead of contending on `Region->MMLock`.
// On return, `ReportRegionExhausted` tells the caller whether this call
// transitioned the region into the exhausted state.
TransferBatchT *popBatchWithCV(CacheT *C, uptr ClassId, RegionInfo *Region,
                               bool &ReportRegionExhausted) {
  TransferBatchT *B = nullptr;

  while (true) {
    // We only expect one thread doing the freelist refill and other
    // threads will be waiting for either the completion of the
    // `populateFreeListAndPopBatch()` or `pushBlocks()` called by other
    // threads.
    bool PopulateFreeList = false;
    {
      ScopedLock FL(Region->FLLock);
      if (!Region->isPopulatingFreeList) {
        Region->isPopulatingFreeList = true;
        PopulateFreeList = true;
      }
    }

    if (PopulateFreeList) {
      ScopedLock ML(Region->MMLock);

      const bool RegionIsExhausted = Region->Exhausted;
      if (!RegionIsExhausted)
        B = populateFreeListAndPopBatch(C, ClassId, Region);
      ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;

      {
        // Before reacquiring the `FLLock`, the freelist may be used up again
        // and some threads are waiting for this thread to refill it. It's
        // important to set `Region->isPopulatingFreeList` to false so the
        // threads about to sleep will notice the status change.
        ScopedLock FL(Region->FLLock);
        Region->isPopulatingFreeList = false;
        Region->FLLockCV.notifyAll(Region->FLLock);
      }

      break;
    }

    // At this point, two preconditions have to be met before waiting:
    // 1. The freelist is empty.
    // 2. Region->isPopulatingFreeList == true, i.e., someone is still doing
    // `populateFreeListAndPopBatch()`.
    //
    // Note that the freelist may be empty while
    // Region->isPopulatingFreeList == false, because all the newly populated
    // blocks were used up right after the refill. Therefore, we have to
    // check if someone is still populating the freelist.
    ScopedLock FL(Region->FLLock);
    if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
      break;

    if (!Region->isPopulatingFreeList)
      continue;

    // Now the freelist is empty and someone's doing the refill. We will
    // wait until anyone refills the freelist or someone finishes doing
    // `populateFreeListAndPopBatch()`. The refill can be done by
    // `populateFreeListAndPopBatch()`, `pushBlocks()`,
    // `pushBatchClassBlocks()` and `mergeGroupsToReleaseBack()`.
    Region->FLLockCV.wait(Region->FLLock);

    if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
      break;
  }

  return B;
}
// Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
// group id will be considered first.
//
@@ -1521,6 +1611,8 @@ private:
if (UNLIKELY(Idx + NeededSlots > MaxUnusedSize)) {
ScopedLock L(BatchClassRegion->FLLock);
pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
if (conditionVariableEnabled())
BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
Idx = 0;
}
Blocks[Idx++] =
@@ -1556,6 +1648,8 @@ private:
if (Idx != 0) {
ScopedLock L(BatchClassRegion->FLLock);
pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
if (conditionVariableEnabled())
BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
}
if (SCUDO_DEBUG) {
@@ -1565,6 +1659,9 @@ private:
CHECK_LT(Prev->CompactPtrGroupBase, Cur->CompactPtrGroupBase);
}
}
if (conditionVariableEnabled())
Region->FLLockCV.notifyAll(Region->FLLock);
}
// TODO: `PrimaryBase` can be obtained from ReservedMemory. This needs to be

View File

@@ -96,6 +96,7 @@ set(SCUDO_UNIT_TEST_SOURCES
chunk_test.cpp
combined_test.cpp
common_test.cpp
condition_variable_test.cpp
flags_test.cpp
list_test.cpp
map_test.cpp

View File

@@ -12,7 +12,9 @@
#include "allocator_config.h"
#include "chunk.h"
#include "combined.h"
#include "condition_variable.h"
#include "mem_map.h"
#include "size_class_map.h"
#include <algorithm>
#include <condition_variable>
@@ -164,13 +166,60 @@ template <class TypeParam> struct ScudoCombinedTest : public Test {
template <typename T> using ScudoCombinedDeathTest = ScudoCombinedTest<T>;
namespace scudo {
// Allocator configuration for the combined tests that exercises the
// condition-variable path: `UseConditionVariable` is set and
// `ConditionVariableT` selects the Linux futex implementation when
// available, otherwise the spinning dummy.
struct TestConditionVariableConfig {
  static const bool MaySupportMemoryTagging = true;
  template <class A>
  using TSDRegistryT =
      scudo::TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.

  struct Primary {
    using SizeClassMap = scudo::AndroidSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
    static const scudo::uptr RegionSizeLog = 28U;
    typedef scudo::u32 CompactPtrT;
    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
    static const scudo::uptr GroupSizeLog = 20U;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
#else
    static const scudo::uptr RegionSizeLog = 18U;
    static const scudo::uptr GroupSizeLog = 18U;
    typedef scudo::uptr CompactPtrT;
#endif
    static const scudo::s32 MinReleaseToOsIntervalMs = 1000;
    static const scudo::s32 MaxReleaseToOsIntervalMs = 1000;
    // Both members below have to be defined to enable the condition variable.
    static const bool UseConditionVariable = true;
#if SCUDO_LINUX
    using ConditionVariableT = scudo::ConditionVariableLinux;
#else
    using ConditionVariableT = scudo::ConditionVariableDummy;
#endif
  };
#if SCUDO_CAN_USE_PRIMARY64
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator64<Config>;
#else
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator32<Config>;
#endif

  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
  };
  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
};
} // namespace scudo
#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME) \
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, FuchsiaConfig)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME) \
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig) \
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig) \
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConditionVariableConfig)
#endif
#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE) \

View File

@@ -0,0 +1,59 @@
//===-- condition_variable_test.cpp -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "tests/scudo_unit_test.h"
#include "common.h"
#include "condition_variable.h"
#include "mutex.h"
#include <thread>
template <typename ConditionVariableT> void simpleWaitAndNotifyAll() {
constexpr scudo::u32 NumThreads = 2;
constexpr scudo::u32 CounterMax = 1024;
std::thread Threads[NumThreads];
scudo::HybridMutex M;
ConditionVariableT CV;
CV.bindTestOnly(M);
scudo::u32 Counter = 0;
for (scudo::u32 I = 0; I < NumThreads; ++I) {
Threads[I] = std::thread(
[&](scudo::u32 Id) {
do {
scudo::ScopedLock L(M);
if (Counter % NumThreads != Id && Counter < CounterMax)
CV.wait(M);
if (Counter >= CounterMax) {
break;
} else {
++Counter;
CV.notifyAll(M);
}
} while (true);
},
I);
}
for (std::thread &T : Threads)
T.join();
EXPECT_EQ(Counter, CounterMax);
}
// The dummy condition variable is available on every platform.
TEST(ScudoConditionVariableTest, DummyCVWaitAndNotifyAll) {
  simpleWaitAndNotifyAll<scudo::ConditionVariableDummy>();
}

// Use `#if`, not `#ifdef`: scudo's platform.h always defines SCUDO_LINUX (to
// 0 or 1), so `#ifdef SCUDO_LINUX` would be true everywhere and this test
// would fail to compile on non-Linux targets where ConditionVariableLinux
// doesn't exist. This matches the `#if SCUDO_LINUX` guards used elsewhere.
#if SCUDO_LINUX
TEST(ScudoConditionVariableTest, LinuxCVWaitAndNotifyAll) {
  simpleWaitAndNotifyAll<scudo::ConditionVariableLinux>();
}
#endif

View File

@@ -9,6 +9,7 @@
#include "tests/scudo_unit_test.h"
#include "allocator_config.h"
#include "condition_variable.h"
#include "primary32.h"
#include "primary64.h"
#include "size_class_map.h"
@@ -105,6 +106,34 @@ template <typename SizeClassMapT> struct TestConfig4 {
};
};
// This is the only test config that enables the condition variable.
// `ConditionVariableT` falls back to the spinning dummy where the Linux
// futex implementation isn't available.
template <typename SizeClassMapT> struct TestConfig5 {
  static const bool MaySupportMemoryTagging = true;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
#if defined(__mips__)
    // Unable to allocate greater size on QEMU-user.
    static const scudo::uptr RegionSizeLog = 23U;
#else
    static const scudo::uptr RegionSizeLog = 24U;
#endif
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
    static const scudo::uptr GroupSizeLog = 18U;
    typedef scudo::u32 CompactPtrT;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
    // Both members below have to be defined to enable the condition variable.
    static const bool UseConditionVariable = true;
#if SCUDO_LINUX
    using ConditionVariableT = scudo::ConditionVariableLinux;
#else
    using ConditionVariableT = scudo::ConditionVariableDummy;
#endif
  };
};
template <template <typename> class BaseConfig, typename SizeClassMapT>
struct Config : public BaseConfig<SizeClassMapT> {};
@@ -143,7 +172,8 @@ struct ScudoPrimaryTest : public Test {};
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig1) \
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2) \
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3) \
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig4)
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig4) \
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig5)
#endif
#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE) \