This PR adds a `__sanitizer_copy_contiguous_container_annotations`
function, which copies annotations from one memory area to another. The
new area is annotated in the same way as the old region was at the
beginning (within the limitations of ASan).
Overlapping case: The function supports overlapping containers, however
no assumptions should be made outside of no false positives in new
buffer area. (It doesn't modify old container annotations where it's not
necessary, false negatives may happen in edge granules of the new
container area.) I don't expect this function to be used with
overlapping buffers, but it's designed to work with them and not result
in incorrect ASan errors (false positives).
If buffers have granularity-aligned distance between them (`old_beg %
granularity == new_beg % granularity`), copying algorithm works faster.
If the distance is not granularity-aligned, annotations are copied byte
after byte.
```cpp
void __sanitizer_copy_contiguous_container_annotations(
const void *old_storage_beg_p, const void *old_storage_end_p,
const void *new_storage_beg_p, const void *new_storage_end_p) {
```
This function aims to help with short string annotations and similar
container annotations. Right now we change trait types of
`std::basic_string` when compiling with ASan and this function purpose
is reverting that change as soon as possible.
87f3407856/libcxx/include/string (L738-L751)
The goal is to not change `__trivially_relocatable` when compiling with
ASan. If this function is accepted and upstreamed, the next step is
creating a function like `__memcpy_with_asan` moving memory with ASan.
And then using this function instead of `__builtin_memcpy` while moving
trivially relocatable objects.
11a6799740/libcxx/include/__memory/uninitialized_algorithms.h (L644-L646)
---
I'm wondering whether there is a good way to address the fact that in a
container the new buffer is usually bigger than the previous one. We may
add two more arguments to the function to address it (the beginning and
the end of the whole buffer).
Another potential change is removing `new_storage_end_p` as it's
redundant, because we require the same size.
Potential future work is creating a function `__asan_unsafe_memmove`,
which will be basically memmove, but with turned off instrumentation
(therefore it will allow copy data from poisoned area).
---------
Co-authored-by: Vitaly Buka <vitalybuka@google.com>
172 lines
6.1 KiB
C++
172 lines
6.1 KiB
C++
// RUN: %clangxx_asan -fexceptions -O %s -o %t && %env_asan_opts=detect_stack_use_after_return=0 %run %t
|
|
//
|
|
// Test __sanitizer_copy_contiguous_container_annotations.
|
|
|
|
#include <algorithm>
|
|
#include <iostream>
|
|
#include <memory>
|
|
#include <numeric>
|
|
#include <vector>
|
|
|
|
#include <assert.h>
|
|
#include <sanitizer/asan_interface.h>
|
|
#include <stdio.h>
|
|
#include <stdlib.h>
|
|
#include <string.h>
|
|
|
|
static constexpr size_t kGranularity = 8;

// Rounds `x` down to the nearest multiple of the ASan shadow granularity.
template <class T> static constexpr T RoundDown(T x) {
  const uintptr_t bits = reinterpret_cast<uintptr_t>(x);
  return reinterpret_cast<T>(bits & ~(kGranularity - 1));
}

// Rounds `x` up to the nearest multiple of the ASan shadow granularity.
template <class T> static constexpr T RoundUp(T x) {
  const uintptr_t bumped = reinterpret_cast<uintptr_t>(x) + kGranularity - 1;
  return RoundDown(reinterpret_cast<T>(bumped));
}
|
|
|
|
// Returns one entry per byte of [begin, end): 1 if ASan considers the byte
// poisoned, 0 otherwise. Used to snapshot shadow state before/after copying.
static std::vector<int> GetPoisonedState(char *begin, char *end) {
  std::vector<int> result;
  // Reserve up front: the per-byte push_back loop would otherwise trigger
  // repeated reallocations for large buffers.
  result.reserve(static_cast<size_t>(end - begin));
  for (char *ptr = begin; ptr != end; ++ptr) {
    result.push_back(__asan_address_is_poisoned(ptr));
  }
  return result;
}
|
|
|
|
static void RandomPoison(char *beg, char *end) {
|
|
assert(beg == RoundDown(beg));
|
|
assert(end == RoundDown(end));
|
|
__asan_poison_memory_region(beg, end - beg);
|
|
for (beg = RoundUp(beg); beg < end; beg += kGranularity) {
|
|
__asan_unpoison_memory_region(beg, rand() % (kGranularity + 1));
|
|
}
|
|
}
|
|
|
|
// Copies container annotations from [src_beg, src_end) to [dst_beg, dst_end)
// (both of length `capacity`, offset into the provided buffers) and, unless
// benchmarking, verifies the destination's resulting per-byte poison state
// against an independently computed expectation.
template <bool benchmark>
static void Test(size_t capacity, size_t off_src, size_t off_dst,
                 char *src_buffer_beg, char *src_buffer_end,
                 char *dst_buffer_beg, char *dst_buffer_end) {
  size_t dst_buffer_size = dst_buffer_end - dst_buffer_beg;
  char *src_beg = src_buffer_beg + off_src;
  char *src_end = src_beg + capacity;

  char *dst_beg = dst_buffer_beg + off_dst;
  char *dst_end = dst_beg + capacity;
  if (benchmark) {
    // Benchmark mode: exercise the API only, skip snapshotting/verification.
    __sanitizer_copy_contiguous_container_annotations(src_beg, src_end, dst_beg,
                                                      dst_end);
    return;
  }

  // Snapshot per-byte poison state of both whole buffers before the copy, and
  // of the destination buffer after it.
  std::vector<int> src_poison_states =
      GetPoisonedState(src_buffer_beg, src_buffer_end);
  std::vector<int> dst_poison_before =
      GetPoisonedState(dst_buffer_beg, dst_buffer_end);
  __sanitizer_copy_contiguous_container_annotations(src_beg, src_end, dst_beg,
                                                    dst_end);
  std::vector<int> dst_poison_after =
      GetPoisonedState(dst_buffer_beg, dst_buffer_end);

  // Create ideal copy of src over dst.
  std::vector<int> dst_poison_exp = dst_poison_before;
  for (size_t cur = 0; cur < capacity; ++cur)
    dst_poison_exp[off_dst + cur] = src_poison_states[off_src + cur];

  // Unpoison prefixes of Asan granules.
  // Walk backwards: whenever a byte inside a granule (not at a granule
  // boundary) is expected unpoisoned, the byte before it must be expected
  // unpoisoned too, matching how ASan can only represent poisoned suffixes
  // of a granule.
  for (size_t cur = dst_buffer_size - 1; cur > 0; --cur) {
    if (cur % kGranularity != 0 && !dst_poison_exp[cur])
      dst_poison_exp[cur - 1] = 0;
  }

  if (dst_poison_after != dst_poison_exp) {
    // On mismatch, dump index / before / after / expected for every byte of
    // the destination buffer to aid debugging, then fail the assertion.
    std::cerr << "[" << off_dst << ", " << off_dst + capacity << ")\n";
    for (size_t i = 0; i < dst_poison_after.size(); ++i) {
      std::cerr << i << ":\t" << dst_poison_before[i] << "\t"
                << dst_poison_after[i] << "\t" << dst_poison_exp[i] << "\n";
    }
    std::cerr << "----------\n";

    assert(dst_poison_after == dst_poison_exp);
  }
}
|
|
|
|
template <bool benchmark>
|
|
static void TestNonOverlappingContainers(size_t capacity, size_t off_src,
|
|
size_t off_dst) {
|
|
// Test will copy [off_src, off_src + capacity) to [off_dst, off_dst + capacity).
|
|
// Allocate buffers to have additional granule before and after tested ranges.
|
|
off_src += kGranularity;
|
|
off_dst += kGranularity;
|
|
size_t src_buffer_size = RoundUp(off_src + capacity) + kGranularity;
|
|
size_t dst_buffer_size = RoundUp(off_dst + capacity) + kGranularity;
|
|
|
|
std::unique_ptr<char[]> src_buffer =
|
|
std::make_unique<char[]>(src_buffer_size);
|
|
std::unique_ptr<char[]> dst_buffer =
|
|
std::make_unique<char[]>(dst_buffer_size);
|
|
|
|
char *src_buffer_beg = src_buffer.get();
|
|
char *src_buffer_end = src_buffer_beg + src_buffer_size;
|
|
assert(RoundDown(src_buffer_beg) == src_buffer_beg);
|
|
|
|
char *dst_buffer_beg = dst_buffer.get();
|
|
char *dst_buffer_end = dst_buffer_beg + dst_buffer_size;
|
|
assert(RoundDown(dst_buffer_beg) == dst_buffer_beg);
|
|
|
|
for (int i = 0; i < 35; i++) {
|
|
if (!benchmark || !i) {
|
|
RandomPoison(src_buffer_beg, src_buffer_end);
|
|
RandomPoison(dst_buffer_beg, dst_buffer_end);
|
|
}
|
|
|
|
Test<benchmark>(capacity, off_src, off_dst, src_buffer_beg, src_buffer_end,
|
|
dst_buffer_beg, dst_buffer_end);
|
|
}
|
|
|
|
__asan_unpoison_memory_region(src_buffer_beg, src_buffer_size);
|
|
__asan_unpoison_memory_region(dst_buffer_beg, dst_buffer_size);
|
|
}
|
|
|
|
// Exercises annotation copying when source and destination ranges live in the
// SAME buffer and may overlap.
template <bool benchmark>
static void TestOverlappingContainers(size_t capacity, size_t off_src,
                                      size_t off_dst) {
  // Test will copy [off_src, off_src + capacity) to [off_dst, off_dst + capacity).
  // Allocate buffers to have additional granule before and after tested ranges.
  off_src += kGranularity;
  off_dst += kGranularity;
  size_t buffer_size =
      RoundUp(std::max(off_src, off_dst) + capacity) + kGranularity;

  // A single buffer holds both ranges; make_unique's default deleter frees it.
  std::unique_ptr<char[]> buffer = std::make_unique<char[]>(buffer_size);

  char *buffer_beg = buffer.get();
  char *buffer_end = buffer_beg + buffer_size;
  assert(RoundDown(buffer_beg) == buffer_beg);

  for (int i = 0; i < 35; i++) {
    // Benchmark mode poisons only on the first iteration.
    if (!benchmark || !i)
      RandomPoison(buffer_beg, buffer_end);
    // Both source and destination point into the same buffer.
    Test<benchmark>(capacity, off_src, off_dst, buffer_beg, buffer_end,
                    buffer_beg, buffer_end);
  }

  // Unpoison the heap memory before it is freed so the allocator can reuse it.
  __asan_unpoison_memory_region(buffer_beg, buffer_size);
}
|
|
|
|
int main(int argc, char **argv) {
|
|
int n = argc == 1 ? 64 : atoi(argv[1]);
|
|
for (size_t off_src = 0; off_src < kGranularity; off_src++) {
|
|
for (size_t off_dst = 0; off_dst < kGranularity; off_dst++) {
|
|
for (int capacity = 0; capacity <= n; capacity++) {
|
|
if (n < 1024) {
|
|
TestNonOverlappingContainers<false>(capacity, off_src, off_dst);
|
|
TestOverlappingContainers<false>(capacity, off_src, off_dst);
|
|
} else {
|
|
TestNonOverlappingContainers<true>(capacity, off_src, off_dst);
|
|
TestOverlappingContainers<true>(capacity, off_src, off_dst);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
} |