Previously, we were returning an error if we couldn't read the whole region. This doesn't matter most of the time, because lldb caches memory reads, and in that process it aligns them to cache line boundaries. As (LLDB) cache lines are smaller than pages, the reads are unlikely to cross page boundaries. Nonetheless, this can cause a problem for large reads (which bypass the cache), where we're unable to read anything even if just a single byte of the memory is unreadable. This patch fixes lldb-server to return whatever partial data it was able to read in this situation, and also changes the linux implementation to reuse any partial results it got from the process_vm_readv call (to avoid having to re-read everything again using ptrace, only to find that it stopped at the same place). This matches debugserver behavior. It is also consistent with the gdb remote protocol documentation, but -- notably -- not with actual gdbserver behavior (which returns errors instead of partial results). We filed a [clarification bug](https://sourceware.org/bugzilla/show_bug.cgi?id=24751) several years ago. Though we did not really reach a conclusion there, I think this is the most logical behavior. The associated test does not currently pass on windows, because the windows memory read APIs don't support partial reads (I have a WIP patch to work around that).
89 lines
2.5 KiB
C++
89 lines
2.5 KiB
C++
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <system_error>
|
|
|
|
// Total number of pages handed out by allocate_memory_with_holes().
constexpr size_t num_pages = 7;
// Indices of the pages that stay readable/writable; every other page becomes
// an inaccessible "hole".
constexpr size_t accessible_pages[] = {0, 2, 4, 6};

// Returns true if the page with the given index is one of the accessible
// pages (i.e. not a hole).
bool is_accessible(size_t page) {
  return std::any_of(std::begin(accessible_pages), std::end(accessible_pages),
                     [page](size_t accessible) { return accessible == page; });
}
|
|
|
|
// allocate_memory_with_holes returns a pointer to `num_pages` pages of memory,
|
|
// where some of the pages are inaccessible (even to debugging APIs). We use
|
|
// this to test lldb's ability to skip over inaccessible blocks.
|
|
#ifdef _WIN32
|
|
#include "Windows.h"
|
|
|
|
int getpagesize() {
|
|
SYSTEM_INFO system_info;
|
|
GetSystemInfo(&system_info);
|
|
return system_info.dwPageSize;
|
|
}
|
|
|
|
char *allocate_memory_with_holes() {
|
|
int pagesize = getpagesize();
|
|
void *mem =
|
|
VirtualAlloc(nullptr, num_pages * pagesize, MEM_RESERVE, PAGE_NOACCESS);
|
|
if (!mem) {
|
|
std::cerr << std::system_category().message(GetLastError()) << std::endl;
|
|
exit(1);
|
|
}
|
|
char *bytes = static_cast<char *>(mem);
|
|
for (size_t page = 0; page < num_pages; ++page) {
|
|
if (!is_accessible(page))
|
|
continue;
|
|
if (!VirtualAlloc(bytes + page * pagesize, pagesize, MEM_COMMIT,
|
|
PAGE_READWRITE)) {
|
|
std::cerr << std::system_category().message(GetLastError()) << std::endl;
|
|
exit(1);
|
|
}
|
|
}
|
|
return bytes;
|
|
}
|
|
#else
|
|
#include "sys/mman.h"
|
|
#include "unistd.h"
|
|
|
|
char *allocate_memory_with_holes() {
|
|
int pagesize = getpagesize();
|
|
void *mem = mmap(nullptr, num_pages * pagesize, PROT_READ | PROT_WRITE,
|
|
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
|
|
if (mem == MAP_FAILED) {
|
|
perror("mmap");
|
|
exit(1);
|
|
}
|
|
char *bytes = static_cast<char *>(mem);
|
|
for (size_t page = 0; page < num_pages; ++page) {
|
|
if (is_accessible(page))
|
|
continue;
|
|
if (munmap(bytes + page * pagesize, pagesize) != 0) {
|
|
perror("munmap");
|
|
exit(1);
|
|
}
|
|
}
|
|
return bytes;
|
|
}
|
|
#endif
|
|
|
|
int main(int argc, char const *argv[]) {
|
|
char *mem_with_holes = allocate_memory_with_holes();
|
|
int pagesize = getpagesize();
|
|
char *positions[] = {
|
|
mem_with_holes, // Beginning of memory
|
|
mem_with_holes + 2 * pagesize, // After a hole
|
|
mem_with_holes + 2 * pagesize +
|
|
pagesize / 2, // Middle of a block, after an existing match.
|
|
mem_with_holes + 5 * pagesize - 7, // End of a block
|
|
mem_with_holes + 7 * pagesize - 7, // End of memory
|
|
};
|
|
for (char *p : positions)
|
|
strcpy(p, "needle");
|
|
|
|
return 0; // break here
|
|
}
|