/Users/eugenesiegel/btc/bitcoin/src/support/lockedpool.cpp
| Line | Count | Source | 
| 1 |  | // Copyright (c) 2016-present The Bitcoin Core developers | 
| 2 |  | // Distributed under the MIT software license, see the accompanying | 
| 3 |  | // file COPYING or http://www.opensource.org/licenses/mit-license.php. | 
| 4 |  |  | 
| 5 |  | #include <support/lockedpool.h> | 
| 6 |  | #include <support/cleanse.h> | 
| 7 |  |  | 
| 8 |  | #ifdef WIN32 | 
| 9 |  | #include <windows.h> | 
| 10 |  | #else | 
| 11 |  | #include <sys/mman.h> | 
| 12 |  | #include <sys/resource.h> | 
| 13 |  | #include <unistd.h> | 
| 14 |  | #endif | 
| 15 |  |  | 
| 16 |  | #include <algorithm> | 
| 17 |  | #include <limits> | 
| 18 |  | #include <stdexcept> | 
| 19 |  | #include <utility> | 
| 20 |  | #ifdef ARENA_DEBUG | 
| 21 |  | #include <iomanip> | 
| 22 |  | #include <iostream> | 
| 23 |  | #endif | 
| 24 |  |  | 
| 25 |  | LockedPoolManager* LockedPoolManager::_instance = nullptr; | 
| 26 |  |  | 
| 27 |  | /*******************************************************************************/ | 
| 28 |  | // Utilities | 
| 29 |  | // | 
| 30 |  | /** Round x up to the next multiple of align (align must be a power of 2) */ | 
| 31 |  | static inline size_t align_up(size_t x, size_t align) | 
| 32 | 51.2k | { | 
| 33 | 51.2k |     return (x + align - 1) & ~(align - 1); | 
| 34 | 51.2k | } | 
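The rounding identity in `align_up` only holds when `align` is a power of two. A purely illustrative compile-time check of the arithmetic (not part of this file):

```cpp
// Illustration only: the mask trick used by align_up(), assuming align is a
// power of two (here 16). 100 rounds up to 112; exact multiples are unchanged.
static_assert(((100 + 16 - 1) & ~(16 - 1)) == 112, "100 rounds up to 112");
static_assert(((112 + 16 - 1) & ~(16 - 1)) == 112, "multiples stay put");
```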
| 35 |  |  | 
| 36 |  | /*******************************************************************************/ | 
| 37 |  | // Implementation: Arena | 
| 38 |  |  | 
| 39 |  | Arena::Arena(void *base_in, size_t size_in, size_t alignment_in): | 
| 40 | 0 |     base(base_in), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in) | 
| 41 | 0 | { | 
| 42 |  |     // Start with one free chunk that covers the entire arena | 
| 43 | 0 |     auto it = size_to_free_chunk.emplace(size_in, base); | 
| 44 | 0 |     chunks_free.emplace(base, it); | 
| 45 | 0 |     chunks_free_end.emplace(static_cast<char*>(base) + size_in, it); | 
| 46 | 0 | } | 
| 47 |  |  | 
| 48 | 0 | Arena::~Arena() = default; | 
| 49 |  |  | 
| 50 |  | void* Arena::alloc(size_t size) | 
| 51 | 51.2k | { | 
| 52 |  |     // Round to next multiple of alignment | 
| 53 | 51.2k |     size = align_up(size, alignment); | 
| 54 |  |  | 
| 55 |  |     // Don't handle zero-sized chunks | 
| 56 | 51.2k |     if (size == 0) | 
| 57 | 0 |         return nullptr; | 
| 58 |  |  | 
| 59 |  | // Pick a large enough free chunk: lower_bound returns an iterator to the first element whose size is not less than the requested size. | 
| 60 |  |     // This allocation strategy is best-fit. According to "Dynamic Storage Allocation: A Survey and Critical Review", | 
| 61 |  | // Wilson et al. 1995, https://www.scs.stanford.edu/14wi-cs140/sched/readings/wilson.pdf, best-fit and first-fit | 
| 62 |  |     // policies seem to work well in practice. | 
| 63 | 51.2k |     auto size_ptr_it = size_to_free_chunk.lower_bound(size); | 
| 64 | 51.2k |     if (size_ptr_it == size_to_free_chunk.end()) | 
| 65 | 0 |         return nullptr; | 
| 66 |  |  | 
| 67 |  |     // Create the used-chunk, taking its space from the end of the free-chunk | 
| 68 | 51.2k |     const size_t size_remaining = size_ptr_it->first - size; | 
| 69 | 51.2k |     char* const free_chunk = static_cast<char*>(size_ptr_it->second); | 
| 70 | 51.2k |     auto allocated = chunks_used.emplace(free_chunk + size_remaining, size).first; | 
| 71 | 51.2k |     chunks_free_end.erase(free_chunk + size_ptr_it->first); | 
| 72 | 51.2k |     if (size_ptr_it->first == size) { | 
| 73 |  |         // whole chunk is used up | 
| 74 | 0 |         chunks_free.erase(size_ptr_it->second); | 
| 75 | 51.2k |     } else { | 
| 76 |  |         // still some memory left in the chunk | 
| 77 | 51.2k |         auto it_remaining = size_to_free_chunk.emplace(size_remaining, size_ptr_it->second); | 
| 78 | 51.2k |         chunks_free[size_ptr_it->second] = it_remaining; | 
| 79 | 51.2k |         chunks_free_end.emplace(free_chunk + size_remaining, it_remaining); | 
| 80 | 51.2k |     } | 
| 81 | 51.2k |     size_to_free_chunk.erase(size_ptr_it); | 
| 82 |  |  | 
| 83 | 51.2k |     return allocated->first; | 
| 84 | 51.2k | } | 
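For readers unfamiliar with the bookkeeping above: `size_to_free_chunk` is a multimap keyed by free-chunk size, so `lower_bound(size)` is exactly the best-fit lookup, while `chunks_free` and `chunks_free_end` index the same entries by start and end address for the coalescing done in `free()`. A standalone sketch of just the best-fit step, with stand-in buffers:

```cpp
// Standalone sketch (not Bitcoin Core code) of the best-fit lookup in
// Arena::alloc(): lower_bound on a size-keyed multimap finds the smallest
// free chunk that still fits the request.
#include <cstddef>
#include <cstdio>
#include <map>

int main()
{
    static char block_a[64], block_b[256], block_c[512];
    std::multimap<std::size_t, char*> size_to_free_chunk{
        {sizeof(block_a), block_a},
        {sizeof(block_b), block_b},
        {sizeof(block_c), block_c},
    };
    const std::size_t want = 112; // request already rounded up to the alignment
    auto it = size_to_free_chunk.lower_bound(want);
    if (it == size_to_free_chunk.end()) {
        std::puts("no free chunk is large enough");
    } else {
        // Best fit picks the 256-byte chunk; the real allocator carves the
        // request from its end and re-inserts the 144-byte remainder.
        std::printf("picked %zu-byte chunk at %p\n", it->first, static_cast<void*>(it->second));
    }
    return 0;
}
```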
| 85 |  |  | 
| 86 |  | void Arena::free(void *ptr) | 
| 87 | 51.2k | { | 
| 88 |  |     // Freeing the nullptr pointer is OK. | 
| 89 | 51.2k |     if (ptr == nullptr) { | 
| 90 | 0 |         return; | 
| 91 | 0 |     } | 
| 92 |  |  | 
| 93 |  |     // Remove chunk from used map | 
| 94 | 51.2k |     auto i = chunks_used.find(ptr); | 
| 95 | 51.2k |     if (i == chunks_used.end()) { | 
| 96 | 0 |         throw std::runtime_error("Arena: invalid or double free"); | 
| 97 | 0 |     } | 
| 98 | 51.2k |     auto freed = std::make_pair(static_cast<char*>(i->first), i->second); | 
| 99 | 51.2k |     chunks_used.erase(i); | 
| 100 |  |  | 
| 101 |  |     // coalesce freed with previous chunk | 
| 102 | 51.2k |     auto prev = chunks_free_end.find(freed.first); | 
| 103 | 51.2k |     if (prev != chunks_free_end.end()) { | 
| 104 | 51.2k |         freed.first -= prev->second->first; | 
| 105 | 51.2k |         freed.second += prev->second->first; | 
| 106 | 51.2k |         size_to_free_chunk.erase(prev->second); | 
| 107 | 51.2k |         chunks_free_end.erase(prev); | 
| 108 | 51.2k |     } | 
| 109 |  |  | 
| 110 |  |     // coalesce freed with chunk after freed | 
| 111 | 51.2k |     auto next = chunks_free.find(freed.first + freed.second); | 
| 112 | 51.2k |     if (next != chunks_free.end()) { | 
| 113 | 0 |         freed.second += next->second->first; | 
| 114 | 0 |         size_to_free_chunk.erase(next->second); | 
| 115 | 0 |         chunks_free.erase(next); | 
| 116 | 0 |     } | 
| 117 |  |  | 
| 118 |  |     // Add/set space with coalesced free chunk | 
| 119 | 51.2k |     auto it = size_to_free_chunk.emplace(freed.second, freed.first); | 
| 120 | 51.2k |     chunks_free[freed.first] = it; | 
| 121 | 51.2k |     chunks_free_end[freed.first + freed.second] = it; | 
| 122 | 51.2k | } | 
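A minimal usage sketch of the alloc/free round trip over a plain buffer, assuming `Arena` from `<support/lockedpool.h>` is driven directly (in Bitcoin Core it is only ever used through `LockedPool`):

```cpp
// Hedged sketch: drive Arena directly over a static buffer to see the
// rounding and coalescing behaviour. Not how the code below uses it.
#include <support/lockedpool.h>

void arena_demo()
{
    alignas(16) static char buf[4096];
    Arena arena(buf, sizeof(buf), /*alignment_in=*/16);

    void* a = arena.alloc(100); // rounded up to 112 bytes
    void* b = arena.alloc(200); // rounded up to 208 bytes, carved from the same free chunk
    arena.free(a);              // coalesces with the neighbouring free space
    arena.free(b);              // arena is back to a single free chunk
}
```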
| 123 |  |  | 
| 124 |  | Arena::Stats Arena::stats() const | 
| 125 | 0 | { | 
| 126 | 0 |     Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() }; | 
| 127 | 0 |     for (const auto& chunk: chunks_used) | 
| 128 | 0 |         r.used += chunk.second; | 
| 129 | 0 |     for (const auto& chunk: chunks_free) | 
| 130 | 0 |         r.free += chunk.second->first; | 
| 131 | 0 |     r.total = r.used + r.free; | 
| 132 | 0 |     return r; | 
| 133 | 0 | } | 
| 134 |  |  | 
| 135 |  | #ifdef ARENA_DEBUG | 
| 136 |  | static void printchunk(void* base, size_t sz, bool used) { | 
| 137 |  |     std::cout << | 
| 138 |  |         "0x" << std::hex << std::setw(16) << std::setfill('0') << base << | 
| 139 |  |         " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz << | 
| 140 |  |         " 0x" << used << std::endl; | 
| 141 |  | } | 
| 142 |  | void Arena::walk() const | 
| 143 |  | { | 
| 144 |  |     for (const auto& chunk: chunks_used) | 
| 145 |  |         printchunk(chunk.first, chunk.second, true); | 
| 146 |  |     std::cout << std::endl; | 
| 147 |  |     for (const auto& chunk: chunks_free) | 
| 148 |  |         printchunk(chunk.first, chunk.second->first, false); | 
| 149 |  |     std::cout << std::endl; | 
| 150 |  | } | 
| 151 |  | #endif | 
| 152 |  |  | 
| 153 |  | /*******************************************************************************/ | 
| 154 |  | // Implementation: Win32LockedPageAllocator | 
| 155 |  |  | 
| 156 |  | #ifdef WIN32 | 
| 157 |  | /** LockedPageAllocator specialized for Windows. | 
| 158 |  |  */ | 
| 159 |  | class Win32LockedPageAllocator: public LockedPageAllocator | 
| 160 |  | { | 
| 161 |  | public: | 
| 162 |  |     Win32LockedPageAllocator(); | 
| 163 |  |     void* AllocateLocked(size_t len, bool *lockingSuccess) override; | 
| 164 |  |     void FreeLocked(void* addr, size_t len) override; | 
| 165 |  |     size_t GetLimit() override; | 
| 166 |  | private: | 
| 167 |  |     size_t page_size; | 
| 168 |  | }; | 
| 169 |  |  | 
| 170 |  | Win32LockedPageAllocator::Win32LockedPageAllocator() | 
| 171 |  | { | 
| 172 |  |     // Determine system page size in bytes | 
| 173 |  |     SYSTEM_INFO sSysInfo; | 
| 174 |  |     GetSystemInfo(&sSysInfo); | 
| 175 |  |     page_size = sSysInfo.dwPageSize; | 
| 176 |  | } | 
| 177 |  | void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess) | 
| 178 |  | { | 
| 179 |  |     len = align_up(len, page_size); | 
| 180 |  |     void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); | 
| 181 |  |     if (addr) { | 
| 182 |  |         // VirtualLock is used to attempt to keep keying material out of swap. Note | 
| 183 |  |         // that it does not provide this as a guarantee, but, in practice, memory | 
| 184 |  |         // that has been VirtualLock'd almost never gets written to the pagefile | 
| 185 |  |         // except in rare circumstances where memory is extremely low. | 
| 186 |  |         *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0; | 
| 187 |  |     } | 
| 188 |  |     return addr; | 
| 189 |  | } | 
| 190 |  | void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len) | 
| 191 |  | { | 
| 192 |  |     len = align_up(len, page_size); | 
| 193 |  |     memory_cleanse(addr, len); | 
| 194 |  |     VirtualUnlock(const_cast<void*>(addr), len); | 
| 195 |  | } | 
| 196 |  |  | 
| 197 |  | size_t Win32LockedPageAllocator::GetLimit() | 
| 198 |  | { | 
| 199 |  |     size_t min, max; | 
| 200 |  |     if(GetProcessWorkingSetSize(GetCurrentProcess(), &min, &max) != 0) { | 
| 201 |  |         return min; | 
| 202 |  |     } | 
| 203 |  |     return std::numeric_limits<size_t>::max(); | 
| 204 |  | } | 
| 205 |  | #endif | 
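VirtualLock can fail when a request exceeds the process minimum working set, which is why GetLimit() above reports that minimum so the first arena stays lockable. A Windows-only sketch of how a caller could raise the working set before allocating (illustrative; this file does not do so):

```cpp
// Illustration only: raising the working-set minimum so a larger VirtualLock
// request can succeed. Bitcoin Core does not adjust the working set here.
#ifdef WIN32
#include <windows.h>

static bool RaiseWorkingSet(SIZE_T min_bytes, SIZE_T max_bytes)
{
    return SetProcessWorkingSetSize(GetCurrentProcess(), min_bytes, max_bytes) != 0;
}
#endif
```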
| 206 |  |  | 
| 207 |  | /*******************************************************************************/ | 
| 208 |  | // Implementation: PosixLockedPageAllocator | 
| 209 |  |  | 
| 210 |  | #ifndef WIN32 | 
| 211 |  | /** LockedPageAllocator specialized for OSes that don't try to be | 
| 212 |  |  * special snowflakes. | 
| 213 |  |  */ | 
| 214 |  | class PosixLockedPageAllocator: public LockedPageAllocator | 
| 215 |  | { | 
| 216 |  | public: | 
| 217 |  |     PosixLockedPageAllocator(); | 
| 218 |  |     void* AllocateLocked(size_t len, bool *lockingSuccess) override; | 
| 219 |  |     void FreeLocked(void* addr, size_t len) override; | 
| 220 |  |     size_t GetLimit() override; | 
| 221 |  | private: | 
| 222 |  |     size_t page_size; | 
| 223 |  | }; | 
| 224 |  |  | 
| 225 |  | PosixLockedPageAllocator::PosixLockedPageAllocator() | 
| 226 | 0 | { | 
| 227 |  |     // Determine system page size in bytes | 
| 228 |  | #if defined(PAGESIZE) // defined in limits.h | 
| 229 |  |     page_size = PAGESIZE; | 
| 230 |  | #else                   // assume some POSIX OS | 
| 231 | 0 |     page_size = sysconf(_SC_PAGESIZE); | 
| 232 | 0 | #endif | 
| 233 | 0 | } | 
| 234 |  |  | 
| 235 |  | void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess) | 
| 236 | 0 | { | 
| 237 | 0 |     void *addr; | 
| 238 | 0 |     len = align_up(len, page_size); | 
| 239 | 0 |     addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); | 
| 240 | 0 |     if (addr == MAP_FAILED) { | 
| 241 | 0 |         return nullptr; | 
| 242 | 0 |     } | 
| 243 | 0 |     if (addr) { | 
| 244 | 0 |         *lockingSuccess = mlock(addr, len) == 0; | 
| 245 |  | #if defined(MADV_DONTDUMP) // Linux | 
| 246 |  |         madvise(addr, len, MADV_DONTDUMP); | 
| 247 |  | #elif defined(MADV_NOCORE) // FreeBSD | 
| 248 |  |         madvise(addr, len, MADV_NOCORE); | 
| 249 |  | #endif | 
| 250 | 0 |     } | 
| 251 | 0 |     return addr; | 
| 252 | 0 | } | 
| 253 |  | void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len) | 
| 254 | 0 | { | 
| 255 | 0 |     len = align_up(len, page_size); | 
| 256 | 0 |     memory_cleanse(addr, len); | 
| 257 | 0 |     munlock(addr, len); | 
| 258 | 0 |     munmap(addr, len); | 
| 259 | 0 | } | 
| 260 |  | size_t PosixLockedPageAllocator::GetLimit() | 
| 261 | 0 | { | 
| 262 | 0 | #ifdef RLIMIT_MEMLOCK | 
| 263 | 0 |     struct rlimit rlim; | 
| 264 | 0 |     if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) { | 
| 265 | 0 |         if (rlim.rlim_cur != RLIM_INFINITY) { | 
| 266 | 0 |             return rlim.rlim_cur; | 
| 267 | 0 |         } | 
| 268 | 0 |     } | 
| 269 | 0 | #endif | 
| 270 | 0 |     return std::numeric_limits<size_t>::max(); | 
| 271 | 0 | } | 
| 272 |  | #endif | 
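On Linux the default RLIMIT_MEMLOCK soft limit is often small (historically 64 KiB), which is why GetLimit() feeds into the first-arena cap in LockedPool::new_arena() below. A POSIX-only sketch of inspecting and, where the hard limit allows, raising the soft limit (illustrative; not done in this file):

```cpp
// Illustration only: check RLIMIT_MEMLOCK and raise the soft limit toward the
// hard limit if needed. Raising the hard limit itself requires privileges.
#ifndef WIN32
#include <sys/resource.h>

static bool TryRaiseMemlockLimit(rlim_t want_bytes)
{
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) != 0) return false;
    if (rlim.rlim_cur == RLIM_INFINITY || rlim.rlim_cur >= want_bytes) return true;
    rlim.rlim_cur = want_bytes < rlim.rlim_max ? want_bytes : rlim.rlim_max;
    return setrlimit(RLIMIT_MEMLOCK, &rlim) == 0;
}
#endif
```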
| 273 |  |  | 
| 274 |  | /*******************************************************************************/ | 
| 275 |  | // Implementation: LockedPool | 
| 276 |  |  | 
| 277 |  | LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in) | 
| 278 | 0 |     : allocator(std::move(allocator_in)), lf_cb(lf_cb_in) | 
| 279 | 0 | { | 
| 280 | 0 | } | 
| 281 |  |  | 
| 282 | 0 | LockedPool::~LockedPool() = default; | 
| 283 |  |  | 
| 284 |  | void* LockedPool::alloc(size_t size) | 
| 285 | 51.2k | { | 
| 286 | 51.2k |     std::lock_guard<std::mutex> lock(mutex); | 
| 287 |  |  | 
| 288 |  |     // Don't handle impossible sizes | 
| 289 | 51.2k |     if (size == 0 || size > ARENA_SIZE) | 
| 290 | 0 |         return nullptr; | 
| 291 |  |  | 
| 292 |  |     // Try allocating from each current arena | 
| 293 | 51.2k |     for (auto &arena: arenas) { | 
| 294 | 51.2k |         void *addr = arena.alloc(size); | 
| 295 | 51.2k |         if (addr) { | 
| 296 | 51.2k |             return addr; | 
| 297 | 51.2k |         } | 
| 298 | 51.2k |     } | 
| 299 |  |     // If that fails, create a new one | 
| 300 | 0 |     if (new_arena(ARENA_SIZE, ARENA_ALIGN)) { | 
| 301 | 0 |         return arenas.back().alloc(size); | 
| 302 | 0 |     } | 
| 303 | 0 |     return nullptr; | 
| 304 | 0 | } | 
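A usage sketch of the pool's public entry points, assuming the LockedPoolManager singleton declared in <support/lockedpool.h>; in Bitcoin Core this path is normally reached through secure_allocator rather than by calling the pool directly:

```cpp
// Hedged usage sketch: allocate a small secret buffer from the shared pool.
// Callers are expected to wipe secrets themselves; the pool only cleanses
// whole arenas when they are torn down.
#include <support/cleanse.h>
#include <support/lockedpool.h>

void secret_demo()
{
    LockedPool& pool = LockedPoolManager::Instance();
    void* key = pool.alloc(32); // nullptr if size is 0, exceeds ARENA_SIZE, or the pool cannot grow
    if (!key) return;
    // ... write and use 32 bytes of secret material ...
    memory_cleanse(key, 32);
    pool.free(key);
}
```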
| 305 |  |  | 
| 306 |  | void LockedPool::free(void *ptr) | 
| 307 | 51.2k | { | 
| 308 | 51.2k |     std::lock_guard<std::mutex> lock(mutex); | 
| 309 |  |     // TODO we can do better than this linear search by keeping a map of arena | 
| 310 |  |     // extents to arena, and looking up the address. | 
| 311 | 51.2k |     for (auto &arena: arenas) { | 
| 312 | 51.2k |         if (arena.addressInArena(ptr)) { | 
| 313 | 51.2k |             arena.free(ptr); | 
| 314 | 51.2k |             return; | 
| 315 | 51.2k |         } | 
| 316 | 51.2k |     } | 
| 317 | 0 |     throw std::runtime_error("LockedPool: invalid address not pointing to any arena"); | 
| 318 | 51.2k | } | 
| 319 |  |  | 
| 320 |  | LockedPool::Stats LockedPool::stats() const | 
| 321 | 0 | { | 
| 322 | 0 |     std::lock_guard<std::mutex> lock(mutex); | 
| 323 | 0 |     LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0}; | 
| 324 | 0 |     for (const auto &arena: arenas) { | 
| 325 | 0 |         Arena::Stats i = arena.stats(); | 
| 326 | 0 |         r.used += i.used; | 
| 327 | 0 |         r.free += i.free; | 
| 328 | 0 |         r.total += i.total; | 
| 329 | 0 |         r.chunks_used += i.chunks_used; | 
| 330 | 0 |         r.chunks_free += i.chunks_free; | 
| 331 | 0 |     } | 
| 332 | 0 |     return r; | 
| 333 | 0 | } | 
| 334 |  |  | 
| 335 |  | bool LockedPool::new_arena(size_t size, size_t align) | 
| 336 | 0 | { | 
| 337 | 0 |     bool locked; | 
| 338 |  |     // If this is the first arena, handle this specially: Cap the upper size | 
| 339 |  |     // by the process limit. This makes sure that the first arena will at least | 
| 340 |  |     // be locked. An exception to this is if the process limit is 0: | 
| 341 |  |     // in this case no memory can be locked at all so we'll skip past this logic. | 
| 342 | 0 |     if (arenas.empty()) { | 
| 343 | 0 |         size_t limit = allocator->GetLimit(); | 
| 344 | 0 |         if (limit > 0) { | 
| 345 | 0 |             size = std::min(size, limit); | 
| 346 | 0 |         } | 
| 347 | 0 |     } | 
| 348 | 0 |     void *addr = allocator->AllocateLocked(size, &locked); | 
| 349 | 0 |     if (!addr) { | 
| 350 | 0 |         return false; | 
| 351 | 0 |     } | 
| 352 | 0 |     if (locked) { | 
| 353 | 0 |         cumulative_bytes_locked += size; | 
| 354 | 0 |     } else if (lf_cb) { // Call the locking-failed callback if locking failed | 
| 355 | 0 |         if (!lf_cb()) { // If the callback returns false, free the memory and fail, otherwise consider the user warned and proceed. | 
| 356 | 0 |             allocator->FreeLocked(addr, size); | 
| 357 | 0 |             return false; | 
| 358 | 0 |         } | 
| 359 | 0 |     } | 
| 360 | 0 |     arenas.emplace_back(allocator.get(), addr, size, align); | 
| 361 | 0 |     return true; | 
| 362 | 0 | } | 
| 363 |  |  | 
| 364 |  | LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in): | 
| 365 | 0 |     Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in) | 
| 366 | 0 | { | 
| 367 | 0 | } | 
| 368 |  | LockedPool::LockedPageArena::~LockedPageArena() | 
| 369 | 0 | { | 
| 370 | 0 |     allocator->FreeLocked(base, size); | 
| 371 | 0 | } | 
| 372 |  |  | 
| 373 |  | /*******************************************************************************/ | 
| 374 |  | // Implementation: LockedPoolManager | 
| 375 |  | // | 
| 376 |  | LockedPoolManager::LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator_in): | 
| 377 | 0 |     LockedPool(std::move(allocator_in), &LockedPoolManager::LockingFailed) | 
| 378 | 0 | { | 
| 379 | 0 | } | 
| 380 |  |  | 
| 381 |  | bool LockedPoolManager::LockingFailed() | 
| 382 | 0 | { | 
| 383 |  |     // TODO: log something but how? without including util.h | 
| 384 | 0 |     return true; | 
| 385 | 0 | } | 
| 386 |  |  | 
| 387 |  | void LockedPoolManager::CreateInstance() | 
| 388 | 0 | { | 
| 389 |  |     // Using a local static instance guarantees that the object is initialized | 
| 390 |  |     // when it's first needed and also deinitialized after all objects that use | 
| 391 |  |     // it are done with it.  I can think of one unlikely scenario where we may | 
| 392 |  | // have a static deinitialization ordering problem, but the check in | 
| 393 |  |     // LockedPoolManagerBase's destructor helps us detect if that ever happens. | 
| 394 |  | #ifdef WIN32 | 
| 395 |  |     std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator()); | 
| 396 |  | #else | 
| 397 | 0 |     std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator()); | 
| 398 | 0 | #endif | 
| 399 | 0 |     static LockedPoolManager instance(std::move(allocator)); | 
| 400 | 0 |     LockedPoolManager::_instance = &instance; | 
| 401 | 0 | } |