/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include "internal.h"

static volatile uintptr_t entropic_address = 0;
static volatile uintptr_t entropic_base = 0;
static volatile uintptr_t entropic_limit = 0;

MALLOC_NOEXPORT uint64_t malloc_entropy[2] = {0, 0};

#define ENTROPIC_KABILLION 0x10000000 /* 256MB */

#define ENTROPIC_USER_RANGE_SIZE 0x200000000ULL /* 8GB */

// <rdar://problem/22277891> align 64bit ARM shift to 32MB PTE entries
#if MALLOC_TARGET_IOS && MALLOC_TARGET_64BIT
#define ENTROPIC_SHIFT 25
#else // MALLOC_TARGET_IOS && MALLOC_TARGET_64BIT
#define ENTROPIC_SHIFT SMALL_BLOCKS_ALIGN
#endif

void
mvm_aslr_init(void)
{
	// Prepare ASLR
#if __i386__ || __x86_64__ || __arm64__ || TARGET_OS_DRIVERKIT || (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR)
#if __i386__
	uintptr_t stackbase = 0x8fe00000;
	int entropic_bits = 3;
#elif __x86_64__
	uintptr_t stackbase = USRSTACK64;
	int entropic_bits = 16;
#elif __arm64__
#if __LP64__
	uintptr_t stackbase = USRSTACK64;
	int entropic_bits = 7;
#else // __LP64__
	uintptr_t stackbase = USRSTACK;
	int entropic_bits = 3;
#endif
#else
	uintptr_t stackbase = USRSTACK;
	int entropic_bits = 3;
#endif
	// assert(((1 << entropic_bits) - 1) << SMALL_BLOCKS_ALIGN < (stackbase - MAXSSIZ - ENTROPIC_KABILLION));

	if (mvm_aslr_enabled()) {
		if (0 == entropic_address) {
			uintptr_t t = stackbase - MAXSSIZ - ((uintptr_t)(malloc_entropy[1] & ((1 << entropic_bits) - 1)) << ENTROPIC_SHIFT);
#if MALLOC_TARGET_IOS && MALLOC_TARGET_64BIT
			uintptr_t addr = 0;
			/* If kernel VM user ranges are enabled, mach_vm_allocate/map will provide memory
			 * in the upper VM address range. This range is randomized per process. For now
			 * we do not have this metadata plumbed through, so we make a single allocation
			 * with the appropriate tag to determine where our heap is. If we are given an
			 * allocation above where we expect, then we can safely assume VM ranges are enabled.
			 *
			 * If so, we do not need to apply further entropy, but we do need to ensure
			 * we mask off the address to a PTE boundary.
			 */
			if (KERN_SUCCESS == mach_vm_allocate(mach_task_self(), (mach_vm_address_t *)&addr, vm_page_quanta_size,
					VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_MALLOC))) {
				// Fall through and use existing base if addr < stackbase
				if (addr > stackbase) {
					t = (addr + ENTROPIC_USER_RANGE_SIZE) & ~((1 << ENTROPIC_SHIFT) - 1);
					OSAtomicCompareAndSwapLong(0, addr, (volatile long *)&entropic_base);
				}
				mach_vm_deallocate(mach_task_self(), addr, vm_page_quanta_size);
			}
#endif // MALLOC_TARGET_IOS && MALLOC_TARGET_64BIT
			OSAtomicCompareAndSwapLong(0, t, (volatile long *)&entropic_limit);
			OSAtomicCompareAndSwapLong(0, t - ENTROPIC_KABILLION, (volatile long *)&entropic_address);
		}
	} else {
		// zero slide when ASLR has been disabled by boot-arg. Eliminate cloaking.
		malloc_entropy[0] = 0;
		malloc_entropy[1] = 0;
	}
#else // TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
#error ASLR unhandled on this platform
#endif // TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
}

void *
mvm_allocate_pages(size_t size, unsigned char align, uint32_t debug_flags, int vm_page_label)
{
	boolean_t add_prelude_guard_page = debug_flags & MALLOC_ADD_PRELUDE_GUARD_PAGE;
	boolean_t add_postlude_guard_page = debug_flags & MALLOC_ADD_POSTLUDE_GUARD_PAGE;
	boolean_t purgeable = debug_flags & MALLOC_PURGEABLE;
	boolean_t use_entropic_range = !(debug_flags & DISABLE_ASLR);
	mach_vm_address_t vm_addr;
	uintptr_t addr;
	mach_vm_size_t allocation_size = round_page_quanta(size);
	mach_vm_offset_t allocation_mask = ((mach_vm_offset_t)1 << align) - 1;
	int alloc_flags = VM_FLAGS_ANYWHERE | VM_MAKE_TAG(vm_page_label);
	kern_return_t kr;

	if (!allocation_size) {
		allocation_size = vm_page_quanta_size;
	}
	if (add_postlude_guard_page || add_prelude_guard_page) {
		if (add_prelude_guard_page && align > vm_page_quanta_shift) {
			/* <rdar://problem/16601499> alignment greater than pagesize needs more work */
			allocation_size += (1 << align) + large_vm_page_quanta_size;
		} else {
			allocation_size += add_prelude_guard_page && add_postlude_guard_page ?
					2 * large_vm_page_quanta_size : large_vm_page_quanta_size;
		}
	}

	if (purgeable) {
		alloc_flags |= VM_FLAGS_PURGABLE;
	}
	if (allocation_size < size) { // size_t arithmetic wrapped!
		return NULL;
	}

retry:
	vm_addr = use_entropic_range ? entropic_address : vm_page_quanta_size;
	kr = mach_vm_map(mach_task_self(), &vm_addr, allocation_size, allocation_mask, alloc_flags, MEMORY_OBJECT_NULL, 0, FALSE,
			VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr == KERN_NO_SPACE && use_entropic_range) {
		vm_addr = vm_page_quanta_size;
		kr = mach_vm_map(mach_task_self(), &vm_addr, allocation_size, allocation_mask, alloc_flags, MEMORY_OBJECT_NULL, 0, FALSE,
				VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	}

	if (kr) {
		if (kr != KERN_NO_SPACE) {
			malloc_zone_error(debug_flags, false, "can't allocate region\n:"
					"*** mach_vm_map(size=%lu, flags: %x) failed (error code=%d)\n", size, debug_flags, kr);
		}
		return NULL;
	}
	addr = (uintptr_t)vm_addr;

	if (use_entropic_range) {
		// Don't allow allocation to rise above entropic_limit (for tidiness).
		if (addr + allocation_size > entropic_limit) { // Exhausted current range?
			uintptr_t t = entropic_address;
			uintptr_t u = t - ENTROPIC_KABILLION;

			// provided we don't wrap, deallocate and retry, in the expanded
			// entropic range
			if (u < t && u >= entropic_base) {
				mach_vm_deallocate(mach_task_self(), vm_addr, allocation_size);
				OSAtomicCompareAndSwapLong(t, u, (volatile long *)&entropic_address); // Just one reduction please
				goto retry;
			}
			// fall through to use what we got
		}

		if (addr < entropic_address) { // we wrapped to find this allocation, expand the entropic range
			uintptr_t t = entropic_address;
			uintptr_t u = t - ENTROPIC_KABILLION;

			if (u < t && u >= entropic_base) {
				OSAtomicCompareAndSwapLong(t, u, (volatile long *)&entropic_address); // Just one reduction please
			}
			// fall through to use what we got
		}
	}

	if (add_postlude_guard_page || add_prelude_guard_page) {
		if (add_prelude_guard_page && align > vm_page_quanta_shift) {
			/* <rdar://problem/16601499> calculate the first address inside the alignment padding
			 * where we can place the guard page and still be aligned.
			 *
			 * |-----------------------------------------------------------|
			 * |leading|gp|                  alloc                  |gp| t |
			 * |-----------------------------------------------------------|
			 */
			uintptr_t alignaddr = ((addr + large_vm_page_quanta_size) + (1 << align) - 1) & ~((1 << align) - 1);
			size_t leading = alignaddr - addr - large_vm_page_quanta_size;
			size_t trailing = (1 << align) - large_vm_page_quanta_size - leading;

			/* Unmap the excess area. */
			kr = mach_vm_deallocate(mach_task_self(), addr, leading);
			if (kr) {
				malloc_zone_error(debug_flags, false, "can't unmap excess guard region\n"
						"*** mach_vm_deallocate(addr=%p, size=%lu) failed (code=%d)\n", (void *)addr, leading, kr);
				return NULL;
			}

			if (trailing) {
				kr = mach_vm_deallocate(mach_task_self(), addr + allocation_size - trailing, trailing);
				if (kr) {
					malloc_zone_error(debug_flags, false, "can't unmap excess trailing guard region\n"
							"*** mach_vm_deallocate(addr=%p, size=%lu) failed (code=%d)\n",
							(void *)(addr + allocation_size - trailing), trailing, kr);
					return NULL;
				}
			}

			addr = alignaddr;
		} else if (add_prelude_guard_page) {
			addr += large_vm_page_quanta_size;
		}
		mvm_protect((void *)addr, size, PROT_NONE, debug_flags);
	}
	return (void *)addr;
}

void
mvm_deallocate_pages(void *addr, size_t size, unsigned debug_flags)
{
	boolean_t added_prelude_guard_page = debug_flags & MALLOC_ADD_PRELUDE_GUARD_PAGE;
	boolean_t added_postlude_guard_page = debug_flags & MALLOC_ADD_POSTLUDE_GUARD_PAGE;
	mach_vm_address_t vm_addr = (mach_vm_address_t)addr;
	mach_vm_size_t allocation_size = size;
	kern_return_t kr;

	if (added_prelude_guard_page) {
		vm_addr -= large_vm_page_quanta_size;
		allocation_size += large_vm_page_quanta_size;
	}

	if (added_postlude_guard_page) {
		allocation_size += large_vm_page_quanta_size;
	}

	kr = mach_vm_deallocate(mach_task_self(), vm_addr, allocation_size);
	if (kr) {
		malloc_zone_error(debug_flags, false, "Can't deallocate_pages region at %p\n", addr);
	}
}

void
mvm_protect(void *address, size_t size, unsigned protection, unsigned debug_flags)
{
	kern_return_t err;

	if ((debug_flags & MALLOC_ADD_PRELUDE_GUARD_PAGE) && !(debug_flags & MALLOC_DONT_PROTECT_PRELUDE)) {
		err = mprotect((void *)((uintptr_t)address - large_vm_page_quanta_size), large_vm_page_quanta_size, protection);
		if (err) {
			malloc_report(ASL_LEVEL_ERR, "*** can't mvm_protect(%u) region for prelude guard page at %p\n", protection,
					(void *)((uintptr_t)address - large_vm_page_quanta_size));
		}
	}
	if ((debug_flags & MALLOC_ADD_POSTLUDE_GUARD_PAGE) && !(debug_flags & MALLOC_DONT_PROTECT_POSTLUDE)) {
		err = mprotect((void *)(round_page_quanta(((uintptr_t)address + size))), large_vm_page_quanta_size, protection);
		if (err) {
			malloc_report(ASL_LEVEL_ERR, "*** can't mvm_protect(%u) region for postlude guard page at %p\n", protection,
					(void *)((uintptr_t)address + size));
		}
	}
}

int
mvm_madvise_free(void *rack, void *r, uintptr_t pgLo, uintptr_t pgHi, uintptr_t *last, boolean_t scribble)
{
	if (pgHi > pgLo) {
		size_t len = pgHi - pgLo;

		if (scribble && malloc_zero_policy != MALLOC_ZERO_ON_FREE) {
			memset((void *)pgLo, SCRUBBLE_BYTE, len); // Scribble on MADV_FREEd memory
		}

#if MALLOC_TARGET_IOS
		if (last) {
			if (*last == pgLo) {
				return 0;
			}

			*last = pgLo;
		}
#endif // MALLOC_TARGET_IOS

		MAGMALLOC_MADVFREEREGION(rack, r, (void *)pgLo, (int)len); // DTrace USDT Probe
		if (-1 == madvise((void *)pgLo, len, CONFIG_MADVISE_STYLE)) {
			/* -1 return: VM map entry change makes this unfit for reuse. Something evil lurks.
			 */
#if DEBUG_MADVISE
			malloc_zone_error(NULL, false, "madvise_free_range madvise(..., MADV_FREE_REUSABLE) failed for %p, length=%d\n",
					(void *)pgLo, len);
#endif
			return 1;
		} else {
			MALLOC_TRACE(TRACE_madvise, (uintptr_t)r, (uintptr_t)pgLo, len, CONFIG_MADVISE_STYLE);
		}
	}
	return 0;
}

#if CONFIG_DEFERRED_RECLAIM
static struct mach_vm_reclaim_ringbuffer_v1_s reclaim_buffer;
static _malloc_lock_s reclaim_buffer_lock = _MALLOC_LOCK_INIT;

kern_return_t
mvm_deferred_reclaim_init(void)
{
	return mach_vm_reclaim_ringbuffer_init(&reclaim_buffer);
}

bool
mvm_reclaim_mark_used(uint64_t id, mach_vm_address_t ptr, uint32_t size, unsigned int debug_flags)
{
	bool used;

	if (debug_flags & MALLOC_ADD_GUARD_PAGE_FLAGS) {
		if (os_add_overflow(size, 2 * large_vm_page_quanta_size, &size)) {
			return false;
		}
		ptr -= large_vm_page_quanta_size;
	}

	_malloc_lock_lock(&reclaim_buffer_lock);
	used = mach_vm_reclaim_mark_used(&reclaim_buffer, id, ptr, size);
	_malloc_lock_unlock(&reclaim_buffer_lock);
	return used;
}

uint64_t
mvm_reclaim_mark_free(vm_address_t ptr, uint32_t size, unsigned int debug_flags)
{
	uint64_t id;
	bool should_update_kernel_accounting = false;

	if (debug_flags & MALLOC_ADD_GUARD_PAGE_FLAGS) {
		if (os_add_overflow(size, 2 * large_vm_page_quanta_size, &size)) {
			return VM_RECLAIM_INDEX_NULL;
		}
		ptr -= large_vm_page_quanta_size;
	}

	_malloc_lock_lock(&reclaim_buffer_lock);
	id = mach_vm_reclaim_mark_free(&reclaim_buffer, ptr, size, &should_update_kernel_accounting);
	_malloc_lock_unlock(&reclaim_buffer_lock);

	if (should_update_kernel_accounting) {
		mach_vm_reclaim_update_kernel_accounting(&reclaim_buffer);
	}

	return id;
}

bool
mvm_reclaim_is_available(uint64_t id)
{
	return mach_vm_reclaim_is_available(&reclaim_buffer, id);
}
#endif // CONFIG_DEFERRED_RECLAIM
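
#if 0
/*
 * Illustrative sketch only, not part of the original file: one plausible way a
 * caller inside this library might pair mvm_allocate_pages() with
 * mvm_deallocate_pages(). The helper name and the flag combination below are
 * assumptions for illustration; the flags and globals themselves come from
 * this file and its headers.
 */
static void
example_guarded_allocation(void)
{
	// Assumed flags: bracket the allocation with prelude and postlude guard pages.
	uint32_t debug_flags = MALLOC_ADD_PRELUDE_GUARD_PAGE | MALLOC_ADD_POSTLUDE_GUARD_PAGE;
	size_t size = 4 * vm_page_quanta_size;

	// Page-aligned request tagged as malloc memory.
	void *p = mvm_allocate_pages(size, vm_page_quanta_shift, debug_flags, VM_MEMORY_MALLOC);
	if (p) {
		// ... use the allocation ...

		// Pass the same debug_flags back so the guard pages added by
		// mvm_allocate_pages() are included in the deallocation.
		mvm_deallocate_pages(p, size, debug_flags);
	}
}
#endif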