/*
 * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Kernel stack management routines.
 */
#include <mach/mach_host.h>
#include <mach/mach_types.h>
#include <mach/processor_set.h>

#include <kern/kern_types.h>
#include <kern/lock_group.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/percpu.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/ledger.h>

#include <vm/vm_map_xnu.h>
#include <vm/vm_kern_xnu.h>

#include <san/kasan.h>

/*
 * We allocate stacks from generic kernel VM.
 *
 * The stack_free_list can only be accessed at splsched,
 * because stack_alloc_try/thread_invoke operate at splsched.
 */
static SIMPLE_LOCK_DECLARE(stack_lock_data, 0);
#define stack_lock()    simple_lock(&stack_lock_data, LCK_GRP_NULL)
#define stack_unlock()  simple_unlock(&stack_lock_data)

#define STACK_CACHE_SIZE        2

static vm_offset_t      stack_free_list;

static unsigned int     stack_free_count, stack_free_hiwat; /* free list count */
static unsigned int     stack_hiwat;
unsigned int            stack_total;    /* current total count */
unsigned long long      stack_allocs;   /* total count of allocations */

static unsigned int     stack_free_target;
static int              stack_free_delta;
static unsigned int     stack_new_count; /* total new stack allocations */

static SECURITY_READ_ONLY_LATE(vm_offset_t) stack_addr_mask;
SECURITY_READ_ONLY_LATE(vm_offset_t) kernel_stack_size;
SECURITY_READ_ONLY_LATE(vm_offset_t) kernel_stack_mask;
vm_offset_t kernel_stack_depth_max;

struct stack_cache {
    vm_offset_t     free;
    unsigned int    count;
};
static struct stack_cache PERCPU_DATA(stack_cache);

/*
 * The next field is at the base of the stack,
 * so the low end is left unsullied.
 */
#define stack_next(stack) \
    (*((vm_offset_t *)((stack) + kernel_stack_size) - 1))
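/*
 * For illustration, the link laid down by stack_next() occupies the
 * topmost word of the stack region:
 *
 *     stack -------------> +---------------------------+  low address
 *                          |    usable stack pages     |
 *                          +---------------------------+
 *     stack_next(stack) -> |    link to next free      |
 *                          +---------------------------+  stack + kernel_stack_size
 *
 * Kernel stacks grow downward from the high address (the stack base),
 * so the link word is the first thing overwritten on reuse and the
 * low end of the region stays clean.
 */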
static inline vm_offset_t
roundup_pow2(vm_offset_t size)
{
    if ((size & (size - 1)) == 0) {
        /* if size is a power of 2 we're good */
        return size;
    }
    return 1ul << flsll(size);
}

static vm_offset_t stack_alloc_internal(void);
static void stack_free_stack(vm_offset_t);

static void
stack_init(void)
{
    uint32_t kernel_stack_pages = atop(KERNEL_STACK_SIZE);

    kernel_stack_size = KERNEL_STACK_SIZE;
    kernel_stack_mask = -KERNEL_STACK_SIZE;

    if (PE_parse_boot_argn("kernel_stack_pages",
        &kernel_stack_pages, sizeof(kernel_stack_pages))) {
        kernel_stack_size = kernel_stack_pages * PAGE_SIZE;
    }

    if (kernel_stack_size < round_page(kernel_stack_size)) {
        panic("stack_init: stack size %p not a multiple of page size %d",
            (void *)kernel_stack_size, PAGE_SIZE);
    }

    stack_addr_mask = roundup_pow2(kernel_stack_size) - 1;
    kernel_stack_mask = ~stack_addr_mask;
}
STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, stack_init);

/*
 * stack_alloc:
 *
 * Allocate a stack for a thread, may
 * block.
 */
static vm_offset_t
stack_alloc_internal(void)
{
    vm_offset_t stack = 0;
    spl_t       s;
    kma_flags_t flags = KMA_NOFAIL | KMA_GUARD_FIRST | KMA_GUARD_LAST |
        KMA_KSTACK | KMA_KOBJECT | KMA_ZERO | KMA_SPRAYQTN;

    s = splsched();
    stack_lock();
    stack_allocs++;
    stack = stack_free_list;
    if (stack != 0) {
        stack_free_list = stack_next(stack);
        stack_free_count--;
    } else {
        if (++stack_total > stack_hiwat) {
            stack_hiwat = stack_total;
        }
        stack_new_count++;
    }
    stack_free_delta--;
    stack_unlock();
    splx(s);

    if (stack == 0) {
        /*
         * Request guard pages on either side of the stack.  Ask
         * kernel_memory_allocate() for two extra pages to account
         * for these.
         */
        kernel_memory_allocate(kernel_map, &stack,
            kernel_stack_size + ptoa(2),
            stack_addr_mask,
            flags,
            VM_KERN_MEMORY_STACK);

        /*
         * The stack address that comes back is the address of the lower
         * guard page.  Skip past it to get the actual stack base address.
         */
        stack += PAGE_SIZE;
    }
    return stack;
}
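/*
 * For illustration, assuming 4K pages and a 16K kernel stack, the
 * kernel_memory_allocate() call above covers kernel_stack_size +
 * ptoa(2) = 24K:
 *
 *     returned address --> [ 4K guard | 16K stack | 4K guard ]
 *                                     ^
 *                                     stack, after the += PAGE_SIZE
 *
 * stack_collect() has to undo the same offset and free the full
 * 24K span when it returns a stack to the VM system.
 */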
void
stack_alloc(
    thread_t thread)
{
    assert(thread->kernel_stack == 0);
    machine_stack_attach(thread, stack_alloc_internal());
}

void
stack_handoff(thread_t from, thread_t to)
{
    assert(from == current_thread());
    machine_stack_handoff(from, to);
}

/*
 * stack_free:
 *
 * Detach and free the stack for a thread.
 */
void
stack_free(
    thread_t thread)
{
    vm_offset_t stack = machine_stack_detach(thread);

    assert(stack);
    if (stack != thread->reserved_stack) {
        stack_free_stack(stack);
    }
}

void
stack_free_reserved(
    thread_t thread)
{
    if (thread->reserved_stack != thread->kernel_stack) {
        stack_free_stack(thread->reserved_stack);
    }
}

static void
stack_free_stack(
    vm_offset_t stack)
{
    struct stack_cache *cache;
    spl_t              s;

#if KASAN_DEBUG
    /* Sanity check - stack should be unpoisoned by now */
    assert(kasan_check_shadow(stack, kernel_stack_size, 0));
#endif

    s = splsched();
    cache = PERCPU_GET(stack_cache);
    if (cache->count < STACK_CACHE_SIZE) {
        stack_next(stack) = cache->free;
        cache->free = stack;
        cache->count++;
    } else {
        stack_lock();
        stack_next(stack) = stack_free_list;
        stack_free_list = stack;
        if (++stack_free_count > stack_free_hiwat) {
            stack_free_hiwat = stack_free_count;
        }
        stack_free_delta++;
        stack_unlock();
    }
    splx(s);
}

/*
 * stack_alloc_try:
 *
 * Non-blocking attempt to allocate a
 * stack for a thread.
 *
 * Returns TRUE on success.
 *
 * Called at splsched.
 */
boolean_t
stack_alloc_try(
    thread_t thread)
{
    struct stack_cache *cache;
    vm_offset_t        stack;

    cache = PERCPU_GET(stack_cache);
    stack = cache->free;
    if (stack != 0) {
        cache->free = stack_next(stack);
        cache->count--;
    } else {
        if (stack_free_list != 0) {
            stack_lock();
            stack = stack_free_list;
            if (stack != 0) {
                stack_free_list = stack_next(stack);
                stack_free_count--;
                stack_free_delta--;
            }
            stack_unlock();
        }
    }

    if (stack != 0 || (stack = thread->reserved_stack) != 0) {
        machine_stack_attach(thread, stack);
        return TRUE;
    }

    return FALSE;
}

static unsigned int stack_collect_tick, last_stack_tick;

/*
 * stack_collect:
 *
 * Free excess kernel stacks, may
 * block.
 */
void
stack_collect(void)
{
    if (stack_collect_tick != last_stack_tick) {
        unsigned int target;
        vm_offset_t  stack;
        spl_t        s;

        s = splsched();
        stack_lock();

        target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
        target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

        while (stack_free_count > target) {
            stack = stack_free_list;
            stack_free_list = stack_next(stack);
            stack_free_count--;
            stack_total--;
            stack_unlock();
            splx(s);

            /*
             * Get the stack base address, then decrement by one page
             * to account for the lower guard page.  Add two extra pages
             * to the size to account for the guard pages on both ends
             * that were originally requested when the stack was allocated
             * back in stack_alloc().
             */
            stack = (vm_offset_t)vm_map_trunc_page(
                stack,
                VM_MAP_PAGE_MASK(kernel_map));
            stack -= PAGE_SIZE;
            kmem_free(kernel_map, stack, kernel_stack_size + ptoa(2),
                KMF_GUARD_FIRST | KMF_GUARD_LAST);
            stack = 0;

            s = splsched();
            stack_lock();

            target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
            target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
        }

        last_stack_tick = stack_collect_tick;

        stack_unlock();
        splx(s);
    }
}

/*
 * compute_stack_target:
 *
 * Computes a new target free list count
 * based on recent alloc / free activity.
 *
 * Limits stack collection to once per
 * computation period.
 */
void
compute_stack_target(
    __unused void *arg)
{
    spl_t s;

    s = splsched();
    stack_lock();

    if (stack_free_target > 5) {
        stack_free_target = (4 * stack_free_target) / 5;
    } else if (stack_free_target > 0) {
        stack_free_target--;
    }

    stack_free_target += (stack_free_delta >= 0)?
        stack_free_delta: -stack_free_delta;

    stack_free_delta = 0;
    stack_collect_tick++;

    stack_unlock();
    splx(s);
}
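/*
 * For illustration: each period the target sheds roughly a fifth of
 * its value, then absorbs the magnitude of the period's churn.  A
 * target of 10 with a delta of -3 becomes (4 * 10) / 5 + 3 = 11; on
 * an idle system (delta 0) the target decays 10, 8, 6, 4, 3 and then
 * steps down by one per period to zero.
 */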
/* OBSOLETE */
void stack_privilege(
    thread_t thread);

void
stack_privilege(
    __unused thread_t thread)
{
    /* OBSOLETE */
}

/*
 * Return info on stack usage for threads in a specific processor set
 */
kern_return_t
processor_set_stack_usage(
    processor_set_t pset,
    unsigned int    *totalp,
    vm_size_t       *spacep,
    vm_size_t       *residentp,
    vm_size_t       *maxusagep,
    vm_offset_t     *maxstackp)
{
#if DEVELOPMENT || DEBUG
    unsigned int total = 0;
    thread_t     thread;

    if (pset == PROCESSOR_SET_NULL || pset != &pset0) {
        return KERN_INVALID_ARGUMENT;
    }

    lck_mtx_lock(&tasks_threads_lock);

    queue_iterate(&threads, thread, thread_t, threads) {
        total += (thread->kernel_stack != 0);
    }

    lck_mtx_unlock(&tasks_threads_lock);

    *totalp = total;
    *residentp = *spacep = total * round_page(kernel_stack_size);
    *maxusagep = 0;
    *maxstackp = 0;
    return KERN_SUCCESS;
#else
#pragma unused(pset, totalp, spacep, residentp, maxusagep, maxstackp)
    return KERN_NOT_SUPPORTED;
#endif /* DEVELOPMENT || DEBUG */
}

__mockable vm_offset_t
min_valid_stack_address(void)
{
    return (vm_offset_t)vm_map_min(kernel_map);
}

__mockable vm_offset_t
max_valid_stack_address(void)
{
    return (vm_offset_t)vm_map_max(kernel_map);
}