/*
 * NOTE(review): the following lines were non-source artifacts from the HTML
 * source viewer this file was captured from (a "Loading..." banner and the
 * viewer's line-number gutter). They are not part of vm_map.h.
 */
/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory map module definitions.
 *
 * Contributors:
 *	avie, dlb, mwyoung
 */

#ifndef	_VM_VM_MAP_H_
#define	_VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <vm/pmap.h>

#ifdef	KERNEL_PRIVATE

#include <sys/cdefs.h>

__BEGIN_DECLS

extern void	vm_map_reference(vm_map_t	map);
extern vm_map_t current_map(void);

/* Setup reserved areas in a new VM map */
extern kern_return_t	vm_map_exec(
	vm_map_t	new_map,
	task_t		task,
	void		*fsroot,
	cpu_type_t	cpu);

__END_DECLS

#ifdef	MACH_KERNEL_PRIVATE

#include <task_swapper.h>
#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/lock.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>

/* Fast path: a task's map hangs directly off the current thread. */
#define current_map_fast()	(current_thread()->map)
#define	current_map()		(current_map_fast())

#include <vm/vm_map_store.h>

/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with
 *				vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				used for inter-map copy operations
 */
typedef struct vm_map_entry	*vm_map_entry_t;
#define VM_MAP_ENTRY_NULL	((vm_map_entry_t) 0)

/*
 *	Type:		vm_map_object_t [internal use only]
 *
 *	Description:
 *		The target of an address mapping, either a virtual
 *		memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
	vm_object_t		vm_object;	/* object object */
	vm_map_t		sub_map;	/* belongs to another map */
} vm_map_object_t;

#define named_entry_lock_init(object)	lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)	lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)	lck_mtx_unlock(&(object)->Lock)

/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions, (read, write)
 *		with which it can be mapped.
 */
struct vm_named_entry {
	decl_lck_mtx_data(,	Lock)		/* Synchronization */
	union {
		vm_object_t	object;		/* object I point to */
		memory_object_t	pager;		/* amo pager port */
		vm_map_t	map;		/* map backing submap */
		vm_map_copy_t	copy;		/* a VM map copy */
	} backing;
	vm_object_offset_t	offset;		/* offset into object */
	vm_object_size_t	size;		/* size of region */
	vm_object_offset_t	data_offset;	/* offset to first byte of data */
	vm_prot_t		protection;	/* access permissions */
	int			ref_count;	/* Number of references */
	unsigned int				/* Is backing.xxx : */
	/* boolean_t */		internal:1,	/* ... an internal object */
	/* boolean_t */		is_sub_map:1,	/* ... a submap? */
	/* boolean_t */		is_pager:1,	/* ... a pager port */
	/* boolean_t */		is_copy:1;	/* ... a VM map copy */
};

/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 */
struct vm_map_links {
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vm_map_offset_t		start;		/* start address */
	vm_map_offset_t		end;		/* end address */
};

struct vm_map_entry {
	struct vm_map_links	links;		/* links to other entries */
#define vme_prev		links.prev
#define vme_next		links.next
#define vme_start		links.start
#define vme_end			links.end

	struct vm_map_store	store;
	union vm_map_object	object;		/* object I point to */
	vm_object_offset_t	offset;		/* offset into object */
	unsigned int
	/* boolean_t */		is_shared:1,	/* region is shared */
	/* boolean_t */		is_sub_map:1,	/* Is "object" a submap? */
	/* boolean_t */		in_transition:1, /* Entry being changed */
	/* boolean_t */		needs_wakeup:1,	/* Waiters on in_transition */
	/* vm_behavior_t */	behavior:2,	/* user paging behavior hint */
	/* behavior is not defined for submap type */
	/* boolean_t */		needs_copy:1,	/* object need to be copied? */
	/* Only in task maps: */
	/* vm_prot_t */		protection:3,	/* protection code */
	/* vm_prot_t */		max_protection:3, /* maximum protection */
	/* vm_inherit_t */	inheritance:2,	/* inheritance */
	/* boolean_t */		use_pmap:1,	/* nested pmaps */
	/*
	 * IMPORTANT:
	 * The "alias" field can be updated while holding the VM map lock
	 * "shared".  It's OK as long as it's the only field that can be
	 * updated without the VM map "exclusive" lock.
	 */
	/* unsigned char */	alias:8,	/* user alias */
	/* boolean_t */		no_cache:1,	/* should new pages be cached? */
	/* boolean_t */		permanent:1,	/* mapping can not be removed */
	/* boolean_t */		superpage_size:1, /* use superpages of a certain size */
	/* boolean_t */		map_aligned:1,	/* align to map's page size */
	/* boolean_t */		zero_wired_pages:1, /* zero out the wired pages of
						     * this entry if it is being
						     * deleted without unwiring them */
	/* boolean_t */		used_for_jit:1,
	/* boolean_t */		from_reserved_zone:1, /* Allocated from
						       * kernel reserved zone */
				__unused_bits:1;
	unsigned short		wired_count;	/* can be paged if = 0 */
	unsigned short		user_wired_count; /* for vm_wire */
#if	DEBUG
#define	MAP_ENTRY_CREATION_DEBUG (1)
#define	MAP_ENTRY_INSERTION_DEBUG (1)
#endif
#if	MAP_ENTRY_CREATION_DEBUG
	struct vm_map_header	*vme_creation_maphdr;
	uintptr_t		vme_creation_bt[16];
#endif
#if	MAP_ENTRY_INSERTION_DEBUG
	uintptr_t		vme_insertion_bt[16];
#endif
};

/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)

/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT		65535

/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 */
struct vm_map_header {
	struct vm_map_links	links;		/* first, last, min, max */
	int			nentries;	/* Number of entries */
	boolean_t		entries_pageable;
						/* are map entries pageable? */
	vm_map_offset_t		highest_entry_end_addr;	/* The ending address of the
							 * highest allocated vm_entry_t */
#ifdef VM_MAP_STORE_USE_RB
	struct rb_head	rb_head_store;
#endif
	int			page_shift;	/* page shift */
};

#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)

/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 */
struct _vm_map {
	lock_t			lock;		/* uni- and smp-lock */
	struct vm_map_header	hdr;		/* Map entry header */
#define min_offset		hdr.links.start	/* start of range */
#define max_offset		hdr.links.end	/* end of range */
#define highest_entry_end	hdr.highest_entry_end_addr
	pmap_t			pmap;		/* Physical map */
	vm_map_size_t		size;		/* virtual size */
	vm_map_size_t		user_wire_limit; /* rlimit on user locked memory */
	vm_map_size_t		user_wire_size;	/* current size of user locked memory in this map */
	int			ref_count;	/* Reference count */
#if	TASK_SWAPPER
	int			res_count;	/* Residence count (swap) */
	int			sw_state;	/* Swap state */
#endif	/* TASK_SWAPPER */
	decl_lck_mtx_data(,	s_lock)		/* Lock ref, res fields */
	lck_mtx_ext_t		s_lock_ext;
	vm_map_entry_t		hint;		/* hint for quick lookups */
	vm_map_entry_t		first_free;	/* First free space hint */
	unsigned int
	/* boolean_t */		wait_for_space:1, /* Should callers wait for space? */
	/* boolean_t */		wiring_required:1, /* All memory wired? */
	/* boolean_t */		no_zero_fill:1,	/* No zero fill absent pages */
	/* boolean_t */		mapped_in_other_pmaps:1, /* has this submap been mapped in
							  * maps that use a different pmap */
	/* boolean_t */		switch_protect:1, /* Protect map from write faults while switched */
	/* boolean_t */		disable_vmentry_reuse:1, /* All vm entries should keep using newer
							  * and higher addresses in the map */
	/* boolean_t */		map_disallow_data_exec:1, /* Disallow execution from data pages
							   * on exec-permissive architectures */
	/* reserved */		pad:25;
	unsigned int		timestamp;	/* Version number */
	unsigned int		color_rr;	/* next color (not protected by a lock) */
#if CONFIG_FREEZE
	void			*default_freezer_handle;
#endif
	boolean_t		jit_entry_exists;
};

#define vm_map_to_entry(map)	((struct vm_map_entry *) &(map)->hdr.links)
#define vm_map_first_entry(map)	((map)->hdr.links.next)
#define vm_map_last_entry(map)	((map)->hdr.links.prev)

#if	TASK_SWAPPER
/*
 * VM map swap states.  There are no transition states.
 */
#define MAP_SW_IN	1	/* map is swapped in; residence count > 0 */
#define MAP_SW_OUT	2	/* map is out (res_count == 0) */
#endif	/* TASK_SWAPPER */

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int	main_timestamp;
} vm_map_version_t;

/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *		There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header links
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a single vm object.  This was used
 *		primarily in the pageout path - but is not currently used
 *		except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *		The third format is a kernel buffer copy object - for data
 *		small enough that physical copies were the most efficient
 *		method.
 */
struct vm_map_copy {
	int			type;
#define VM_MAP_COPY_ENTRY_LIST		1
#define VM_MAP_COPY_OBJECT		2
#define VM_MAP_COPY_KERNEL_BUFFER	3
	vm_object_offset_t	offset;
	vm_map_size_t		size;
	union {
		struct vm_map_header	hdr;	/* ENTRY_LIST */
		vm_object_t		object;	/* OBJECT */
		struct {
			void		*kdata;	      /* KERNEL_BUFFER */
			vm_size_t	kalloc_size;  /* size of this copy_t */
		} c_k;
	} c_u;
};

#define cpy_hdr			c_u.hdr

#define cpy_object		c_u.object
#define cpy_kdata		c_u.c_k.kdata
#define cpy_kalloc_size		c_u.c_k.kalloc_size

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)

/*
 *	Useful macros for entry list copy objects
 */
#define vm_map_copy_to_entry(copy)		\
		((struct vm_map_entry *) &(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)		\
		((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)		\
		((copy)->cpy_hdr.links.prev)

/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *		When multiple maps are to be locked, order by map address.
 *		(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)						\
	((map)->timestamp = 0 ,						\
	 lock_init(&(map)->lock, TRUE, 0, 0))

#define vm_map_lock(map)		lock_write(&(map)->lock)
#define vm_map_unlock(map)						\
		((map)->timestamp++ ,	lock_write_done(&(map)->lock))
#define vm_map_lock_read(map)		lock_read(&(map)->lock)
#define vm_map_unlock_read(map)		lock_read_done(&(map)->lock)
#define vm_map_lock_write_to_read(map)					\
		((map)->timestamp++ ,	lock_write_to_read(&(map)->lock))
/* lock_read_to_write() returns FALSE on failure.  Macro evaluates to
 * zero on success and non-zero value on failure.
 */
#define vm_map_lock_read_to_write(map) (lock_read_to_write(&(map)->lock) != TRUE)

/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void		vm_map_init(void);

extern void		vm_kernel_reserved_entry_init(void);

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
				vm_map_t		map,
				vm_map_address_t	*address,	/* OUT */
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				vm_map_entry_t		*o_entry);	/* OUT */

extern void vm_map_clip_start(
	vm_map_t	map,
	vm_map_entry_t	entry,
	vm_map_offset_t	endaddr);
extern void vm_map_clip_end(
	vm_map_t	map,
	vm_map_entry_t	entry,
	vm_map_offset_t	endaddr);
extern boolean_t vm_map_entry_should_cow_for_true_share(
	vm_map_entry_t	entry);

/* Lookup map entry containing or the specified address in the given map */
extern boolean_t	vm_map_lookup_entry(
				vm_map_t		map,
				vm_map_address_t	address,
				vm_map_entry_t		*entry);	/* OUT */

extern void		vm_map_copy_remap(
				vm_map_t		map,
				vm_map_entry_t		where,
				vm_map_copy_t		copy,
				vm_map_offset_t		adjustment,
				vm_prot_t		cur_prot,
				vm_prot_t		max_prot,
				vm_inherit_t		inheritance);

/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t	vm_map_lookup_locked(
				vm_map_t		*var_map,	/* IN/OUT */
				vm_map_address_t	vaddr,
				vm_prot_t		fault_type,
				int			object_lock_type,
				vm_map_version_t	*out_version,	/* OUT */
				vm_object_t		*object,	/* OUT */
				vm_object_offset_t	*offset,	/* OUT */
				vm_prot_t		*out_prot,	/* OUT */
				boolean_t		*wired,		/* OUT */
				vm_object_fault_info_t	fault_info,	/* OUT */
				vm_map_t		*real_map);	/* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t	vm_map_verify(
				vm_map_t		map,
				vm_map_version_t	*version);	/* REF */

extern vm_map_entry_t	vm_map_entry_insert(
				vm_map_t		map,
				vm_map_entry_t		insp_entry,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_object_t		object,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				boolean_t		is_shared,
				boolean_t		in_transition,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_behavior_t		behavior,
				vm_inherit_t		inheritance,
				unsigned		wired_count,
				boolean_t		no_cache,
				boolean_t		permanent,
				unsigned int		superpage_size,
				boolean_t		clear_map_aligned);

/*
 *	Functions implemented as macros
 */
#define		vm_map_min(map)	((map)->min_offset)
						/* Lowest valid address in
						 * a map */

#define		vm_map_max(map)	((map)->max_offset)
						/* Highest valid address */

#define		vm_map_pmap(map)	((map)->pmap)
						/* Physical map associated
						 * with this address map */

#define		vm_map_verify_done(map, version)    vm_map_unlock_read(map)
						/* Operation that required
						 * a verified lookup is
						 * now complete */

/*
 *	Macros/functions for map residence counts and swapin/out of vm maps
 */
#if	TASK_SWAPPER

#if	MACH_ASSERT
/* Gain a reference to an existing map */
extern void		vm_map_reference(
				vm_map_t	map);
/* Lose a residence count */
extern void		vm_map_res_deallocate(
				vm_map_t	map);
/* Gain a residence count on a map */
extern void		vm_map_res_reference(
				vm_map_t	map);
/* Gain reference & residence counts to possibly swapped-out map */
extern void		vm_map_reference_swap(
				vm_map_t	map);

#else	/* MACH_ASSERT */

#define vm_map_reference(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);		\
	if (Map) {				\
		lck_mtx_lock(&Map->s_lock);	\
		Map->res_count++;		\
		Map->ref_count++;		\
		lck_mtx_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_res_reference(map)		\
MACRO_BEGIN					\
	vm_map_t Lmap = (map);		\
	if (Lmap->res_count == 0) {		\
		lck_mtx_unlock(&Lmap->s_lock);\
		vm_map_lock(Lmap);		\
		vm_map_swapin(Lmap);		\
		lck_mtx_lock(&Lmap->s_lock);	\
		++Lmap->res_count;		\
		vm_map_unlock(Lmap);		\
	} else					\
		++Lmap->res_count;		\
MACRO_END

#define vm_map_res_deallocate(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);		\
	if (--Map->res_count == 0) {	\
		lck_mtx_unlock(&Map->s_lock);	\
		vm_map_lock(Map);		\
		vm_map_swapout(Map);		\
		vm_map_unlock(Map);		\
		lck_mtx_lock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)	\
MACRO_BEGIN				\
	vm_map_t Map = (map);		\
	lck_mtx_lock(&Map->s_lock);	\
	++Map->ref_count;		\
	vm_map_res_reference(Map);	\
	lck_mtx_unlock(&Map->s_lock);	\
MACRO_END
#endif 	/* MACH_ASSERT */

extern void		vm_map_swapin(
				vm_map_t	map);

extern void		vm_map_swapout(
				vm_map_t	map);

#else	/* TASK_SWAPPER */

#define vm_map_reference(map)			\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (Map) {				\
		lck_mtx_lock(&Map->s_lock);	\
		Map->ref_count++;		\
		lck_mtx_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)	vm_map_reference(map)
#define vm_map_res_reference(map)
#define vm_map_res_deallocate(map)

#endif	/* TASK_SWAPPER */

/*
 *	Submap object.  Must be used to create memory to be put
 *	in a submap by vm_map_submap.
 */
extern vm_object_t	vm_submap_object;

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)    	\
	((map)->timestamp++ ,				\
	 thread_sleep_lock_write((event_t)&(map)->hdr,	\
			 &(map)->lock, interruptible))


#define vm_map_entry_wakeup(map)        \
	thread_wakeup((event_t)(&(map)->hdr))


#define	vm_map_ref_fast(map)			\
	MACRO_BEGIN					\
	lck_mtx_lock(&map->s_lock);			\
	map->ref_count++;				\
	vm_map_res_reference(map);			\
	lck_mtx_unlock(&map->s_lock);			\
	MACRO_END

#define	vm_map_dealloc_fast(map)		\
	MACRO_BEGIN					\
	register int c;					\
							\
	lck_mtx_lock(&map->s_lock);			\
	c = --map->ref_count;				\
	if (c > 0)					\
		vm_map_res_deallocate(map);		\
	lck_mtx_unlock(&map->s_lock);			\
	if (c == 0)					\
		vm_map_destroy(map);			\
	MACRO_END


/* simplify map entries */
extern void		vm_map_simplify_entry(
	vm_map_t	map,
	vm_map_entry_t	this_entry);
extern void		vm_map_simplify(
				vm_map_t		map,
				vm_map_offset_t		start);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t	vm_map_copy_copy(
				vm_map_copy_t		copy);

/* Create a copy object from an object. */
extern kern_return_t	vm_map_copyin_object(
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				vm_map_copy_t		*copy_result); /* OUT */

extern kern_return_t	vm_map_random_address_for_size(
				vm_map_t	map,
				vm_map_offset_t	*address,
				vm_map_size_t	size);

/* Enter a mapping */
extern kern_return_t	vm_map_enter(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				vm_object_t		object,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);

/* XXX should go away - replaced with regular enter of contig object */
extern  kern_return_t	vm_map_enter_cpm(
				vm_map_t		map,
				vm_map_address_t	*addr,
				vm_map_size_t		size,
				int			flags);

extern kern_return_t vm_map_remap(
				vm_map_t		target_map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				vm_map_t		src_map,
				vm_map_offset_t		memory_address,
				boolean_t		copy,
				vm_prot_t		*cur_protection,
				vm_prot_t		*max_protection,
				vm_inherit_t		inheritance);


/*
 * Read and write from a kernel buffer to a specified map.
 */
extern	kern_return_t	vm_map_write_user(
				vm_map_t		map,
				void			*src_p,
				vm_map_offset_t		dst_addr,
				vm_size_t		size);

extern	kern_return_t	vm_map_read_user(
				vm_map_t		map,
				vm_map_offset_t		src_addr,
				void			*dst_p,
				vm_size_t		size);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t		vm_map_fork(
				ledger_t		ledger,
				vm_map_t		old_map);

/* Change inheritance */
extern kern_return_t	vm_map_inherit(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_inherit_t		new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t	vm_map_machine_attribute(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_machine_attribute_t	attribute,
				vm_machine_attribute_val_t* value); /* IN/OUT */

extern kern_return_t	vm_map_msync(
				vm_map_t		map,
				vm_map_address_t	address,
				vm_map_size_t		size,
				vm_sync_t		sync_flags);

/* Set paging behavior */
extern kern_return_t	vm_map_behavior_set(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_behavior_t		new_behavior);

extern kern_return_t vm_map_purgable_control(
				vm_map_t		map,
				vm_map_offset_t		address,
				vm_purgable_t		control,
				int			*state);

extern kern_return_t vm_map_region(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		*size,
				vm_region_flavor_t	flavor,
				vm_region_info_t	info,
				mach_msg_type_number_t	*count,
				mach_port_t		*object_name);

extern kern_return_t vm_map_region_recurse_64(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		*size,
				natural_t		*nesting_depth,
				vm_region_submap_info_64_t info,
				mach_msg_type_number_t	*count);

extern kern_return_t vm_map_page_query_internal(
				vm_map_t		map,
				vm_map_offset_t		offset,
				int			*disposition,
				int			*ref_count);

extern kern_return_t vm_map_query_volatile(
				vm_map_t	map,
				mach_vm_size_t	*volatile_virtual_size_p,
				mach_vm_size_t	*volatile_resident_size_p,
				mach_vm_size_t	*volatile_pmap_size_p);

extern kern_return_t	vm_map_submap(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_map_t		submap,
				vm_map_offset_t		offset,
				boolean_t		use_pmap);

extern void vm_map_submap_pmap_clean(
	vm_map_t	map,
	vm_map_offset_t	start,
	vm_map_offset_t	end,
	vm_map_t	sub_map,
	vm_map_offset_t	offset);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
	ipc_port_t	port);

/* Convert from a port to a vm_object */
extern vm_object_t convert_port_entry_to_object(
	ipc_port_t	port);

extern kern_return_t vm_map_set_cache_attr(
	vm_map_t	map,
	vm_map_offset_t	va);


/* definitions related to overriding the NX behavior */

#define VM_ABI_32	0x1
#define VM_ABI_64	0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/* Create an empty map */
extern vm_map_t		vm_map_create(
				pmap_t			pmap,
				vm_map_offset_t 	min_off,
				vm_map_offset_t 	max_off,
				boolean_t		pageable);

/* Get rid of a map */
extern void		vm_map_destroy(
				vm_map_t		map,
				int			flags);

/* Lose a reference */
extern void		vm_map_deallocate(
				vm_map_t		map);

extern vm_map_t		vm_map_switch(
				vm_map_t		map);

/* Change protection */
extern kern_return_t	vm_map_protect(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_prot_t		new_prot,
				boolean_t		set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_prot_t		protection);

/* wire down a region */
extern kern_return_t	vm_map_wire(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_prot_t		access_type,
				boolean_t		user_wire);

/* unwire a region */
extern kern_return_t	vm_map_unwire(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				boolean_t		user_wire);

/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				ipc_port_t		port,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object_control(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				memory_object_control_t	control,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);

/* Deallocate a region */
extern kern_return_t	vm_map_remove(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				boolean_t		flags);

/* Discard a copy without using it */
extern void		vm_map_copy_discard(
				vm_map_copy_t		copy);

/* Overwrite existing memory with a copy */
extern kern_return_t	vm_map_copy_overwrite(
				vm_map_t		dst_map,
				vm_map_address_t	dst_addr,
				vm_map_copy_t		copy,
				boolean_t		interruptible);

/* Place a copy into a map */
extern kern_return_t	vm_map_copyout(
				vm_map_t		dst_map,
				vm_map_address_t	*dst_addr,	/* OUT */
				vm_map_copy_t		copy);

extern kern_return_t vm_map_copyout_internal(
	vm_map_t		dst_map,
	vm_map_address_t	*dst_addr,	/* OUT */
	vm_map_copy_t		copy,
	boolean_t		consume_on_success,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance);

extern kern_return_t	vm_map_copyin(
				vm_map_t			src_map,
				vm_map_address_t	src_addr,
				vm_map_size_t		len,
				boolean_t			src_destroy,
				vm_map_copy_t		*copy_result);	/* OUT */

extern kern_return_t	vm_map_copyin_common(
				vm_map_t		src_map,
				vm_map_address_t	src_addr,
				vm_map_size_t		len,
				boolean_t		src_destroy,
				boolean_t		src_volatile,
				vm_map_copy_t		*copy_result,	/* OUT */
				boolean_t		use_maxprot);

extern kern_return_t	vm_map_copy_extract(
	vm_map_t		src_map,
	vm_map_address_t	src_addr,
	vm_map_size_t		len,
	vm_map_copy_t		*copy_result,	/* OUT */
	vm_prot_t		*cur_prot,	/* OUT */
	vm_prot_t		*max_prot);


extern void		vm_map_disable_NX(
			        vm_map_t		map);

extern void		vm_map_disallow_data_exec(
			        vm_map_t		map);

extern void		vm_map_set_64bit(
			        vm_map_t		map);

extern void		vm_map_set_32bit(
			        vm_map_t		map);

extern boolean_t	vm_map_has_hard_pagezero(
		       	        vm_map_t		map,
				vm_map_offset_t		pagezero_size);

extern boolean_t	vm_map_is_64bit(
			        vm_map_t		map);
#define vm_map_has_4GB_pagezero(map)	vm_map_has_hard_pagezero(map, (vm_map_offset_t)0x100000000ULL)


extern void		vm_map_set_4GB_pagezero(
			        vm_map_t		map);

extern void		vm_map_clear_4GB_pagezero(
			        vm_map_t		map);

extern kern_return_t	vm_map_raise_max_offset(
	vm_map_t	map,
	vm_map_offset_t	new_max_offset);

extern kern_return_t	vm_map_raise_min_offset(
	vm_map_t	map,
	vm_map_offset_t	new_min_offset);

extern vm_map_offset_t	vm_compute_max_offset(
				unsigned	is64);

extern uint64_t 	vm_map_get_max_aslr_slide_pages(
				vm_map_t map);

extern void		vm_map_set_user_wire_limit(
				vm_map_t	map,
				vm_size_t	limit);

extern void vm_map_switch_protect(
				vm_map_t	map,
				boolean_t	val);

extern void vm_map_iokit_mapped_region(
				vm_map_t	map,
				vm_size_t	bytes);

extern void vm_map_iokit_unmapped_region(
				vm_map_t	map,
				vm_size_t	bytes);


extern boolean_t first_free_is_valid(vm_map_t);

extern int 		vm_map_page_shift(
				vm_map_t 	map);

extern int		vm_map_page_mask(
				vm_map_t	map);

extern int		vm_map_page_size(
				vm_map_t	map);

extern vm_map_offset_t	vm_map_round_page_mask(
				vm_map_offset_t	offset,
				vm_map_offset_t	mask);

extern vm_map_offset_t	vm_map_trunc_page_mask(
				vm_map_offset_t	offset,
				vm_map_offset_t	mask);

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_page_info(
	vm_map_t		map,
	vm_map_offset_t		offset,
	vm_page_info_flavor_t	flavor,
	vm_page_info_t		info,
	mach_msg_type_number_t	*count);
#endif /* XNU_KERNEL_PRIVATE */


#ifdef	MACH_KERNEL_PRIVATE

/*
 *	Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 *	usual form; it handles a copyin based on the current protection
 *	(current protection == VM_PROT_NONE) is a failure.
 *	vm_map_copyin_maxprot handles a copyin based on maximum possible
 *	access.  The difference is that a region with no current access
 *	BUT possible maximum access is rejected by vm_map_copyin(), but
 *	returned by vm_map_copyin_maxprot.
 */
#define	vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)	\
		vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
					FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map, \
			      src_addr, len, src_destroy, copy_result) \
		vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
					FALSE, copy_result, TRUE)


/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_PAGE_SHIFT(map) ((map) ? (map)->hdr.page_shift : PAGE_SHIFT)
#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x,pgmask) (((x) & (pgmask)) == 0)

#endif /* MACH_KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Flags for vm_map_remove() and vm_map_delete()
 */
#define	VM_MAP_NO_FLAGS	  		0x0
#define	VM_MAP_REMOVE_KUNWIRE	  	0x1
#define	VM_MAP_REMOVE_INTERRUPTIBLE  	0x2
#define	VM_MAP_REMOVE_WAIT_FOR_KWIRE 	0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES	0x8
#define VM_MAP_REMOVE_NO_PMAP_CLEANUP	0x10

/* Support for UPLs from vm_maps */

extern kern_return_t vm_map_get_upl(
				vm_map_t		target_map,
				vm_map_offset_t		map_offset,
				upl_size_t		*size,
				upl_t			*upl,
				upl_page_info_array_t	page_info,
				unsigned int		*page_infoCnt,
				int			*flags,
				int			force_data_sync);

#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t vm_map_sign(vm_map_t map,
				 vm_map_offset_t start,
				 vm_map_offset_t end);
#endif

#if CONFIG_FREEZE
void	vm_map_freeze_thaw_init(void);
/* NOTE(review): source chunk is truncated here, mid-declaration; the
 * remainder of this declaration (and the closing #endif guards) lie
 * outside the captured region. */
void
vm_map_freeze_thaw(void); void vm_map_demand_fault(void); extern kern_return_t vm_map_freeze_walk( vm_map_t map, unsigned int *purgeable_count, unsigned int *wired_count, unsigned int *clean_count, unsigned int *dirty_count, unsigned int dirty_budget, boolean_t *has_shared); extern kern_return_t vm_map_freeze( vm_map_t map, unsigned int *purgeable_count, unsigned int *wired_count, unsigned int *clean_count, unsigned int *dirty_count, unsigned int dirty_budget, boolean_t *has_shared); extern kern_return_t vm_map_thaw( vm_map_t map); #endif __END_DECLS #endif /* KERNEL_PRIVATE */ #endif /* _VM_VM_MAP_H_ */ |