/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory map module definitions.
 *
 * Contributors:
 *	avie, dlb, mwyoung
 */

#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <mach/sdt.h>
#include <vm/pmap.h>
#include <os/overflow.h>

#ifdef  KERNEL_PRIVATE

#include <sys/cdefs.h>

#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_protos.h>
#endif /* XNU_KERNEL_PRIVATE */

__BEGIN_DECLS

extern void     vm_map_reference(vm_map_t       map);
extern vm_map_t current_map(void);

/* Set up reserved areas in a new VM map */
extern kern_return_t    vm_map_exec(
	vm_map_t                new_map,
	task_t                  task,
	boolean_t               is64bit,
	void                    *fsroot,
	cpu_type_t              cpu,
	cpu_subtype_t           cpu_subtype,
	boolean_t               reslide,
	boolean_t               is_driverkit);

__END_DECLS

#ifdef  MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>
#include <os/refcnt.h>

#define current_map_fast()      (current_thread()->map)
#define current_map()           (current_map_fast())

#include <vm/vm_map_store.h>


/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				 used for inter-map copy operations
 */
typedef struct vm_map_entry     *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL       ((vm_map_entry_t) NULL)


/*
 *	Type:		vm_map_object_t [internal use only]
 *
 *	Description:
 *		The target of an address mapping, either a virtual
 *		memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
	vm_object_t             vmo_object;     /* a VM object */
	vm_map_t                vmo_submap;     /* belongs to another map */
} vm_map_object_t;

/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions (read, write)
 *		with which it can be mapped.
 *
 */

struct vm_named_entry {
	union {
		vm_map_t        map;            /* map backing submap */
		vm_map_copy_t   copy;           /* a VM map copy */
	} backing;
	vm_object_offset_t      offset;         /* offset into object */
	vm_object_size_t        size;           /* size of region */
	vm_object_offset_t      data_offset;    /* offset to first byte of data */
	unsigned int                            /* Is backing.xxx : */
	/* vm_prot_t */ protection:4,           /* access permissions */
	/* boolean_t */ is_object:1,            /* ... a VM object (wrapped in a VM map copy) */
	/* boolean_t */ internal:1,             /* ... an internal object */
	/* boolean_t */ is_sub_map:1,           /* ... a submap? */
	/* boolean_t */ is_copy:1;              /* ... a VM map copy */
#if VM_NAMED_ENTRY_DEBUG
	uint32_t                named_entry_bt; /* btref_t */
#endif /* VM_NAMED_ENTRY_DEBUG */
};

/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 */

struct vm_map_links {
	struct vm_map_entry     *prev;          /* previous entry */
	struct vm_map_entry     *next;          /* next entry */
	vm_map_offset_t         start;          /* start address */
	vm_map_offset_t         end;            /* end address */
};

/*
 * Bit 3 of the protection and max_protection bitfields in a vm_map_entry
 * does not correspond to bit 3 of a vm_prot_t, so these macros provide a means
 * to convert between the "packed" representation in the vm_map_entry's fields
 * and the equivalent bits defined in vm_prot_t.
 */
#if defined(__x86_64__)
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY | VM_PROT_UEXEC)
#else
#define VM_VALID_VMPROTECT_FLAGS        (VM_PROT_ALL | VM_PROT_COPY)
#endif

/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *	For an "iokit_mapped" entry, we add the size of the entry to the
 *	footprint when the entry is entered into the map and we subtract that
 *	size when the entry is removed.  No other accounting should take place.
 *	"use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *	This indicates if we should ask the pmap layer to account for pages
 *	in this mapping.  If FALSE, we expect that another form of accounting
 *	is being used (e.g. "iokit_mapped" or the explicit accounting of
 *	non-volatile purgable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *	anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *	footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *	tell pmap NOT to account for pages being pmap_enter()'d from this
 *	mapping (i.e. use "alternate accounting")
 * else
 *	pmap will account for pages being pmap_enter()'d from this mapping
 *	as it sees fit (only if anonymous, etc...)
 */

struct vm_map_entry {
	struct vm_map_links     links;          /* links to other entries */
#define vme_prev                links.prev
#define vme_next                links.next
#define vme_start               links.start
#define vme_end                 links.end

	struct vm_map_store     store;
	union vm_map_object     vme_object;     /* object I point to */
	vm_object_offset_t      vme_offset;     /* offset into object */

	unsigned int
	/* boolean_t */ is_shared:1,    /* region is shared */
	/* boolean_t */ is_sub_map:1,   /* Is "object" a submap? */
	/* boolean_t */ in_transition:1, /* Entry being changed */
	/* boolean_t */ needs_wakeup:1, /* Waiters on in_transition */
	/* vm_behavior_t */ behavior:2, /* user paging behavior hint */
	/* behavior is not defined for submap type */
	/* boolean_t */ needs_copy:1,   /* object need to be copied? */

	/* Only in task maps: */
	/* vm_prot_t-like */ protection:4,   /* protection code, bit3=UEXEC */
	/* vm_prot_t-like */ max_protection:4, /* maximum protection, bit3=UEXEC */
	/* vm_inherit_t */ inheritance:2, /* inheritance */
	/* boolean_t */ use_pmap:1,     /*
	                                 * use_pmap is overloaded:
	                                 * if "is_sub_map":
	                                 *      use a nested pmap?
	                                 * else (i.e. if object):
	                                 *      use pmap accounting
	                                 *      for footprint?
	                                 */
	/* boolean_t */ no_cache:1,     /* new pages should not be cached */
	/* boolean_t */ permanent:1,    /* mapping cannot be removed */
	/* boolean_t */ superpage_size:1, /* use superpages of a certain size */
	/* boolean_t */ map_aligned:1,  /* align to map's page size */
	/* boolean_t */ zero_wired_pages:1, /* zero out the wired pages of
	                                     * this entry if it is being deleted
	                                     * without unwiring them */
	/* boolean_t */ used_for_jit:1,
	/* boolean_t */ pmap_cs_associated:1, /* pmap_cs will validate */

	/* iokit accounting: use the virtual size rather than resident size: */
	/* boolean_t */ iokit_acct:1,
	/* boolean_t */ vme_resilient_codesign:1,
	/* boolean_t */ vme_resilient_media:1,
	/* boolean_t */ vme_atomic:1, /* entry cannot be split/coalesced */
	/* boolean_t */ vme_no_copy_on_read:1,
	/* boolean_t */ translated_allow_execute:1, /* execute in translated processes */
	/* boolean_t */ vme_kernel_object:1; /* vme_object is kernel_object */

	unsigned short          wired_count;    /* can be paged if = 0 */
	unsigned short          user_wired_count; /* for vm_wire */
#if     DEBUG
#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif
#if     MAP_ENTRY_CREATION_DEBUG
	struct vm_map_header    *vme_creation_maphdr;
	uint32_t                vme_creation_bt; /* btref_t */
#endif
#if     MAP_ENTRY_INSERTION_DEBUG
	uint32_t                vme_insertion_bt; /* btref_t */
	vm_map_offset_t         vme_start_original;
	vm_map_offset_t         vme_end_original;
#endif
};

#define VME_SUBMAP_PTR(entry)                   \
	(&((entry)->vme_object.vmo_submap))
#define VME_SUBMAP(entry)                                       \
	((vm_map_t)((uintptr_t)0 + *VME_SUBMAP_PTR(entry)))
#define VME_OBJECT(entry)                                       \
	((entry)->vme_kernel_object ? \
	        kernel_object : \
	        ((entry)->vme_object.vmo_object))
#define VME_OFFSET(entry)                       \
	((entry)->vme_offset & (vm_object_offset_t)~FOURK_PAGE_MASK)
#define VME_ALIAS_MASK (FOURK_PAGE_MASK)
#define VME_ALIAS(entry)                                        \
	((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK))

static inline void
VME_OBJECT_SET(
	vm_map_entry_t entry,
	vm_object_t object)
{
	if (object == kernel_object) {
		entry->vme_kernel_object = TRUE;
		entry->vme_object.vmo_object = VM_OBJECT_NULL;
	} else {
		entry->vme_kernel_object = FALSE;
		entry->vme_object.vmo_object = object;
	}
	if (object != VM_OBJECT_NULL && !object->internal) {
		entry->vme_resilient_media = FALSE;
	}
	entry->vme_resilient_codesign = FALSE;
	entry->used_for_jit = FALSE;
}
static inline void
VME_SUBMAP_SET(
	vm_map_entry_t entry,
	vm_map_t submap)
{
	entry->vme_object.vmo_submap = submap;
}
static inline void
VME_OFFSET_SET(
	vm_map_entry_t entry,
	vm_object_offset_t offset)
{
	unsigned int alias;
	alias = VME_ALIAS(entry);
	assert((offset & FOURK_PAGE_MASK) == 0);
	entry->vme_offset = offset | alias;
}
/*
 * IMPORTANT:
 * The "alias" field can be updated while holding the VM map lock
 * "shared".  It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
static inline void
VME_ALIAS_SET(
	vm_map_entry_t entry,
	int alias)
{
	vm_object_offset_t offset;
	offset = VME_OFFSET(entry);
	entry->vme_offset = offset | ((unsigned int)alias & VME_ALIAS_MASK);
}
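
/*
 * Illustrative sketch (values are hypothetical): the low bits of
 * "vme_offset" (VME_ALIAS_MASK == FOURK_PAGE_MASK) carry the user tag
 * ("alias") while the 4K-aligned upper bits carry the object offset:
 *
 *	entry->vme_offset == 0x45007:
 *		VME_OFFSET(entry) == 0x45000	(low 12 bits masked off)
 *		VME_ALIAS(entry)  == 0x7	(the packed tag)
 *
 * This is why VME_OFFSET_SET() asserts 4K alignment and re-ORs the
 * previously stored alias, and why VME_ALIAS_SET() preserves the offset.
 */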

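/*
 * Push a shadow object in front of this entry's VM object if needed for
 * copy-on-write; if vm_object_shadow() replaced the object, record the
 * new object (and the possibly-rebased offset) back into the entry.
 */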
static inline void
VME_OBJECT_SHADOW(
	vm_map_entry_t entry,
	vm_object_size_t length)
{
	vm_object_t object;
	vm_object_offset_t offset;

	object = VME_OBJECT(entry);
	offset = VME_OFFSET(entry);
	vm_object_shadow(&object, &offset, length);
	if (object != VME_OBJECT(entry)) {
		VME_OBJECT_SET(entry, object);
		entry->use_pmap = TRUE;
	}
	if (offset != VME_OFFSET(entry)) {
		VME_OFFSET_SET(entry, offset);
	}
}


/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
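
/*
 * Worked example (a sketch; assumes 4K base pages and
 * SUPERPAGE_NBASEPAGES == 512, i.e. 2MB superpages as on x86_64):
 *
 *	SUPERPAGE_ROUND_DOWN(0x240001000) == 0x240000000
 *	SUPERPAGE_ROUND_UP(0x240001000)  == 0x240200000
 */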

/*
 * The wired count fields are unsigned short.  MAX_WIRE_COUNT caps them
 * to safeguard against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT          65535



/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 */


struct vm_map_header {
	struct vm_map_links     links;          /* first, last, min, max */
	int                     nentries;       /* Number of entries */
	uint16_t                page_shift;     /* page shift */
	unsigned int
	/* boolean_t */ entries_pageable : 1,   /* are map entries pageable? */
	/* reserved  */ __padding : 15;
#ifdef VM_MAP_STORE_USE_RB
	struct rb_head  rb_head_store;
#endif
};

#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)

/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 */
struct _vm_map {
	lck_rw_t                lock;           /* map lock */
	struct vm_map_header    hdr;            /* Map entry header */
#define min_offset              hdr.links.start /* start of range */
#define max_offset              hdr.links.end   /* end of range */
	pmap_t                  XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap;           /* Physical map */
	vm_map_size_t           size;           /* virtual size */
	uint64_t                size_limit;     /* rlimit on address space size */
	uint64_t                data_limit;     /* rlimit on data size */
	vm_map_size_t           user_wire_limit;/* rlimit on user locked memory */
	vm_map_size_t           user_wire_size; /* current size of user locked memory in this map */
#if XNU_TARGET_OS_OSX
	vm_map_offset_t         vmmap_high_start;
#endif /* XNU_TARGET_OS_OSX */

	union {
		/*
		 * If map->disable_vmentry_reuse == TRUE:
		 * the end address of the highest allocated vm_map_entry_t.
		 */
		vm_map_offset_t         vmu1_highest_entry_end;
		/*
		 * For a nested VM map:
		 * the lowest address in this nested VM map that we would
		 * expect to be unnested under normal operation (i.e. for
		 * regular copy-on-write on DATA section).
		 */
		vm_map_offset_t         vmu1_lowest_unnestable_start;
	} vmu1;
#define highest_entry_end       vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start
	vm_map_entry_t          hint;           /* hint for quick lookups */
	union {
		struct vm_map_links* vmmap_hole_hint;   /* hint for quick hole lookups */
		struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
	} vmmap_u_1;
#define hole_hint vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint
	union {
		vm_map_entry_t          _first_free;    /* First free space hint */
		struct vm_map_links*    _holes;         /* links all holes between entries */
	} f_s;                                          /* Union for free space data structures being used */

#define first_free              f_s._first_free
#define holes_list              f_s._holes

	os_ref_atomic_t         map_refcnt;       /* Reference count */

	unsigned int
	/* boolean_t */ wait_for_space:1,         /* Should callers wait for space? */
	/* boolean_t */ wiring_required:1,        /* All memory wired? */
	/* boolean_t */ no_zero_fill:1,           /* No zero fill absent pages */
	/* boolean_t */ mapped_in_other_pmaps:1,  /* has this submap been mapped in maps that use a different pmap */
	/* boolean_t */ switch_protect:1,         /* Protect map from write faults while switched */
	/* boolean_t */ disable_vmentry_reuse:1,  /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* boolean_t */ holelistenabled:1,
	/* boolean_t */ is_nested_map:1,
	/* boolean_t */ map_disallow_new_exec:1,  /* Disallow new executable code */
	/* boolean_t */ jit_entry_exists:1,
	/* boolean_t */ has_corpse_footprint:1,
	/* boolean_t */ terminated:1,
	/* boolean_t */ is_alien:1,              /* for platform simulation, e.g. PLATFORM_IOS on OSX */
	/* boolean_t */ cs_enforcement:1,        /* code-signing enforcement */
	/* boolean_t */ cs_debugged:1,           /* code-signed but debugged */
	/* boolean_t */ reserved_regions:1,      /* has reserved regions. The map size that userspace sees should ignore these. */
	/* boolean_t */ single_jit:1,            /* only allow one JIT mapping */
	/* boolean_t */ never_faults:1,          /* this map should never cause faults */
	/* reserved  */ pad:13;
	unsigned int            timestamp;       /* Version number */
};

#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map)  ((map)->hdr.links.prev)
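
/*
 * The header's links double as a list sentinel, so a typical traversal
 * (as used throughout vm_map.c) looks like:
 *
 *	for (entry = vm_map_first_entry(map);
 *	     entry != vm_map_to_entry(map);
 *	     entry = entry->vme_next) {
 *		...
 *	}
 */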

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int    main_timestamp;
} vm_map_version_t;

/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 *              There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header links
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a single vm object.  This was used
 *		primarily in the pageout path - but is not currently used
 *		except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *		The third format is a kernel buffer copy object - for data
 *		small enough that a physical copy is the most efficient
 *		method. The data is kept in a kernel buffer referenced by
 *		the 'cpy_kdata' member of the 'c_u' union.
 */

struct vm_map_copy {
	int                     type;
#define VM_MAP_COPY_ENTRY_LIST          1
#define VM_MAP_COPY_OBJECT              2
#define VM_MAP_COPY_KERNEL_BUFFER       3
	vm_object_offset_t      offset;
	vm_map_size_t           size;
	union {
		struct vm_map_header                  hdr;    /* ENTRY_LIST */
		vm_object_t                           object; /* OBJECT */
		void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata;  /* KERNEL_BUFFER */
	} c_u;
};


#define cpy_hdr                 c_u.hdr

#define cpy_object              c_u.object
#define cpy_kdata               c_u.kdata

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)

/*
 *	Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)           \
	        ((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)            \
	        ((copy)->cpy_hdr.links.prev)

extern kern_return_t
vm_map_copy_adjust_to_target(
	vm_map_copy_t           copy_map,
	vm_map_offset_t         offset,
	vm_map_size_t           size,
	vm_map_t                target_map,
	boolean_t               copy,
	vm_map_copy_t           *target_copy_map_p,
	vm_map_offset_t         *overmap_start_p,
	vm_map_offset_t         *overmap_end_p,
	vm_map_offset_t         *trimmed_start_p);

/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *	When multiple maps are to be locked, order by map address.
 *	(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)                                           \
	((map)->timestamp = 0 ,                                         \
	lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map)                     \
	MACRO_BEGIN                          \
	DTRACE_VM(vm_map_lock_w);            \
	lck_rw_lock_exclusive(&(map)->lock); \
	MACRO_END

#define vm_map_unlock(map)          \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_w); \
	(map)->timestamp++;         \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_read(map)             \
	MACRO_BEGIN                       \
	DTRACE_VM(vm_map_lock_r);         \
	lck_rw_lock_shared(&(map)->lock); \
	MACRO_END

#define vm_map_unlock_read(map)     \
	MACRO_BEGIN                 \
	DTRACE_VM(vm_map_unlock_r); \
	lck_rw_done(&(map)->lock);  \
	MACRO_END

#define vm_map_lock_write_to_read(map)                 \
	MACRO_BEGIN                                    \
	DTRACE_VM(vm_map_lock_downgrade);              \
	(map)->timestamp++;                            \
	lck_rw_lock_exclusive_to_shared(&(map)->lock); \
	MACRO_END
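
/*
 * Sketch of the multiple-map ordering rule noted above: when two maps
 * must be locked together, compare their addresses and take the lower
 * one first (see vm_map.c::vm_remap()):
 *
 *	if ((uintptr_t)map_a < (uintptr_t)map_b) {
 *		vm_map_lock(map_a);
 *		vm_map_lock(map_b);
 *	} else {
 *		vm_map_lock(map_b);
 *		vm_map_lock(map_a);
 *	}
 */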

__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);

int vm_self_region_page_shift(vm_map_t target_map);
int vm_self_region_page_shift_safely(vm_map_t target_map);

#if MACH_ASSERT || DEBUG
#define vm_map_lock_assert_held(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map)  \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
	lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_map_lock_assert_held(map)
#define vm_map_lock_assert_shared(map)
#define vm_map_lock_assert_exclusive(map)
#define vm_map_lock_assert_notheld(map)
#endif /* MACH_ASSERT || DEBUG */

/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void             vm_map_init(void);

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
	vm_map_t                map,
	vm_map_address_t        *address,                               /* OUT */
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_map_entry_t          *o_entry);                              /* OUT */

extern void vm_map_clip_start(
	vm_map_t        map,
	vm_map_entry_t  entry,
	vm_map_offset_t endaddr);
extern void vm_map_clip_end(
	vm_map_t        map,
	vm_map_entry_t  entry,
	vm_map_offset_t endaddr);
extern boolean_t vm_map_entry_should_cow_for_true_share(
	vm_map_entry_t  entry);

/* Look up the map entry containing (or, failing that, the one immediately preceding) the specified address in the given map */
extern boolean_t        vm_map_lookup_entry(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);                                /* OUT */

/* like vm_map_lookup_entry without the PGZ bear trap */
#if CONFIG_PROB_GZALLOC
extern boolean_t        vm_map_lookup_entry_allow_pgz(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_entry_t          *entry);                                /* OUT */
#else
#define vm_map_lookup_entry_allow_pgz vm_map_lookup_entry
#endif

extern void             vm_map_copy_remap(
	vm_map_t                map,
	vm_map_entry_t          where,
	vm_map_copy_t           copy,
	vm_map_offset_t         adjustment,
	vm_prot_t               cur_prot,
	vm_prot_t               max_prot,
	vm_inherit_t            inheritance);

/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t    vm_map_lookup_locked(
	vm_map_t                *var_map,                               /* IN/OUT */
	vm_map_address_t        vaddr,
	vm_prot_t               fault_type,
	int                     object_lock_type,
	vm_map_version_t        *out_version,                           /* OUT */
	vm_object_t             *object,                                /* OUT */
	vm_object_offset_t      *offset,                                /* OUT */
	vm_prot_t               *out_prot,                              /* OUT */
	boolean_t               *wired,                                 /* OUT */
	vm_object_fault_info_t  fault_info,                             /* OUT */
	vm_map_t                *real_map,                              /* OUT */
	bool                    *contended);                            /* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t        vm_map_verify(
	vm_map_t                map,
	vm_map_version_t        *version);                              /* REF */
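
/*
 * Sketch of the version scheme in use (see vm_fault.c for the real
 * pattern): snapshot the timestamp under the lock, drop the lock for
 * blocking work, then re-take it and check that nothing changed:
 *
 *	vm_map_version_t version;
 *	version.main_timestamp = map->timestamp;
 *	vm_map_unlock_read(map);
 *	// ... blocking work, e.g. paging in data ...
 *	vm_map_lock_read(map);
 *	if (!vm_map_verify(map, &version)) {
 *		// a writer modified the map; redo the lookup
 *	}
 */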

extern vm_map_entry_t   vm_map_entry_insert(
	vm_map_t                map,
	vm_map_entry_t          insp_entry,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_map_kernel_flags_t   vmk_flags,
	boolean_t               needs_copy,
	boolean_t               is_shared,
	boolean_t               in_transition,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_behavior_t           behavior,
	vm_inherit_t            inheritance,
	unsigned short          wired_count,
	boolean_t               no_cache,
	boolean_t               permanent,
	boolean_t               no_copy_on_read,
	unsigned int            superpage_size,
	boolean_t               clear_map_aligned,
	boolean_t               is_submap,
	boolean_t               used_for_jit,
	int                     alias,
	boolean_t               translated_allow_execute);


/*
 *	Functions implemented as macros
 */
#define         vm_map_min(map) ((map)->min_offset)
/* Lowest valid address in
 * a map */

#define         vm_map_max(map) ((map)->max_offset)
/* Highest valid address */

#define         vm_map_pmap(map)        ((map)->pmap)
/* Physical map associated
 * with this address map */

/* Gain a reference to an existing map */
extern void             vm_map_reference(
	vm_map_t        map);

/*
 *	Submap object.  Must be used to create memory to be put
 *	in a submap by vm_map_submap.
 */
extern vm_object_t      vm_submap_object;

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)           \
	((map)->timestamp++ ,                           \
	 lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
	                          (event_t)&(map)->hdr, interruptible))


#define vm_map_entry_wakeup(map)        \
	thread_wakeup((event_t)(&(map)->hdr))
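
/*
 * Sketch of the in_transition protocol these macros support: a thread
 * that finds an entry in transition flags itself as a waiter and sleeps
 * (vm_map_entry_wait() drops the map lock while asleep and re-takes it
 * exclusive); the thread finishing the transition wakes everyone:
 *
 *	while (entry->in_transition) {
 *		entry->needs_wakeup = TRUE;
 *		vm_map_entry_wait(map, THREAD_UNINT);
 *		// the map may have changed: re-lookup "entry" here
 *	}
 *	...
 *	entry->in_transition = FALSE;
 *	if (entry->needs_wakeup) {
 *		entry->needs_wakeup = FALSE;
 *		vm_map_entry_wakeup(map);
 *	}
 */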


/* simplify map entries */
extern void             vm_map_simplify_entry(
	vm_map_t        map,
	vm_map_entry_t  this_entry);
extern void             vm_map_simplify(
	vm_map_t                map,
	vm_map_offset_t         start);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t    vm_map_copy_copy(
	vm_map_copy_t           copy);

/* Create a copy object from an object. */
extern kern_return_t    vm_map_copyin_object(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_map_copy_t           *copy_result);                         /* OUT */

extern kern_return_t    vm_map_random_address_for_size(
	vm_map_t        map,
	vm_map_offset_t *address,
	vm_map_size_t   size);

/* Enter a mapping */
extern kern_return_t    vm_map_enter(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

#if __arm64__
extern kern_return_t    vm_map_enter_fourk(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);
#endif /* __arm64__ */

/* XXX should go away - replaced with regular enter of contig object */
extern  kern_return_t   vm_map_enter_cpm(
	vm_map_t                map,
	vm_map_address_t        *addr,
	vm_map_size_t           size,
	int                     flags);

extern kern_return_t vm_map_remap(
	vm_map_t                target_map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	vm_map_t                src_map,
	vm_map_offset_t         memory_address,
	boolean_t               copy,
	vm_prot_t               *cur_protection,
	vm_prot_t               *max_protection,
	vm_inherit_t            inheritance);


/*
 * Write from a kernel buffer into a specified map, and read from a
 * specified map into a kernel buffer.
 */
extern  kern_return_t   vm_map_write_user(
	vm_map_t                map,
	void                    *src_p,
	vm_map_offset_t         dst_addr,
	vm_size_t               size);

extern  kern_return_t   vm_map_read_user(
	vm_map_t                map,
	vm_map_offset_t         src_addr,
	void                    *dst_p,
	vm_size_t               size);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t         vm_map_fork(
	ledger_t                ledger,
	vm_map_t                old_map,
	int                     options);
#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE       0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE          0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT            0x00000004

/* Change inheritance */
extern kern_return_t    vm_map_inherit(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_inherit_t            new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t    vm_map_machine_attribute(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t* value);                         /* IN/OUT */

extern kern_return_t    vm_map_msync(
	vm_map_t                map,
	vm_map_address_t        address,
	vm_map_size_t           size,
	vm_sync_t               sync_flags);

/* Set paging behavior */
extern kern_return_t    vm_map_behavior_set(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_behavior_t           new_behavior);

extern kern_return_t vm_map_region(
	vm_map_t                 map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	vm_region_flavor_t       flavor,
	vm_region_info_t         info,
	mach_msg_type_number_t  *count,
	mach_port_t             *object_name);

extern kern_return_t vm_map_region_recurse_64(
	vm_map_t                 map,
	vm_map_offset_t         *address,
	vm_map_size_t           *size,
	natural_t               *nesting_depth,
	vm_region_submap_info_64_t info,
	mach_msg_type_number_t  *count);

extern kern_return_t vm_map_page_query_internal(
	vm_map_t                map,
	vm_map_offset_t         offset,
	int                     *disposition,
	int                     *ref_count);

extern kern_return_t vm_map_query_volatile(
	vm_map_t        map,
	mach_vm_size_t  *volatile_virtual_size_p,
	mach_vm_size_t  *volatile_resident_size_p,
	mach_vm_size_t  *volatile_compressed_size_p,
	mach_vm_size_t  *volatile_pmap_size_p,
	mach_vm_size_t  *volatile_compressed_pmap_size_p);

extern kern_return_t    vm_map_submap(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_map_t                submap,
	vm_map_offset_t         offset,
	boolean_t               use_pmap);

extern void vm_map_submap_pmap_clean(
	vm_map_t        map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_map_t        sub_map,
	vm_map_offset_t offset);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
	ipc_port_t      port);


extern kern_return_t vm_map_set_cache_attr(
	vm_map_t        map,
	vm_map_offset_t va);


/* definitions related to overriding the NX behavior */

#define VM_ABI_32       0x1
#define VM_ABI_64       0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);


extern void vm_map_region_top_walk(
	vm_map_entry_t entry,
	vm_region_top_info_t top);
extern void vm_map_region_walk(
	vm_map_t map,
	vm_map_offset_t va,
	vm_map_entry_t entry,
	vm_object_offset_t offset,
	vm_object_size_t range,
	vm_region_extended_info_t extended,
	boolean_t look_for_pages,
	mach_msg_type_number_t count);



extern void vm_map_copy_footprint_ledgers(
	task_t  old_task,
	task_t  new_task);
extern void vm_map_copy_ledger(
	task_t  old_task,
	task_t  new_task,
	int     ledger_entry);

/**
 * Represents a single region of virtual address space that should be reserved
 * (pre-mapped) in a user address space.
 */
struct vm_reserved_region {
	char            *vmrr_name;
	vm_map_offset_t vmrr_addr;
	vm_map_size_t   vmrr_size;
};

/**
 * Return a machine-dependent array of address space regions that should be
 * reserved by the VM. This function is defined in the machine-dependent
 * machine_routines.c files.
 */
extern size_t ml_get_vm_reserved_regions(
	bool vm_is64bit,
	struct vm_reserved_region **regions);
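
/*
 * Illustrative caller (a sketch): fetch the reserved regions for a
 * 64-bit address space and walk them:
 *
 *	struct vm_reserved_region *regions;
 *	size_t count = ml_get_vm_reserved_regions(true, &regions);
 *	for (size_t i = 0; i < count; i++) {
 *		// reserve [vmrr_addr, vmrr_addr + vmrr_size) named vmrr_name
 *	}
 */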

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/* Create an empty map */
extern vm_map_t         vm_map_create(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	boolean_t               pageable);

extern vm_map_size_t    vm_map_adjusted_size(vm_map_t map);

extern void             vm_map_disable_hole_optimization(vm_map_t map);

/* Get rid of a map */
extern void             vm_map_destroy(
	vm_map_t                map,
	int                     flags);

/* Lose a reference */
extern void             vm_map_deallocate(
	vm_map_t                map);

/* Lose a reference */
extern void             vm_map_inspect_deallocate(
	vm_map_inspect_t        map);

/* Lose a reference */
extern void             vm_map_read_deallocate(
	vm_map_read_t        map);

extern vm_map_t         vm_map_switch(
	vm_map_t                map);

/* Change protection */
extern kern_return_t    vm_map_protect(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               new_prot,
	boolean_t               set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               protection);

extern boolean_t vm_map_cs_enforcement(
	vm_map_t                map);
extern void vm_map_cs_enforcement_set(
	vm_map_t                map,
	boolean_t               val);

extern void vm_map_cs_debugged_set(
	vm_map_t map,
	boolean_t val);

extern kern_return_t vm_map_cs_wx_enable(vm_map_t map);

/* wire down a region */

#ifdef XNU_KERNEL_PRIVATE

#define VM_MAP_CREATE_ZAP_OPTIONS(map) \
	(VM_MAP_CREATE_DISABLE_HOLELIST | ((map)->hdr.entries_pageable \
	? VM_MAP_CREATE_PAGEABLE : VM_MAP_CREATE_DEFAULT))

/* never fails */
extern vm_map_t vm_map_create_options(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	vm_map_create_options_t options);

extern kern_return_t    vm_map_wire_kernel(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	vm_tag_t                tag,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract_kernel(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	vm_tag_t                tag,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

/* kext exported versions */

extern kern_return_t    vm_map_wire_external(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract_external(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

#else /* XNU_KERNEL_PRIVATE */

extern kern_return_t    vm_map_wire(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	boolean_t               user_wire);

extern kern_return_t    vm_map_wire_and_extract(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_prot_t               access_type,
	boolean_t               user_wire,
	ppnum_t                 *physpage_p);

#endif /* !XNU_KERNEL_PRIVATE */

/* unwire a region */
extern kern_return_t    vm_map_unwire(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	boolean_t               user_wire);

#ifdef XNU_KERNEL_PRIVATE

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object_prefault(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	upl_page_list_ptr_t     page_list,
	unsigned int            page_list_count);

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object_control(
	vm_map_t                map,
	vm_map_offset_t         *address,
	vm_map_size_t           size,
	vm_map_offset_t         mask,
	int                     flags,
	vm_map_kernel_flags_t   vmk_flags,
	vm_tag_t                tag,
	memory_object_control_t control,
	vm_object_offset_t      offset,
	boolean_t               needs_copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

extern kern_return_t    vm_map_terminate(
	vm_map_t                map);

extern void             vm_map_require(
	vm_map_t                map);

#endif /* XNU_KERNEL_PRIVATE */

/* Deallocate a region */
extern kern_return_t    vm_map_remove(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	boolean_t               flags);

/* Deallocate a region when the map is already locked */
extern kern_return_t    vm_map_remove_locked(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	boolean_t               flags);

/* Discard a copy without using it */
extern void             vm_map_copy_discard(
	vm_map_copy_t           copy);

/* Overwrite existing memory with a copy */
extern kern_return_t    vm_map_copy_overwrite(
	vm_map_t                dst_map,
	vm_map_address_t        dst_addr,
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size,
	boolean_t               interruptible);

#define VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES      (3)


/* returns TRUE if the size of the vm_map_copy equals the size parameter, FALSE otherwise */
extern boolean_t        vm_map_copy_validate_size(
	vm_map_t                dst_map,
	vm_map_copy_t           copy,
	vm_map_size_t           *size);

/* Place a copy into a map */
extern kern_return_t    vm_map_copyout(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,                              /* OUT */
	vm_map_copy_t           copy);

extern kern_return_t vm_map_copyout_size(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,                              /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size);

extern kern_return_t    vm_map_copyout_internal(
	vm_map_t                dst_map,
	vm_map_address_t        *dst_addr,      /* OUT */
	vm_map_copy_t           copy,
	vm_map_size_t           copy_size,
	boolean_t               consume_on_success,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

extern kern_return_t    vm_map_copyin(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               src_destroy,
	vm_map_copy_t           *copy_result);                          /* OUT */

extern kern_return_t    vm_map_copyin_common(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               src_destroy,
	boolean_t               src_volatile,
	vm_map_copy_t           *copy_result,                           /* OUT */
	boolean_t               use_maxprot);

#define VM_MAP_COPYIN_SRC_DESTROY       0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT       0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST        0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_ALL_FLAGS         0x0000000F
extern kern_return_t    vm_map_copyin_internal(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	int                     flags,
	vm_map_copy_t           *copy_result);                         /* OUT */

extern kern_return_t    vm_map_copy_extract(
	vm_map_t                src_map,
	vm_map_address_t        src_addr,
	vm_map_size_t           len,
	boolean_t               copy,
	vm_map_copy_t           *copy_result,   /* OUT */
	vm_prot_t               *cur_prot,      /* OUT */
	vm_prot_t               *max_prot,      /* OUT */
	vm_inherit_t            inheritance,
	vm_map_kernel_flags_t   vmk_flags);


extern void             vm_map_disable_NX(
	vm_map_t                map);

extern void             vm_map_disallow_data_exec(
	vm_map_t                map);

extern void             vm_map_set_64bit(
	vm_map_t                map);

extern void             vm_map_set_32bit(
	vm_map_t                map);

extern void             vm_map_set_jumbo(
	vm_map_t                map);

extern void             vm_map_set_jit_entitled(
	vm_map_t                map);

extern void             vm_map_set_max_addr(
	vm_map_t                map, vm_map_offset_t new_max_offset);

extern boolean_t        vm_map_has_hard_pagezero(
	vm_map_t                map,
	vm_map_offset_t         pagezero_size);
extern void             vm_commit_pagezero_status(vm_map_t      tmap);

#ifdef __arm__
static inline boolean_t
vm_map_is_64bit(__unused vm_map_t map)
{
	return 0;
}
#else
extern boolean_t        vm_map_is_64bit(
	vm_map_t                map);
#endif


extern kern_return_t    vm_map_raise_max_offset(
	vm_map_t        map,
	vm_map_offset_t new_max_offset);

extern kern_return_t    vm_map_raise_min_offset(
	vm_map_t        map,
	vm_map_offset_t new_min_offset);
#if XNU_TARGET_OS_OSX
extern void vm_map_set_high_start(
	vm_map_t        map,
	vm_map_offset_t high_start);
#endif /* XNU_TARGET_OS_OSX */

extern vm_map_offset_t  vm_compute_max_offset(
	boolean_t               is64);

extern void             vm_map_get_max_aslr_slide_section(
	vm_map_t                map,
	int64_t                 *max_sections,
	int64_t                 *section_size);

extern uint64_t         vm_map_get_max_aslr_slide_pages(
	vm_map_t map);

extern uint64_t         vm_map_get_max_loader_aslr_slide_pages(
	vm_map_t map);

extern kern_return_t    vm_map_set_size_limit(
	vm_map_t                map,
	uint64_t                limit);

extern kern_return_t    vm_map_set_data_limit(
	vm_map_t                map,
	uint64_t                limit);

extern void             vm_map_set_user_wire_limit(
	vm_map_t                map,
	vm_size_t               limit);

extern void vm_map_switch_protect(
	vm_map_t                map,
	boolean_t               val);

extern void vm_map_iokit_mapped_region(
	vm_map_t                map,
	vm_size_t               bytes);

extern void vm_map_iokit_unmapped_region(
	vm_map_t                map,
	vm_size_t               bytes);


extern boolean_t first_free_is_valid(vm_map_t map);

extern int              vm_map_page_shift(
	vm_map_t                map);

extern vm_map_offset_t  vm_map_page_mask(
	vm_map_t                map);

extern int              vm_map_page_size(
	vm_map_t                map);

extern vm_map_offset_t  vm_map_round_page_mask(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

extern vm_map_offset_t  vm_map_trunc_page_mask(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

extern boolean_t        vm_map_page_aligned(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask);

static inline int
vm_map_range_overflows(vm_map_offset_t addr, vm_map_size_t size)
{
	vm_map_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}

static inline int
mach_vm_range_overflows(mach_vm_offset_t addr, mach_vm_size_t size)
{
	mach_vm_offset_t sum;
	return os_add_overflow(addr, size, &sum);
}
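
/*
 * Example (illustrative sketch): callers are expected to reject a
 * user-supplied (address, size) pair whose sum wraps the address
 * space before rounding it or walking the range.
 *
 *	if (mach_vm_range_overflows(addr, size)) {
 *	        return KERN_INVALID_ARGUMENT;
 *	}
 */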

#ifdef XNU_KERNEL_PRIVATE

#if XNU_TARGET_OS_OSX
extern void vm_map_mark_alien(vm_map_t map);
extern void vm_map_single_jit(vm_map_t map);
#endif /* XNU_TARGET_OS_OSX */

extern kern_return_t vm_map_page_info(
	vm_map_t                map,
	vm_map_offset_t         offset,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);
extern kern_return_t vm_map_page_range_info_internal(
	vm_map_t                map,
	vm_map_offset_t         start_offset,
	vm_map_offset_t         end_offset,
	int                     effective_page_shift,
	vm_page_info_flavor_t   flavor,
	vm_page_info_t          info,
	mach_msg_type_number_t  *count);
#endif /* XNU_KERNEL_PRIVATE */


#ifdef  MACH_KERNEL_PRIVATE

/*
 *	Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 *	usual form; it performs the copyin based on the current
 *	protection, so a region whose current protection is
 *	VM_PROT_NONE fails.  vm_map_copyin_maxprot performs the copyin
 *	based on the maximum possible access.  The difference: a region
 *	with no current access but some possible maximum access is
 *	rejected by vm_map_copyin() yet returned by
 *	vm_map_copyin_maxprot().
 */
#define vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \
	        vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
	                                FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map, \
	    src_addr, len, src_destroy, copy_result) \
	        vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
	                                FALSE, copy_result, TRUE)
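
/*
 * Example (illustrative sketch; error paths abbreviated): a typical
 * copyin/copyout round trip.  vm_map_copyout() consumes the copy
 * object on success; on failure the caller discards it with
 * vm_map_copy_discard(), both declared elsewhere in this header.
 *
 *	vm_map_copy_t           copy;
 *	vm_map_address_t        dst_addr;
 *	kern_return_t           kr;
 *
 *	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
 *	if (kr == KERN_SUCCESS) {
 *	        kr = vm_map_copyout(dst_map, &dst_addr, copy);
 *	        if (kr != KERN_SUCCESS) {
 *	                vm_map_copy_discard(copy);
 *	        }
 *	}
 */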


/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Inlines and macros for a map's page shift, size, mask and alignment
 */
static inline int
VM_MAP_PAGE_SHIFT(
	vm_map_t map)
{
	if (map) {
		return map->hdr.page_shift;
	}
	return PAGE_SHIFT;
}

#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)
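
/*
 * Example (illustrative sketch): align a range to the map's own page
 * size, which may be smaller than the kernel's PAGE_SIZE (e.g. a 4K
 * map under a 16K kernel on arm64).
 *
 *	vm_map_offset_t pgmask = VM_MAP_PAGE_MASK(map);
 *	vm_map_offset_t start  = VM_MAP_TRUNC_PAGE(addr, pgmask);
 *	vm_map_offset_t end    = VM_MAP_ROUND_PAGE(addr + size, pgmask);
 *	assert(VM_MAP_PAGE_ALIGNED(start, pgmask));
 */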

static inline bool
VM_MAP_IS_EXOTIC(
	vm_map_t map __unused)
{
#if __arm64__
	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
	    pmap_is_exotic(map->pmap)) {
		return true;
	}
#endif /* __arm64__ */
	return false;
}

static inline bool
VM_MAP_IS_ALIEN(
	vm_map_t map __unused)
{
	/*
	 * An "alien" process/task/map/pmap should mostly behave
	 * as it currently would on iOS.
	 */
#if XNU_TARGET_OS_OSX
	if (map->is_alien) {
		return true;
	}
	return false;
#else /* XNU_TARGET_OS_OSX */
	return true;
#endif /* XNU_TARGET_OS_OSX */
}
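
/*
 * The VM_MAP_POLICY_* predicates below all key off VM_MAP_IS_ALIEN():
 * alien maps get the stricter behavior (execute permission is stripped
 * on a W^X violation rather than failing the operation, JIT regions
 * cannot be shared, inherited, or copied, and at most one JIT region
 * is allowed), while non-alien maps fail W^X violations outright and
 * allow multiple JIT regions unless the map is marked single_jit.
 */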

static inline bool
VM_MAP_POLICY_WX_FAIL(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WX_STRIP_X(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return true;
	}
	return false;
}

static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map) || map->single_jit) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
	vm_map_t map)
{
	return VM_MAP_IS_ALIEN(map);
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(
	vm_map_t map __unused)
{
	if (VM_MAP_IS_ALIEN(map)) {
		return false;
	}
	return true;
}

static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(
	vm_map_t map __unused)
{
#if __x86_64__
	return true;
#else /* __x86_64__ */
	if (VM_MAP_IS_EXOTIC(map)) {
		return true;
	}
	return false;
#endif /* __x86_64__ */
}

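/*
 * Translate a MAP_MEM_* caching mode into the corresponding VM_WIMG_*
 * attribute.  Despite its name, the "prot" argument carries
 * memory-entry caching bits rather than VM protections; values with
 * no WIMG equivalent leave *wimg untouched.
 */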
static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
	switch (prot) {
	case MAP_MEM_NOOP:                      break;
	case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
	case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
	case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
	case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
	case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
	case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
	case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
	case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
	case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
	default:                                break;
	}
}

#endif /* MACH_KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
extern bool vm_map_is_exotic(vm_map_t map);
extern bool vm_map_is_alien(vm_map_t map);
extern pmap_t vm_map_get_pmap(vm_map_t map);
#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Flags for vm_map_remove() and vm_map_delete()
 */
#define VM_MAP_REMOVE_NO_FLAGS          0x0
#define VM_MAP_REMOVE_KUNWIRE           0x1
#define VM_MAP_REMOVE_INTERRUPTIBLE     0x2
#define VM_MAP_REMOVE_WAIT_FOR_KWIRE    0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES      0x8
#define VM_MAP_REMOVE_NO_PMAP_CLEANUP   0x10
#define VM_MAP_REMOVE_NO_MAP_ALIGN      0x20
#define VM_MAP_REMOVE_NO_UNNESTING      0x40
#define VM_MAP_REMOVE_IMMUTABLE         0x80
#define VM_MAP_REMOVE_GAPS_OK           0x100
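
/*
 * Example (illustrative sketch; assumes the vm_map_remove() prototype
 * declared earlier in this header, taking the range bounds and a
 * flags argument):
 *
 *	kern_return_t kr;
 *
 *	kr = vm_map_remove(map, start, end, VM_MAP_REMOVE_KUNWIRE);
 */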

/* Support for UPLs from vm_maps */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_get_upl(
	vm_map_t                target_map,
	vm_map_offset_t         map_offset,
	upl_size_t              *size,
	upl_t                   *upl,
	upl_page_info_array_t   page_info,
	unsigned int            *page_infoCnt,
	upl_control_flags_t     *flags,
	vm_tag_t                tag,
	int                     force_data_sync);
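
/*
 * Example (hypothetical sketch; the UPL_FILE_IO control flag and the
 * NULL page list are illustrative assumptions, not requirements of
 * this interface):
 *
 *	upl_t                   upl = NULL;
 *	upl_size_t              upl_size = (upl_size_t)len;
 *	upl_control_flags_t     flags = UPL_FILE_IO;
 *	kern_return_t           kr;
 *
 *	kr = vm_map_get_upl(map, offset, &upl_size, &upl,
 *	    NULL, NULL, &flags, VM_KERN_MEMORY_NONE, 0);
 */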

#endif /* XNU_KERNEL_PRIVATE */

extern void
vm_map_sizes(vm_map_t map,
    vm_map_size_t * psize,
    vm_map_size_t * pfree,
    vm_map_size_t * plargest_free);

#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);
#endif /* CONFIG_DYNAMIC_CODE_SIGNING */

extern kern_return_t vm_map_partial_reap(
	vm_map_t map,
	unsigned int *reclaimed_resident,
	unsigned int *reclaimed_compressed);


#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
	vm_map_t map,
	boolean_t);

extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr);

#endif /* DEVELOPMENT || DEBUG */


#if CONFIG_FREEZE

extern kern_return_t vm_map_freeze(
	task_t       task,
	unsigned int *purgeable_count,
	unsigned int *wired_count,
	unsigned int *clean_count,
	unsigned int *dirty_count,
	unsigned int dirty_budget,
	unsigned int *shared_count,
	int          *freezer_error_code,
	boolean_t    eval_only);

#define FREEZER_ERROR_GENERIC                   (-1)
#define FREEZER_ERROR_EXCESS_SHARED_MEMORY      (-2)
#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO  (-3)
#define FREEZER_ERROR_NO_COMPRESSOR_SPACE       (-4)
#define FREEZER_ERROR_NO_SWAP_SPACE             (-5)

#endif /* CONFIG_FREEZE */

__END_DECLS

/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so we
 * build a fake pointer from the map's ledger and the index of the ledger
 * entry being reported.
 */
#define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))
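
/*
 * Example (illustrative sketch; task_ledgers and its
 * purgeable_nonvolatile index are assumed from the task/ledger
 * headers):
 *
 *	info->object_id = VM_OBJECT_ID_FAKE(map,
 *	    task_ledgers.purgeable_nonvolatile);
 */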

#endif  /* KERNEL_PRIVATE */

#endif  /* _VM_VM_MAP_H_ */