/*
 * NOTE(review): removed a non-source artifact that preceded this header —
 * a "Loading..." banner followed by a bare line-number gutter (1..912),
 * apparently captured from a web-based source viewer. It was not valid C
 * and was not part of the original file.
 */
/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	pmap.h
 *
 *	Authors:  Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Machine-dependent structures for the physical map module.
 */
#ifdef KERNEL_PRIVATE
#ifndef _PMAP_MACHINE_
#define _PMAP_MACHINE_  1

#ifndef ASSEMBLER

#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/simple_lock.h>

#include <i386/mp.h>
#include <i386/cpu_number.h>
#include <i386/proc_reg.h>
#include <os/atomic_private.h>
#include <i386/pal_routines.h>

/*
 *	Define the generic in terms of the specific
 */

#define INTEL_PGBYTES           I386_PGBYTES
#define INTEL_PGSHIFT           I386_PGSHIFT
#define intel_btop(x)           i386_btop(x)
#define intel_ptob(x)           i386_ptob(x)
#define intel_round_page(x)     i386_round_page(x)
#define intel_trunc_page(x)     i386_trunc_page(x)

/*
 *	i386/i486/i860 Page Table Entry
 */

#endif  /* ASSEMBLER */

#define NPGPTD          4ULL
#define PDESHIFT        21ULL
#define PTEMASK         0x1ffULL
#define PTEINDX         3ULL

#define PTESHIFT        12ULL

#define LOW_4GB_MASK    ((vm_offset_t)0x00000000FFFFFFFFUL)

#define PDESIZE         sizeof(pd_entry_t) /* for assembly files */
#define PTESIZE         sizeof(pt_entry_t) /* for assembly files */

#define INTEL_OFFMASK   (I386_PGBYTES - 1)
#define INTEL_LOFFMASK  (I386_LPGBYTES - 1)
#define PG_FRAME        0x000FFFFFFFFFF000ULL
#define NPTEPG          (PAGE_SIZE/(sizeof (pt_entry_t)))
#define NPTDPG          (PAGE_SIZE/(sizeof (pd_entry_t)))

#define NBPTD           (NPGPTD << PAGE_SHIFT)
#define NPDEPTD         (NBPTD / (sizeof (pd_entry_t)))
#define NPDEPG          (PAGE_SIZE/(sizeof (pd_entry_t)))
#define NBPDE           (1ULL << PDESHIFT)
#define PDEMASK         (NBPDE - 1)

#define PTE_PER_PAGE    512 /* number of PTE's per page on any level */

/* cleanly define parameters for all the page table levels */
typedef uint64_t        pml4_entry_t;
#define NPML4PG         (PAGE_SIZE/(sizeof (pml4_entry_t)))
#define PML4SHIFT       39
#define PML4PGSHIFT     9
#define NBPML4          (1ULL << PML4SHIFT)
#define PML4MASK        (NBPML4-1)
#define PML4_ENTRY_NULL ((pml4_entry_t *) 0)

typedef uint64_t        pdpt_entry_t;
#define NPDPTPG         (PAGE_SIZE/(sizeof (pdpt_entry_t)))
#define PDPTSHIFT       30
#define PDPTPGSHIFT     9
#define NBPDPT          (1ULL << PDPTSHIFT)
#define PDPTMASK        (NBPDPT-1)
#define PDPT_ENTRY_NULL ((pdpt_entry_t *) 0)

typedef uint64_t        pd_entry_t;
#define NPDPG           (PAGE_SIZE/(sizeof (pd_entry_t)))
#define PDSHIFT         21
#define PDPGSHIFT       9
#define NBPD            (1ULL << PDSHIFT)
#define PDMASK          (NBPD-1)
#define PD_ENTRY_NULL   ((pd_entry_t *) 0)

typedef uint64_t        pt_entry_t;
#define NPTPG           (PAGE_SIZE/(sizeof (pt_entry_t)))
#define PTSHIFT         12
#define PTPGSHIFT       9
#define NBPT            (1ULL << PTSHIFT)
#define PTMASK          (NBPT-1)
#define PT_ENTRY_NULL   ((pt_entry_t *) 0)

typedef uint64_t  pmap_paddr_t;

#if     DEVELOPMENT || DEBUG
#define PMAP_ASSERT 1
extern int pmap_asserts_enabled;
extern int pmap_asserts_traced;
#endif

#if PMAP_ASSERT
#define pmap_assert(ex) (pmap_asserts_enabled ? ((ex) ? (void)0 : Assert(__FILE_NAME__, __LINE__, # ex)) : (void)0)

#define pmap_assert2(ex, fmt, args...)                                  \
	do {                                                            \
	        if (__improbable(pmap_asserts_enabled && !(ex))) {      \
	                if (pmap_asserts_traced) {                      \
	                        KERNEL_DEBUG_CONSTANT(0xDEAD1000, __builtin_return_address(0), __LINE__, 0, 0, 0); \
	                        kdebug_enable = 0;                      \
	                } else {                                        \
	                                kprintf("Assertion %s failed (%s:%d, caller %p) " fmt , #ex, __FILE_NAME__, __LINE__, __builtin_return_address(0),  ##args); \
	                                panic("Assertion %s failed (%s:%d, caller %p) " fmt , #ex, __FILE_NAME__, __LINE__, __builtin_return_address(0),  ##args); \
	                }                                               \
	        }                                                       \
	} while(0)
#else
#define pmap_assert(ex)
#define pmap_assert2(ex, fmt, args...)
#endif

/* superpages */
#define SUPERPAGE_NBASEPAGES 512

/* in 64 bit spaces, the number of each type of page in the page tables */
#define NPML4PGS        (1ULL * (PAGE_SIZE/(sizeof (pml4_entry_t))))
#define NPDPTPGS        (NPML4PGS * (PAGE_SIZE/(sizeof (pdpt_entry_t))))
#define NPDEPGS         (NPDPTPGS * (PAGE_SIZE/(sizeof (pd_entry_t))))
#define NPTEPGS         (NPDEPGS * (PAGE_SIZE/(sizeof (pt_entry_t))))

extern int      kernPhysPML4Index;
extern int      kernPhysPML4EntryCount;

#define KERNEL_PML4_INDEX               511
#define KERNEL_KEXTS_INDEX              (KERNEL_PML4_INDEX - 1)         /* 510: Home of KEXTs - the basement */
#define KERNEL_PHYSMAP_PML4_INDEX       (kernPhysPML4Index)             /* 50X: virtual to physical map */
#define KERNEL_PHYSMAP_PML4_COUNT       (kernPhysPML4EntryCount)
#define KERNEL_PHYSMAP_PML4_COUNT_MAX   (16 - 2)        /* 1 for KERNEL, 1 for BASEMENT */
/* 2 PML4s for KASAN to cover a maximum of 16 PML4s {PHYSMAP + BASEMENT + KVA} */
#define KERNEL_KASAN_PML4_LAST          (495) /* 511 - 16 */
#define KERNEL_KASAN_PML4_FIRST         (494) /* 511 - 17 */
#define KERNEL_DBLMAP_PML4_INDEX        (KERNEL_KASAN_PML4_FIRST - 1)
#define KERNEL_PML4_COUNT               1
#define KERNEL_BASE                     (0ULL - (NBPML4 * KERNEL_PML4_COUNT))
#define KERNEL_BASEMENT                 (KERNEL_BASE - NBPML4)  /* Basement uses one PML4 entry */

/*
 * Pte related macros
 */
#define KVADDR(pmi, pdpi, pdi, pti)               \
	 ((vm_offset_t)                   \
	        ((uint64_t) -1    << 47)        | \
	        ((uint64_t)(pmi)  << PML4SHIFT) | \
	        ((uint64_t)(pdpi) << PDPTSHIFT) | \
	        ((uint64_t)(pdi)  << PDESHIFT)  | \
	        ((uint64_t)(pti)  << PTESHIFT))


#ifndef NKPT
#define NKPT            500     /* actual number of bootstrap kernel page tables */
#endif



/*
 *	Convert address offset to page descriptor index
 */
#define pdptnum(pmap, a) (((vm_offset_t)(a) >> PDPTSHIFT) & PDPTMASK)
#define pdenum(pmap, a) (((vm_offset_t)(a) >> PDESHIFT) & PDEMASK)
#define PMAP_INVALID_PDPTNUM (~0ULL)

#define pdeidx(pmap, a)    (((a) >> PDSHIFT)   & ((1ULL<<(48 - PDSHIFT)) -1))
#define pdptidx(pmap, a)   (((a) >> PDPTSHIFT) & ((1ULL<<(48 - PDPTSHIFT)) -1))
#define pml4idx(pmap, a)   (((a) >> PML4SHIFT) & ((1ULL<<(48 - PML4SHIFT)) -1))


/*
 *	Convert page descriptor index to user virtual address
 */
#define pdetova(a)      ((vm_offset_t)(a) << PDESHIFT)

/*
 *	Convert address offset to page table index
 */
#define ptenum(a)       (((vm_offset_t)(a) >> PTESHIFT) & PTEMASK)

/*
 *	Hardware pte bit definitions (to be used directly on the ptes
 *	without using the bit fields).
 */

#define INTEL_PTE_VALID         0x00000001ULL

#define INTEL_PTE_WRITE         0x00000002ULL
#define INTEL_PTE_RW            0x00000002ULL

#define INTEL_PTE_USER          0x00000004ULL

#define INTEL_PTE_WTHRU         0x00000008ULL
#define INTEL_PTE_NCACHE        0x00000010ULL

#define INTEL_PTE_REF           0x00000020ULL
#define INTEL_PTE_MOD           0x00000040ULL

#define INTEL_PTE_PS            0x00000080ULL
#define INTEL_PTE_PAT           0x00000080ULL

#define INTEL_PTE_GLOBAL        0x00000100ULL

/* These markers use software available bits ignored by the
 * processor's 4-level and EPT pagetable walkers.
 * N.B.: WIRED was originally bit 10, but that conflicts with
 * execute permissions for EPT entries iff mode-based execute controls
 * are enabled.
 */
#define INTEL_PTE_SWLOCK        (0x1ULL << 52)
#define INTEL_PDPTE_NESTED      (0x1ULL << 53)
#define INTEL_PTE_WIRED         (0x1ULL << 54)
/* TODO: Compressed markers, potential conflict with protection keys? */
#define INTEL_PTE_COMPRESSED_ALT (1ULL << 61) /* compressed but with "alternate accounting" */
#define INTEL_PTE_COMPRESSED    (1ULL << 62) /* marker, for invalid PTE only -- ignored by hardware for both regular/EPT entries*/

#define INTEL_PTE_PFN           PG_FRAME
/* TODO: these should be internal definitions */
#define INTEL_PTE_NX            (1ULL << 63)

#define INTEL_PTE_INVALID       0
/* This is conservative, but suffices */
#define INTEL_PTE_RSVD          ((1ULL << 10) | (1ULL << 11))


/*
 * A "compressed" PTE is an invalid (not-present) entry whose software bits
 * record that the page's contents live in the VM compressor. Besides the
 * marker itself, only the "alternate accounting" and swlock software bits
 * may legitimately accompany it.
 */
#define INTEL_PTE_COMPRESSED_MASK (INTEL_PTE_COMPRESSED | \
	                           INTEL_PTE_COMPRESSED_ALT | INTEL_PTE_SWLOCK)
/*
 * True iff (x) is a well-formed compressed marker: not valid, marker set,
 * and either no stray bits are present or the repair routine accepts the
 * entry after examining/fixing it (note: repair may modify (x) in place
 * via the &(x) argument).
 */
#define PTE_IS_COMPRESSED(x, ptep, pmap, vaddr)                            \
	((((x) & INTEL_PTE_VALID) == 0) && /* PTE is not valid... */       \
	 ((x) & INTEL_PTE_COMPRESSED) && /* ...has "compressed" marker" */ \
	 ((!((x) & ~INTEL_PTE_COMPRESSED_MASK)) || /* ...no other bits */  \
	  pmap_compressed_pte_corruption_repair((x), &(x), (ptep), (pmap), (vaddr))))

#define pa_to_pte(a)            ((a) & INTEL_PTE_PFN) /* XXX */
#define pte_to_pa(p)            ((p) & INTEL_PTE_PFN) /* XXX */
#define pte_increment_pa(p)     ((p) += INTEL_OFFMASK+1)

#define pte_kernel_rw(p)          ((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_RW))
#define pte_kernel_ro(p)          ((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID))
#define pte_user_rw(p)            ((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_USER|INTEL_PTE_RW))
#define pte_user_ro(p)            ((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_USER))

#define PMAP_INVEPT_SINGLE_CONTEXT      1


#define INTEL_EPTP_AD           0x00000040ULL

#define INTEL_EPT_READ          0x00000001ULL
#define INTEL_EPT_WRITE         0x00000002ULL
#define INTEL_EPT_EX            0x00000004ULL   /* Supervisor-execute when MBE is enabled */
#define INTEL_EPT_IPAT          0x00000040ULL
#define INTEL_EPT_PS            0x00000080ULL
#define INTEL_EPT_REF           0x00000100ULL
#define INTEL_EPT_MOD           0x00000200ULL
#define INTEL_EPT_UEX           0x00000400ULL   /* User-execute when MBE is enabled (ignored otherwise) */

#define INTEL_EPT_CACHE_MASK    0x00000038ULL
#define INTEL_EPT_NCACHE        0x00000000ULL
#define INTEL_EPT_WC            0x00000008ULL
#define INTEL_EPT_WTHRU         0x00000020ULL
#define INTEL_EPT_WP            0x00000028ULL
#define INTEL_EPT_WB            0x00000030ULL

/*
 * Routines to filter correct bits depending on the pmap type
 */

/*
 * Strip execute permission from a PTE. Legacy IA-32e tables encode this
 * by setting the NX ("no-execute") bit; EPT tables encode it by clearing
 * the execute-allow bit.
 */
static inline pt_entry_t
pte_remove_ex(pt_entry_t pte, boolean_t is_ept)
{
	if (__probable(!is_ept)) {
		pte |= INTEL_PTE_NX;
	} else {
		pte &= ~INTEL_EPT_EX;
	}
	return pte;
}

/*
 * Grant execute permission in a PTE: clear NX for legacy page tables,
 * set the execute-allow bit for EPT entries.
 */
static inline pt_entry_t
pte_set_ex(pt_entry_t pte, boolean_t is_ept)
{
	if (__probable(!is_ept)) {
		pte &= ~INTEL_PTE_NX;
	} else {
		pte |= INTEL_EPT_EX;
	}
	return pte;
}

/*
 * Mark an entry user-executable. INTEL_EPT_UEX is honored only by EPT
 * walkers when mode-based execute control is enabled; it is ignored
 * otherwise.
 */
static inline pt_entry_t
pte_set_uex(pt_entry_t pte)
{
	return INTEL_EPT_UEX | pte;
}

/*
 * Translate the referenced/modified bits of a legacy (physmap) PTE into
 * their EPT-format equivalents. Returns only the translated ref/mod bits;
 * all other input bits are ignored.
 */
static inline pt_entry_t
physmap_refmod_to_ept(pt_entry_t physmap_pte)
{
	pt_entry_t ept_bits = 0;

	if (physmap_pte & INTEL_PTE_REF) {
		ept_bits |= INTEL_EPT_REF;
	}
	if (physmap_pte & INTEL_PTE_MOD) {
		ept_bits |= INTEL_EPT_MOD;
	}

	return ept_bits;
}

/*
 * Translate EPT referenced/modified bits into their legacy-PTE
 * equivalents. The caller must pass only the EPT ref/mod bits — anything
 * else trips the assertion.
 */
static inline pt_entry_t
ept_refmod_to_physmap(pt_entry_t ept_pte)
{
	pt_entry_t legacy_bits = 0;

	assert((ept_pte & ~(INTEL_EPT_REF | INTEL_EPT_MOD)) == 0);

	if (ept_pte & INTEL_EPT_MOD) {
		legacy_bits |= INTEL_PTE_MOD;
	}
	if (ept_pte & INTEL_EPT_REF) {
		legacy_bits |= INTEL_PTE_REF;
	}

	return legacy_bits;
}

/*
 * Note: Not all Intel processors support EPT referenced access and dirty bits.
 *	 During pmap_init() we check the VMX capability for the current hardware
 *	 and update this variable accordingly.
 */
extern boolean_t pmap_ept_support_ad;

#define PTE_VALID_MASK(is_ept)  ((is_ept) ? (INTEL_EPT_READ | INTEL_EPT_WRITE | INTEL_EPT_EX | INTEL_EPT_UEX) : INTEL_PTE_VALID)
#define PTE_READ(is_ept)        ((is_ept) ? INTEL_EPT_READ : INTEL_PTE_VALID)
#define PTE_WRITE(is_ept)       ((is_ept) ? INTEL_EPT_WRITE : INTEL_PTE_WRITE)
#define PTE_IS_EXECUTABLE(is_ept, pte)  ((is_ept) ? (((pte) & (INTEL_EPT_EX | INTEL_EPT_UEX)) != 0) : (((pte) & INTEL_PTE_NX) == 0))
#define PTE_PS                  INTEL_PTE_PS
#define PTE_COMPRESSED          INTEL_PTE_COMPRESSED
#define PTE_COMPRESSED_ALT      INTEL_PTE_COMPRESSED_ALT
#define PTE_NCACHE(is_ept)      ((is_ept) ? INTEL_EPT_NCACHE : INTEL_PTE_NCACHE)
#define PTE_WTHRU(is_ept)       ((is_ept) ? INTEL_EPT_WTHRU : INTEL_PTE_WTHRU)
#define PTE_REF(is_ept)         ((is_ept) ? INTEL_EPT_REF : INTEL_PTE_REF)
#define PTE_MOD(is_ept)         ((is_ept) ? INTEL_EPT_MOD : INTEL_PTE_MOD)
#define PTE_WIRED               INTEL_PTE_WIRED


#define PMAP_DEFAULT_CACHE      0
#define PMAP_INHIBIT_CACHE      1
#define PMAP_GUARDED_CACHE      2
#define PMAP_ACTIVATE_CACHE     4
#define PMAP_NO_GUARD_CACHE     8

/* Per-pmap ledger operations */
#define pmap_ledger_debit(p, e, a) ledger_debit((p)->ledger, e, a)
#define pmap_ledger_credit(p, e, a) ledger_credit((p)->ledger, e, a)

#ifndef ASSEMBLER

#include <sys/queue.h>

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */

extern pt_entry_t       *PTmap;
extern pdpt_entry_t     *IdlePDPT;
extern pml4_entry_t     *IdlePML4;
extern boolean_t        no_shared_cr3;
extern pd_entry_t       *IdlePTD;       /* physical addr of "Idle" state PTD */

extern uint64_t         pmap_pv_hashlist_walks;
extern uint64_t         pmap_pv_hashlist_cnts;
extern uint32_t         pmap_pv_hashlist_max;
extern uint32_t         pmap_kernel_text_ps;

#define ID_MAP_VTOP(x)  ((void *)(((uint64_t)(x)) & LOW_4GB_MASK))

extern  uint64_t physmap_base, physmap_max;

#define NPHYSMAP (MAX(((physmap_max - physmap_base) / GB), 4))

extern pt_entry_t *PTE_corrupted_ptr;

#if DEVELOPMENT || DEBUG
extern int pmap_inject_pte_corruption;
#endif

/*
 * Handle detection of a corrupted page-table entry: record the offending
 * PTE pointer in the global PTE_corrupted_ptr, halt the other CPUs via
 * NMI (so their state is preserved for analysis), and panic with the
 * observed PTE value. clear_bits/set_bits, when nonzero, describe the
 * update that was being applied when corruption was noticed and are
 * included in the panic string.
 *
 * NOTE(review): the CAS passes &PTE_corrupted_ptr as both the atomic
 * object and the "expected" operand, so the expected value is whatever
 * the pointer currently holds and the exchange should always succeed on
 * first detection — presumably this serializes concurrent detectors so
 * only one CPU initiates the panic; confirm against the detection paths.
 */
static inline void
pmap_corrupted_pte_detected(pt_entry_t *ptep, uint64_t clear_bits, uint64_t set_bits)
{
	if (__c11_atomic_compare_exchange_strong((_Atomic(pt_entry_t *)*) & PTE_corrupted_ptr, &PTE_corrupted_ptr, ptep,
	    memory_order_acq_rel_smp, memory_order_relaxed)) {
		/* Freeze the other CPUs before tearing the system down. */
		force_immediate_debugger_NMI = TRUE;
		NMIPI_panic(CPUMASK_REAL_OTHERS, PTE_CORRUPTION);
		if (clear_bits == 0 && set_bits == 0) {
			panic("PTE Corruption detected: ptep 0x%llx pte value 0x%llx", (unsigned long long)(uintptr_t)ptep, *(uint64_t *)ptep);
		} else {
			panic("PTE Corruption detected: ptep 0x%llx pte value 0x%llx clear 0x%llx set 0x%llx",
			    (unsigned long long)(uintptr_t)ptep, *(uint64_t *)ptep, clear_bits, set_bits);
		}
	}
}

/*
 * Atomic 64-bit store of a page table entry.
 */
/*
 * Atomic 64-bit store of a page table entry.
 *
 * After the store, the value is sanity-checked: a non-EPT entry carrying
 * both the "compressed" software marker and the hardware NX bit is an
 * impossible combination (compressed markers are stored with no hardware
 * bits set), so seeing one indicates the entry was corrupted in flight.
 */
static inline void
pmap_store_pte(boolean_t is_ept, pt_entry_t *entryp, pt_entry_t value)
{
	/*
	 * In the 32-bit kernel a compare-and-exchange loop was
	 * required to provide atomicity. For K64, life is easier:
	 */
	*entryp = value;

#if DEVELOPMENT || DEBUG
	/*
	 * Test hook: perturb only the local copy of 'value' AFTER the store,
	 * so the in-memory PTE stays sane while the detector below fires
	 * exactly once (the trigger flag is self-clearing).
	 */
	if (__improbable(pmap_inject_pte_corruption != 0 && is_ept == FALSE && (value & PTE_COMPRESSED))) {
		pmap_inject_pte_corruption = 0;
		/* Inject a corruption event */
		value |= INTEL_PTE_NX;
	}
#endif

	/* compressed marker + NX simultaneously set => corrupted entry */
	if (__improbable((is_ept == FALSE) && (value & PTE_COMPRESSED) && (value & INTEL_PTE_NX))) {
		pmap_corrupted_pte_detected(entryp, 0, 0);
	}
}

/*
 * TRUE iff physical address 'a' falls inside the region covered by the
 * kernel physmap (direct map) window.
 */
static inline boolean_t
physmap_enclosed(addr64_t a)
{
	const addr64_t physmap_limit = NPHYSMAP * GB;

	return a < physmap_limit;
}

/*
 * Translate a physical address into its physmap (direct-map) kernel
 * virtual alias, panicking if the result would land outside the physmap
 * window.
 */
static inline void *
PHYSMAP_PTOV_check(void *paddr)
{
	uint64_t kva = physmap_base + (uint64_t)paddr;

	if (__improbable(!(kva < physmap_max))) {
		panic("PHYSMAP_PTOV bounds exceeded, 0x%qx, 0x%qx, 0x%qx",
		    kva, physmap_base, physmap_max);
	}

	return (void *)kva;
}

#define PHYSMAP_PTOV(x) (PHYSMAP_PTOV_check((void*) (x)))
#define phystokv(x) ((vm_offset_t)(PHYSMAP_PTOV(x)))
#if MACH_KERNEL_PRIVATE
extern uint64_t dblmap_base, dblmap_max, dblmap_dist;

/*
 * Translate a kernel address into its double-map alias (offset by
 * dblmap_dist), panicking if the alias falls outside [dblmap_base,
 * dblmap_max).
 */
static inline uint64_t
DBLMAP_CHECK(uintptr_t x)
{
	const uint64_t aliased = dblmap_dist + (uint64_t)x;

	if (__improbable((aliased < dblmap_base) || (aliased >= dblmap_max))) {
		panic("DBLMAP bounds exceeded, 0x%qx, 0x%qx 0x%qx, 0x%qx",
		    (uint64_t)x, aliased, dblmap_base, dblmap_max);
	}

	return aliased;
}
#define DBLMAP(x) (DBLMAP_CHECK((uint64_t) x))
extern uint64_t ldt_alias_offset;
/*
 * Translate an address into its LDT alias (offset by ldt_alias_offset),
 * with the same double-map bounds check as DBLMAP_CHECK.
 */
static inline uint64_t
LDTALIAS_CHECK(uintptr_t x)
{
	const uint64_t aliased = ldt_alias_offset + (uint64_t)x;

	if (__improbable((aliased < dblmap_base) || (aliased >= dblmap_max))) {
		panic("LDTALIAS: bounds exceeded, 0x%qx, 0x%qx 0x%qx, 0x%qx",
		    (uint64_t)x, aliased, dblmap_base, dblmap_max);
	}

	return aliased;
}
#define LDTALIAS(x) (LDTALIAS_CHECK((uint64_t) x))
#endif

/*
 * For KASLR, we alias the master processor's IDT and GDT at fixed
 * virtual addresses to defeat SIDT/SGDT address leakage.
 * And non-boot processor's GDT aliases likewise (skipping LOWGLOBAL_ALIAS)
 * The low global vector page is mapped at a fixed alias also.
 */
#define LOWGLOBAL_ALIAS         (VM_MIN_KERNEL_ADDRESS + 0x2000)

/*
 * This indicates (roughly) where there is free space for the VM
 * to use for the heap; this does not need to be precise.
 */
#define KERNEL_PMAP_HEAP_RANGE_START VM_MIN_KERNEL_AND_KEXT_ADDRESS

#if MACH_KERNEL_PRIVATE
extern void
pmap_tlbi_range(uint64_t startv, uint64_t endv, bool global, uint16_t pcid);

#include <vm/vm_page.h>

/*
 *	For each vm_page_t, there is a list of all currently
 *	valid virtual mappings of that page.  An entry is
 *	a pv_entry_t; the list is the pv_table.
 */

/* Per-address-space physical map. Field order/alignment is ABI-sensitive
 * (cache-line alignment on the lock and cr3); do not reorder. */
struct pmap {
	lck_rw_t        pmap_rwl __attribute((aligned(64)));    /* protects the map; own cache line */
	pmap_paddr_t    pm_cr3 __attribute((aligned(64))); /* Kernel+user shared PML4 physical*/
	pmap_paddr_t    pm_ucr3;        /* Mirrored user PML4 physical */
	pml4_entry_t    *pm_pml4;       /* VKA of top level */
	pml4_entry_t    *pm_upml4;      /* Shadow VKA of top level */
	pmap_paddr_t    pm_eptp;        /* EPTP */

	task_map_t      pm_task_map;    /* 32/64-bit addressing mode of the owning task */
	boolean_t       pagezero_accessible;    /* TRUE if page 0 is mapped (see set_dirbase) */
	boolean_t       pm_vm_map_cs_enforced; /* is vm_map cs_enforced? */
#define PMAP_PCID_MAX_CPUS      MAX_CPUS        /* Must be a multiple of 8 */
	pcid_t          pmap_pcid_cpus[PMAP_PCID_MAX_CPUS];     /* per-CPU PCID assigned to this pmap */
	volatile uint8_t pmap_pcid_coherency_vector[PMAP_PCID_MAX_CPUS]; /* per-CPU TLB-shootdown pending flags */
	boolean_t       pm_shared;      /* presumably shared between tasks — confirm with users */
	os_refcnt_t     ref_count;      /* outstanding references; pmap freed at zero */
	pdpt_entry_t    *pm_pdpt;       /* KVA of 3rd level page */
	vm_object_t     pm_obj;         /* object to hold pde's */
	vm_object_t     pm_obj_pdpt;    /* holds pdpt pages */
	vm_object_t     pm_obj_pml4;    /* holds pml4 pages */
#if     DEVELOPMENT || DEBUG
	int             nx_enabled;     /* debug: NX enforcement toggle (see pmap_disable_NX) */
#endif
	ledger_t        ledger;         /* ledger tracking phys mappings */
	uint64_t        corrected_compressed_ptes_count;        /* count of compressed-PTE repairs */
#if MACH_ASSERT
	boolean_t       pmap_stats_assert;      /* enable per-pmap stats assertions */
	int             pmap_pid;               /* owning pid, for diagnostics */
	char            pmap_procname[17];      /* owning process name, for diagnostics */
#endif /* MACH_ASSERT */
};

/*
 * TRUE iff 'p' is an EPT pmap (backs a guest physical address space).
 * Exactly one of pm_cr3 / pm_eptp is populated: a normal pmap has pm_cr3
 * set and pm_eptp clear, an EPT pmap the reverse.
 */
static inline boolean_t
is_ept_pmap(pmap_t p)
{
	boolean_t ept = (p->pm_cr3 == 0);

	/* the two root-pointer fields must be mutually exclusive */
	assert(ept ? (p->pm_eptp != 0) : (p->pm_eptp == 0));

	return ept;
}

void hv_ept_pmap_create(void **ept_pmap, void **eptp);

/* Descriptor for one contiguous range of physical memory known to the
 * pmap layer. Boot-time allocations "steal" pages from either end;
 * alloc_up/alloc_down track the remaining free window. */
typedef struct pmap_memory_regions {
	ppnum_t base;            /* first page of this region */
	ppnum_t alloc_up;        /* pages below this one have been "stolen" */
	ppnum_t alloc_down;      /* pages above this one have been "stolen" */
	ppnum_t alloc_frag_up;   /* low page of fragment after large page alloc */
	ppnum_t alloc_frag_down; /* high page of fragment after large page alloc */
	ppnum_t end;             /* last page of this region */
	uint32_t type;           /* region type — presumably EFI memory type; confirm with bootstrap code */
	uint64_t attribute;      /* region attributes — presumably EFI attribute bits; confirm with bootstrap code */
} pmap_memory_region_t;

extern unsigned pmap_memory_region_count;
extern unsigned pmap_memory_region_current;

#define PMAP_MEMORY_REGIONS_SIZE 128

extern pmap_memory_region_t pmap_memory_regions[];
#include <i386/pmap_pcid.h>

/*
 * Install pmap 'tpmap' as the active address space on CPU 'my_cpu' for
 * 'thread': publish its kernel/user cr3 values (and task map) into the
 * per-CPU data and its KPTI shadow, then switch cr3 (or activate a PCID)
 * only when actually necessary. Must be called with preemption disabled
 * or interrupts off, on the CPU being updated.
 */
static inline void
set_dirbase(pmap_t tpmap, thread_t thread, int my_cpu)
{
	int ccpu = my_cpu;
	uint64_t pcr3 = tpmap->pm_cr3, ucr3 = tpmap->pm_ucr3;
	/* Publish the new roots to both the per-CPU data and its shadow
	 * (the shadow copy is what the trampoline consults). */
	cpu_datap(ccpu)->cpu_task_cr3 = pcr3;
	cpu_shadowp(ccpu)->cpu_shadowtask_cr3 = pcr3;

	cpu_datap(ccpu)->cpu_ucr3 = ucr3;
	cpu_shadowp(ccpu)->cpu_ucr3 = ucr3;

	cpu_datap(ccpu)->cpu_task_map = cpu_shadowp(ccpu)->cpu_task_map =
	    tpmap->pm_task_map;

	assert((get_preemption_level() > 0) || (ml_get_interrupts_enabled() == FALSE));
	assert(ccpu == cpu_number());
	/*
	 * Switch cr3 if necessary
	 * - unless running with no_shared_cr3 debugging mode
	 *   and we're not on the kernel's cr3 (after pre-empted copyio)
	 */
	boolean_t nopagezero = tpmap->pagezero_accessible;
	boolean_t priorpagezero = cpu_datap(ccpu)->cpu_pagezero_mapped;
	cpu_datap(ccpu)->cpu_pagezero_mapped = nopagezero;

	if (__probable(!no_shared_cr3)) {
		if (__improbable(nopagezero)) {
			/* Target maps page zero: stay on the user cr3 only while
			 * copyio is in flight, otherwise run on the kernel cr3. */
			boolean_t copyio_active = ((thread->machine.specFlags & CopyIOActive) != 0);
			if (pmap_pcid_ncpus) {
				pmap_pcid_activate(tpmap, ccpu, TRUE, copyio_active);
			} else {
				if (copyio_active) {
					if (get_cr3_base() != tpmap->pm_cr3) {
						set_cr3_raw(tpmap->pm_cr3);
					}
				} else if (get_cr3_base() != cpu_datap(ccpu)->cpu_kernel_cr3) {
					set_cr3_raw(cpu_datap(ccpu)->cpu_kernel_cr3);
				}
			}
		} else if ((get_cr3_base() != tpmap->pm_cr3) || priorpagezero) {
			/* Normal case: switch only on an actual cr3 change, or to
			 * leave a previously pagezero-mapped configuration. */
			if (pmap_pcid_ncpus) {
				pmap_pcid_activate(tpmap, ccpu, FALSE, FALSE);
			} else {
				set_cr3_raw(tpmap->pm_cr3);
			}
		}
	} else {
		/* no_shared_cr3 debug mode: always run kernel code on the
		 * kernel's own cr3. */
		if (get_cr3_base() != cpu_datap(ccpu)->cpu_kernel_cr3) {
			set_cr3_raw(cpu_datap(ccpu)->cpu_kernel_cr3);
		}
	}
}

/*
 *	External declarations for PMAP_ACTIVATE.
 */

extern void             pmap_update_interrupt(void);

extern addr64_t(kvtophys)(
	vm_offset_t     addr);

extern kern_return_t    pmap_expand(
	pmap_t          pmap,
	vm_map_offset_t addr,
	unsigned int options);
extern vm_offset_t      pmap_map(
	vm_offset_t     virt,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       prot,
	unsigned int    flags);

extern vm_offset_t      pmap_map_bd(
	vm_offset_t     virt,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_prot_t       prot,
	unsigned int    flags);
extern void             pmap_bootstrap(
	vm_offset_t     load_start,
	boolean_t       IA32e);

extern boolean_t        pmap_valid_page(
	ppnum_t pn);

extern int              pmap_list_resident_pages(
	struct pmap     *pmap,
	vm_offset_t     *listp,
	int             space);
extern void             x86_filter_TLB_coherency_interrupts(boolean_t);

extern void
pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX,
    boolean_t ro);

/*
 * Get cache attributes (as pagetable bits) for the specified phys page
 */
extern  unsigned        pmap_get_cache_attributes(ppnum_t, boolean_t is_ept);

extern kern_return_t    pmap_map_block_addr(
	pmap_t pmap,
	addr64_t va,
	pmap_paddr_t pa,
	uint32_t size,
	vm_prot_t prot,
	int attr,
	unsigned int flags);
extern kern_return_t    pmap_map_block(
	pmap_t pmap,
	addr64_t va,
	ppnum_t pa,
	uint32_t size,
	vm_prot_t prot,
	int attr,
	unsigned int flags);

extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys);
extern void flush_dcache(vm_offset_t addr, unsigned count, int phys);
extern pmap_paddr_t pmap_find_pa(pmap_t map, addr64_t va);
extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
extern ppnum_t pmap_find_phys_nofault(pmap_t pmap, addr64_t va);

extern kern_return_t pmap_get_prot(pmap_t pmap, addr64_t va, vm_prot_t *protp);

extern void pmap_cpu_init(void);
extern void pmap_disable_NX(pmap_t pmap);

extern void pmap_pagetable_corruption_msg_log(int (*)(const char * fmt, ...)__printflike(1, 2));

extern void x86_64_protect_data_const(void);

extern uint64_t pmap_commpage_size_min(pmap_t pmap);

extern void pmap_ro_zone_memcpy(zone_id_t zid, vm_offset_t va, vm_offset_t offset,
    vm_offset_t new_data, vm_size_t new_data_size);
extern uint64_t pmap_ro_zone_atomic_op(zone_id_t zid, vm_offset_t va, vm_offset_t offset,
    uint32_t op, uint64_t value);
extern void pmap_ro_zone_bzero(zone_id_t zid, vm_offset_t va, vm_offset_t offset, vm_size_t size);

/*
 *	Macros for speed.
 */


#include <kern/spl.h>


#define PMAP_ACTIVATE_MAP(map, thread, my_cpu)  {                               \
	pmap_t		tpmap;                                  \
                                                                        \
	tpmap = vm_map_pmap(map);                                       \
	set_dirbase(tpmap, thread, my_cpu);                                     \
}

#if   defined(__x86_64__)
#define PMAP_DEACTIVATE_MAP(map, thread, ccpu)                          \
	pmap_assert2((pmap_pcid_ncpus ? (pcid_for_pmap_cpu_tuple(map->pmap, thread, ccpu) == (get_cr3_raw() & 0xFFF)) : TRUE),"PCIDs: 0x%x, active PCID: 0x%x, CR3: 0x%lx, pmap_cr3: 0x%llx, kernel_cr3: 0x%llx, kernel pmap cr3: 0x%llx, CPU active PCID: 0x%x, CPU kernel PCID: 0x%x, specflags: 0x%x, pagezero: 0x%x", pmap_pcid_ncpus, pcid_for_pmap_cpu_tuple(map->pmap, thread, ccpu), get_cr3_raw(), map->pmap->pm_cr3, cpu_datap(ccpu)->cpu_kernel_cr3, kernel_pmap->pm_cr3, cpu_datap(ccpu)->cpu_active_pcid, cpu_datap(ccpu)->cpu_kernel_pcid, thread->machine.specFlags, map->pmap->pagezero_accessible);
#else
#define PMAP_DEACTIVATE_MAP(map, thread)
#endif

#define PMAP_SWITCH_USER(th, new_map, my_cpu) {                         \
	spl_t		spl;                                            \
                                                                        \
	spl = splhigh();                                                \
	PMAP_DEACTIVATE_MAP(th->map, th, my_cpu);                       \
	th->map = new_map;                                              \
	PMAP_ACTIVATE_MAP(th->map, th, my_cpu);                         \
	splx(spl);                                                      \
}

/*
 * Marking the current cpu's cr3 inactive is achieved by setting its lsb.
 * Marking the current cpu's cr3 active once more involves clearng this bit.
 * Note that valid page tables are page-aligned and so the bottom 12 bits
 * are normally zero, modulo PCID.
 * We can only mark the current cpu active/inactive but we can test any cpu.
 */
#define CPU_CR3_MARK_INACTIVE()                                         \
	current_cpu_datap()->cpu_active_cr3 |= 1

#define CPU_CR3_MARK_ACTIVE()                                           \
	current_cpu_datap()->cpu_active_cr3 &= ~1

#define CPU_CR3_IS_ACTIVE(cpu)                                          \
	((cpu_datap(cpu)->cpu_active_cr3 & 1) == 0)

#define CPU_GET_ACTIVE_CR3(cpu)                                         \
	(cpu_datap(cpu)->cpu_active_cr3 & ~1)

#define CPU_GET_TASK_CR3(cpu)                                           \
	(cpu_datap(cpu)->cpu_task_cr3)

/*
 *	Mark this cpu idle, and remove it from the active set,
 *	since it is not actively using any pmap.  Signal_cpus
 *	will notice that it is idle, and avoid signaling it,
 *	but will queue the update request for when the cpu
 *	becomes active.
 */
#define MARK_CPU_IDLE(my_cpu)   {                                       \
	assert(ml_get_interrupts_enabled() == FALSE);                   \
	CPU_CR3_MARK_INACTIVE();                                        \
	mfence();                                                                       \
}

#define MARK_CPU_ACTIVE(my_cpu) {                                       \
	assert(ml_get_interrupts_enabled() == FALSE);                   \
	/* \
	 *	If a kernel_pmap update was requested while this cpu \
	 *	was idle, process it as if we got the interrupt. \
	 *	Before doing so, remove this cpu from the idle set. \
	 *	Since we do not grab any pmap locks while we flush \
	 *	our TLB, another cpu may start an update operation \
	 *	before we finish.  Removing this cpu from the idle \
	 *	set assures that we will receive another update \
	 *	interrupt if this happens. \
	 */                                                             \
	CPU_CR3_MARK_ACTIVE();                                          \
	mfence();                                                       \
	pmap_update_interrupt();                                        \
}

#define PMAP_CONTEXT(pmap, thread)

#define pmap_kernel_va(VA)      \
	((((vm_offset_t) (VA)) >= vm_min_kernel_address) &&     \
	 (((vm_offset_t) (VA)) <= vm_max_kernel_address))


#define pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
#define pmap_attribute(pmap, addr, size, attr, value) \
	                                (KERN_INVALID_ADDRESS)
#define pmap_attribute_cache_sync(addr, size, attr, value) \
	                                (KERN_INVALID_ADDRESS)

extern boolean_t pmap_is_empty(pmap_t           pmap,
    vm_map_offset_t  start,
    vm_map_offset_t  end);

#define MACHINE_BOOTSTRAPPTD    1       /* Static bootstrap page-tables */

kern_return_t
    pmap_permissions_verify(pmap_t, vm_map_t, vm_offset_t, vm_offset_t);

#if DEVELOPMENT || DEBUG
extern kern_return_t pmap_test_text_corruption(pmap_paddr_t);
#endif /* DEVELOPMENT || DEBUG */

#if MACH_ASSERT
extern int pmap_stats_assert;
#define PMAP_STATS_ASSERTF(args)                \
	MACRO_BEGIN                             \
	if (pmap_stats_assert) assertf args;    \
	MACRO_END
#else /* MACH_ASSERT */
#define PMAP_STATS_ASSERTF(args)
#endif /* MACH_ASSERT */
#endif /* MACH_KERNEL_PRIVATE */
#endif  /* ASSEMBLER */
#endif  /* _PMAP_MACHINE_ */
#endif  /* KERNEL_PRIVATE */