/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_pageout.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Declarations for the pageout daemon interface.
 */

#ifndef _VM_VM_PAGEOUT_H_
#define _VM_VM_PAGEOUT_H_

#ifdef  KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>

#include <kern/kern_types.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/bits.h>

#include <libkern/OSAtomic.h>


#include <vm/vm_options.h>

#ifdef  MACH_KERNEL_PRIVATE
#include <vm/vm_page.h>
#endif

#include <sys/kdebug.h>

#define VM_PAGE_AVAILABLE_COUNT()               ((unsigned int)(vm_page_cleaned_count))

/* externally manipulated counters */
extern unsigned int vm_pageout_cleaned_fault_reactivated;

#if CONFIG_FREEZE
extern boolean_t memorystatus_freeze_enabled;

struct freezer_context {
	/*
	 * All these counters & variables track the task
	 * being frozen.
	 * Currently we only freeze one task at a time. Should that
	 * change, we'll need to add support for multiple freezer contexts.
	 */

	task_t  freezer_ctx_task; /* Task being frozen. */

	void    *freezer_ctx_chead; /* The chead used to track c_segs allocated */
	                            /* to freeze the task.*/

	uint64_t        freezer_ctx_swapped_bytes; /* Tracks # of compressed bytes.*/

	int     freezer_ctx_uncompressed_pages; /* Tracks # of uncompressed pages frozen. */

	char    *freezer_ctx_compressor_scratch_buf; /* Scratch buffer for the compressor algorithm. */
};
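
/*
 * Illustrative sketch (not part of this interface): the freezer would
 * typically zero a context, record the task being frozen, and let the
 * compressor fill in the counters as it runs.  'task' is hypothetical.
 *
 *	struct freezer_context fctx = { 0 };
 *	fctx.freezer_ctx_task = task;
 *	// ... compress/swap the task's pages ...
 *	// fctx.freezer_ctx_swapped_bytes and
 *	// fctx.freezer_ctx_uncompressed_pages now reflect the work done
 */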

#endif /* CONFIG_FREEZE */

#define VM_DYNAMIC_PAGING_ENABLED() (VM_CONFIG_COMPRESSOR_IS_ACTIVE)

#if VM_PRESSURE_EVENTS
extern boolean_t vm_pressure_events_enabled;
#endif /* VM_PRESSURE_EVENTS */


/*
 * the following codes are used in the DBG_MACH_WORKINGSET subclass
 * of the DBG_MACH class
 */
#define VM_DISCONNECT_ALL_PAGE_MAPPINGS         0x00
#define VM_DISCONNECT_TASK_PAGE_MAPPINGS        0x01
#define VM_REAL_FAULT_ADDR_INTERNAL             0x02
#define VM_REAL_FAULT_ADDR_PURGABLE             0x03
#define VM_REAL_FAULT_ADDR_EXTERNAL             0x04
#define VM_REAL_FAULT_ADDR_SHAREDCACHE          0x05
#define VM_REAL_FAULT_FAST                      0x06
#define VM_REAL_FAULT_SLOW                      0x07
#define VM_MAP_LOOKUP_OBJECT                    0x08



extern int      vm_debug_events;

#define VMF_CHECK_ZFDELAY               0x100
#define VMF_COWDELAY                    0x101
#define VMF_ZFDELAY                     0x102
#define VMF_COMPRESSORDELAY             0x103

#define VM_PAGEOUT_SCAN                 0x104
#define VM_PAGEOUT_BALANCE              0x105
#define VM_PAGEOUT_FREELIST             0x106
#define VM_PAGEOUT_PURGEONE             0x107
#define VM_PAGEOUT_CACHE_EVICT          0x108
#define VM_PAGEOUT_THREAD_BLOCK         0x109
#define VM_PAGEOUT_JETSAM               0x10A
#define VM_INFO1                        0x10B
#define VM_INFO2                        0x10C
#define VM_INFO3                        0x10D
#define VM_INFO4                        0x10E
#define VM_INFO5                        0x10F
#define VM_INFO6                        0x110
#define VM_INFO7                        0x111
#define VM_INFO8                        0x112
#define VM_INFO9                        0x113
#define VM_INFO10                       0x114

#define VM_UPL_PAGE_WAIT                0x120
#define VM_IOPL_PAGE_WAIT               0x121
#define VM_PAGE_WAIT_BLOCK              0x122

#if CONFIG_IOSCHED
#define VM_PAGE_SLEEP                   0x123
#define VM_PAGE_EXPEDITE                0x124
#define VM_PAGE_EXPEDITE_NO_MEMORY      0x125
#endif

#define VM_PAGE_GRAB                    0x126
#define VM_PAGE_RELEASE                 0x127
#define VM_COMPRESSOR_COMPACT_AND_SWAP  0x128
#define VM_COMPRESSOR_DO_DELAYED_COMPACTIONS 0x129


#define VM_PRESSURE_EVENT               0x130
#define VM_EXECVE                       0x131
#define VM_WAKEUP_COMPACTOR_SWAPPER     0x132
#define VM_UPL_REQUEST                  0x133
#define VM_IOPL_REQUEST                 0x134
#define VM_KERN_REQUEST                 0x135

#define VM_DATA_WRITE                   0x140

#define VM_PRESSURE_LEVEL_CHANGE        0x141

#define VM_PHYS_WRITE_ACCT              0x142

#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4)    \
	MACRO_BEGIN                                             \
	if (__improbable(vm_debug_events)) {                    \
	        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	}                                                       \
	MACRO_END

#define VM_DEBUG_CONSTANT_EVENT(name, event, control, arg1, arg2, arg3, arg4)   \
	MACRO_BEGIN                                             \
	        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	MACRO_END
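
/*
 * Usage sketch (the event pair and arguments are illustrative only):
 * VM_DEBUG_EVENT fires only when 'vm_debug_events' is set, while
 * VM_DEBUG_CONSTANT_EVENT always emits on trace-enabled kernels.
 *
 *	VM_DEBUG_CONSTANT_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE,
 *	                        DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
 *	// ... rebalance the page queues ...
 *	VM_DEBUG_CONSTANT_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE,
 *	                        DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
 */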

extern void memoryshot(unsigned int event, unsigned int control);

extern void update_vm_info(void);

#if CONFIG_IOSCHED
extern int upl_get_cached_tier(
	upl_t                   upl);
#endif

extern void upl_set_iodone(upl_t, void *);
extern void upl_set_iodone_error(upl_t, int);
extern void upl_callout_iodone(upl_t);

extern ppnum_t upl_get_highest_page(
	upl_t                   upl);

extern upl_size_t upl_get_size(
	upl_t                   upl);

extern upl_t upl_associated_upl(upl_t upl);
extern void upl_set_associated_upl(upl_t upl, upl_t associated_upl);

#ifndef MACH_KERNEL_PRIVATE
typedef struct vm_page  *vm_page_t;
#endif
#ifdef  XNU_KERNEL_PRIVATE
#include <vm/vm_kern.h>

extern upl_size_t upl_adjusted_size(
	upl_t upl,
	vm_map_offset_t page_mask);
extern vm_object_offset_t upl_adjusted_offset(
	upl_t upl,
	vm_map_offset_t page_mask);
extern vm_object_offset_t upl_get_data_offset(
	upl_t upl);

extern kern_return_t vm_map_create_upl(
	vm_map_t                map,
	vm_map_address_t        offset,
	upl_size_t              *upl_size,
	upl_t                   *upl,
	upl_page_info_array_t   page_list,
	unsigned int            *count,
	upl_control_flags_t     *flags,
	vm_tag_t            tag);

extern void iopl_valid_data(
	upl_t                   upl_ptr,
	vm_tag_t        tag);

extern void               vm_page_free_list(
	vm_page_t   mem,
	boolean_t   prepare_object);

extern kern_return_t vm_page_alloc_list(
	vm_size_t   page_count,
	kma_flags_t flags,
	vm_page_t  *list);

#endif  /* XNU_KERNEL_PRIVATE */

extern struct vnode * upl_lookup_vnode(upl_t upl);

extern void               vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
extern ppnum_t            vm_page_get_phys_page(vm_page_t page);
extern vm_page_t          vm_page_get_next(vm_page_t page);

extern kern_return_t    mach_vm_pressure_level_monitor(boolean_t wait_for_pressure, unsigned int *pressure_level);
#if KERNEL_PRIVATE
extern kern_return_t    mach_vm_wire_level_monitor(int64_t requested_pages);
#endif /* KERNEL_PRIVATE */

#if XNU_TARGET_OS_OSX
extern kern_return_t    vm_pageout_wait(uint64_t deadline);
#endif /* XNU_TARGET_OS_OSX */

#ifdef  MACH_KERNEL_PRIVATE

#include <vm/vm_page.h>

extern unsigned int     vm_pageout_scan_event_counter;
extern unsigned int     vm_page_anonymous_count;
extern thread_t         vm_pageout_scan_thread;
extern thread_t         vm_pageout_gc_thread;

#define VM_PAGEOUT_GC_INIT      ((void *)0)
#define VM_PAGEOUT_GC_COLLECT   ((void *)1)
#define VM_PAGEOUT_GC_EVENT     ((event_t)&vm_pageout_garbage_collect)
extern void vm_pageout_garbage_collect(void *, wait_result_t);


/*
 * must hold the page queues lock to
 * manipulate this structure
 */
struct vm_pageout_queue {
	vm_page_queue_head_t pgo_pending;  /* laundry pages to be processed by pager's iothread */
	unsigned int    pgo_laundry;       /* current count of laundry pages on queue or in flight */
	unsigned int    pgo_maxlaundry;

	uint32_t
	    pgo_busy:1,        /* iothread is currently processing request from pgo_pending */
	    pgo_throttled:1,   /* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
	    pgo_lowpriority:1, /* iothread is set to use low priority I/O */
	    pgo_draining:1,
	    pgo_inited:1,
	    pgo_unused_bits:26;
};

#define VM_PAGE_Q_THROTTLED(q)          \
	((q)->pgo_laundry >= (q)->pgo_maxlaundry)

extern struct   vm_pageout_queue        vm_pageout_queue_internal;
extern struct   vm_pageout_queue        vm_pageout_queue_external;
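
/*
 * Sketch of the intended use: before queueing more laundry, scan code
 * checks whether the target queue is already saturated, e.g.
 *
 *	if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_internal)) {
 *	        // compressor path can't keep up; wait for pgo_laundry
 *	        // to drain rather than queueing more pages
 *	}
 */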


/*
 *	Routines exported to Mach.
 */
extern void             vm_pageout(void);

__startup_func extern void             vm_config_init(void);

extern kern_return_t    vm_pageout_internal_start(void);

extern void             vm_pageout_object_terminate(
	vm_object_t     object);

extern void             vm_pageout_cluster(
	vm_page_t       m);

extern void             vm_pageout_initialize_page(
	vm_page_t       m);

/* UPL exported routines and structures */

#define upl_lock_init(object)   lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define upl_lock_destroy(object)        lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object)        lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object)      lck_mtx_unlock(&(object)->Lock)
#define upl_try_lock(object)    lck_mtx_try_lock(&(object)->Lock)
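
/*
 * Typical locking pattern (sketch; the flag update shown is illustrative):
 * the per-UPL mutex guards the mutable fields of 'struct upl' below.
 *
 *	upl_lock(upl);
 *	upl->flags |= UPL_CLEAR_DIRTY;
 *	upl_unlock(upl);
 */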

struct _vector_upl_iostates {
	upl_offset_t offset;
	upl_size_t   size;
};

typedef struct _vector_upl_iostates vector_upl_iostates_t;

struct _vector_upl {
	upl_size_t              size;
	uint32_t                num_upls;
	uint32_t                invalid_upls;
	uint32_t                max_upls;
	vm_map_t                submap;
	vm_offset_t             submap_dst_addr;
	vm_object_offset_t      offset;
	upl_page_info_array_t   pagelist;
	struct {
		upl_t                   elem;
		vector_upl_iostates_t   iostate;
	} upls[];
};

typedef struct _vector_upl* vector_upl_t;

uint32_t vector_upl_max_upls(const upl_t upl);

/* universal page list structure */

#if UPL_DEBUG
#define UPL_DEBUG_COMMIT_RECORDS 4

struct ucd {
	upl_offset_t    c_beg;
	upl_offset_t    c_end;
	int             c_aborted;
	uint32_t        c_btref; /* btref_t */
};
#endif

struct upl_io_completion {
	void     *io_context;
	void     (*io_done)(void *, int);

	int      io_error;
};


struct upl {
	decl_lck_mtx_data(, Lock);      /* Synchronization */
	int             ref_count;
	int             ext_ref_count;
	int             flags;
	/*
	 * XXX CAUTION: to accommodate devices with "mixed page sizes",
	 * u_offset and u_size are now byte-aligned and no longer
	 * page-aligned, on all devices.
	 */
	vm_object_offset_t u_offset;
	upl_size_t      u_size;       /* size in bytes of the address space */
	upl_size_t      u_mapped_size;       /* size in bytes of the UPL that is mapped */
	vm_offset_t     kaddr;      /* secondary mapping in kernel */
	vm_object_t     map_object;
	vector_upl_t    vector_upl;
	upl_t           associated_upl;
	struct upl_io_completion *upl_iodone;
	ppnum_t         highest_page;
#if CONFIG_IOSCHED
	int             upl_priority;
	uint64_t        *upl_reprio_info;
	void            *decmp_io_upl;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	thread_t        upl_creator;
	queue_chain_t   uplq;       /* List of outstanding upls on an obj */
#endif
#if     UPL_DEBUG
	uintptr_t       ubc_alias1;
	uintptr_t       ubc_alias2;

	uint32_t        upl_state;
	uint32_t        upl_commit_index;
	uint32_t        upl_create_btref; /* btref_t */

	struct  ucd     upl_commit_records[UPL_DEBUG_COMMIT_RECORDS];
#endif  /* UPL_DEBUG */

	bitmap_t       *lite_list;
	struct upl_page_info page_list[];
};

/* upl struct flags */
#define UPL_PAGE_LIST_MAPPED    0x1
#define UPL_KERNEL_MAPPED       0x2
#define UPL_CLEAR_DIRTY         0x4
#define UPL_COMPOSITE_LIST      0x8
#define UPL_INTERNAL            0x10
#define UPL_PAGE_SYNC_DONE      0x20
#define UPL_DEVICE_MEMORY       0x40
#define UPL_PAGEOUT             0x80
#define UPL_LITE                0x100
#define UPL_IO_WIRE             0x200
#define UPL_ACCESS_BLOCKED      0x400
#define UPL_SHADOWED            0x1000
#define UPL_KERNEL_OBJECT       0x2000
#define UPL_VECTOR              0x4000
#define UPL_SET_DIRTY           0x8000
#define UPL_HAS_BUSY            0x10000
#define UPL_TRACKED_BY_OBJECT   0x20000
#define UPL_EXPEDITE_SUPPORTED  0x40000
#define UPL_DECMP_REQ           0x80000
#define UPL_DECMP_REAL_IO       0x100000
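
/*
 * Flags are tested directly against 'flags' under the UPL lock, e.g.
 * (illustrative):
 *
 *	if ((upl->flags & (UPL_DEVICE_MEMORY | UPL_VECTOR)) == 0) {
 *	        // ordinary page-list UPL backed by vm_page structures
 *	}
 */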

/* flags for upl_create flags parameter */
#define UPL_CREATE_EXTERNAL     0
#define UPL_CREATE_INTERNAL     0x1
#define UPL_CREATE_LITE         0x2
#define UPL_CREATE_IO_TRACKING  0x4
#define UPL_CREATE_EXPEDITE_SUP 0x8

extern upl_t vector_upl_create(vm_offset_t, uint32_t);
extern void vector_upl_deallocate(upl_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_submap(upl_t, vm_map_t, vm_offset_t);
extern void vector_upl_get_submap(upl_t, vm_map_t*, vm_offset_t*);
extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t);
extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t*, upl_size_t*);
extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t*, upl_size_t*);
extern upl_t vector_upl_subupl_byindex(upl_t, uint32_t);
extern upl_t vector_upl_subupl_byoffset(upl_t, upl_offset_t*, upl_size_t*);
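
/*
 * Sketch of vector-UPL traversal (assumes 'vupl' is a vector UPL created
 * with vector_upl_create(); 'process()' is hypothetical):
 *
 *	if (vector_upl_is_valid(vupl)) {
 *	        uint32_t n = vector_upl_max_upls(vupl);
 *	        for (uint32_t i = 0; i < n; i++) {
 *	                upl_t sub = vector_upl_subupl_byindex(vupl, i);
 *	                if (sub != NULL) {
 *	                        process(sub);
 *	                }
 *	        }
 *	}
 */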

extern void vm_object_set_pmap_cache_attr(
	vm_object_t             object,
	upl_page_info_array_t   user_page_list,
	unsigned int            num_pages,
	boolean_t               batch_pmap_op);

extern kern_return_t vm_object_iopl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_t                   *upl_ptr,
	upl_page_info_array_t   user_page_list,
	unsigned int            *page_list_count,
	upl_control_flags_t     cntrl_flags,
	vm_tag_t            tag);

extern kern_return_t vm_object_super_upl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_size_t              super_cluster,
	upl_t                   *upl,
	upl_page_info_t         *user_page_list,
	unsigned int            *page_list_count,
	upl_control_flags_t     cntrl_flags,
	vm_tag_t            tag);

/* should be just a regular vm_map_enter() */
extern kern_return_t vm_map_enter_upl(
	vm_map_t                map,
	upl_t                   upl,
	vm_map_offset_t         *dst_addr);

/* should be just a regular vm_map_remove() */
extern kern_return_t vm_map_remove_upl(
	vm_map_t                map,
	upl_t                   upl);

extern kern_return_t vm_map_enter_upl_range(
	vm_map_t                map,
	upl_t                   upl,
	vm_object_offset_t      offset,
	upl_size_t              size,
	vm_prot_t               prot,
	vm_map_offset_t         *dst_addr);

extern kern_return_t vm_map_remove_upl_range(
	vm_map_t                map,
	upl_t                   upl,
	vm_object_offset_t      offset,
	upl_size_t              size);

extern struct vm_page_delayed_work*
vm_page_delayed_work_get_ctx(void);

extern void
vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work* dwp);

extern void vm_page_free_reserve(int pages);

extern void vm_pageout_throttle_down(vm_page_t page);
extern void vm_pageout_throttle_up(vm_page_t page);

extern kern_return_t vm_paging_map_object(
	vm_page_t               page,
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_prot_t               protection,
	boolean_t               can_unlock_object,
	vm_map_size_t           *size,          /* IN/OUT */
	vm_map_offset_t         *address,       /* OUT */
	boolean_t               *need_unmap);   /* OUT */
extern void vm_paging_unmap_object(
	vm_object_t             object,
	vm_map_offset_t         start,
	vm_map_offset_t         end);
decl_simple_lock_data(extern, vm_paging_lock);
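
/*
 * Map/unmap pairing (sketch): callers must honor 'need_unmap', since
 * vm_paging_map_object() can return a mapping that is not transient:
 *
 *	vm_map_size_t   size = PAGE_SIZE;
 *	vm_map_offset_t addr;
 *	boolean_t       need_unmap;
 *	kern_return_t   kr;
 *
 *	kr = vm_paging_map_object(page, object, offset, VM_PROT_READ,
 *	                          FALSE, &size, &addr, &need_unmap);
 *	// ... on KERN_SUCCESS, access the page through 'addr' ...
 *	if (need_unmap) {
 *	        vm_paging_unmap_object(object, addr, addr + size);
 *	}
 */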

/*
 * Backing store throttle when BS is exhausted
 */
extern unsigned int    vm_backing_store_low;

extern void vm_pageout_steal_laundry(
	vm_page_t page,
	boolean_t queues_locked);

#endif  /* MACH_KERNEL_PRIVATE */

#if UPL_DEBUG
extern kern_return_t  upl_ubc_alias_set(
	upl_t upl,
	uintptr_t alias1,
	uintptr_t alias2);
extern int  upl_ubc_alias_get(
	upl_t upl,
	uintptr_t * al,
	uintptr_t * al2);
#endif /* UPL_DEBUG */

extern void vm_countdirtypages(void);

extern void vm_backing_store_disable(
	boolean_t       suspend);

extern kern_return_t upl_transpose(
	upl_t   upl1,
	upl_t   upl2);

extern kern_return_t mach_vm_pressure_monitor(
	boolean_t       wait_for_pressure,
	unsigned int    nsecs_monitored,
	unsigned int    *pages_reclaimed_p,
	unsigned int    *pages_wanted_p);
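
/*
 * Illustrative call (sketch; the monitoring interval is hypothetical):
 *
 *	unsigned int reclaimed, wanted;
 *	kern_return_t kr = mach_vm_pressure_monitor(TRUE, 30,
 *	                                            &reclaimed, &wanted);
 *	if (kr == KERN_SUCCESS && wanted > 0) {
 *	        // pageout still wants 'wanted' more pages reclaimed
 *	}
 */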

extern kern_return_t
vm_set_buffer_cleanup_callout(
	boolean_t       (*func)(int));

struct vm_page_stats_reusable {
	SInt32          reusable_count;
	uint64_t        reusable;
	uint64_t        reused;
	uint64_t        reused_wire;
	uint64_t        reused_remove;
	uint64_t        all_reusable_calls;
	uint64_t        partial_reusable_calls;
	uint64_t        all_reuse_calls;
	uint64_t        partial_reuse_calls;
	uint64_t        reusable_pages_success;
	uint64_t        reusable_pages_failure;
	uint64_t        reusable_pages_shared;
	uint64_t        reuse_pages_success;
	uint64_t        reuse_pages_failure;
	uint64_t        can_reuse_success;
	uint64_t        can_reuse_failure;
	uint64_t        reusable_reclaimed;
	uint64_t        reusable_nonwritable;
	uint64_t        reusable_shared;
	uint64_t        free_shared;
};
extern struct vm_page_stats_reusable vm_page_stats_reusable;

extern int hibernate_flush_memory(void);
extern void hibernate_reset_stats(void);
extern void hibernate_create_paddr_map(void);

extern void vm_set_restrictions(unsigned int num_cpus);

extern int vm_compressor_mode;
extern kern_return_t vm_pageout_compress_page(void **, char *, vm_page_t);
extern void vm_pageout_anonymous_pages(void);
extern void vm_pageout_disconnect_all_pages(void);
extern int vm_toggle_task_selfdonate_pages(task_t);
extern void vm_task_set_selfdonate_pages(task_t, bool);

struct  vm_config {
	boolean_t       compressor_is_present;          /* compressor is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t       compressor_is_active;           /* pager can actively compress pages...  'compressor_is_present' must be set */
	boolean_t       swap_is_present;                /* swap is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t       swap_is_active;                 /* pager can actively swap out compressed segments... 'swap_is_present' must be set */
	boolean_t       freezer_swap_is_active;         /* freezer can swap out frozen tasks... "compressor_is_present + swap_is_present" must be set */
};

extern  struct vm_config        vm_config;


#define VM_PAGER_NOT_CONFIGURED                         0x0     /* no compressor or swap configured */
#define VM_PAGER_DEFAULT                                0x1     /* Use default pager... DEPRECATED */
#define VM_PAGER_COMPRESSOR_NO_SWAP                     0x2     /* Active in-core compressor only. */
#define VM_PAGER_COMPRESSOR_WITH_SWAP                   0x4     /* Active in-core compressor + swap backend. */
#define VM_PAGER_FREEZER_DEFAULT                        0x8     /* Freezer backed by default pager... DEPRECATED */
#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP             0x10    /* Freezer backed by in-core compressor only i.e. frozen data remain in-core compressed.*/
#define VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP   0x20    /* Active in-core compressor + Freezer backed by in-core compressor with swap support too.*/

#define VM_PAGER_MAX_MODES                              6       /* Total number of vm compressor modes supported */


#define VM_CONFIG_COMPRESSOR_IS_PRESENT         (vm_config.compressor_is_present == TRUE)
#define VM_CONFIG_COMPRESSOR_IS_ACTIVE          (vm_config.compressor_is_active == TRUE)
#define VM_CONFIG_SWAP_IS_PRESENT               (vm_config.swap_is_present == TRUE)
#define VM_CONFIG_SWAP_IS_ACTIVE                (vm_config.swap_is_active == TRUE)
#define VM_CONFIG_FREEZER_SWAP_IS_ACTIVE        (vm_config.freezer_swap_is_active == TRUE)
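
/*
 * Example (sketch): configuration-dependent code keys off these
 * predicates rather than reading vm_config fields directly, e.g.
 *
 *	if (VM_CONFIG_COMPRESSOR_IS_ACTIVE && !VM_CONFIG_SWAP_IS_ACTIVE) {
 *	        // VM_PAGER_COMPRESSOR_NO_SWAP-style setup: compress
 *	        // in core, never swap compressed segments to disk
 *	}
 */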

#endif  /* KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE

struct vm_pageout_state {
	boolean_t vm_pressure_thread_running;
	boolean_t vm_pressure_changed;
	boolean_t vm_restricted_to_single_processor;
	int vm_compressor_thread_count;

	unsigned int vm_page_speculative_q_age_ms;
	unsigned int vm_page_speculative_percentage;
	unsigned int vm_page_speculative_target;

	unsigned int vm_pageout_swap_wait;
	unsigned int vm_pageout_idle_wait;      /* milliseconds */
	unsigned int vm_pageout_empty_wait;     /* milliseconds */
	unsigned int vm_pageout_burst_wait;     /* milliseconds */
	unsigned int vm_pageout_deadlock_wait;  /* milliseconds */
	unsigned int vm_pageout_deadlock_relief;
	unsigned int vm_pageout_burst_inactive_throttle;

	unsigned int vm_pageout_inactive;
	unsigned int vm_pageout_inactive_used;  /* debugging */
	unsigned int vm_pageout_inactive_clean; /* debugging */

	uint32_t vm_page_filecache_min;
	uint32_t vm_page_filecache_min_divisor;
	uint32_t vm_page_xpmapped_min;
	uint32_t vm_page_xpmapped_min_divisor;
	uint64_t vm_pageout_considered_page_last;

	int vm_page_free_count_init;

	unsigned int vm_memory_pressure;

	int memorystatus_purge_on_critical;
	int memorystatus_purge_on_warning;
	int memorystatus_purge_on_urgent;

	thread_t vm_pageout_early_swapout_iothread;
};

extern struct vm_pageout_state vm_pageout_state;

/*
 * This structure is used to track the VM_INFO instrumentation
 */
struct vm_pageout_vminfo {
	unsigned long vm_pageout_considered_page;
	unsigned long vm_pageout_considered_bq_internal;
	unsigned long vm_pageout_considered_bq_external;
	unsigned long vm_pageout_skipped_external;
	unsigned long vm_pageout_skipped_internal;

	unsigned long vm_pageout_pages_evicted;
	unsigned long vm_pageout_pages_purged;
	unsigned long vm_pageout_freed_cleaned;
	unsigned long vm_pageout_freed_speculative;
	unsigned long vm_pageout_freed_external;
	unsigned long vm_pageout_freed_internal;
	unsigned long vm_pageout_inactive_dirty_internal;
	unsigned long vm_pageout_inactive_dirty_external;
	unsigned long vm_pageout_inactive_referenced;
	unsigned long vm_pageout_reactivation_limit_exceeded;
	unsigned long vm_pageout_inactive_force_reclaim;
	unsigned long vm_pageout_inactive_nolock;
	unsigned long vm_pageout_filecache_min_reactivated;
	unsigned long vm_pageout_scan_inactive_throttled_internal;
	unsigned long vm_pageout_scan_inactive_throttled_external;

	uint64_t      vm_pageout_compressions;
	uint64_t      vm_compressor_pages_grabbed;
	unsigned long vm_compressor_failed;

	unsigned long vm_page_pages_freed;

	unsigned long vm_phantom_cache_found_ghost;
	unsigned long vm_phantom_cache_added_ghost;

	unsigned long vm_pageout_protected_sharedcache;
	unsigned long vm_pageout_forcereclaimed_sharedcache;
	unsigned long vm_pageout_protected_realtime;
	unsigned long vm_pageout_forcereclaimed_realtime;
};

extern struct vm_pageout_vminfo vm_pageout_vminfo;

extern void vm_swapout_thread(void);

#if DEVELOPMENT || DEBUG

/*
 *	This structure records the pageout daemon's actions:
 *	how many pages it looks at and what happens to those pages.
 *	No locking needed because only one thread modifies the fields.
 */
struct vm_pageout_debug {
	uint32_t vm_pageout_balanced;
	uint32_t vm_pageout_scan_event_counter;
	uint32_t vm_pageout_speculative_dirty;

	uint32_t vm_pageout_inactive_busy;
	uint32_t vm_pageout_inactive_absent;
	uint32_t vm_pageout_inactive_notalive;
	uint32_t vm_pageout_inactive_error;
	uint32_t vm_pageout_inactive_deactivated;

	uint32_t vm_pageout_enqueued_cleaned;

	uint32_t vm_pageout_cleaned_busy;
	uint32_t vm_pageout_cleaned_nolock;
	uint32_t vm_pageout_cleaned_reference_reactivated;
	uint32_t vm_pageout_cleaned_volatile_reactivated;
	uint32_t vm_pageout_cleaned_reactivated;  /* debugging; how many cleaned pages are found to be referenced on pageout (and are therefore reactivated) */
	uint32_t vm_pageout_cleaned_fault_reactivated;

	uint32_t vm_pageout_dirty_no_pager;
	uint32_t vm_pageout_purged_objects;

	uint32_t vm_pageout_scan_throttle;
	uint32_t vm_pageout_scan_reclaimed_throttled;
	uint32_t vm_pageout_scan_burst_throttle;
	uint32_t vm_pageout_scan_empty_throttle;
	uint32_t vm_pageout_scan_swap_throttle;
	uint32_t vm_pageout_scan_deadlock_detected;
	uint32_t vm_pageout_scan_inactive_throttle_success;
	uint32_t vm_pageout_scan_throttle_deferred;

	uint32_t vm_pageout_inactive_external_forced_jetsam_count;

	uint32_t vm_grab_anon_overrides;
	uint32_t vm_grab_anon_nops;

	uint32_t vm_pageout_no_victim;
	uint32_t vm_pageout_yield_for_free_pages;
	unsigned long vm_pageout_throttle_up_count;
	uint32_t vm_page_steal_pageout_page;

	uint32_t vm_cs_validated_resets;
	uint32_t vm_object_iopl_request_sleep_for_cleaning;
	uint32_t vm_page_slide_counter;
	uint32_t vm_page_slide_errors;
	uint32_t vm_page_throttle_count;
	/*
	 * Statistics about UPL enforcement of copy-on-write obligations.
	 */
	unsigned long upl_cow;
	unsigned long upl_cow_again;
	unsigned long upl_cow_pages;
	unsigned long upl_cow_again_pages;
	unsigned long iopl_cow;
	unsigned long iopl_cow_pages;
};

extern struct vm_pageout_debug vm_pageout_debug;

#define VM_PAGEOUT_DEBUG(member, value)                 \
	MACRO_BEGIN                                     \
	        vm_pageout_debug.member += value;       \
	MACRO_END
#else /* DEVELOPMENT || DEBUG */
#define VM_PAGEOUT_DEBUG(member, value)
#endif /* DEVELOPMENT || DEBUG */
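
/*
 * Usage sketch: debug counters are bumped through the macro so the
 * stores vanish entirely on RELEASE kernels, e.g.
 *
 *	VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1);
 */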

#define MAX_COMPRESSOR_THREAD_COUNT      8

/*
 * Forward declarations for internal routines.
 */

/*
 * Contains relevant state for pageout iothreads. Some state is unused by
 * the external (file-backed) thread.
 */
struct pgo_iothread_state {
	struct vm_pageout_queue *q;
	// cheads unused by external thread
	void                    *current_early_swapout_chead;
	void                    *current_regular_swapout_chead;
	void                    *current_late_swapout_chead;
	char                    *scratch_buf;
	int                     id;
	thread_t                pgo_iothread; // holds a +1 ref
	sched_cond_atomic_t     pgo_wakeup;
#if DEVELOPMENT || DEBUG
	// for perf_compressor benchmark
	struct vm_pageout_queue *benchmark_q;
#endif /* DEVELOPMENT || DEBUG */
};

extern struct pgo_iothread_state pgo_iothread_internal_state[MAX_COMPRESSOR_THREAD_COUNT];

extern struct pgo_iothread_state pgo_iothread_external_state;

struct vm_compressor_swapper_stats {
	uint64_t unripe_under_30s;
	uint64_t unripe_under_60s;
	uint64_t unripe_under_300s;
	uint64_t reclaim_swapins;
	uint64_t defrag_swapins;
	uint64_t compressor_swap_threshold_exceeded;
	uint64_t external_q_throttled;
	uint64_t free_count_below_reserve;
	uint64_t thrashing_detected;
	uint64_t fragmentation_detected;
};
extern struct vm_compressor_swapper_stats vmcs_stats;

#if DEVELOPMENT || DEBUG
typedef struct vmct_stats_s {
	uint64_t vmct_runtimes[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t vmct_pages[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t vmct_iterations[MAX_COMPRESSOR_THREAD_COUNT];
	// total mach absolute time that compressor threads have been running
	uint64_t vmct_cthreads_total;
	int32_t vmct_minpages[MAX_COMPRESSOR_THREAD_COUNT];
	int32_t vmct_maxpages[MAX_COMPRESSOR_THREAD_COUNT];
} vmct_stats_t;
#endif /* DEVELOPMENT || DEBUG */
#endif /* XNU_KERNEL_PRIVATE */
#endif  /* _VM_VM_PAGEOUT_H_ */