#ifndef _OS_REFCNT_INTERNAL_H
#define _OS_REFCNT_INTERNAL_H

/*
 * A reference count object: an atomic counter plus, in debug builds only,
 * a back-pointer to the os_refgrp this object is accounted against.
 * NOTE: the debug field changes the struct's size/ABI across configs.
 */
struct os_refcnt {
	os_ref_atomic_t ref_count;
#if OS_REFCNT_DEBUG
	struct os_refgrp *ref_group;
#endif
};

#if OS_REFCNT_DEBUG
/*
 * As this structure gets baked-in at compile-time, changes can break the ABI.
 * To allow a little more flexibility for the future a new 'flags' member is
 * added but left unused for the moment. Newly compiled consumers will get the
 * new structure and once every upstream project has been recompiled the new field
 * can be used.
 */
struct os_refgrp {
	const char *grp_name;          /* human-readable group name */
	os_ref_atomic_t grp_children;  /* number of refcount objects in group */
	os_ref_atomic_t grp_count;     /* current reference count of group */
	_Atomic uint64_t grp_retain_total;   /* presumably cumulative retains — updated by the internal ops, not in this header */
	_Atomic uint64_t grp_release_total;  /* presumably cumulative releases — see note above */
	struct os_refgrp *grp_parent;  /* optional parent group (aggregation) */
	void *grp_log;                 /* refcount logging context */
	uint64_t grp_flags;            /* Unused for now. */
};

#endif

/* Static initializers for a bare atomic counter and for struct os_refcnt;
 * the debug variant also clears the group back-pointer. */
# define OS_REF_ATOMIC_INITIALIZER ATOMIC_VAR_INIT(0)
#if OS_REFCNT_DEBUG
# define OS_REF_INITIALIZER { .ref_count = OS_REF_ATOMIC_INITIALIZER, .ref_group = NULL }
#else
# define OS_REF_INITIALIZER { .ref_count = OS_REF_ATOMIC_INITIALIZER }
#endif

__BEGIN_DECLS

#if OS_REFCNT_DEBUG
/* os_ref_if_debug(x, y): expands to `x` when refcount debugging is compiled
 * in, to `y` otherwise. Used below to pass the ref group only in debug. */
# define os_ref_if_debug(x, y) x
#else
# define os_ref_if_debug(x, y) y
#endif

/*
 * Out-of-line backing implementations for the inline wrappers below.
 * The "external" symbols are the exported interface; release variants
 * return the updated count (0 means the final reference was dropped).
 */
void os_ref_init_count_external(os_ref_atomic_t *, struct os_refgrp *, os_ref_count_t);
void os_ref_retain_external(os_ref_atomic_t *, struct os_refgrp *);
void os_ref_retain_locked_external(os_ref_atomic_t *, struct os_refgrp *);
os_ref_count_t os_ref_release_external(os_ref_atomic_t *, struct os_refgrp *,
    memory_order release_order, memory_order dealloc_order);
os_ref_count_t os_ref_release_relaxed_external(os_ref_atomic_t *, struct os_refgrp *);
os_ref_count_t os_ref_release_barrier_external(os_ref_atomic_t *, struct os_refgrp *);
os_ref_count_t os_ref_release_locked_external(os_ref_atomic_t *, struct os_refgrp *);
bool os_ref_retain_try_external(os_ref_atomic_t *, struct os_refgrp *);

#if XNU_KERNEL_PRIVATE
/* Kernel-internal variants; the "floor" operations additionally take a
 * minimum count argument (os_ref_count_t). */
void os_ref_init_count_internal(os_ref_atomic_t *, struct os_refgrp *, os_ref_count_t);
void os_ref_retain_internal(os_ref_atomic_t *, struct os_refgrp *);
void os_ref_retain_floor_internal(os_ref_atomic_t *, os_ref_count_t, struct os_refgrp *);
os_ref_count_t os_ref_release_relaxed_internal(os_ref_atomic_t *, struct os_refgrp *);
os_ref_count_t os_ref_release_barrier_internal(os_ref_atomic_t *, struct os_refgrp *);
os_ref_count_t os_ref_release_internal(os_ref_atomic_t *, struct os_refgrp *,
    memory_order release_order, memory_order dealloc_order);
bool os_ref_retain_try_internal(os_ref_atomic_t *, struct os_refgrp *);
bool os_ref_retain_floor_try_internal(os_ref_atomic_t *, os_ref_count_t, struct os_refgrp *);
void os_ref_retain_locked_internal(os_ref_atomic_t *, struct os_refgrp *);
void os_ref_retain_floor_locked_internal(os_ref_atomic_t *, os_ref_count_t, struct os_refgrp *);
os_ref_count_t os_ref_release_locked_internal(os_ref_atomic_t *, struct os_refgrp *);
#else
/* For now, the internal and external variants are identical */
#define os_ref_init_count_internal      os_ref_init_count_external
#define os_ref_retain_internal          os_ref_retain_external
#define os_ref_retain_locked_internal   os_ref_retain_locked_external
#define os_ref_release_internal         os_ref_release_external
#define os_ref_release_barrier_internal os_ref_release_barrier_external
#define os_ref_release_relaxed_internal os_ref_release_relaxed_external
#define os_ref_release_locked_internal  os_ref_release_locked_external
#define os_ref_retain_try_internal      os_ref_retain_try_external
#endif

/*
 * Initialize `rc` with `count` references. In debug builds the object is
 * associated with group `grp`; otherwise `grp` is unused.
 * NOTE: shadowed by the os_ref_init_count() macro below, which supplies a
 * per-callsite default group when `grp` is NULL (debug builds only).
 */
static inline void
os_ref_init_count(struct os_refcnt *rc, struct os_refgrp * __unused grp, os_ref_count_t count)
{
#if OS_REFCNT_DEBUG
	rc->ref_group = grp; /* remembered so later retain/release can account to it */
#endif
	os_ref_init_count_internal(&rc->ref_count, os_ref_if_debug(rc->ref_group, NULL), count);
}

/* Take an additional reference on `rc`. */
static inline void
os_ref_retain(struct os_refcnt *rc)
{
	struct os_refgrp *grp = os_ref_if_debug(rc->ref_group, NULL);

	os_ref_retain_internal(&rc->ref_count, grp);
}

/* Drop a reference on `rc` while the caller holds the protecting lock;
 * returns the new count (0 => last reference released). */
static inline os_ref_count_t
os_ref_release_locked(struct os_refcnt *rc)
{
	struct os_refgrp *grp = os_ref_if_debug(rc->ref_group, NULL);

	return os_ref_release_locked_internal(&rc->ref_count, grp);
}

/*
 * Take an additional reference on `rc` while the caller holds the
 * protecting lock. Fix: forward to the *locked* internal variant —
 * previously this called os_ref_retain_internal(), which is inconsistent
 * with os_ref_release_locked() above and os_ref_retain_locked_raw() below.
 */
static inline void
os_ref_retain_locked(struct os_refcnt *rc)
{
	os_ref_retain_locked_internal(&rc->ref_count, os_ref_if_debug(rc->ref_group, NULL));
}

/* Attempt to take a reference on `rc`; returns false if the object has
 * already hit zero (no reference is taken in that case). */
static inline bool
os_ref_retain_try(struct os_refcnt *rc)
{
	struct os_refgrp *grp = os_ref_if_debug(rc->ref_group, NULL);

	return os_ref_retain_try_internal(&rc->ref_count, grp);
}

/* Drop a reference with caller-chosen memory orderings; returns the new
 * count. Deprecated: prefer os_ref_release() / os_ref_release_relaxed(). */
__deprecated_msg("inefficient codegen, prefer os_ref_release / os_ref_release_relaxed")
static inline os_ref_count_t OS_WARN_RESULT
os_ref_release_explicit(struct os_refcnt *rc, memory_order release_order, memory_order dealloc_order)
{
	struct os_refgrp *grp = os_ref_if_debug(rc->ref_group, NULL);

	return os_ref_release_internal(&rc->ref_count, grp,
	           release_order, dealloc_order);
}

#if OS_REFCNT_DEBUG
/* Compile-time initializer for a struct os_refgrp. grp_log and grp_flags
 * are deliberately left to designated-initializer zeroing. */
# define os_refgrp_initializer(name, parent) \
	 { \
	        .grp_name =          (name), \
	        .grp_children =      ATOMIC_VAR_INIT(0u), \
	        .grp_count =         ATOMIC_VAR_INIT(0u), \
	        .grp_retain_total =  ATOMIC_VAR_INIT(0u), \
	        .grp_release_total = ATOMIC_VAR_INIT(0u), \
	        .grp_parent =        (parent), \
	        .grp_log =           NULL, \
	}
/* Define a ref group, placed in the __DATA,__refgrps section so tooling
 * can enumerate all groups in the binary. */
# define os_refgrp_decl(qual, var, name, parent) \
	qual struct os_refgrp __attribute__((section("__DATA,__refgrps"))) var =  \
	    os_refgrp_initializer(name, parent)
# define os_refgrp_decl_extern(var) \
	extern struct os_refgrp var

/* Create a default group based on the init() callsite if no explicit group
 * is provided. */
# define os_ref_init_count(rc, grp, count) ({ \
	        os_refgrp_decl(static, __grp, __func__, NULL); \
	        (os_ref_init_count)((rc), (grp) ? (grp) : &__grp, (count)); \
	})

#else /* OS_REFCNT_DEBUG */

/* Non-debug: groups cost nothing — declarations collapse to an unused
 * extern, and init drops the group argument entirely. */
# define os_refgrp_decl(qual, var, name, parent) extern struct os_refgrp var __attribute__((unused))
# define os_refgrp_decl_extern(var) os_refgrp_decl(, var, ,)
# define os_ref_init_count(rc, grp, count) (os_ref_init_count)((rc), NULL, (count))

#endif /* OS_REFCNT_DEBUG */

#if XNU_KERNEL_PRIVATE
/* Abort path taken when a "live" release drops the final reference. */
void os_ref_panic_live(void *rc) __abortlike;
#else
/* Inline fallback for non-XNU consumers: panic on an unexpected release of
 * the final reference (never returns). */
__abortlike
static inline void
os_ref_panic_live(void *rc)
{
	panic("os_refcnt: unexpected release of final reference (rc=%p)\n", rc);
	__builtin_unreachable();
}
#endif

/* Drop a reference on `rc` with barrier semantics; returns the new count
 * (0 => caller owns teardown). */
static inline os_ref_count_t OS_WARN_RESULT
os_ref_release(struct os_refcnt *rc)
{
	struct os_refgrp *grp = os_ref_if_debug(rc->ref_group, NULL);

	return os_ref_release_barrier_internal(&rc->ref_count, grp);
}

/* Drop a reference on `rc` with relaxed ordering; returns the new count. */
static inline os_ref_count_t OS_WARN_RESULT
os_ref_release_relaxed(struct os_refcnt *rc)
{
	struct os_refgrp *grp = os_ref_if_debug(rc->ref_group, NULL);

	return os_ref_release_relaxed_internal(&rc->ref_count, grp);
}

static inline void
os_ref_release_live(struct os_refcnt *rc)
{
	if (__improbable(os_ref_release(rc) == 0)) {
		os_ref_panic_live(rc);
	}
}

/* Read the raw counter value (relaxed load; advisory only). */
static inline os_ref_count_t
os_ref_get_count_internal(os_ref_atomic_t *rc)
{
	os_ref_count_t count = atomic_load_explicit(rc, memory_order_relaxed);

	return count;
}

static inline os_ref_count_t
os_ref_get_count(struct os_refcnt *rc)
{
	return os_ref_get_count_internal(&rc->ref_count);
}

#if XNU_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)

/*
 * Raw API
 */

/* Raw (bare atomic) variant of os_ref_init_count(): start `rc` at `count`
 * references, accounted against `grp`. */
static inline void
os_ref_init_count_raw(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t count)
{
	os_ref_init_count_internal(rc, grp, count);
}

/* Retain `rc`, enforcing that the count is at least `f` beforehand. */
static inline void
os_ref_retain_floor(struct os_refcnt *rc, os_ref_count_t f)
{
	struct os_refgrp *grp = os_ref_if_debug(rc->ref_group, NULL);

	os_ref_retain_floor_internal(&rc->ref_count, f, grp);
}

/* Raw variant of os_ref_retain(): thin forwarder to the internal op. */
static inline void
os_ref_retain_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	os_ref_retain_internal(rc, grp);
}

/* Raw variant of os_ref_retain_floor(): retain with minimum-count check. */
static inline void
os_ref_retain_floor_raw(os_ref_atomic_t *rc, os_ref_count_t f, struct os_refgrp *grp)
{
	os_ref_retain_floor_internal(rc, f, grp);
}

/* Raw release with barrier semantics; returns the new count. */
static inline os_ref_count_t
os_ref_release_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	os_ref_count_t remaining = os_ref_release_barrier_internal(rc, grp);

	return remaining;
}

/* Raw release with relaxed ordering; returns the new count. */
static inline os_ref_count_t
os_ref_release_raw_relaxed(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	os_ref_count_t remaining = os_ref_release_relaxed_internal(rc, grp);

	return remaining;
}

/* Raw release of a reference known not to be the last; panics if the
 * count reaches zero. */
static inline void
os_ref_release_live_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	os_ref_count_t remaining = os_ref_release_barrier_internal(rc, grp);

	if (__improbable(remaining == 0)) {
		os_ref_panic_live(rc);
	}
}

/* Raw try-retain: returns true iff a reference was successfully taken. */
static inline bool
os_ref_retain_try_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	bool acquired = os_ref_retain_try_internal(rc, grp);

	return acquired;
}

/* Raw try-retain with a minimum-count floor `f`; returns true on success. */
static inline bool
os_ref_retain_floor_try_raw(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp *grp)
{
	bool acquired = os_ref_retain_floor_try_internal(rc, f, grp);

	return acquired;
}

/* Raw variant of os_ref_retain_locked(): caller holds the protecting lock. */
static inline void
os_ref_retain_locked_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	os_ref_retain_locked_internal(rc, grp);
}

/* Raw locked retain with a minimum-count floor `f`; caller holds the lock. */
static inline void
os_ref_retain_floor_locked_raw(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp *grp)
{
	os_ref_retain_floor_locked_internal(rc, f, grp);
}

/* Raw locked release; returns the new count (caller holds the lock). */
static inline os_ref_count_t
os_ref_release_locked_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	os_ref_count_t remaining = os_ref_release_locked_internal(rc, grp);

	return remaining;
}

/* Snapshot a raw counter's value (relaxed load; advisory only). */
static inline os_ref_count_t
os_ref_get_count_raw(os_ref_atomic_t *rc)
{
	os_ref_count_t count = os_ref_get_count_internal(rc);

	return count;
}

#if !OS_REFCNT_DEBUG
/*
 * Non-debug builds: shadow the raw API with macros that drop the group
 * argument so group bookkeeping compiles away. All macro arguments —
 * including `f`, which was previously unparenthesized — are parenthesized
 * so expansion stays safe for arbitrary argument expressions.
 */
#define os_ref_init_count_raw(rc, grp, count) (os_ref_init_count_raw)((rc), NULL, (count))
#define os_ref_retain_raw(rc, grp) (os_ref_retain_raw)((rc), NULL)
#define os_ref_retain_floor_raw(rc, f, grp) (os_ref_retain_floor_raw)((rc), (f), NULL)
#define os_ref_release_raw(rc, grp) (os_ref_release_raw)((rc), NULL)
#define os_ref_release_raw_relaxed(rc, grp) (os_ref_release_raw_relaxed)((rc), NULL)
#define os_ref_release_live_raw(rc, grp) (os_ref_release_live_raw)((rc), NULL)
#define os_ref_retain_try_raw(rc, grp) (os_ref_retain_try_raw)((rc), NULL)
#define os_ref_retain_floor_try_raw(rc, f, grp) (os_ref_retain_floor_try_raw)((rc), (f), NULL)
#define os_ref_retain_locked_raw(rc, grp) (os_ref_retain_locked_raw)((rc), NULL)
#define os_ref_retain_floor_locked_raw(rc, f, grp) (os_ref_retain_floor_locked_raw)((rc), (f), NULL)
#define os_ref_release_locked_raw(rc, grp) (os_ref_release_locked_raw)((rc), NULL)
#endif

/* Tear down the logging context (grp_log) of a ref group. */
extern void
os_ref_log_fini(struct os_refgrp *grp);

/* Set up the logging context (grp_log) of a ref group. */
extern void
os_ref_log_init(struct os_refgrp *grp);

/*
 * Backing implementations of the "mask" API: the counter's low bits carry
 * flags and the count lives above them (see os_ref_get_bits_mask /
 * os_ref_get_count_mask below), so `n` is the raw increment (1 << b).
 */
extern void
os_ref_retain_mask_internal(os_ref_atomic_t *rc, uint32_t n, struct os_refgrp *grp);
extern void
os_ref_retain_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n, struct os_refgrp *grp);
extern uint32_t
os_ref_retain_try_mask_internal(os_ref_atomic_t *, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *grp) OS_WARN_RESULT;
extern bool
os_ref_retain_try_acquire_mask_internal(os_ref_atomic_t *, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *grp) OS_WARN_RESULT;

/* Release variants return the raw (count+bits) word after the decrement. */
extern uint32_t
os_ref_release_barrier_mask_internal(os_ref_atomic_t *rc, uint32_t n, struct os_refgrp *grp);
extern uint32_t
os_ref_release_relaxed_mask_internal(os_ref_atomic_t *rc, uint32_t n, struct os_refgrp *grp);

/* Read the packed word (count + low flag bits) as-is. */
static inline uint32_t
os_ref_get_raw_mask(os_ref_atomic_t *rc)
{
	uint32_t raw = os_ref_get_count_internal(rc);

	return raw;
}

/* Extract the low `b` flag bits of the packed word. */
static inline uint32_t
os_ref_get_bits_mask(os_ref_atomic_t *rc, uint32_t b)
{
	uint32_t low_bits = (1u << b) - 1;

	return os_ref_get_raw_mask(rc) & low_bits;
}

/* Extract the reference count stored above the low `b` flag bits. */
static inline os_ref_count_t
os_ref_get_count_mask(os_ref_atomic_t *rc, uint32_t b)
{
	uint32_t raw = os_ref_get_raw_mask(rc);

	return raw >> b;
}

/* Take a reference on a packed counter whose low `b` bits are flags. */
static inline void
os_ref_retain_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
{
	uint32_t delta = 1u << b;

	os_ref_retain_mask_internal(rc, delta, grp);
}

/* As os_ref_retain_mask(), using the acquire-ordering internal variant. */
static inline void
os_ref_retain_acquire_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
{
	uint32_t delta = 1u << b;

	os_ref_retain_acquire_mask_internal(rc, delta, grp);
}

/* Try-retain on a packed counter; `reject_mask` selects disqualifying
 * flag bits (semantics in the internal implementation). */
static inline uint32_t
os_ref_retain_try_mask(os_ref_atomic_t *rc, uint32_t b,
    uint32_t reject_mask, struct os_refgrp *grp)
{
	uint32_t delta = 1u << b;

	return os_ref_retain_try_mask_internal(rc, delta, reject_mask, grp);
}

/* As os_ref_retain_try_mask(), with acquire ordering; returns success. */
static inline bool
os_ref_retain_try_acquire_mask(os_ref_atomic_t *rc, uint32_t b,
    uint32_t reject_mask, struct os_refgrp *grp)
{
	uint32_t delta = 1u << b;

	return os_ref_retain_try_acquire_mask_internal(rc, delta, reject_mask, grp);
}

/* Release on a packed counter; returns the raw (count+bits) word. */
static inline uint32_t
os_ref_release_raw_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
{
	uint32_t delta = 1u << b;

	return os_ref_release_barrier_mask_internal(rc, delta, grp);
}

/* Relaxed-ordering release on a packed counter; returns the raw word. */
static inline uint32_t
os_ref_release_raw_relaxed_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
{
	uint32_t delta = 1u << b;

	return os_ref_release_relaxed_mask_internal(rc, delta, grp);
}

/* Release on a packed counter; returns the remaining count (flags shifted
 * out). */
static inline os_ref_count_t
os_ref_release_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
{
	uint32_t raw = os_ref_release_barrier_mask_internal(rc, 1u << b, grp);

	return raw >> b;
}

/* Relaxed-ordering release on a packed counter; returns the remaining
 * count (flags shifted out). */
static inline os_ref_count_t
os_ref_release_relaxed_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
{
	uint32_t raw = os_ref_release_relaxed_mask_internal(rc, 1u << b, grp);

	return raw >> b;
}

/* Release a reference known not to be the last on a packed counter;
 * panics if the count portion reaches zero. Returns the raw word. */
static inline uint32_t
os_ref_release_live_raw_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
{
	const uint32_t one_ref = 1u << b;
	uint32_t raw = os_ref_release_barrier_mask_internal(rc, one_ref, grp);

	/* raw < one_ref means the count above the flag bits is now zero */
	if (__improbable(raw < one_ref)) {
		os_ref_panic_live(rc);
	}
	return raw;
}

/* As os_ref_release_live_raw_mask(), discarding the returned raw word. */
static inline void
os_ref_release_live_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
{
	(void)os_ref_release_live_raw_mask(rc, b, grp);
}

#if !OS_REFCNT_DEBUG
/*
 * Non-debug builds: shadow the mask API with macros that drop the group
 * argument. Two fixes versus the previous revision:
 *  - os_ref_retain_try_acquire_mask() takes a reject-mask argument (see
 *    its inline definition above), so the macro must accept/forward it;
 *  - the relaxed raw release is named os_ref_release_raw_relaxed_mask
 *    (matching the inline above); the old macro aliased a nonexistent
 *    os_ref_release_relaxed_raw_mask symbol.
 */
#define os_ref_init_count_mask(rc, b, grp, init_c, init_b) (os_ref_init_count_mask)(rc, b, NULL, init_c, init_b)
#define os_ref_retain_mask(rc, b, grp) (os_ref_retain_mask)((rc), (b), NULL)
#define os_ref_retain_acquire_mask(rc, b, grp) (os_ref_retain_acquire_mask)((rc), (b), NULL)
#define os_ref_retain_try_mask(rc, b, m, grp) (os_ref_retain_try_mask)((rc), (b), (m), NULL)
#define os_ref_retain_try_acquire_mask(rc, b, m, grp) (os_ref_retain_try_acquire_mask)((rc), (b), (m), NULL)
#define os_ref_release_mask(rc, b, grp) (os_ref_release_mask)((rc), (b), NULL)
#define os_ref_release_relaxed_mask(rc, b, grp) (os_ref_release_relaxed_mask)((rc), (b), NULL)
#define os_ref_release_raw_mask(rc, b, grp) (os_ref_release_raw_mask)((rc), (b), NULL)
#define os_ref_release_raw_relaxed_mask(rc, b, grp) (os_ref_release_raw_relaxed_mask)((rc), (b), NULL)
#define os_ref_release_live_raw_mask(rc, b, grp) (os_ref_release_live_raw_mask)((rc), (b), NULL)
#define os_ref_release_live_mask(rc, b, grp) (os_ref_release_live_mask)((rc), (b), NULL)
#endif

#pragma GCC visibility pop
#endif

__END_DECLS

#endif /* _OS_REFCNT_INTERNAL_H */