xref: /xnu-12377.81.4/bsd/skywalk/mem/skmem_arena_var.h (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2016-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _SKYWALK_MEM_SKMEMARENAVAR_H
30 #define _SKYWALK_MEM_SKMEMARENAVAR_H
31 
32 #ifdef BSD_KERNEL_PRIVATE
33 #include <skywalk/core/skywalk_var.h>
34 
35 /*
36  * Arena types.
37  */
38 typedef enum {
39 	SKMEM_ARENA_TYPE_NEXUS,                 /* skmem_arena_nexus */
40 	SKMEM_ARENA_TYPE_NECP,                  /* skmem_arena_necp */
41 	SKMEM_ARENA_TYPE_SYSTEM,                /* skmem_arena_system */
42 } skmem_arena_type_t;
43 
struct skmem_arena_mmap_info;

/*
 * Structure common to all arena types.
 *
 * An arena groups a set of skmem_regions backed by a single IOSKArena
 * (ar_ar).  It is reference-counted (ar_refcnt) and may be mapped into
 * one or more tasks; each active mapping is tracked by a
 * skmem_arena_mmap_info linked on ar_map_head.
 */
struct skmem_arena {
	decl_lck_mtx_data(, ar_lock);           /* arena lock */
	uint32_t                ar_refcnt;      /* reference count */

	/*
	 * Arena properties.
	 */
	TAILQ_ENTRY(skmem_arena) ar_link;       /* skmem_region linkage */
	char                    ar_name[64];    /* arena name */
	skmem_arena_type_t      ar_type;        /* arena type */
	uint32_t                ar_flags;       /* ARF_* */
	size_t                  ar_zsize;       /* zone object size */
	IOSKArenaRef            ar_ar;          /* backing IOSKArena */

	/*
	 * Regions; slots are indexed by skmem_region_id_t and may be
	 * NULL for region types this arena was created without.
	 */
	struct skmem_region     *ar_regions[SKMEM_REGIONS]; /* arena regions */

	/*
	 * ar_mapsize gets set the first time the arena is mapped to a task;
	 * it is an estimate since we don't update it on subsequent mappings.
	 * We use it only for statistics purposes.
	 */
	mach_vm_size_t          ar_mapsize;     /* estimated mmap size */
	uint32_t                ar_mapcnt;      /* # of active mmap on arena */
	uint32_t                ar_maprdrcnt;   /* # of redirected mmap */
	SLIST_HEAD(, skmem_arena_mmap_info) ar_map_head; /* list of mmap info */
};
78 
/* valid values for ar_flags */
#define ARF_ACTIVE              0x1             /* arena is active */
#define ARF_DEFUNCT             (1U << 31)      /* arena is defunct */

/* bit description string for %b-style formatting of ar_flags */
#define ARF_BITS                "\020\01ACTIVE\040DEFUNCT"
84 
/* acquire/release and assert ownership of the arena mutex (ar_lock) */
#define AR_LOCK(_ar)                    \
	lck_mtx_lock(&(_ar)->ar_lock)
#define AR_LOCK_ASSERT_HELD(_ar)        \
	LCK_MTX_ASSERT(&(_ar)->ar_lock, LCK_MTX_ASSERT_OWNED)
#define AR_LOCK_ASSERT_NOTHELD(_ar)     \
	LCK_MTX_ASSERT(&(_ar)->ar_lock, LCK_MTX_ASSERT_NOTOWNED)
#define AR_UNLOCK(_ar)                  \
	lck_mtx_unlock(&(_ar)->ar_lock)
93 
/*
 * Per-region statistics accessors; (_id) indexes ar_regions[].
 * The region pointer is dereferenced without a NULL check, so the
 * caller must only use these for region IDs present in the arena.
 */
#define AR_MEM_TOTAL(_ar, _id)          \
	((_ar)->ar_regions[_id]->skr_memtotal)
#define AR_MEM_INUSE(_ar, _id)          \
	((_ar)->ar_regions[_id]->skr_meminuse)
#define AR_MEM_WIRED_INUSE(_ar, _id)    \
	((_ar)->ar_regions[_id]->skr_w_meminuse)
#define AR_MEM_SEGSIZE(_ar, _id)        \
	((_ar)->ar_regions[_id]->skr_seg_size)
#define AR_MEM_SEGCNT(_ar, _id)         \
	((_ar)->ar_regions[_id]->skr_seg_max_cnt)
#define AR_MEM_OBJCNT_R(_ar, _id)       \
	((_ar)->ar_regions[_id]->skr_r_obj_cnt)
#define AR_MEM_OBJCNT_C(_ar, _id)       \
	((_ar)->ar_regions[_id]->skr_c_obj_cnt)
#define AR_MEM_OBJSIZE_R(_ar, _id)      \
	((_ar)->ar_regions[_id]->skr_r_obj_size)
#define AR_MEM_OBJSIZE_C(_ar, _id)      \
	((_ar)->ar_regions[_id]->skr_c_obj_size)
112 
113 /*
114  * Arena Task Map Information.
115  */
116 struct skmem_arena_mmap_info {
117 	SLIST_ENTRY(skmem_arena_mmap_info)      ami_link;
118 	struct skmem_arena      *ami_arena;     /* backing arena */
119 	IOSKMapperRef           ami_mapref;     /* IOSKMapper handle */
120 	task_t                  ami_maptask;    /* task where it's mapped to */
121 	mach_vm_address_t       ami_mapaddr;    /* start address in task */
122 	mach_vm_size_t          ami_mapsize;    /* size of memory map */
123 	boolean_t               ami_redirect;   /* map is redirected */
124 };
125 
126 /*
127  * Nexus Adapter Arena.
128  */
129 struct skmem_arena_nexus {
130 	struct skmem_arena      arn_cmn;        /* common arena struct */
131 
132 	struct kern_pbufpool    *arn_rx_pp;     /* rx ppool handle */
133 	struct kern_pbufpool    *arn_tx_pp;     /* tx ppool handle */
134 	uint32_t                arn_mode;       /* mode flags */
135 	nexus_meta_type_t       arn_md_type;    /* mdata regions type */
136 	nexus_meta_subtype_t    arn_md_subtype; /* mdata regions subtype */
137 	/*
138 	 * For arenas used by adapters with external ring, slot callbacks or
139 	 * invocations via KPIs accessing kernel slot descriptors, we need to
140 	 * make sure the ksd region is kept intact during defunct.  A non-zero
141 	 * value indicates that we leave ksd region alone, until the time when
142 	 * the arena is torn down for good.
143 	 */
144 	int                     arn_ksd_nodefunct;
145 
146 	/*
147 	 * Caches.
148 	 */
149 	struct skmem_cache      *arn_schema_cache; /* schema object cache */
150 	struct skmem_cache      *arn_ring_cache;   /* ring object cache */
151 	struct skmem_cache      *arn_txaksd_cache; /* tx/alloc slots cache */
152 	struct skmem_cache      *arn_rxfksd_cache; /* rx/free slots cache */
153 
154 	/*
155 	 * Statistics.
156 	 *
157 	 * This may be NULL if the arena was created without a statistics
158 	 * region.  Otherwise, this value contains the segment address of
159 	 * the object that we allocate from that region.  An arena contains
160 	 * at most one monolithic stats region.
161 	 */
162 	void                    *__sized_by(arn_stats_obj_size)arn_stats_obj;
163 	size_t                  arn_stats_obj_size;
164 
165 	/*
166 	 * Flow advisory.
167 	 *
168 	 * This may be NULL if the arena was created without a flow advisory
169 	 * region.  Otherwise, this value contains the segment address of
170 	 * the object that we allocate from that region.  An arena contains
171 	 * at most one monolithic flow advisory region.
172 	 */
173 	struct __flowadv_entry  *__counted_by(arn_flowadv_entries)arn_flowadv_obj;
174 	size_t                  arn_flowadv_entries;
175 
176 	/*
177 	 * Nexus advisory.
178 	 *
179 	 * This may be NULL if the arena was created without a nexus advisory
180 	 * region.  Otherwise, this value contains the segment address of
181 	 * the object that we allocate from that region.  An arena contains
182 	 * at most one monolithic nexus advisory region, that is nexus-wide.
183 	 */
184 	void                    *arn_nexusadv_obj;
185 };
186 
/* valid flags for arn_mode */
#define AR_NEXUS_MODE_EXTERNAL_PPOOL    0x1     /* external packet pool */
189 
190 /*
191  * Given an arena, return its nexus variant (if applicable).
192  */
193 __attribute__((always_inline))
194 static inline struct skmem_arena_nexus *
skmem_arena_nexus(struct skmem_arena * ar)195 skmem_arena_nexus(struct skmem_arena *ar)
196 {
197 	if (__improbable(ar->ar_type != SKMEM_ARENA_TYPE_NEXUS)) {
198 		return NULL;
199 	}
200 
201 	return (struct skmem_arena_nexus *)ar;
202 }
203 
204 /*
205  * NECP Arena.
206  */
207 struct skmem_arena_necp {
208 	struct skmem_arena      arc_cmn;        /* common arena struct */
209 
210 	/*
211 	 * Caches.
212 	 */
213 	/* stats cache (kernel master mirrored with slave ustats) */
214 	struct skmem_cache      *arc_kstats_cache;
215 };
216 
217 /*
218  * System Arena.
219  */
220 struct skmem_arena_system {
221 	struct skmem_arena      ars_cmn;        /* common arena struct */
222 
223 	/*
224 	 * sysctls.
225 	 *
226 	 * This value contains the kernel virtual address of the system-wide
227 	 * sysctls object.  This object is persistent, i.e. it does not get
228 	 * allocated or freed along with the arena.
229 	 */
230 	void                    *ars_sysctls_obj;
231 	size_t                  ars_sysctls_objsize;
232 };
233 
struct kern_nexus_advisory;

__BEGIN_DECLS
/*
 * Arena constructors, one per arena type.  Each returns the new arena
 * or NULL; the trailing int * presumably receives an error code on
 * failure -- confirm against the definitions in skmem_arena.c.
 */
extern struct skmem_arena *skmem_arena_create_for_nexus(
	const struct nexus_adapter *, struct skmem_region_params[SKMEM_REGIONS],
	struct kern_pbufpool **, struct kern_pbufpool **, boolean_t, boolean_t,
	struct kern_nexus_advisory *, int *);
/* manipulate/query arn_ksd_nodefunct (keep ksd region across defunct) */
extern void skmem_arena_nexus_sd_set_noidle(struct skmem_arena_nexus *, int);
extern boolean_t skmem_arena_nexus_sd_idle(struct skmem_arena_nexus *);

extern struct skmem_arena *skmem_arena_create_for_necp(const char *,
    struct skmem_region_params *, struct skmem_region_params *, int *);
/* type-checked downcast; NECP counterpart of skmem_arena_nexus() above */
extern struct skmem_arena_necp *skmem_arena_necp(struct skmem_arena *);

extern struct skmem_arena *skmem_arena_create_for_system(const char *, int *);
/* type-checked downcast; system counterpart of skmem_arena_nexus() above */
extern struct skmem_arena_system *skmem_arena_system(struct skmem_arena *);
/* accessors for ars_sysctls_obj / ars_sysctls_objsize */
extern void *skmem_arena_system_sysctls_obj_addr(struct skmem_arena *);
extern size_t skmem_arena_system_sysctls_obj_size(struct skmem_arena *);

/* reference counting (see ar_refcnt); release returns whether destroyed? -- confirm */
extern void skmem_arena_retain(struct skmem_arena *);
extern boolean_t skmem_arena_release(struct skmem_arena *);
/* map/unmap the arena into a process; state tracked in skmem_arena_mmap_info */
extern int skmem_arena_mmap(struct skmem_arena *, struct proc *,
    struct skmem_arena_mmap_info *);
extern void skmem_arena_munmap(struct skmem_arena *,
    struct skmem_arena_mmap_info *);
extern void skmem_arena_munmap_channel(struct skmem_arena *,
    struct kern_channel *);
/* redirect an existing mapping (counted in ar_maprdrcnt) */
extern int skmem_arena_mredirect(struct skmem_arena *,
    struct skmem_arena_mmap_info *, struct proc *, boolean_t *);
extern int skmem_arena_defunct(struct skmem_arena *);
extern void skmem_arena_get_stats(struct skmem_arena *, uint64_t *,
    uint64_t *);
extern mach_vm_offset_t skmem_arena_get_region_offset(struct skmem_arena *,
    skmem_region_id_t);
extern void skmem_arena_reap(struct skmem_arena *, boolean_t);
/* format an arena description into dst (dsz bytes); returns dst */
extern char * ar2str(const struct skmem_arena *ar, char *__counted_by(dsz)dst,
    size_t dsz);
__END_DECLS
272 #endif /* BSD_KERNEL_PRIVATE */
273 #endif /* _SKYWALK_MEM_SKMEMARENAVAR_H */
274