xref: /xnu-11215.41.3/bsd/skywalk/nexus/kpipe/nx_kpipe_loopback.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4) !
1 /*
2  * Copyright (c) 2015-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #if (DEVELOPMENT || DEBUG) // XXX make this whole file a config option?
30 
31 #include <skywalk/os_skywalk_private.h>
32 #include <skywalk/nexus/kpipe/nx_kernel_pipe.h>
33 
/*
 * Test-harness state for the loopback kernel pipe.  All of it is
 * serialized by kplo_lock; the enable/disable sysctl handler at the
 * bottom of this file sleeps on &kplo_enabled for start/stop handshakes.
 */
static int kplo_enabled;        /* loopback instance is fully started */
static int kplo_busy;           /* instance active or start/stop in flight */
static int kplo_dump_buf;       /* sysctl: hex-dump buffers in sync_tx */
static int kplo_inject_error;   /* sysctl: KPLO_INJECT_ERROR() trigger id */
static uintptr_t kplo_seed;     /* random XOR mask for context cookies */
static uintptr_t kplo_nx_ctx;   /* random nexus context value */
static uint32_t kplo_drv_slots; /* slot count; VERIFYd zero at init/fini */

/* Nexus controller / provider identity, valid while an instance exists. */
static nexus_controller_t kplo_ncd;
static uuid_t kplo_dom_prov_uuid;
static uuid_t kplo_prov_uuid;
static uuid_t kplo_nx_uuid;
static uuid_string_t kplo_nx_uuidstr;   /* exported read-only via sysctl */

/* Attributes read back from the registered provider in _start(). */
static uint64_t kplo_ntxrings, kplo_nrxrings;
static uint64_t kplo_ntxslots, kplo_nrxslots;
static uint64_t kplo_bufsz, kplo_mdatasz;
static uint64_t kplo_pipes;
static uint64_t kplo_anon = 1;  /* request an anonymous nexus by default */
static kern_channel_ring_t kplo_rxring;
static kern_channel_ring_t kplo_txring;
struct kern_pbufpool_memory_info kplo_tx_pp_info;
static kern_pbufpool_t kplo_tx_pp;
static kern_pbufpool_t kplo_rx_pp;

static LCK_MTX_DECLARE_ATTR(kplo_lock, &sk_lock_group, &sk_lock_attr);
60 
/*
 * Context cookies handed to the nexus KPI are the object address XORed
 * with a boot-random seed (set in kplo_dom_init).  KPLO_VERIFY_CTX
 * asserts that a cookie round-trips back to the address it was built
 * from, catching context mix-ups in the framework.
 */
#define KPLO_VERIFY_CTX(addr, ctx)      \
	VERIFY(((uintptr_t)(addr) ^ (uintptr_t)(ctx)) == kplo_seed)
#define KPLO_GENERATE_CTX(addr)         \
	(void *)((uintptr_t)(addr) ^ kplo_seed)
#define KPLO_WHICH_RING(_ring)          \
	((_ring) == kplo_rxring ? "RX" : "TX")

/*
 * Fault injection: when the sysctl kplo_inject_error equals _err, fail
 * the enclosing function with ENOMEM.  Requires a local `error` and a
 * `done:` label in the caller.
 */
#define KPLO_INJECT_ERROR(_err) do {                                    \
	if (kplo_inject_error == (_err)) {                              \
	        SK_ERR("injecting error %d, returning ENOMEM", (_err)); \
	        error = ENOMEM;                                         \
	        goto done;                                              \
	}                                                               \
} while (0)
75 
/*
 * Domain-provider init callback.  Seeds the random context cookies and
 * asserts that all per-instance state was left clean by the previous
 * run (or is pristine).  Injection point 1.
 */
static errno_t
kplo_dom_init(kern_nexus_domain_provider_t domprov)
{
#pragma unused(domprov)
	errno_t error = 0;
	lck_mtx_lock(&kplo_lock);
	/* fresh cookie material for every instance lifetime */
	read_random(&kplo_nx_ctx, sizeof(kplo_nx_ctx));
	read_random(&kplo_seed, sizeof(kplo_seed));
	SK_DF(SK_VERB_KERNEL_PIPE, "seed is 0x%llx", (uint64_t)kplo_seed);
	/* everything below must have been reset by kplo_dom_fini */
	VERIFY(kplo_drv_slots == 0);
	VERIFY(kplo_ntxrings == 0 && kplo_nrxrings == 0);
	VERIFY(kplo_ntxslots == 0 && kplo_nrxslots == 0);
	VERIFY(kplo_bufsz == 0 && kplo_mdatasz == 0);
	VERIFY(kplo_pipes == 0);
	VERIFY(kplo_rxring == NULL && kplo_txring == NULL);
	VERIFY(kplo_tx_pp == NULL && kplo_rx_pp == NULL);
	lck_mtx_unlock(&kplo_lock);

	KPLO_INJECT_ERROR(1);
done:
	return error;
}
98 
/*
 * Domain-provider fini callback.  Resets all instance state, clears the
 * busy flag and wakes the sysctl handler sleeping in shutdown.  Ring
 * and pool pointers must already be NULL (torn down by the ring/channel
 * fini callbacks before the domain goes away).
 */
static void
kplo_dom_fini(kern_nexus_domain_provider_t domprov)
{
#pragma unused(domprov)
	lck_mtx_lock(&kplo_lock);
	kplo_nx_ctx = kplo_seed = 0;
	kplo_ntxrings = kplo_nrxrings = kplo_ntxslots = kplo_nrxslots = 0;
	kplo_bufsz = kplo_mdatasz = 0;
	kplo_pipes = 0;
	VERIFY(kplo_busy);
	kplo_busy = 0;
	wakeup(&kplo_enabled); // Allow shutdown to return
	VERIFY(kplo_drv_slots == 0);
	VERIFY(kplo_rxring == NULL && kplo_txring == NULL);
	VERIFY(kplo_tx_pp == NULL && kplo_rx_pp == NULL);
	lck_mtx_unlock(&kplo_lock);

	SK_DF(SK_VERB_KERNEL_PIPE, "called");
}
118 
/*
 * Pre-connect callback for a new channel.  Generates the per-channel
 * context cookie, caches the built-in TX/RX packet buffer pools and
 * sanity-checks their memory-info against the attributes read at start.
 * Also deliberately exercises the EINVAL path of
 * kern_nexus_get_pbufpool().  Injection point 2; on any failure the
 * cached pool pointers are cleared so disconnect logic stays consistent.
 */
static errno_t
kplo_pre_connect(kern_nexus_provider_t nxprov,
    proc_t p, kern_nexus_t nexus,
    nexus_port_t nexus_port, kern_channel_t channel, void **ch_ctx)
{
#pragma unused(nxprov, p, nexus_port)
	void *pp_ctx = NULL;
	errno_t error = 0;

	KPLO_VERIFY_CTX(kplo_nx_ctx, kern_nexus_get_context(nexus));
	*ch_ctx = KPLO_GENERATE_CTX(channel);
	SK_DF(SK_VERB_KERNEL_PIPE, "nx_port %u ch 0x%llx ch_ctx 0x%llx",
	    nexus_port, SK_KVA(channel), (uint64_t)(*ch_ctx));

	/* coverage: NULL out-params must be rejected */
	error = kern_nexus_get_pbufpool(nexus, NULL, NULL);
	VERIFY(error == EINVAL);

	error = kern_nexus_get_pbufpool(nexus, &kplo_tx_pp, &kplo_rx_pp);
	VERIFY(error == 0);
	VERIFY(kplo_tx_pp != NULL);     /* built-in pp */
	VERIFY(kplo_rx_pp != NULL);     /* built-in pp */

	pp_ctx = kern_pbufpool_get_context(kplo_tx_pp);
	VERIFY(pp_ctx == NULL); /* must be NULL for built-in pp */

	/* the pool must be able to back every slot of every ring */
	error = kern_pbufpool_get_memory_info(kplo_tx_pp, &kplo_tx_pp_info);
	VERIFY(error == 0);
	VERIFY(!(kplo_tx_pp_info.kpm_flags & KPMF_EXTERNAL));
	VERIFY(kplo_tx_pp_info.kpm_packets >=
	    (uint32_t)((kplo_ntxrings * kplo_ntxslots) +
	    (kplo_nrxrings * kplo_nrxslots)));
	VERIFY(kplo_tx_pp_info.kpm_max_frags == 1);
	VERIFY(kplo_tx_pp_info.kpm_buflets >= kplo_tx_pp_info.kpm_packets);
	VERIFY(kplo_tx_pp_info.kpm_bufsize == (uint32_t)kplo_bufsz);

	SK_DF(SK_VERB_KERNEL_PIPE,
	    "kpm_packets %u kpm_max_frags %u kpm_buflets %u kpm_bufsize %u",
	    kplo_tx_pp_info.kpm_packets, kplo_tx_pp_info.kpm_max_frags,
	    kplo_tx_pp_info.kpm_buflets, kplo_tx_pp_info.kpm_bufsize);

	error = 0;

	KPLO_INJECT_ERROR(2);
done:
	if (error != 0) {
		kplo_tx_pp = NULL;
		kplo_rx_pp = NULL;
	}

	return error;
}
170 
/*
 * Connected callback: the channel is fully wired up (ring_init has
 * already populated kplo_rxring/kplo_txring).  Only validates context
 * cookies and logs.  Injection point 3.
 */
static errno_t
kplo_connected(kern_nexus_provider_t nxprov, kern_nexus_t nexus,
    kern_channel_t channel)
{
#pragma unused(nxprov)
	errno_t error = 0;
	KPLO_VERIFY_CTX(kplo_nx_ctx, kern_nexus_get_context(nexus));
	KPLO_VERIFY_CTX(channel, kern_channel_get_context(channel));

	SK_DF(SK_VERB_KERNEL_PIPE, "channel 0x%llx", SK_KVA(channel));
	SK_DF(SK_VERB_KERNEL_PIPE, "  RX_ring 0x%llx", SK_KVA(kplo_rxring));
	SK_DF(SK_VERB_KERNEL_PIPE, "  TX_ring 0x%llx", SK_KVA(kplo_txring));

	KPLO_INJECT_ERROR(3);

done:
	return error;
}
189 
/*
 * Pre-disconnect callback: channel teardown is starting.  Cookie
 * validation and logging only; actual cleanup happens in
 * kplo_disconnected / kplo_ring_fini.
 */
static void
kplo_pre_disconnect(kern_nexus_provider_t nxprov, kern_nexus_t nexus,
    kern_channel_t channel)
{
#pragma unused(nxprov)
	KPLO_VERIFY_CTX(kplo_nx_ctx, kern_nexus_get_context(nexus));
	KPLO_VERIFY_CTX(channel, kern_channel_get_context(channel));
	SK_DF(SK_VERB_KERNEL_PIPE, "called for channel 0x%llx",
	    SK_KVA(channel));
}
200 
/*
 * Disconnected callback: drop the cached packet-pool state obtained in
 * kplo_pre_connect so the next connect starts clean (and so the
 * VERIFYs in kplo_dom_init/fini hold).
 */
static void
kplo_disconnected(kern_nexus_provider_t nxprov, kern_nexus_t nexus,
    kern_channel_t channel)
{
#pragma unused(nxprov)
	KPLO_VERIFY_CTX(kplo_nx_ctx, kern_nexus_get_context(nexus));
	KPLO_VERIFY_CTX(channel, kern_channel_get_context(channel));
	SK_DF(SK_VERB_KERNEL_PIPE, "called for channel 0x%llx",
	    SK_KVA(channel));
	bzero(&kplo_tx_pp_info, sizeof(kplo_tx_pp_info));
	kplo_tx_pp = kplo_rx_pp = NULL;
}
213 
/*
 * Ring init callback: record the single TX and single RX ring in the
 * file-scope pointers (the loopback pipe assumes one of each — see the
 * VERIFY(… == NULL) checks) and hand back a cookie for the ring.
 * Injection points 4 (TX) and 5 (RX).
 */
static errno_t
kplo_ring_init(kern_nexus_provider_t nxprov, kern_nexus_t nexus,
    kern_channel_t channel, kern_channel_ring_t ring, boolean_t is_tx_ring,
    void **ring_ctx)
{
#pragma unused(nxprov, is_tx_ring)
	errno_t error = 0;
	KPLO_VERIFY_CTX(kplo_nx_ctx, kern_nexus_get_context(nexus));
	KPLO_VERIFY_CTX(channel, kern_channel_get_context(channel));

	if (is_tx_ring) {
		KPLO_INJECT_ERROR(4);
		VERIFY(kplo_txring == NULL);
		kplo_txring = ring;
	} else {
		KPLO_INJECT_ERROR(5);
		VERIFY(kplo_rxring == NULL);
		kplo_rxring = ring;
	}
	*ring_ctx = KPLO_GENERATE_CTX(ring);

	SK_DF(SK_VERB_KERNEL_PIPE, "%s_ring 0x%llx ring_ctx 0x%llx, err(%d)",
	    KPLO_WHICH_RING(ring), SK_KVA(ring), (uint64_t)(*ring_ctx), error);

done:
	return error;
}
241 
242 static void
kplo_ring_fini(kern_nexus_provider_t nxprov,kern_nexus_t nexus,kern_channel_ring_t ring)243 kplo_ring_fini(kern_nexus_provider_t nxprov, kern_nexus_t nexus,
244     kern_channel_ring_t ring)
245 {
246 #pragma unused(nxprov)
247 	KPLO_VERIFY_CTX(kplo_nx_ctx, kern_nexus_get_context(nexus));
248 	KPLO_VERIFY_CTX(ring, kern_channel_ring_get_context(ring));
249 	SK_DF(SK_VERB_KERNEL_PIPE, "%s_ring 0x%llx",
250 	    KPLO_WHICH_RING(ring), SK_KVA(ring));
251 
252 	if (ring == kplo_txring) {
253 		kplo_txring = NULL;
254 	} else {
255 		VERIFY(ring == kplo_rxring);
256 		kplo_rxring = NULL;
257 	}
258 }
259 
/*
 * Slot init callback: hand out a cookie per slot and decline to supply
 * slot properties (*slot_prop_addr = NULL).  Injection point 6 for
 * every slot, plus point 7 on every fifth slot (ids 4, 9, 14, …) to
 * exercise partial-failure teardown.
 *
 * NOTE(review): kplo_drv_slots is logged here but never incremented in
 * this file — presumably vestigial or maintained elsewhere; confirm.
 */
static errno_t
kplo_slot_init(kern_nexus_provider_t nxprov, kern_nexus_t nexus,
    kern_channel_ring_t ring, channel_slot_t slot,
    uint32_t slot_id, struct kern_slot_prop **slot_prop_addr, void **pslot_ctx)
{
#pragma unused(nxprov)
	errno_t error = 0;
	KPLO_VERIFY_CTX(kplo_nx_ctx, kern_nexus_get_context(nexus));
	KPLO_VERIFY_CTX(ring, kern_channel_ring_get_context(ring));

	KPLO_INJECT_ERROR(6);
	if ((slot_id % 5) == 4) {
		KPLO_INJECT_ERROR(7);
	}

	lck_mtx_lock(&kplo_lock);
	*pslot_ctx = KPLO_GENERATE_CTX(slot);
	*slot_prop_addr = NULL;
	SK_DF(SK_VERB_KERNEL_PIPE,
	    "  slot 0x%llx id %u slot_ctx 0x%llx [%u]",
	    SK_KVA(slot), slot_id, SK_KVA(*pslot_ctx), kplo_drv_slots);
	lck_mtx_unlock(&kplo_lock);

done:
	return error;
}
286 
/*
 * Slot fini callback: verify the cookie issued by kplo_slot_init still
 * round-trips, then log.  No per-slot resources to release.
 */
static void
kplo_slot_fini(kern_nexus_provider_t nxprov, kern_nexus_t nexus,
    kern_channel_ring_t ring, channel_slot_t slot,
    uint32_t slot_id)
{
#pragma unused(nxprov, nexus, slot_id)
	void *ctx;

	KPLO_VERIFY_CTX(kplo_nx_ctx, kern_nexus_get_context(nexus));
	KPLO_VERIFY_CTX(ring, kern_channel_ring_get_context(ring));
	ctx = kern_channel_slot_get_context(ring, slot);

	lck_mtx_lock(&kplo_lock);
	KPLO_VERIFY_CTX(slot, ctx);
	SK_DF(SK_VERB_KERNEL_PIPE, "  slot 0x%llx id %u [%u]",
	    SK_KVA(slot), slot_id, kplo_drv_slots);
	lck_mtx_unlock(&kplo_lock);
}
305 
/*
 * TX sync callback — the heart of the loopback.  Moves every packet in
 * the TX ring to the RX ring by detaching it from its TX slot and
 * attaching it to the next RX slot, while deliberately exercising a
 * large swath of the packet/buflet KPI (get/set offset and length,
 * finalize, detach/re-attach round trips) with VERIFY on every result.
 * Injection points 8 (entry) and 9 (exit).
 *
 * Called directly by the framework for a TX sync, and also driven
 * manually from kplo_sync_rx() under kr_enter/kr_txsync_prologue.
 */
static errno_t
kplo_sync_tx(kern_nexus_provider_t nxprov, kern_nexus_t nexus,
    kern_channel_ring_t ring, uint32_t flags)
{
#pragma unused(nxprov, nexus)
#pragma unused(ring, flags)
	errno_t error = 0;
	struct kern_channel_ring_stat_increment stats;
	KPLO_VERIFY_CTX(kplo_nx_ctx, kern_nexus_get_context(nexus));
	KPLO_VERIFY_CTX(ring, kern_channel_ring_get_context(ring));
	SK_DF(SK_VERB_KERNEL_PIPE | SK_VERB_SYNC | SK_VERB_TX,
	    "called with ring \"%s\" krflags 0x%b flags 0x%x",
	    ring->ckr_name, ring->ckr_flags, CKRF_BITS, flags);
	VERIFY(ring == kplo_txring);

	kern_channel_ring_t txkring = kplo_txring;
	kern_channel_ring_t rxkring = kplo_rxring;
	uint32_t avail_rs, avail_ts;
	kern_channel_slot_t rs, ts, prs, pts;   /* current/previous slots */
	kern_packet_t ph;       /* packet handle */
	kern_buflet_t buf;      /* buflet handle */
	kern_packet_idx_t pidx;
	kern_packet_t *ary = NULL;
	uint32_t ary_cnt = 0, dlen, doff;
	uint16_t rdlen;         /* random scratch length for set/get tests */
	struct kern_pbufpool *pp;

	KPLO_INJECT_ERROR(8);

	SK_DF(SK_VERB_KERNEL_PIPE | SK_VERB_SYNC | SK_VERB_TX,
	    "0x%llx: %s %x -> %s", SK_KVA(txkring), txkring->ckr_name,
	    flags, rxkring->ckr_name);
	SK_DF(SK_VERB_KERNEL_PIPE | SK_VERB_SYNC | SK_VERB_TX,
	    "tx before: kh %3u kt %3u | h %3u t %3u",
	    txkring->ckr_khead, txkring->ckr_ktail,
	    txkring->ckr_rhead, txkring->ckr_rtail);
	SK_DF(SK_VERB_KERNEL_PIPE | SK_VERB_SYNC | SK_VERB_TX,
	    "rx before: kh %3u kt %3u | h %3u t %3u",
	    rxkring->ckr_khead, rxkring->ckr_ktail,
	    rxkring->ckr_rhead, rxkring->ckr_rtail);

	pp = skmem_arena_nexus(KRNA(ring)->na_arena)->arn_tx_pp;
	VERIFY(pp != NULL);

	/*
	 * We don't actually use prop or avail here,
	 * but get them for test coverage
	 */
	avail_rs = kern_channel_available_slot_count(rxkring);
	avail_ts = kern_channel_available_slot_count(txkring);
	rs = kern_channel_get_next_slot(rxkring, NULL, NULL);
	ts = kern_channel_get_next_slot(txkring, NULL, NULL);
	/* availability count and first-slot lookup must agree */
	VERIFY((avail_rs == 0) == (rs == NULL));
	VERIFY((avail_ts == 0) == (ts == NULL));

	if (!rs || !ts) {
		/* either the rxring is full, or nothing to send */
		return 0;
	}

	/* coverage: NULL out-params rejected, then fetch the slot array */
	VERIFY(kern_channel_ring_get_container(txkring, NULL, NULL) == EINVAL);
	VERIFY(kern_channel_ring_get_container(txkring, &ary, &ary_cnt) == 0);
	VERIFY(ary != NULL && ary_cnt >= kplo_ntxslots);
	VERIFY(kplo_bufsz < UINT16_MAX);

	/* random length in [0, kplo_bufsz) used for the length set/get tests */
	read_random(&rdlen, sizeof(rdlen));
	rdlen %= kplo_bufsz;

	bzero(&stats, sizeof(stats));
	do {
		kern_packet_t tph;
		uint8_t *baddr;

		/* get packet handle */
		ph = kern_channel_slot_get_packet(txkring, ts);
		VERIFY(ph != 0);
		pidx = kern_packet_get_object_index(ph);
		VERIFY(pidx < kplo_tx_pp_info.kpm_packets);

		/* verify buflet and length */
		VERIFY(kern_packet_get_buflet_count(ph) == 1);
		buf = kern_packet_get_next_buflet(ph, NULL);
		VERIFY(buf != NULL);

		baddr = kern_buflet_get_data_address(buf);
		VERIFY(baddr != NULL);
		dlen = kern_buflet_get_data_length(buf);
		VERIFY(dlen == kern_packet_get_data_length(ph));
		VERIFY(kern_buflet_set_data_length(buf, dlen) == 0);
		doff = kern_buflet_get_data_offset(buf);

		if (kplo_dump_buf) {
			SK_DF(SK_VERB_KERNEL_PIPE | SK_VERB_DUMP, "%s",
			    sk_dump("buf", baddr + doff, dlen, 128, NULL, 0));
		}

		/*
		 * KPI torture: zero the offset/length, check that
		 * out-of-range values are rejected with ERANGE, set a
		 * random valid length, then restore the original
		 * offset/length and re-finalize.  Net effect on the
		 * packet is nil.
		 */
		VERIFY(kern_buflet_set_data_offset(buf, 0) == 0);
		VERIFY(kern_buflet_set_data_length(buf, 0) == 0);
		VERIFY(kern_buflet_set_data_length(buf,
		    (uint16_t)(kplo_bufsz + 1)) == ERANGE);
		VERIFY(kern_buflet_set_data_length(buf, rdlen) == 0);
		VERIFY(kern_packet_finalize(ph) == 0);
		VERIFY(kern_packet_get_data_length(ph) == rdlen);
		VERIFY(kern_buflet_set_data_length(buf, 0) == 0);
		VERIFY(kern_buflet_set_data_offset(buf,
		    (uint16_t)(kplo_bufsz + 1)) == ERANGE);
		VERIFY(kern_buflet_set_data_length(buf, dlen) == 0);
		VERIFY(kern_buflet_set_data_offset(buf, doff) == 0);
		VERIFY(kern_packet_finalize(ph) == 0);
		VERIFY(kern_packet_get_data_length(ph) == dlen);
		VERIFY(kern_packet_finalize(ph) == 0);
		buf = kern_packet_get_next_buflet(ph, buf);
		VERIFY(buf == NULL);    /* single-buflet packet: no more */

		/* verify attach and detach */
		VERIFY(kern_channel_slot_detach_packet(txkring, ts, ph) == 0);
		VERIFY(kern_channel_slot_get_packet(txkring, ts) == 0);
		VERIFY(kern_packet_finalize(ph) == 0);
		VERIFY(kern_channel_slot_attach_packet(txkring, ts, ph) == 0);
		VERIFY(kern_channel_slot_get_packet(txkring, ts) == ph);

		stats.kcrsi_slots_transferred++;
		stats.kcrsi_bytes_transferred += dlen;

		/* the actual loopback: move the packet from TX to RX slot */
		tph = kern_channel_slot_get_packet(ring, ts);
		VERIFY(tph != 0);
		VERIFY(kern_channel_slot_detach_packet(txkring, ts, tph) == 0);
		VERIFY(kern_packet_finalize(tph) == 0);
		VERIFY(kern_channel_slot_attach_packet(rxkring, rs, tph) == 0);

		/* remember last-processed slots for advance below */
		prs = rs;
		pts = ts;
		rs = kern_channel_get_next_slot(rxkring, rs, NULL);
		ts = kern_channel_get_next_slot(txkring, ts, NULL);
		avail_rs--;
		avail_ts--;
		VERIFY((avail_rs == 0) == (rs == NULL));
		VERIFY((avail_ts == 0) == (ts == NULL));
	} while (rs && ts);

	/* publish progress on both rings and account the transfer twice
	 * (once per ring) */
	kern_channel_advance_slot(rxkring, prs);
	kern_channel_advance_slot(txkring, pts);
	kern_channel_increment_ring_stats(txkring, &stats);
	kern_channel_increment_ring_stats(rxkring, &stats);

	SK_DF(SK_VERB_KERNEL_PIPE | SK_VERB_SYNC | SK_VERB_TX,
	    "tx after:  kh %3u kt %3u | h %3u t %3u",
	    txkring->ckr_khead, txkring->ckr_ktail,
	    txkring->ckr_rhead, txkring->ckr_rtail);
	SK_DF(SK_VERB_KERNEL_PIPE | SK_VERB_SYNC | SK_VERB_TX,
	    "rx after:  kh %3u kt %3u | h %3u t %3u",
	    rxkring->ckr_khead, rxkring->ckr_ktail,
	    rxkring->ckr_rhead, rxkring->ckr_rtail);

	(void) kern_channel_reclaim(txkring);

	/* tell the RX side there is data to pick up */
	kern_channel_notify(rxkring, 0);

	KPLO_INJECT_ERROR(9);
done:
	return error;
}
468 
469 static errno_t
kplo_sync_rx(kern_nexus_provider_t nxprov,kern_nexus_t nexus,kern_channel_ring_t ring,uint32_t flags)470 kplo_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus,
471     kern_channel_ring_t ring, uint32_t flags)
472 {
473 	errno_t error;
474 	struct proc *p = current_proc();
475 #pragma unused(nxprov, nexus)
476 #pragma unused(flags)
477 	KPLO_VERIFY_CTX(kplo_nx_ctx, kern_nexus_get_context(nexus));
478 	KPLO_VERIFY_CTX(ring, kern_channel_ring_get_context(ring));
479 
480 	VERIFY(ring = kplo_rxring);
481 	kern_channel_ring_t txkring = kplo_txring;
482 	kern_channel_ring_t rxkring = ring;
483 
484 	SK_DF(SK_VERB_KERNEL_PIPE | SK_VERB_SYNC | SK_VERB_RX,
485 	    "called with ring \"%s\" krflags 0x%b flags 0x%x",
486 	    ring->ckr_name, ring->ckr_flags, CKRF_BITS, flags);
487 
488 	KPLO_INJECT_ERROR(10);
489 
490 	/* reclaim user-released slots */
491 	(void) kern_channel_reclaim(rxkring);
492 
493 	kr_enter(txkring, TRUE);
494 
495 	if (__improbable(kr_txsync_prologue(NULL, txkring, p) >=
496 	    txkring->ckr_num_slots)) {
497 		error = EFAULT;
498 		goto done;
499 	}
500 	error = kplo_sync_tx(nxprov, nexus, txkring, flags);
501 	kr_txsync_finalize(NULL, txkring, p);
502 
503 	kr_exit(txkring);
504 
505 	kern_channel_notify(txkring, 0);
506 
507 done:
508 	return error;
509 }
510 
static void kpipe_loopback_stop(void);  /* forward: used on error paths */

/*
 * Bring up the loopback kernel pipe instance.  Runs on its own kernel
 * thread (spawned by the sysctl handler).  Sequence:
 *   1. mark busy (cleared by kplo_dom_fini, or by kpipe_loopback_stop
 *      if domain registration never happened),
 *   2. register the "kpipe_loopback" domain provider,
 *   3. create a nexus controller and register the nexus provider,
 *   4. read back and log the provider attributes,
 *   5. allocate the provider instance,
 *   6. set kplo_enabled and wake the sysctl handler.
 * Any failure falls through to done:, which invokes
 * kpipe_loopback_stop() to unwind whatever was set up.
 */
static void
kpipe_loopback_start(void)
{
	nexus_attr_t nxa = NULL;
	uuid_t uuidtmp;
	uuid_string_t uuidstr;
	errno_t error;

	SK_D("Hello loopback pipe!");

	lck_mtx_lock(&kplo_lock);
	/*
	 * This will be cleared when kplo_dom_fini() is called,
	 * or in kpipe_loopback_stop if we failed to register
	 * our domain provider.
	 */
	VERIFY(!kplo_busy);
	kplo_busy = 1;
	lck_mtx_unlock(&kplo_lock);

	struct kern_nexus_domain_provider_init dom_init = {
		.nxdpi_version = KERN_NEXUS_DOMAIN_PROVIDER_CURRENT_VERSION,
		.nxdpi_flags = 0,
		.nxdpi_init = kplo_dom_init,
		.nxdpi_fini = kplo_dom_fini,
	};

	/*
	 * NOTE(review): nxpi_version is initialized with the *domain*
	 * provider version constant; confirm this is interchangeable
	 * with KERN_NEXUS_PROVIDER_CURRENT_VERSION.
	 */
	struct kern_nexus_provider_init prov_init = {
		.nxpi_version = KERN_NEXUS_DOMAIN_PROVIDER_CURRENT_VERSION,
		.nxpi_flags = NXPIF_VIRTUAL_DEVICE,
		.nxpi_pre_connect = kplo_pre_connect,
		.nxpi_connected = kplo_connected,
		.nxpi_pre_disconnect = kplo_pre_disconnect,
		.nxpi_disconnected = kplo_disconnected,
		.nxpi_ring_init = kplo_ring_init,
		.nxpi_ring_fini = kplo_ring_fini,
		.nxpi_slot_init = kplo_slot_init,
		.nxpi_slot_fini = kplo_slot_fini,
		.nxpi_sync_tx = kplo_sync_tx,
		.nxpi_sync_rx = kplo_sync_rx,
		.nxpi_tx_doorbell = NULL,
	};

	VERIFY(uuid_is_null(kplo_dom_prov_uuid));
	error = kern_nexus_register_domain_provider(NEXUS_TYPE_KERNEL_PIPE,
	    (const uint8_t *)"kpipe_loopback",
	    &dom_init, sizeof(dom_init), &kplo_dom_prov_uuid);
	if (error != 0) {
		SK_ERR("failed to register kpipe_loopback domain %d", error);
		VERIFY(uuid_is_null(kplo_dom_prov_uuid));
		goto done;
	}

	uuid_unparse_upper(kplo_dom_prov_uuid, uuidstr);
	SK_DF(SK_VERB_KERNEL_PIPE,
	    "Registered kpipe_loopback domain with uuid %s", uuidstr);

	VERIFY(kplo_ncd == NULL);
	error = kern_nexus_controller_create(&kplo_ncd);
	if (error != 0) {
		SK_ERR("Failed to create nexus controller %d", error);
		VERIFY(kplo_ncd == NULL);
		goto done;
	}

	// XXX opaque violation on kplo_ncd
	uuid_unparse_upper(kplo_ncd->ncd_nxctl->nxctl_uuid, uuidstr);
	SK_DF(SK_VERB_KERNEL_PIPE,
	    "Created nexus controller with uuid %s", uuidstr);

	// We don't actually do anything with this.
	uuid_clear(uuidtmp);
	error = kern_nexus_get_default_domain_provider(NEXUS_TYPE_KERNEL_PIPE,
	    &uuidtmp);
	if (error) {
		SK_ERR("Failed to find kernel pipe domain %d", error);
		VERIFY(uuid_is_null(uuidtmp));
		goto done;
	}

	uuid_unparse_upper(uuidtmp, uuidstr);
	SK_DF(SK_VERB_KERNEL_PIPE,
	    "Found kernel pipe domain with uuid %s", uuidstr);

	error = kern_nexus_attr_create(&nxa);
	if (error) {
		SK_ERR("Failed to create nexus_attr %d", error);
		VERIFY(nxa == NULL);
		goto done;
	}

	/* request anonymous (or named) mode per the kplo_anon default */
	error = kern_nexus_attr_set(nxa, NEXUS_ATTR_ANONYMOUS, kplo_anon);
	if (error) {
		SK_ERR("Failed to %s anonymous attribute %d",
		    (kplo_anon ? "set" : "clear"), error);
		goto done;
	}

	VERIFY(uuid_is_null(kplo_prov_uuid));
	error = kern_nexus_controller_register_provider(kplo_ncd,
	    kplo_dom_prov_uuid,
	    (const uint8_t *)"com.apple.nexus.kpipe_loopback", &prov_init,
	    sizeof(prov_init), nxa, &kplo_prov_uuid);
	if (error) {
		SK_ERR("Failed to register nexus provider %d", error);
		VERIFY(uuid_is_null(kplo_prov_uuid));
		goto done;
	}

	uuid_unparse_upper(kplo_prov_uuid, uuidstr);
	SK_DF(SK_VERB_KERNEL_PIPE,
	    "Registered nexus controller provider with uuid %s", uuidstr);

	/* read back effective attributes; cached for the VERIFYs in
	 * kplo_pre_connect / kplo_sync_tx */
	error = kern_nexus_controller_read_provider_attr(kplo_ncd,
	    kplo_prov_uuid, nxa);
	if (error != 0) {
		SK_ERR("Failed to read nexus provider attributes %d", error);
		goto done;
	}

	if ((error = kern_nexus_attr_get(nxa, NEXUS_ATTR_TX_RINGS,
	    &kplo_ntxrings)) != 0) {
		SK_ERR("Failed to retrieve NEXUS_ATTR_TX_RINGS %d", error);
		goto done;
	}
	if ((error = kern_nexus_attr_get(nxa, NEXUS_ATTR_TX_SLOTS,
	    &kplo_ntxslots)) != 0) {
		SK_ERR("Failed to retrieve NEXUS_ATTR_TX_SLOTS %d", error);
		goto done;
	}
	if ((error = kern_nexus_attr_get(nxa, NEXUS_ATTR_RX_RINGS,
	    &kplo_nrxrings)) != 0) {
		SK_ERR("Failed to retrieve NEXUS_ATTR_RX_RINGS %d", error);
		goto done;
	}
	if ((error = kern_nexus_attr_get(nxa, NEXUS_ATTR_RX_SLOTS,
	    &kplo_nrxslots)) != 0) {
		SK_ERR("Failed to retrieve NEXUS_ATTR_RX_SLOTS %d", error);
		goto done;
	}
	if ((error = kern_nexus_attr_get(nxa, NEXUS_ATTR_SLOT_BUF_SIZE,
	    &kplo_bufsz)) != 0) {
		SK_ERR("Failed to retrieve NEXUS_ATTR_BUF_SIZE %d", error);
		goto done;
	}
	if ((error = kern_nexus_attr_get(nxa, NEXUS_ATTR_SLOT_META_SIZE,
	    &kplo_mdatasz)) != 0) {
		SK_ERR("Failed to retrieve NEXUS_ATTR_META_SIZE %d", error);
		goto done;
	}
	if ((error = kern_nexus_attr_get(nxa, NEXUS_ATTR_ANONYMOUS,
	    &kplo_anon)) != 0) {
		SK_ERR("Failed to retrieve NEXUS_ATTR_ANONYMOUS %d", error);
		goto done;
	}
	if ((error = kern_nexus_attr_get(nxa, NEXUS_ATTR_PIPES,
	    &kplo_pipes)) != 0) {
		SK_ERR("Failed to retrieve NEXUS_ATTR_PIPES %d", error);
		goto done;
	}

	SK_DF(SK_VERB_KERNEL_PIPE, "Attributes of %s:", uuidstr);
	SK_DF(SK_VERB_KERNEL_PIPE, "    TX rings:   %llu", kplo_ntxrings);
	SK_DF(SK_VERB_KERNEL_PIPE, "    TX slots:   %llu", kplo_ntxslots);
	SK_DF(SK_VERB_KERNEL_PIPE, "    RX rings:   %llu", kplo_nrxrings);
	SK_DF(SK_VERB_KERNEL_PIPE, "    RX slots:   %llu", kplo_nrxslots);
	SK_DF(SK_VERB_KERNEL_PIPE, "    buffer:     %llu", kplo_bufsz);
	SK_DF(SK_VERB_KERNEL_PIPE, "    metadata:   %llu", kplo_mdatasz);
	SK_DF(SK_VERB_KERNEL_PIPE, "    anonymous:  %llu", kplo_anon);
	SK_DF(SK_VERB_KERNEL_PIPE, "    pipes:      %llu", kplo_pipes);

	/* NULL pbufpools: use the nexus' built-in pools */
	struct kern_nexus_init nx_init = {
		.nxi_version = KERN_NEXUS_CURRENT_VERSION,
		.nxi_flags = 0,
		.nxi_tx_pbufpool = NULL,
		.nxi_rx_pbufpool = NULL,
	};

	VERIFY(uuid_is_null(kplo_nx_uuid));
	error = kern_nexus_controller_alloc_provider_instance(kplo_ncd,
	    kplo_prov_uuid, KPLO_GENERATE_CTX(kplo_nx_ctx), NULL, &kplo_nx_uuid,
	    &nx_init);
	if (error) {
		SK_ERR("Failed to alloc provider instance %d", error);
		VERIFY(uuid_is_null(kplo_nx_uuid));
		goto done;
	}

	VERIFY(kplo_nx_uuidstr[0] == '\0');
	uuid_unparse_upper(kplo_nx_uuid, kplo_nx_uuidstr);
	SK_DF(SK_VERB_KERNEL_PIPE,
	    "Allocated provider instance uuid %s", kplo_nx_uuidstr);

	lck_mtx_lock(&kplo_lock);
	kplo_enabled = 1;
	wakeup(&kplo_enabled); // Allow startup to return
	lck_mtx_unlock(&kplo_lock);

done:
	if (nxa != NULL) {
		kern_nexus_attr_destroy(nxa);
		nxa = NULL;
	}
	if (error) {
		/* unwind partial setup; also clears kplo_busy */
		kpipe_loopback_stop();
	}
}
720 
/*
 * Tear down the loopback pipe instance.  Safe to call with partial
 * state (it is also the error-unwind path of kpipe_loopback_start):
 * each step is guarded by whether its uuid/pointer was ever set.
 * Teardown order is the reverse of setup: instance, provider,
 * controller, then domain provider.  The busy flag is cleared either
 * by kplo_dom_fini (triggered by domain deregistration) or directly
 * here when the domain was never registered.
 */
static void
kpipe_loopback_stop(void)
{
	uuid_string_t uuidstr;
	errno_t error;

	SK_D("Stopping loopback pipe!");

	if (!uuid_is_null(kplo_nx_uuid)) {
		uuid_unparse_upper(kplo_nx_uuid, uuidstr);
		SK_DF(SK_VERB_KERNEL_PIPE,
		    "Deallocated provider instance uuid %s", uuidstr);
		error = kern_nexus_controller_free_provider_instance(kplo_ncd,
		    kplo_nx_uuid);
		VERIFY(error == 0);
		uuid_clear(kplo_nx_uuid);
		/* also wipe the sysctl-visible uuid string */
		memset(kplo_nx_uuidstr, 0, sizeof(kplo_nx_uuidstr));
	}

	if (!uuid_is_null(kplo_prov_uuid)) {
		uuid_unparse_upper(kplo_prov_uuid, uuidstr);
		SK_DF(SK_VERB_KERNEL_PIPE,
		    "Unregistered nexus controller with uuid %s", uuidstr);
		error = kern_nexus_controller_deregister_provider(kplo_ncd,
		    kplo_prov_uuid);
		VERIFY(error == 0);
		uuid_clear(kplo_prov_uuid);
	}

	if (kplo_ncd) {
		// XXX opaque violation on kplo_ncd
		uuid_unparse_upper(kplo_ncd->ncd_nxctl->nxctl_uuid, uuidstr);
		SK_DF(SK_VERB_KERNEL_PIPE,
		    "Destroying nexus controller with uuid %s", uuidstr);
		kern_nexus_controller_destroy(kplo_ncd);
		kplo_ncd = NULL;
	}

	if (!uuid_is_null(kplo_dom_prov_uuid)) {
		/* mark as not enabled, but defer wakeup to kplo_dom_fini */
		lck_mtx_lock(&kplo_lock);
		VERIFY(kplo_busy);
		kplo_enabled = 0;
		lck_mtx_unlock(&kplo_lock);

		uuid_unparse_upper(kplo_dom_prov_uuid, uuidstr);
		SK_DF(SK_VERB_KERNEL_PIPE,
		    "Unregistered domain provider with uuid %s", uuidstr);
		error = kern_nexus_deregister_domain_provider(
			kplo_dom_prov_uuid);
		VERIFY(error == 0);
		uuid_clear(kplo_dom_prov_uuid);
	} else {
		/* kplo_dom_fini won't be called, so mark unbusy anyway */
		lck_mtx_lock(&kplo_lock);
		VERIFY(kplo_busy);
		kplo_busy = 0;
		kplo_enabled = 0;
		wakeup(&kplo_enabled);
		lck_mtx_unlock(&kplo_lock);
	}

	SK_D("Goodbye loopback pipe!");
}
785 
/*
 * sysctl handler for kern.skywalk.kpipe.loopback.enabled.
 *
 * Writing a new value spawns a kernel thread running either
 * kpipe_loopback_start or kpipe_loopback_stop, then sleeps on
 * &kplo_enabled until the thread signals completion (or the sleep is
 * interrupted).  Start/stop must run on their own thread because both
 * take kplo_lock and perform blocking nexus operations; this handler
 * holds kplo_lock except while parked in msleep (which atomically
 * drops and reacquires it).
 */
static int
sysctl_kpipe_loopback_enabled(__unused struct sysctl_oid *oidp,
    __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error, newvalue, changed;

	lck_mtx_lock(&kplo_lock);
	if ((error = sysctl_io_number(req, kplo_enabled, sizeof(int),
	    &newvalue, &changed)) != 0) {
		goto done;
	}

	if (changed && kplo_enabled != newvalue) {
		thread_t kpth;
		void (*func)(void);

		/* refuse to start while a previous instance winds down */
		if (newvalue && kplo_busy) {
			SK_ERR("Older kpipe loopback instance is still active");
			error = EBUSY;
			goto done;
		}

		if (newvalue) {
			func = kpipe_loopback_start;
		} else {
			func = kpipe_loopback_stop;
		}

		if (kernel_thread_start((thread_continue_t)func,
		    NULL, &kpth) != KERN_SUCCESS) {
			SK_ERR("Failed to create kpipe loopback action thread");
			error = EBUSY;
			goto done;
		}
		do {
			SK_DF(SK_VERB_KERNEL_PIPE, "Waiting for %s to complete",
			    newvalue ? "startup" : "shutdown");
			error = msleep(&kplo_enabled, &kplo_lock,
			    PWAIT | PCATCH, "kplow", NULL);
			/* BEGIN CSTYLED */
			/*
			 * Loop exit conditions:
			 *   - we were interrupted
			 *     OR
			 *   - we are starting up and are enabled
			 *     (Startup complete)
			 *     OR
			 *   - we are starting up and are not busy
			 *     (Failed startup)
			 *     OR
			 *   - we are shutting down and are not busy
			 *     (Shutdown complete)
			 */
			/* END CSTYLED */
		} while (!((error == EINTR) || (newvalue && kplo_enabled) ||
		    (newvalue && !kplo_busy) || (!newvalue && !kplo_busy)));
		/* drop the reference returned by kernel_thread_start */
		thread_deallocate(kpth);
	}

done:
	lck_mtx_unlock(&kplo_lock);
	return error;
}
849 
850 SYSCTL_NODE(_kern_skywalk_kpipe, OID_AUTO, loopback,
851     CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Skywalk kpipe loopback tuning");
852 
853 SYSCTL_INT(_kern_skywalk_kpipe_loopback, OID_AUTO, dump_buf,
854     CTLFLAG_RW | CTLFLAG_LOCKED, &kplo_dump_buf, 0, "Dump buffer");
855 
856 SYSCTL_INT(_kern_skywalk_kpipe_loopback, OID_AUTO, inject_error,
857     CTLFLAG_RW | CTLFLAG_LOCKED, &kplo_inject_error, 0, "Dump metadata");
858 
859 SYSCTL_PROC(_kern_skywalk_kpipe_loopback, OID_AUTO, enabled,
860     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
861     NULL, 0, sysctl_kpipe_loopback_enabled,
862     "I", "Start the loopback kernel pipe");
863 
864 SYSCTL_STRING(_kern_skywalk_kpipe_loopback, OID_AUTO, nx_uuid,
865     CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
866     &kplo_nx_uuidstr[0],
867     0, "Provider instance of loopback kernel pipe");
868 
869 #endif /* DEVELOPMENT || DEBUG */
870