xref: /xnu-11215.41.3/osfmk/ipc/flipc.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 /*
2  * Copyright (c) 2015-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
/*	File:	ipc/flipc.c
29  *	Author:	Dean Reece
30  *	Date:	2016
31  *
32  *	Implementation of fast local ipc (flipc).
33  */
34 
35 
36 #include <mach/mach_types.h>
37 #include <mach/boolean.h>
38 #include <mach/kern_return.h>
39 
40 #include <kern/kern_types.h>
41 #include <kern/assert.h>
42 #include <kern/host.h>
43 #include <kern/kalloc.h>
44 #include <kern/mach_node.h>
45 
46 #include <ipc/port.h>
47 #include <ipc/ipc_types.h>
48 #include <ipc/ipc_init.h>
49 #include <ipc/ipc_kmsg.h>
50 #include <ipc/ipc_port.h>
51 #include <ipc/ipc_pset.h>
52 #include <ipc/ipc_entry.h>
53 #include <ipc/flipc.h>
54 
55 #pragma pack(4)
56 
57 
58 /*** FLIPC Internal Implementation (private to flipc.c) ***/
59 
60 ZONE_DEFINE_TYPE(flipc_port_zone, "flipc ports",
61     struct flipc_port, ZC_ZFREE_CLEARMEM);
62 
63 /*  Get the mnl_name associated with local ipc_port <lport>.
64  *  Returns MNL_NAME_NULL if <lport> is invalid or not a flipc port.
65  */
66 static inline mnl_name_t
mnl_name_from_port(ipc_port_t lport)67 mnl_name_from_port(ipc_port_t lport)
68 {
69 	mnl_name_t name = MNL_NAME_NULL;
70 
71 	if (IP_VALID(lport)) {
72 		flipc_port_t fport = lport->ip_messages.imq_fport;
73 		if (FPORT_VALID(fport)) {
74 			name = fport->obj.name;
75 		}
76 	}
77 	return name;
78 }
79 
80 
81 /*  Lookup the ipc_port associated with mnl_name <name>.
82  *  Returns IP_NULL if <name> is invalid or not a known mnl object.
83  */
84 static inline ipc_port_t
mnl_name_to_port(mnl_name_t name)85 mnl_name_to_port(mnl_name_t name)
86 {
87 	ipc_port_t lport = IP_NULL;
88 
89 	if (MNL_NAME_VALID(name)) {
90 		flipc_port_t fport = (flipc_port_t)mnl_obj_lookup(name);
91 		if (FPORT_VALID(fport)) {
92 			lport = fport->lport;
93 		}
94 	}
95 	return lport;
96 }
97 
98 
99 /*  flipc_port_create() is called to convert a regular mach port into a
100  *  flipc port (i.e., the port has one or more rights off-node).
101  *  <lport> must be locked on entry and is not unlocked on return.
102  */
103 static kern_return_t
flipc_port_create(ipc_port_t lport,mach_node_t node,mnl_name_t name)104 flipc_port_create(ipc_port_t lport, mach_node_t node, mnl_name_t name)
105 {
106 	/* Ensure parameters are valid and not already linked */
107 	assert(IP_VALID(lport));
108 	assert(MACH_NODE_VALID(node));
109 	assert(MNL_NAME_VALID(name));
110 	assert(!FPORT_VALID(lport->ip_messages.imq_fport));
111 
112 	/* Allocate and initialize a flipc port */
113 	flipc_port_t fport = zalloc_flags(flipc_port_zone, Z_WAITOK | Z_ZERO);
114 	if (!FPORT_VALID(fport)) {
115 		return KERN_RESOURCE_SHORTAGE;
116 	}
117 	fport->obj.name = name;
118 	fport->hostnode = node;
119 	if (node == localnode) {
120 		fport->state = FPORT_STATE_PRINCIPAL;
121 	} else {
122 		fport->state = FPORT_STATE_PROXY;
123 	}
124 
125 	/* Link co-structures (lport is locked) */
126 	fport->lport = lport;
127 	lport->ip_messages.imq_fport = fport;
128 
129 	/* Add fport to the name hash table; revert link if insert fails */
130 	kern_return_t kr =  mnl_obj_insert((mnl_obj_t)fport);
131 	if (kr != KERN_SUCCESS) {
132 		lport->ip_messages.imq_fport = FPORT_NULL;
133 		fport->lport = IP_NULL;
134 		zfree(flipc_port_zone, fport);
135 	}
136 
137 	return kr;
138 }
139 
140 
/*  flipc_port_destroy() is called to convert a flipc port back to a
 *  local-only ipc port (i.e., the port has no remaining off-node rights).
 *  This will dispose of any undelivered flipc messages, generating NAKs if
 *  needed.  <lport> must be locked on entry and is not unlocked on return.
 */
static void
flipc_port_destroy(ipc_port_t lport)
{
	/* Ensure parameter is valid, and linked to an fport with a valid name */
	assert(IP_VALID(lport));
	ipc_mqueue_t port_mq = &lport->ip_messages;
	flipc_port_t fport = port_mq->imq_fport;
	assert(FPORT_VALID(fport));
	assert(MNL_NAME_VALID(fport->obj.name));

	/* Dispose of any undelivered messages */
	int m = port_mq->imq_msgcount;
	if (m > 0) {
		ipc_kmsg_t kmsg;
#if DEBUG
		printf("flipc: destroying %p with %d undelivered msgs\n", lport, m);
#endif

		/* Logic was lifted from ipc_mqueue_select_on_thread() */
		while (m--) {
			kmsg = ipc_kmsg_queue_first(&port_mq->imq_messages);
			assert(kmsg != IKM_NULL);
			ipc_kmsg_rmqueue(&port_mq->imq_messages, kmsg);
			if (fport->state == FPORT_STATE_PRINCIPAL) {
				/* NAK (delivered == FALSE) back to the sender's node */
				flipc_msg_ack(kmsg->ikm_node, port_mq, FALSE);
			}
			/* NOTE(review): the dequeued kmsg itself is not freed or
			 * destroyed here — confirm ownership/cleanup is handled by
			 * the ack path or a caller before relying on this. */
			ipc_mqueue_release_msgcount(port_mq);
			port_mq->imq_seqno++;
		}
	}

	/* Remove from name hash table, unlink co-structures, and free fport */
	mnl_obj_remove(fport->obj.name);
	lport->ip_messages.imq_fport = FPORT_NULL;
	fport->lport = IP_NULL;
	zfree(flipc_port_zone, fport);
}
183 
184 
185 /*
186  *	Routine:	flipc_msg_size_from_kmsg(ipc_kmsg_t kmsg)
187  *	Purpose:
188  *		Compute the size of the buffer needed to hold the translated flipc
189  *      message.  All identifiers are converted to flipc_names which are 64b.
190  *      If this node's pointers are a different size, we have to allow for
191  *      expansion of the descriptors as appropriate.
192  *	Conditions:
193  *		Nothing locked.
194  *	Returns:
195  *		size of the message as it would be sent over the flipc link.
196  */
197 static mach_msg_size_t
flipc_msg_size_from_kmsg(ipc_kmsg_t kmsg)198 flipc_msg_size_from_kmsg(ipc_kmsg_t kmsg)
199 {
200 	mach_msg_size_t fsize = ikm_header(kmsg)->msgh_size;
201 
202 	if (ikm_header(kmsg)->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
203 		PE_enter_debugger("flipc_msg_size_from_kmsg(): Complex messages not supported.");
204 	}
205 
206 	return fsize;
207 }
208 
209 
210 /*  Translate a kmsg into a flipc msg suitable to transmit over the mach node
211  *  link.  All in-line rights and objects are similarly processed.  If the msg
212  *  moves a receive right, then queued messages may need to be moved as a
213  *  result, causing this function to ultimately be recursive.
214  */
215 static kern_return_t
mnl_msg_from_kmsg(ipc_kmsg_t kmsg,mnl_msg_t * fmsgp)216 mnl_msg_from_kmsg(ipc_kmsg_t kmsg, mnl_msg_t *fmsgp)
217 {
218 	if (ikm_header(kmsg)->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
219 		printf("mnl_msg_from_kmsg(): Complex messages not supported.");
220 		return KERN_FAILURE;
221 	}
222 
223 	mach_msg_size_t fsize = flipc_msg_size_from_kmsg(kmsg);
224 
225 	mnl_msg_t fmsg = mnl_msg_alloc(fsize, 0);
226 
227 	if (fmsg == MNL_MSG_NULL) {
228 		return KERN_RESOURCE_SHORTAGE;
229 	}
230 
231 	/* Setup flipc message header */
232 	fmsg->sub = MACH_NODE_SUB_FLIPC;
233 	fmsg->cmd = FLIPC_CMD_IPCMESSAGE;
234 	fmsg->node_id = localnode_id;   // Message is from us
235 	fmsg->qos = 0; // not used
236 	fmsg->size = fsize; // Payload size (does NOT include mnl_msg header)
237 	fmsg->object = ikm_header(kmsg)->msgh_remote_port->ip_messages.imq_fport->obj.name;
238 
239 	/* Copy body of message */
240 	bcopy((const void*)ikm_header(kmsg), (void*)MNL_MSG_PAYLOAD(fmsg), fsize);
241 
242 	// Convert port fields
243 	mach_msg_header_t *mmsg = (mach_msg_header_t*)MNL_MSG_PAYLOAD(fmsg);
244 	mmsg->msgh_remote_port = (mach_port_t)fmsg->object;
245 	mmsg->msgh_local_port = (mach_port_t)
246 	    mnl_name_from_port(mmsg->msgh_local_port);
247 	mmsg->msgh_voucher_port = (mach_port_name_t)MNL_NAME_NULL;
248 
249 	*fmsgp = (mnl_msg_t)fmsg;
250 
251 	return KERN_SUCCESS;
252 }
253 
254 
255 /* lifted from ipc_mig.c:mach_msg_send_from_kernel_proper() */
256 static mach_msg_return_t
mach_msg_send_from_remote_kernel(mach_msg_header_t * msg,mach_msg_size_t send_size,mach_node_t node)257 mach_msg_send_from_remote_kernel(mach_msg_header_t      *msg,
258     mach_msg_size_t        send_size,
259     mach_node_t            node)
260 {
261 	ipc_kmsg_t kmsg;
262 	mach_msg_return_t mr;
263 
264 	mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg);
265 	if (mr != MACH_MSG_SUCCESS) {
266 		return mr;
267 	}
268 
269 	mr = ipc_kmsg_copyin_from_kernel(kmsg);
270 	if (mr != MACH_MSG_SUCCESS) {
271 		ipc_kmsg_free(kmsg);
272 		return mr;
273 	}
274 
275 	kmsg->ikm_node = node;  // node that needs to receive message ack
276 	mr = ipc_kmsg_send(kmsg,
277 	    MACH_SEND_KERNEL_DEFAULT,
278 	    MACH_MSG_TIMEOUT_NONE);
279 	if (mr != MACH_MSG_SUCCESS) {
280 		ipc_kmsg_destroy(kmsg, IPC_KMSG_DESTROY_ALL);
281 	}
282 
283 	return mr;
284 }
285 
286 
287 /*  Translate a flipc msg <fmsg> into a kmsg and post it to the appropriate
288  *	port.  <node> is the node that originated the message, not necessarily the
289  *	node we received it from.  This will block if the receiving port is full.
290  */
291 static mach_msg_return_t
flipc_cmd_ipc(mnl_msg_t fmsg,mach_node_t node,uint32_t flags __unused)292 flipc_cmd_ipc(mnl_msg_t     fmsg,
293     mach_node_t   node,
294     uint32_t      flags   __unused)
295 {
296 	mach_msg_header_t *mmsg;
297 
298 	// Convert flipc message into mach message in place to avoid alloc/copy
299 	mmsg = (mach_msg_header_t*)MNL_MSG_PAYLOAD(fmsg);
300 	mmsg->msgh_size = fmsg->size;
301 	mmsg->msgh_remote_port = mnl_name_to_port(fmsg->object);
302 	mmsg->msgh_local_port = mnl_name_to_port((mnl_name_t)mmsg->msgh_local_port);
303 	mmsg->msgh_voucher_port = (mach_port_name_t)MACH_PORT_NULL;
304 	mmsg->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
305 	// unchanged: msgh_id
306 
307 	return mach_msg_send_from_remote_kernel(mmsg, fmsg->size, node);
308 }
309 
310 
311 /*  Called when an ACKMESSAGE packet is received. <name> indicates
312  *	the flipc name of the port holding the messages to be acknowledged.
313  *	<msg_count> indicates the number of messages being acked for this node:port.
314  */
315 static void
flipc_cmd_ack(flipc_ack_msg_t fmsg,mach_node_t node __unused,uint32_t flags __unused)316 flipc_cmd_ack(flipc_ack_msg_t   fmsg,
317     mach_node_t       node    __unused,
318     uint32_t          flags   __unused)
319 {
320 	unsigned int msg_count = fmsg->msg_count;
321 	thread_t thread = current_thread();
322 	boolean_t kick = FALSE;
323 
324 	flipc_port_t fport = (flipc_port_t)mnl_obj_lookup(fmsg->mnl.object);
325 
326 	ipc_port_t lport = fport->lport;
327 	ip_mq_lock(lport); // Revisit the lock when enabling flipc
328 
329 	ipc_mqueue_t lport_mq = &lport->ip_messages;
330 
331 	assert(fport->peek_count >= msg_count); // Can't ack what we haven't peeked!
332 
333 	while (msg_count--) {
334 		ipc_mqueue_select_on_thread_locked(lport_mq, NULL, 0, thread);
335 		fport->peek_count--;
336 		kick |= ipc_kmsg_delayed_destroy(thread->ith_kmsg);
337 	}
338 
339 	ip_mq_unlock(lport);
340 
341 	if (kick) {
342 		ipc_kmsg_reap_delayed();
343 	}
344 }
345 
346 
347 
/*** FLIPC Node Management Functions (called by mach node layer) ***/
349 
350 
351 /*  flipc_node_prepare() is called by mach node layer when a remote node is
352  *  registered by a link driver, or when the bootstrap port changes for the
353  *  local node.  This is the flipc layer's opportunity to initialize per-node
354  *  flipc state, and to convert the node's bootstrap port into a flipc port.
355  *  Note that the node is not yet in the mach node table.
356  *  Returns KERN_SUCCESS on success; otherwise node is not prepared.
357  */
358 kern_return_t
flipc_node_prepare(mach_node_t node)359 flipc_node_prepare(mach_node_t node)
360 {
361 	kern_return_t kr;
362 
363 	assert(MACH_NODE_VALID(node));
364 	ipc_port_t bs_port = node->bootstrap_port;
365 	assert(IP_VALID(bs_port));
366 
367 	ip_mq_lock(bs_port);
368 
369 	kr = flipc_port_create(bs_port,
370 	    node,
371 	    MNL_NAME_BOOTSTRAP(node->info.node_id));
372 	ip_mq_unlock(bs_port);
373 
374 	return kr;
375 }
376 
377 
378 /*  flipc_node_retire() is called by mach node layer when a remote node is
379  *  terminated by a link driver, or when the local node's bootstrap port
380  *  becomes invalid.  This is the flipc layer's opportunity to free per-node
381  *  flipc state, and to revert the node's bootstrap port to a local ipc port.
382  *  <node> must be locked by the caller.
383  *  Returns KERN_SUCCESS on success.
384  */
385 kern_return_t
flipc_node_retire(mach_node_t node)386 flipc_node_retire(mach_node_t node)
387 {
388 	if (!MACH_NODE_VALID(node)) {
389 		return KERN_NODE_DOWN;
390 	}
391 
392 	ipc_port_t bs_port = node->bootstrap_port;
393 	if (IP_VALID(bs_port)) {
394 		ip_mq_lock(bs_port);  // Revisit the lock when enabling flipc
395 		flipc_port_destroy(bs_port);
396 		ip_mq_unlock(bs_port);
397 	}
398 
399 	return KERN_SUCCESS;
400 }
401 
402 
403 /*** FLIPC Message Functions (called by mach node layer) ***/
404 
405 
/*  The node layer calls flipc_msg_to_remote_node() to fetch the next message
 *  for <node>.  This function will block until a message is available or the
 *  node is terminated, in which case it returns MNL_MSG_NULL.
 */
mnl_msg_t
flipc_msg_to_remote_node(mach_node_t  to_node,
    uint32_t     flags __unused)
{
	mach_port_seqno_t msgoff;
	ipc_kmsg_t kmsg = IKM_NULL;
	mnl_msg_t fmsg = MNL_MSG_NULL;

	assert(to_node != localnode);
	assert(get_preemption_level() == 0);

	/* Block on the node's proxy port set until any member port has traffic */
	struct waitq *pset_waitq = &to_node->proxy_port_set->ips_wqset.wqset_q;
	ipc_mqueue_t port_mq = IMQ_NULL;

	while (!to_node->dead) {
		/* Fetch next message from proxy port */
		ipc_mqueue_receive(pset_waitq, MACH_PEEK_MSG, 0, 0, THREAD_ABORTSAFE);

		/* A PEEK receive hands back the peeked mqueue in ith_peekq */
		thread_t thread = current_thread();
		if (thread->ith_state == MACH_PEEK_READY) {
			port_mq = thread->ith_peekq;
			thread->ith_peekq = IMQ_NULL;
		} else {
			panic("Unexpected thread state %d after ipc_mqueue_receive()",
			    thread->ith_state);
		}

		assert(get_preemption_level() == 0);

		flipc_port_t fport = port_mq->imq_fport;

		if (FPORT_VALID(fport)) {
			/* Peek at the first message we have not yet peeked at;
			 * peek_count tracks how far ahead of the acks we are. */
			msgoff = port_mq->imq_fport->peek_count;

			ipc_mqueue_peek_locked(port_mq, &msgoff, NULL, NULL, NULL, &kmsg);
			if (kmsg != IKM_NULL) {
				port_mq->imq_fport->peek_count++;
			}

			/* Clean up outstanding prepost on port_mq.
			 * This also unlocks port_mq.
			 */
			ipc_mqueue_release_peek_ref(port_mq);
			assert(get_preemption_level() == 0);

			/* DANGER:  The code below must be allowed to allocate so it can't
			 * run under the protection of the imq_lock, but that leaves mqueue
			 * open for business for a small window before we examine kmsg.
			 * This SHOULD be OK, since we are the only thread looking.
			 */
			if (kmsg != IKM_NULL) {
				/* NOTE(review): the return value of mnl_msg_from_kmsg() is
				 * ignored; on failure fmsg stays MNL_MSG_NULL and we simply
				 * loop — confirm this is the intended retry behavior. */
				mnl_msg_from_kmsg(kmsg, (mnl_msg_t*)&fmsg);
			}
		} else {
			/* Must be from the control_port, which is not a flipc port */
			assert(!FPORT_VALID(port_mq->imq_fport));

			/* This is a simplified copy of ipc_mqueue_select_on_thread() */
			kmsg = ipc_kmsg_queue_first(&port_mq->imq_messages);
			assert(kmsg != IKM_NULL);
			ipc_kmsg_rmqueue(&port_mq->imq_messages, kmsg);
			ipc_mqueue_release_msgcount(port_mq);
			counter_inc(&current_task()->messages_received);
			ip_release(to_node->control_port); // Should derive ref from port_mq

			/* We just pass the kmsg payload as the fmsg.
			 * flipc_msg_free() will notice and free the kmsg properly.
			 */
			mach_msg_header_t *hdr = ikm_header(kmsg);
			fmsg = (mnl_msg_t)(&hdr[1]);
			/* Stash kmsg pointer just before fmsg so flipc_msg_free() can
			 * recover it with the mirror-image pointer arithmetic. */
			*(ipc_kmsg_t*)((vm_offset_t)fmsg - sizeof(vm_offset_t)) = kmsg;
		}

		if (MNL_MSG_VALID(fmsg)) {
			break;
		}
	}
	/* NOTE(review): if the loop exits because to_node->dead became true,
	 * fmsg may still be MNL_MSG_NULL and this assert fires — confirm the
	 * header comment's "returns MNL_MSG_NULL on termination" contract. */
	assert(MNL_MSG_VALID(fmsg));
	return fmsg;
}
491 
492 
/*  The mach node layer calls this to deliver an incoming message.  It is the
 *  responsibility of the caller to release the received message buffer after
 *  return.
 */
void
flipc_msg_from_node(mach_node_t from_node   __unused,
    mnl_msg_t   msg,
    uint32_t    flags)
{
	/*  Note that if flipc message forwarding is supported, the from_node arg
	 *  may not match fmsg->node_id.  The former is the node from which we
	 *	received the message; the latter is the node that originated the
	 *	message.  We use the originating node, which is where the ack goes.
	 */
	assert(msg->sub == MACH_NODE_SUB_FLIPC);

	/* Resolve the originating node (returned locked; we only need the ref) */
	/* NOTE(review): node is not checked for validity before unlocking or
	 * passing to the command handlers — confirm mach_node_for_id_locked()
	 * cannot return an invalid node for a wire-supplied node_id. */
	mach_node_t node = mach_node_for_id_locked(msg->node_id, FALSE, FALSE);
	MACH_NODE_UNLOCK(node);

	/* Dispatch on the flipc command carried in the mnl header */
	switch (msg->cmd) {
	case FLIPC_CMD_IPCMESSAGE:
		flipc_cmd_ipc(msg, node, flags);
		break;

	case FLIPC_CMD_ACKMESSAGE:
	case FLIPC_CMD_NAKMESSAGE:
		flipc_cmd_ack((flipc_ack_msg_t)msg, node, flags);
		break;

	default:
#if DEBUG
		PE_enter_debugger("flipc_incoming(): Invalid command");
#endif
		break;
	}
}
528 
529 
530 /*  The node layer calls flipc_msg_free() to dispose of sent messages that
531  *  originated in the FLIPC layer.  This allows us to repurpose the payload
532  *  of an ack or nak kmsg as a flipc message to avoid a copy - we detect
533  *  such messages here and free them appropriately.
534  */
535 void
flipc_msg_free(mnl_msg_t msg,uint32_t flags)536 flipc_msg_free(mnl_msg_t    msg,
537     uint32_t     flags)
538 {
539 	switch (msg->cmd) {
540 	case FLIPC_CMD_ACKMESSAGE:  // Flipc msg is a kmsg in disguise...
541 	case FLIPC_CMD_NAKMESSAGE:  // Convert back to kmsg for disposal
542 		ipc_kmsg_free(*(ipc_kmsg_t*)((vm_offset_t)msg - sizeof(vm_offset_t)));
543 		break;
544 
545 	default:    // Flipc msg is not a kmsg in disguise; dispose of normally
546 		mnl_msg_free(msg, flags);
547 		break;
548 	}
549 }
550 
551 
552 /*** FLIPC Message Functions (called by mach ipc subsystem) ***/
553 
554 /*	Ack's one message sent to <mqueue> from <node>.  A new kmsg is allocated
555  *  and filled in as an ack, then posted to the node's contol port.  This will
556  *  wake the link driver (if sleeping) and cause the ack to be included with
557  *  normal IPC traffic.
558  *
559  *  This function immediately returns if <fport> or <node> is invalid, so it
560  *  is safe & quick to call speculatively.
561  *
562  *	Called from mach ipc_mqueue.c when a flipc-originated message is consumed.
563  */
564 void
flipc_msg_ack(mach_node_t node,ipc_mqueue_t mqueue,boolean_t delivered)565 flipc_msg_ack(mach_node_t   node,
566     ipc_mqueue_t  mqueue,
567     boolean_t     delivered)
568 {
569 	flipc_port_t fport = mqueue->imq_fport;
570 
571 	assert(FPORT_VALID(fport));
572 	assert(MACH_NODE_VALID(node));
573 
574 	mnl_name_t name = MNL_NAME_NULL;
575 	mach_node_id_t nid = HOST_LOCAL_NODE;
576 	ipc_port_t ack_port = IP_NULL;
577 
578 	ip_mq_lock(fport->lport);
579 	name = fport->obj.name;
580 	ip_mq_unlock(fport->lport);
581 
582 	if (!MNL_NAME_VALID(name)) {
583 		return;
584 	}
585 
586 	MACH_NODE_LOCK(node);
587 	if (node->active) {
588 		nid = node->info.node_id;
589 		ack_port = node->control_port;
590 	}
591 	MACH_NODE_UNLOCK(node);
592 
593 	if (!IP_VALID(ack_port) || !MACH_NODE_ID_VALID(nid)) {
594 		return;
595 	}
596 
597 	/* We have a valid node id & obj name, and a port to send the ack to. */
598 	ipc_kmsg_t kmsg = ipc_kmsg_alloc(sizeof(struct flipc_ack_msg), IPC_KMSG_ALLOC_KERNEL);
599 	assert((unsigned long long)kmsg >= 4ULL);//!= IKM_NULL);
600 	mach_msg_header_t *msg = ikm_header(kmsg);
601 
602 	/* Fill in the mach_msg_header struct */
603 	msg->msgh_bits = MACH_MSGH_BITS_SET(0, 0, 0, 0);
604 	msg->msgh_size = sizeof(msg);
605 	msg->msgh_remote_port = ack_port;
606 	msg->msgh_local_port = MACH_PORT_NULL;
607 	msg->msgh_voucher_port = MACH_PORT_NULL;
608 	msg->msgh_id = FLIPC_CMD_ID;
609 
610 	/* Fill in the flipc_ack_msg struct */
611 	flipc_ack_msg_t fmsg = (flipc_ack_msg_t)(&msg[1]);
612 	fmsg->resend_to = HOST_LOCAL_NODE;
613 	fmsg->msg_count = 1; // Might want to coalesce acks to a node/name pair
614 
615 	/* Fill in the mnl_msg struct */
616 	fmsg->mnl.sub = MACH_NODE_SUB_FLIPC;
617 	fmsg->mnl.cmd = delivered ? FLIPC_CMD_ACKMESSAGE : FLIPC_CMD_NAKMESSAGE;
618 	fmsg->mnl.qos = 0;    // Doesn't do anything yet
619 	fmsg->mnl.flags = 0;
620 	fmsg->mnl.node_id = nid;
621 	fmsg->mnl.object = name;
622 	fmsg->mnl.options = 0;
623 	fmsg->mnl.size = sizeof(struct flipc_ack_msg) - sizeof(struct mnl_msg);
624 
625 #if (0)
626 	mach_msg_return_t mmr;
627 	ipc_mqueue_t ack_mqueue;
628 
629 	ip_mq_lock(ack_port); // Revisit the lock when enabling flipc
630 	ack_mqueue = &ack_port->ip_messages;
631 
632 	/* ipc_mqueue_send() unlocks ack_mqueue */
633 	mmr = ipc_mqueue_send_locked(ack_mqueue, kmsg, 0, 0);
634 #else
635 	kern_return_t kr;
636 	kr = ipc_kmsg_send(kmsg,
637 	    MACH_SEND_KERNEL_DEFAULT,
638 	    MACH_MSG_TIMEOUT_NONE);
639 #endif
640 }
641