xref: /xnu-12377.1.9/bsd/skywalk/channel/kern_channel_event.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2019-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <skywalk/os_skywalk_private.h>
30 #include <skywalk/nexus/netif/nx_netif.h>
31 #include <skywalk/nexus/flowswitch/fsw_var.h>
32 #include <skywalk/nexus/flowswitch/nx_flowswitch.h>
33 
34 
/*
 * Notification destination, can be either direct netif device,
 * or a flowswitch.
 */
struct __notif_dest {
	uint8_t dest_type;  /* discriminates the union below */
#define __NOTIF_DEST_NONE 0     /* not initialized */
#define __NOTIF_DEST_FSW 1      /* deliver through a flowswitch */
#define __NOTIF_DEST_NETIF 2    /* deliver through a netif device */
	union {
		struct nx_flowswitch *dest_fsw;     /* valid iff __NOTIF_DEST_FSW */
		struct nx_netif          *dest_netif; /* valid iff __NOTIF_DEST_NETIF */
	};
	const char *dest_desc;  /* name used only for logging (SK_DF) */
};
50 
51 /* Create a notification destination from an ifnet device */
52 static inline errno_t
__notif_dest_by_ifp(struct __notif_dest * dest,const ifnet_t ifp)53 __notif_dest_by_ifp(struct __notif_dest *dest, const ifnet_t ifp)
54 {
55 	struct nx_flowswitch *fsw;
56 	struct nx_netif *netif;
57 
58 	if (dest == NULL || ifp == NULL) {
59 		return EINVAL;
60 	}
61 
62 	if (!ifnet_is_fully_attached(ifp)) {
63 		return ENXIO;
64 	}
65 
66 	if ((fsw = fsw_ifp_to_fsw(ifp)) != NULL) {
67 		dest->dest_type = __NOTIF_DEST_FSW;
68 		dest->dest_fsw = fsw;
69 		dest->dest_desc = if_name(ifp);
70 		return 0;
71 	}
72 
73 	if ((netif = NA(ifp)->nifna_netif) != NULL) {
74 		dest->dest_type =  __NOTIF_DEST_NETIF;
75 		dest->dest_netif = netif;
76 		dest->dest_desc = if_name(ifp);
77 		return 0;
78 	}
79 
80 	return ENOENT;
81 }
82 
/* Create a notification destination from a flowswitch uuid */
static inline errno_t
__notif_dest_by_nx_uuid(struct __notif_dest *dest, const uuid_t nx_uuid)
{
	struct kern_nexus *nx;
	struct nx_flowswitch *fsw;
	const char *__null_terminated desc = "detached fsw";

	if (dest == NULL) {
		return EINVAL;
	}

	/* Locate the nexus by UUID (second arg FALSE: lock behavior of
	 * nx_find — confirm against its definition). */
	if ((nx = nx_find(nx_uuid, FALSE)) == NULL) {
		return ENOENT;
	}

	/* NOTE(review): if nx_find() returns a retained reference, it is
	 * released neither on this error path nor on success — confirm the
	 * refcount contract with nx_find()/nx_release(). */
	if ((fsw = NX_FSW_PRIVATE(nx)) == NULL) {
		return ENOENT;
	}

	dest->dest_type = __NOTIF_DEST_FSW;
	dest->dest_fsw = fsw;
	/* Prefer the attached interface's name; the fsw may be detached. */
	dest->dest_desc = (fsw->fsw_ifp != NULL)
	    ? if_name(fsw->fsw_ifp)
	    : desc;
	return 0;
}
110 
111 /* function to send a packet channel event.
112  *
113  * Note on the event length limitations:
114  * The event that goes onto the channel is emplaced
115  * in a stack-allocated buffer, which includes
116  * the space for the packet channel event data.
117  * The size of the payload is governed by the
118  * `CHANNEL_EVENT_MAX_PAYLOAD_LEN' constant.
119  * See more details in `os_channel_event.h'
120  */
121 
122 static inline errno_t
kern_channel_packet_event_notify(struct __notif_dest * dest,os_channel_event_type_t event_type,size_t event_dlen,uint8_t * __sized_by (event_dlen)event_data,uint32_t nx_port_id)123 kern_channel_packet_event_notify(struct __notif_dest *dest,
124     os_channel_event_type_t event_type, size_t event_dlen,
125     uint8_t *__sized_by(event_dlen)event_data, uint32_t nx_port_id)
126 {
127 	char buf[CHANNEL_EVENT_MAX_LEN] __sk_aligned(64);
128 	struct __kern_channel_event *event =
129 	    (struct __kern_channel_event *)(void *)buf;
130 
131 	if (dest == NULL || dest->dest_desc == NULL) {
132 		return EINVAL;
133 	}
134 
135 	if (sizeof(buf) < sizeof(event) + event_dlen) {
136 		return EINVAL;
137 	}
138 	if ((event_type < CHANNEL_EVENT_MIN) || (CHANNEL_EVENT_MAX < event_type)) {
139 		return EINVAL;
140 	}
141 
142 	event->ev_type = event_type;
143 	event->ev_flags = 0;
144 	event->_reserved = 0;
145 	event->ev_dlen = (uint16_t)event_dlen;
146 	memcpy(event->ev_data, event_data, event_dlen);
147 
148 	SK_DF(SK_VERB_EVENTS, "%s[%d] kern_channel_event: %p dest_type: %u len: %zu "
149 	    "type: %u flags: %u res: %hu dlen: %hu",
150 	    dest->dest_desc, nx_port_id, SK_KVA(event), dest->dest_type, event_dlen,
151 	    event->ev_type, event->ev_flags, event->_reserved, event->ev_dlen);
152 
153 	switch (dest->dest_type) {
154 	case __NOTIF_DEST_NETIF:
155 		return netif_vp_na_channel_event(dest->dest_netif,
156 		           nx_port_id, event, CHANNEL_EVENT_MAX_LEN);
157 	case __NOTIF_DEST_FSW:
158 		return fsw_vp_na_channel_event(dest->dest_fsw,
159 		           nx_port_id, event, CHANNEL_EVENT_MAX_LEN);
160 	default:
161 		return EINVAL;
162 	}
163 }
164 
165 errno_t
kern_channel_event_transmit_status_with_packet(const kern_packet_t ph,const ifnet_t ifp)166 kern_channel_event_transmit_status_with_packet(const kern_packet_t ph,
167     const ifnet_t ifp)
168 {
169 	errno_t err;
170 	uint32_t nx_port_id;
171 	os_channel_event_packet_transmit_status_t pkt_tx_status;
172 	struct __notif_dest dest = {0, {NULL}, NULL};
173 
174 	if ((err = __notif_dest_by_ifp(&dest, ifp)) != 0) {
175 		return err;
176 	}
177 
178 	(void) __packet_get_tx_completion_status(ph,
179 	    &pkt_tx_status.packet_status);
180 	if (pkt_tx_status.packet_status == KERN_SUCCESS) {
181 		return 0;
182 	}
183 	err = __packet_get_packetid(ph, &pkt_tx_status.packet_id);
184 	if (__improbable(err != 0)) {
185 		return err;
186 	}
187 	err = __packet_get_tx_nx_port_id(ph, &nx_port_id);
188 	if (__improbable(err != 0)) {
189 		return err;
190 	}
191 
192 	return kern_channel_packet_event_notify(&dest,
193 	           CHANNEL_EVENT_PACKET_TRANSMIT_STATUS,
194 	           sizeof(pkt_tx_status), (uint8_t*)&pkt_tx_status, nx_port_id);
195 }
196 
197 errno_t
kern_channel_event_transmit_status(const ifnet_t ifp,os_channel_event_packet_transmit_status_t * pkt_tx_status,uint32_t nx_port_id)198 kern_channel_event_transmit_status(const ifnet_t ifp,
199     os_channel_event_packet_transmit_status_t *pkt_tx_status,
200     uint32_t nx_port_id)
201 {
202 	errno_t err;
203 	struct __notif_dest dest = {0, {NULL}, NULL};
204 	uint8_t *event_data;
205 
206 	if ((err = __notif_dest_by_ifp(&dest, ifp)) != 0) {
207 		return err;
208 	}
209 
210 	/*
211 	 * -fbounds-safety: kern_channel_packet_event_notify only accepts
212 	 * uint8_t * for event_data.
213 	 */
214 	event_data = (uint8_t * __bidi_indexable)
215 	    (os_channel_event_packet_transmit_status_t * __bidi_indexable) pkt_tx_status;
216 	return kern_channel_packet_event_notify(&dest,
217 	           CHANNEL_EVENT_PACKET_TRANSMIT_STATUS,
218 	           sizeof(*pkt_tx_status), event_data, nx_port_id);
219 }
220 
221 errno_t
kern_channel_event_transmit_status_with_nexus(const uuid_t nx_uuid,os_channel_event_packet_transmit_status_t * pkt_tx_status,uint32_t nx_port_id)222 kern_channel_event_transmit_status_with_nexus(const uuid_t nx_uuid,
223     os_channel_event_packet_transmit_status_t *pkt_tx_status,
224     uint32_t nx_port_id)
225 {
226 	errno_t err;
227 	struct __notif_dest dest = {0, {NULL}, NULL};
228 	uint8_t *event_data;
229 
230 	if ((err = __notif_dest_by_nx_uuid(&dest, nx_uuid)) != 0) {
231 		return err;
232 	}
233 
234 	/*
235 	 * -fbounds-safety: kern_channel_packet_event_notify only accepts
236 	 * uint8_t * for event_data.
237 	 */
238 	event_data = (uint8_t * __bidi_indexable)
239 	    (os_channel_event_packet_transmit_status_t * __bidi_indexable) pkt_tx_status;
240 	return kern_channel_packet_event_notify(&dest,
241 	           CHANNEL_EVENT_PACKET_TRANSMIT_STATUS,
242 	           sizeof(*pkt_tx_status), event_data, nx_port_id);
243 }
244 
245 errno_t
kern_channel_event_transmit_expired(const ifnet_t ifp,os_channel_event_packet_transmit_expired_t * pkt_tx_expired,uint32_t nx_port_id)246 kern_channel_event_transmit_expired(const ifnet_t ifp,
247     os_channel_event_packet_transmit_expired_t *pkt_tx_expired,
248     uint32_t nx_port_id)
249 {
250 	errno_t err;
251 	struct __notif_dest dest = {0, {NULL}, NULL};
252 	uint8_t *event_data;
253 
254 	if ((err = __notif_dest_by_ifp(&dest, ifp)) != 0) {
255 		return err;
256 	}
257 
258 	/*
259 	 * -fbounds-safety: kern_channel_packet_event_notify only accepts
260 	 * uint8_t * for event_data.
261 	 */
262 	event_data = (uint8_t * __bidi_indexable)
263 	    (os_channel_event_packet_transmit_expired_t * __bidi_indexable) pkt_tx_expired;
264 	return kern_channel_packet_event_notify(&dest,
265 	           CHANNEL_EVENT_PACKET_TRANSMIT_EXPIRED,
266 	           sizeof(*pkt_tx_expired), event_data, nx_port_id);
267 }
268 
269 extern errno_t
kern_channel_event_transmit_expired_with_nexus(const uuid_t nx_uuid,os_channel_event_packet_transmit_expired_t * pkt_tx_expired,uint32_t nx_port_id)270 kern_channel_event_transmit_expired_with_nexus(const uuid_t nx_uuid,
271     os_channel_event_packet_transmit_expired_t *pkt_tx_expired,
272     uint32_t nx_port_id)
273 {
274 	errno_t err;
275 	struct __notif_dest dest = {0, {NULL}, NULL};
276 	uint8_t *event_data;
277 
278 	if ((err = __notif_dest_by_nx_uuid(&dest, nx_uuid)) != 0) {
279 		return err;
280 	}
281 
282 	/*
283 	 * -fbounds-safety: kern_channel_packet_event_notify only accepts
284 	 * uint8_t * for event_data.
285 	 */
286 	event_data = (uint8_t * __bidi_indexable)
287 	    (os_channel_event_packet_transmit_expired_t * __bidi_indexable) pkt_tx_expired;
288 	return kern_channel_packet_event_notify(&dest,
289 	           CHANNEL_EVENT_PACKET_TRANSMIT_EXPIRED,
290 	           sizeof(*pkt_tx_expired), event_data, nx_port_id);
291 }
292 
/* routine to post kevent notification for the event ring */
void
kern_channel_event_notify(struct __kern_channel_ring *kring)
{
	/* event rings live on the TX side of the adapter */
	ASSERT(kring->ckr_tx == NR_TX);

	SK_DF(SK_VERB_EVENTS, "na \"%s\" (%p) kr %p", KRNA(kring)->na_name,
	    SK_KVA(KRNA(kring)), SK_KVA(kring));

	/* post the kevent with the channel-event filter hint */
	na_post_event(kring, TRUE, FALSE, FALSE, CHAN_FILT_HINT_CHANNEL_EVENT);
}
304 
/* sync routine for the event ring */
int
kern_channel_event_sync(struct __kern_channel_ring *kring, struct proc *p,
    uint32_t flags)
{
#pragma unused(p, flags)
	/* reclaim ring slots; the return value is intentionally ignored —
	 * this sync always reports success to the caller */
	(void) kr_reclaim(kring);
	return 0;
}
314