1 /*
2 * Copyright (c) 2019-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <skywalk/os_skywalk_private.h>
30 #include <skywalk/nexus/netif/nx_netif.h>
31 #include <skywalk/nexus/flowswitch/fsw_var.h>
32 #include <skywalk/nexus/flowswitch/nx_flowswitch.h>
33
34
35 /*
36 * Notification destination, can be either direct netif device,
37 * or a flowswitch.
38 */
struct __notif_dest {
	uint8_t dest_type;              /* one of the __NOTIF_DEST_* values below */
#define __NOTIF_DEST_NONE       0       /* destination not (yet) resolved */
#define __NOTIF_DEST_FSW        1       /* deliver via a flowswitch */
#define __NOTIF_DEST_NETIF      2       /* deliver directly via a netif */
	union {                         /* discriminated by dest_type */
		struct nx_flowswitch *dest_fsw;
		struct nx_netif *dest_netif;
	};
	const char *dest_desc;          /* human-readable name, used for logging */
};
50
51 /* Create a notification destination from an ifnet device */
52 static inline errno_t
__notif_dest_by_ifp(struct __notif_dest * dest,const ifnet_t ifp)53 __notif_dest_by_ifp(struct __notif_dest *dest, const ifnet_t ifp)
54 {
55 struct nx_flowswitch *fsw;
56 struct nx_netif *netif;
57
58 if (dest == NULL || ifp == NULL) {
59 return EINVAL;
60 }
61
62 if (!IF_FULLY_ATTACHED(ifp)) {
63 return ENXIO;
64 }
65
66 if ((fsw = fsw_ifp_to_fsw(ifp)) != NULL) {
67 dest->dest_type = __NOTIF_DEST_FSW;
68 dest->dest_fsw = fsw;
69 dest->dest_desc = if_name(ifp);
70 return 0;
71 }
72
73 if ((netif = NA(ifp)->nifna_netif) != NULL) {
74 dest->dest_type = __NOTIF_DEST_NETIF;
75 dest->dest_netif = netif;
76 dest->dest_desc = if_name(ifp);
77 return 0;
78 }
79
80 return ENOENT;
81 }
82
83 /* Create a notification destination from a flowswitch uuid */
/*
 * Create a notification destination from a flowswitch uuid.
 * Returns 0 on success with *dest filled in as a __NOTIF_DEST_FSW
 * destination; EINVAL on a NULL dest, ENOENT if no nexus with that
 * uuid exists or the nexus has no flowswitch private state.
 */
static inline errno_t
__notif_dest_by_nx_uuid(struct __notif_dest *dest, const uuid_t nx_uuid)
{
	struct kern_nexus *nx;
	struct nx_flowswitch *fsw;
	/* fallback description when the fsw has no attached interface */
	const char *__null_terminated desc = "detached fsw";

	if (dest == NULL) {
		return EINVAL;
	}

	if ((nx = nx_find(nx_uuid, FALSE)) == NULL) {
		return ENOENT;
	}

	/*
	 * NOTE(review): if nx_find() returns a retained reference, both
	 * this error path and the success path below would leak it —
	 * confirm against nx_find()'s reference-counting contract.
	 */
	if ((fsw = NX_FSW_PRIVATE(nx)) == NULL) {
		return ENOENT;
	}

	dest->dest_type = __NOTIF_DEST_FSW;
	dest->dest_fsw = fsw;
	dest->dest_desc = (fsw->fsw_ifp != NULL)
	    ? if_name(fsw->fsw_ifp)
	    : desc;
	return 0;
}
110
111 /* function to send a packet channel event.
112 *
113 * Note on the event length limitations:
114 * The event that goes onto the channel is emplaced
115 * in a stack-allocated buffer, which includes
116 * the space for the packet channel event data.
117 * The size of the payload is governed by the
118 * `CHANNEL_EVENT_MAX_PAYLOAD_LEN' constant.
119 * See more details in `os_channel_event.h'
120 */
121
122 static inline errno_t
kern_channel_packet_event_notify(struct __notif_dest * dest,os_channel_event_type_t event_type,size_t event_dlen,uint8_t * __sized_by (event_dlen)event_data,uint32_t nx_port_id)123 kern_channel_packet_event_notify(struct __notif_dest *dest,
124 os_channel_event_type_t event_type, size_t event_dlen,
125 uint8_t *__sized_by(event_dlen)event_data, uint32_t nx_port_id)
126 {
127 char buf[CHANNEL_EVENT_MAX_LEN]
128 __attribute((aligned(sizeof(uint64_t))));
129 struct __kern_channel_event *event =
130 (struct __kern_channel_event *)(void *)buf;
131
132 if (dest == NULL || dest->dest_desc == NULL) {
133 return EINVAL;
134 }
135
136 if (sizeof(buf) < sizeof(event) + event_dlen) {
137 return EINVAL;
138 }
139 if ((event_type < CHANNEL_EVENT_MIN) || (CHANNEL_EVENT_MAX < event_type)) {
140 return EINVAL;
141 }
142
143 event->ev_type = event_type;
144 event->ev_flags = 0;
145 event->_reserved = 0;
146 event->ev_dlen = (uint16_t)event_dlen;
147 memcpy(event->ev_data, event_data, event_dlen);
148
149 SK_DF(SK_VERB_EVENTS, "%s[%d] kern_channel_event: %p dest_type: %hu len: %hu "
150 "type: %u flags: %u res: %hu dlen: %hu",
151 dest->dest_desc, nx_port_id, event, event_dlen,
152 event->ev_type, event->ev_flags, event->_reserved, event->ev_dlen);
153
154 switch (dest->dest_type) {
155 case __NOTIF_DEST_NETIF:
156 return netif_vp_na_channel_event(dest->dest_netif,
157 nx_port_id, event, CHANNEL_EVENT_MAX_LEN);
158 case __NOTIF_DEST_FSW:
159 return fsw_vp_na_channel_event(dest->dest_fsw,
160 nx_port_id, event, CHANNEL_EVENT_MAX_LEN);
161 default:
162 return EINVAL;
163 }
164 }
165
166 errno_t
kern_channel_event_transmit_status_with_packet(const kern_packet_t ph,const ifnet_t ifp)167 kern_channel_event_transmit_status_with_packet(const kern_packet_t ph,
168 const ifnet_t ifp)
169 {
170 errno_t err;
171 uint32_t nx_port_id;
172 os_channel_event_packet_transmit_status_t pkt_tx_status;
173 struct __notif_dest dest = {0, {NULL}, NULL};
174
175 if ((err = __notif_dest_by_ifp(&dest, ifp)) != 0) {
176 return err;
177 }
178
179 (void) __packet_get_tx_completion_status(ph,
180 &pkt_tx_status.packet_status);
181 if (pkt_tx_status.packet_status == KERN_SUCCESS) {
182 return 0;
183 }
184 err = __packet_get_packetid(ph, &pkt_tx_status.packet_id);
185 if (__improbable(err != 0)) {
186 return err;
187 }
188 err = __packet_get_tx_nx_port_id(ph, &nx_port_id);
189 if (__improbable(err != 0)) {
190 return err;
191 }
192
193 return kern_channel_packet_event_notify(&dest,
194 CHANNEL_EVENT_PACKET_TRANSMIT_STATUS,
195 sizeof(pkt_tx_status), (uint8_t*)&pkt_tx_status, nx_port_id);
196 }
197
198 errno_t
kern_channel_event_transmit_status(const ifnet_t ifp,os_channel_event_packet_transmit_status_t * pkt_tx_status,uint32_t nx_port_id)199 kern_channel_event_transmit_status(const ifnet_t ifp,
200 os_channel_event_packet_transmit_status_t *pkt_tx_status,
201 uint32_t nx_port_id)
202 {
203 errno_t err;
204 struct __notif_dest dest = {0, {NULL}, NULL};
205 uint8_t *event_data;
206
207 if ((err = __notif_dest_by_ifp(&dest, ifp)) != 0) {
208 return err;
209 }
210
211 /*
212 * -fbounds-safety: kern_channel_packet_event_notify only accepts
213 * uint8_t * for event_data.
214 */
215 event_data = (uint8_t * __bidi_indexable)
216 (os_channel_event_packet_transmit_status_t * __bidi_indexable) pkt_tx_status;
217 return kern_channel_packet_event_notify(&dest,
218 CHANNEL_EVENT_PACKET_TRANSMIT_STATUS,
219 sizeof(*pkt_tx_status), event_data, nx_port_id);
220 }
221
222 errno_t
kern_channel_event_transmit_status_with_nexus(const uuid_t nx_uuid,os_channel_event_packet_transmit_status_t * pkt_tx_status,uint32_t nx_port_id)223 kern_channel_event_transmit_status_with_nexus(const uuid_t nx_uuid,
224 os_channel_event_packet_transmit_status_t *pkt_tx_status,
225 uint32_t nx_port_id)
226 {
227 errno_t err;
228 struct __notif_dest dest = {0, {NULL}, NULL};
229 uint8_t *event_data;
230
231 if ((err = __notif_dest_by_nx_uuid(&dest, nx_uuid)) != 0) {
232 return err;
233 }
234
235 /*
236 * -fbounds-safety: kern_channel_packet_event_notify only accepts
237 * uint8_t * for event_data.
238 */
239 event_data = (uint8_t * __bidi_indexable)
240 (os_channel_event_packet_transmit_status_t * __bidi_indexable) pkt_tx_status;
241 return kern_channel_packet_event_notify(&dest,
242 CHANNEL_EVENT_PACKET_TRANSMIT_STATUS,
243 sizeof(*pkt_tx_status), event_data, nx_port_id);
244 }
245
246 errno_t
kern_channel_event_transmit_expired(const ifnet_t ifp,os_channel_event_packet_transmit_expired_t * pkt_tx_expired,uint32_t nx_port_id)247 kern_channel_event_transmit_expired(const ifnet_t ifp,
248 os_channel_event_packet_transmit_expired_t *pkt_tx_expired,
249 uint32_t nx_port_id)
250 {
251 errno_t err;
252 struct __notif_dest dest = {0, {NULL}, NULL};
253 uint8_t *event_data;
254
255 if ((err = __notif_dest_by_ifp(&dest, ifp)) != 0) {
256 return err;
257 }
258
259 /*
260 * -fbounds-safety: kern_channel_packet_event_notify only accepts
261 * uint8_t * for event_data.
262 */
263 event_data = (uint8_t * __bidi_indexable)
264 (os_channel_event_packet_transmit_expired_t * __bidi_indexable) pkt_tx_expired;
265 return kern_channel_packet_event_notify(&dest,
266 CHANNEL_EVENT_PACKET_TRANSMIT_EXPIRED,
267 sizeof(*pkt_tx_expired), event_data, nx_port_id);
268 }
269
270 extern errno_t
kern_channel_event_transmit_expired_with_nexus(const uuid_t nx_uuid,os_channel_event_packet_transmit_expired_t * pkt_tx_expired,uint32_t nx_port_id)271 kern_channel_event_transmit_expired_with_nexus(const uuid_t nx_uuid,
272 os_channel_event_packet_transmit_expired_t *pkt_tx_expired,
273 uint32_t nx_port_id)
274 {
275 errno_t err;
276 struct __notif_dest dest = {0, {NULL}, NULL};
277 uint8_t *event_data;
278
279 if ((err = __notif_dest_by_nx_uuid(&dest, nx_uuid)) != 0) {
280 return err;
281 }
282
283 /*
284 * -fbounds-safety: kern_channel_packet_event_notify only accepts
285 * uint8_t * for event_data.
286 */
287 event_data = (uint8_t * __bidi_indexable)
288 (os_channel_event_packet_transmit_expired_t * __bidi_indexable) pkt_tx_expired;
289 return kern_channel_packet_event_notify(&dest,
290 CHANNEL_EVENT_PACKET_TRANSMIT_EXPIRED,
291 sizeof(*pkt_tx_expired), event_data, nx_port_id);
292 }
293
294 /* routine to post kevent notification for the event ring */
void
kern_channel_event_notify(struct __kern_channel_ring *kring)
{
	/* event rings are implemented on the TX side of the adapter */
	ASSERT(kring->ckr_tx == NR_TX);

	SK_DF(SK_VERB_EVENTS, "%s(%d) na \"%s\" (0x%llx) kr 0x%llx",
	    sk_proc_name_address(current_proc()), sk_proc_pid(current_proc()),
	    KRNA(kring)->na_name, SK_KVA(KRNA(kring)), SK_KVA(kring));

	/* wake up channel waiters with the channel-event filter hint */
	na_post_event(kring, TRUE, FALSE, FALSE, CHAN_FILT_HINT_CHANNEL_EVENT);
}
306
/* sync routine for the event ring; always succeeds */
int
kern_channel_event_sync(struct __kern_channel_ring *kring, struct proc *p,
    uint32_t flags)
{
#pragma unused(p, flags)
	/* reclaim consumed slots; return value intentionally ignored */
	(void) kr_reclaim(kring);
	return 0;
}
316