/*
 * Copyright (c) 2013-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
 * THEORY OF OPERATION
 *
 * The socket content filter subsystem provides a way for user space agents to
 * make filtering decisions based on the content of the data being sent and
 * received by INET/INET6 sockets.
 *
 * A content filter user space agent gets a copy of the data, and the data is
 * also kept in a kernel buffer until the user space agent makes a pass or drop
 * decision. This unidirectional flow of content avoids unnecessary data copies
 * back to the kernel.
 *
 * A user space filter agent opens a kernel control socket with the name
 * CONTENT_FILTER_CONTROL_NAME to attach to the socket content filter subsystem.
 * When connected, a "struct content_filter" is created and set as the
 * "unitinfo" of the corresponding kernel control socket instance.
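 *
 * For illustration, a minimal user space sketch of that attach step
 * (hypothetical, error handling and the required entitlements omitted;
 * CONTENT_FILTER_CONTROL_NAME comes from <net/content_filter.h>):
 *
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *	struct ctl_info info = { 0 };
 *	strlcpy(info.ctl_name, CONTENT_FILTER_CONTROL_NAME,
 *	    sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);		// resolve name to ctl_id
 *	struct sockaddr_ctl sc = { 0 };
 *	sc.sc_len = sizeof(sc);
 *	sc.sc_family = AF_SYSTEM;
 *	sc.ss_sysaddr = AF_SYS_CONTROL;
 *	sc.sc_id = info.ctl_id;
 *	sc.sc_unit = 1;				// kernel control unit, 1-based
 *	connect(fd, (struct sockaddr *)&sc, sizeof(sc));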
 *
 * The socket content filter subsystem exchanges messages with the user space
 * filter agent until an ultimate pass or drop decision is made by the
 * user space filter agent.
 *
 * It should be noted that messages about many INET/INET6 sockets can be multiplexed
 * over a single kernel control socket.
 *
 * Notes:
 * - The current implementation supports all INET/INET6 sockets (e.g. TCP,
 *   UDP, ICMP).
 * - The current implementation supports up to two simultaneous content filters
 *   on iOS devices and eight simultaneous content filters on macOS.
 *
 *
 * NECP FILTER CONTROL UNIT
 *
 * A user space filter agent uses the Network Extension Control Policy (NECP)
 * database to specify which INET/INET6 sockets need to be filtered. The NECP
 * criteria may be based on a variety of properties like user ID or proc UUID.
 *
 * The NECP "filter control unit" is used by the socket content filter subsystem
 * to deliver the relevant INET/INET6 content information to the appropriate
 * user space filter agent via its kernel control socket instance.
 * This works as follows:
 *
 * 1) The user space filter agent specifies an NECP filter control unit when
 *    it adds its filtering rules to the NECP database.
 *
 * 2) The user space filter agent also sets its NECP filter control unit on the
 *    content filter kernel control socket via the socket option
 *    CFIL_OPT_NECP_CONTROL_UNIT (see the sketch below).
 *
 * 3) The NECP database is consulted to find out if a given INET/INET6 socket
 *    needs to be subjected to content filtering and returns the corresponding
 *    NECP filter control unit -- the NECP filter control unit is actually
 *    stored in the INET/INET6 socket structure so the NECP lookup is really simple.
 *
 * 4) The NECP filter control unit is then used to find the corresponding
 *    kernel control socket instance.
 *
 * Note: NECP currently supports a single filter control unit per INET/INET6 socket
 * but this restriction may soon be lifted.
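 *
 * A hedged sketch of step 2) above; the actual option handling lives in
 * cfil_ctl_setopt(), and the unit value here is hypothetical:
 *
 *	uint32_t unit = 42;	// must match the unit used in the NECP rules
 *	setsockopt(fd, SYSPROTO_CONTROL, CFIL_OPT_NECP_CONTROL_UNIT,
 *	    &unit, sizeof(unit));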
 *
 *
 * THE MESSAGING PROTOCOL
 *
 * The socket content filter subsystem and a user space filter agent
 * communicate over the kernel control socket via an asynchronous
 * messaging protocol (this is not a request-response protocol).
 * The socket content filter subsystem sends event messages to the user
 * space filter agent about the INET/INET6 sockets it is interested in filtering.
 * The user space filter agent sends action messages to either allow
 * data to pass or to disallow the data flow (and drop the connection).
 *
 * All messages over a content filter kernel control socket share the same
 * common header of type "struct cfil_msg_hdr". The message type indicates
 * whether it is an event message "CFM_TYPE_EVENT" or an action message
 * "CFM_TYPE_ACTION". The message header field "cfm_sock_id" identifies a
 * given INET/INET6 flow. For TCP, flows are per-socket. For UDP and other
 * datagram protocols, there can be multiple flows per socket.
 *
 * Note the message header length field may be padded for alignment and can
 * be larger than the actual content of the message.
 * The field "cfm_op" describes the kind of event or action.
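 *
 * Schematically, the common header looks like this (a sketch; see
 * "struct cfil_msg_hdr" in <net/content_filter.h> for the authoritative
 * definition):
 *
 *	struct cfil_msg_hdr {
 *		uint32_t	cfm_len;	// total length, padding included
 *		uint32_t	cfm_version;	// CFM_VERSION_CURRENT
 *		uint32_t	cfm_type;	// CFM_TYPE_EVENT or CFM_TYPE_ACTION
 *		uint32_t	cfm_op;		// CFM_OP_*
 *		cfil_sock_id_t	cfm_sock_id;	// identifies the flow
 *	};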
 *
 * Here are the kinds of content filter events:
 * - CFM_OP_SOCKET_ATTACHED: a new INET/INET6 socket is being filtered
 * - CFM_OP_SOCKET_CLOSED: an INET/INET6 socket is closed
 * - CFM_OP_DATA_OUT: a span of data is being sent on an INET/INET6 socket
 * - CFM_OP_DATA_IN: a span of data is being received on an INET/INET6 socket
 *
 *
 * EVENT MESSAGES
 *
 * The CFM_OP_DATA_OUT and CFM_OP_DATA_IN event messages contain a span of
 * data that is being sent or received. The position of this span of data
 * in the data flow is described by a set of start and end offsets. These
 * are absolute 64-bit offsets. The first byte sent (or received) starts
 * at offset 0 and ends at offset 1. The length of the content data
 * is given by the difference between the end offset and the start offset.
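 *
 * For example, a CFM_OP_DATA_OUT event covering the third through sixth
 * bytes ever sent on a flow carries start offset 2 and end offset 6;
 * the span length is 6 - 2 = 4 bytes.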
 *
 * After a CFM_OP_SOCKET_ATTACHED is delivered, CFM_OP_DATA_OUT and
 * CFM_OP_DATA_IN events are not delivered until a CFM_OP_DATA_UPDATE
 * action message is sent by the user space filter agent.
 *
 * Note: absolute 64-bit offsets should be large enough for the foreseeable
 * future. A 64-bit counter will wrap after 468 years at 10 Gbit/sec:
 *	2^64 / ((10 * 10^9 / 8) * 60 * 60 * 24 * 365.25) = 467.63
 *
 * There are two kinds of primary content filter actions:
 * - CFM_OP_DATA_UPDATE: to update pass or peek offsets for each direction.
 * - CFM_OP_DROP: to shut down the socket and disallow further data flow
 *
 * There is also an action to mark a given client flow as already filtered
 * at a higher level, CFM_OP_BLESS_CLIENT.
 *
 *
 * ACTION MESSAGES
 *
 * The CFM_OP_DATA_UPDATE action messages let the user space filter
 * agent allow data to flow up to the specified pass offset -- there
 * is a pass offset for outgoing data and a pass offset for incoming data.
 * When a new INET/INET6 socket is attached to the content filter and a flow is
 * created, each pass offset is initially set to 0 so no data is allowed to pass by
 * default. When the pass offset is set to CFM_MAX_OFFSET via a CFM_OP_DATA_UPDATE
 * then the data flow becomes unrestricted.
 *
 * Note that pass offsets can only be incremented. A CFM_OP_DATA_UPDATE message
 * with a pass offset smaller than the pass offset of a previous
 * CFM_OP_DATA_UPDATE message is silently ignored.
 *
 * A user space filter agent also uses CFM_OP_DATA_UPDATE action messages
 * to tell the kernel how much data it wants to see by using the peek offsets.
 * Just like pass offsets, there is a peek offset for each direction.
 * When a new INET/INET6 flow is created, each peek offset is initially set to 0
 * so no CFM_OP_DATA_OUT and CFM_OP_DATA_IN event messages are dispatched by default
 * until a CFM_OP_DATA_UPDATE action message with a peek offset greater than 0 is sent
 * by the user space filter agent. When the peek offset is set to CFM_MAX_OFFSET via
 * a CFM_OP_DATA_UPDATE then the flow of data events becomes unrestricted.
 *
 * Note that peek offsets cannot be smaller than the corresponding pass offset.
 * Also, a peek offset cannot be smaller than the corresponding end offset
 * of the last CFM_OP_DATA_OUT/CFM_OP_DATA_IN message dispatched. Attempts
 * to set a peek value that is too small are silently ignored.
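 *
 * For illustration, a sketch of an agent passing the first 1500 bytes in
 * each direction while asking to peek 4 KB further (field names from
 * "struct cfil_msg_action" in <net/content_filter.h>; the sock_id value
 * comes from an earlier event message; error handling omitted):
 *
 *	struct cfil_msg_action act = { 0 };
 *	act.cfa_msghdr.cfm_len = sizeof(act);
 *	act.cfa_msghdr.cfm_version = CFM_VERSION_CURRENT;
 *	act.cfa_msghdr.cfm_type = CFM_TYPE_ACTION;
 *	act.cfa_msghdr.cfm_op = CFM_OP_DATA_UPDATE;
 *	act.cfa_msghdr.cfm_sock_id = sock_id;
 *	act.cfa_out_pass_offset = 1500;
 *	act.cfa_in_pass_offset = 1500;
 *	act.cfa_out_peek_offset = 1500 + 4096;
 *	act.cfa_in_peek_offset = 1500 + 4096;
 *	send(fd, &act, sizeof(act), 0);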
 *
 *
 * PER FLOW "struct cfil_info"
 *
 * As soon as an INET/INET6 socket gets attached to a content filter, a
 * "struct cfil_info" is created to hold the content filtering state for this
 * socket. For UDP and other datagram protocols, as soon as traffic is seen for
 * each new flow identified by its 4-tuple of source address/port and destination
 * address/port, a "struct cfil_info" is created. Each datagram socket may
 * have multiple flows maintained in a hash table of "struct cfil_info" entries.
 *
 * The content filtering state is made of the following information
 * for each direction:
 * - The current pass offset;
 * - The first and last offsets of the data pending, waiting for a filtering
 *   decision;
 * - The inject queue for data that passed the filters and that needs
 *   to be re-injected;
 * - Content filter specific state in a set of "struct cfil_entry"
 *
 *
 * CONTENT FILTER STATE "struct cfil_entry"
 *
 * The "struct cfil_entry" maintains the information most relevant to the
 * message handling over a kernel control socket with a user space filter agent.
 *
 * The "struct cfil_entry" holds the NECP filter control unit that corresponds
 * to the kernel control socket unit, and also has a pointer
 * to the corresponding "struct content_filter".
 *
 * For each direction, "struct cfil_entry" maintains the following information:
 * - The pass offset
 * - The peek offset
 * - The offset of the last data peeked at by the filter
 * - A queue of data that's waiting to be delivered to the user space filter
 *   agent on the kernel control socket
 * - A queue of data for which event messages have been sent on the kernel
 *   control socket and are pending for a filtering decision.
 *
 *
 * CONTENT FILTER QUEUES
 *
 * Data that is being filtered is steered away from the INET/INET6 socket buffer
 * and instead will sit in one of three content filter queues until the data
 * can be re-injected into the INET/INET6 socket buffer.
 *
 * A content filter queue is represented by "struct cfil_queue" that contains
 * a list of mbufs and the start and end offset of the data span of
 * the list of mbufs.
 *
 * The data moves into the three content filter queues according to this
 * sequence:
 * a) The "cfe_ctl_q" of "struct cfil_entry"
 * b) The "cfe_pending_q" of "struct cfil_entry"
 * c) The "cfi_inject_q" of "struct cfil_info"
 *
 * Note: The sequence (a),(b) may be repeated several times if there is more
 * than one content filter attached to the INET/INET6 socket.
 *
 * The "cfe_ctl_q" queue holds data that cannot be delivered to the
 * kernel control socket for two reasons:
 * - The peek offset is less than the end offset of the mbuf data
 * - The kernel control socket is flow controlled
 *
 * The "cfe_pending_q" queue holds data for which CFM_OP_DATA_OUT or
 * CFM_OP_DATA_IN have been successfully dispatched to the kernel control
 * socket and that is waiting for a pass action message from the user space
 * filter agent. An mbuf must be fully allowed to pass before it is removed
 * from the cfe_pending_q.
 *
 * The "cfi_inject_q" queue holds data that has been fully allowed to pass
 * by the user space filter agent and that needs to be re-injected into the
 * INET/INET6 socket.
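 *
 * Schematically, for each attached filter (a sketch of the sequence
 * described above):
 *
 *	socket buffer -> cfe_ctl_q -> cfe_pending_q -> cfi_inject_q -> socket buffer
 *	                 (not yet      (sent, waiting    (passed, waiting
 *	                 delivered)    for a verdict)    for re-injection)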
 *
 *
 * IMPACT ON FLOW CONTROL
 *
 * An essential aspect of the content filter subsystem is to minimize the
 * impact on flow control of the INET/INET6 sockets being filtered.
 *
 * The processing overhead of the content filtering may have an effect on
 * flow control by adding noticeable delays and cannot be eliminated --
 * care must be taken by the user space filter agent to minimize the
 * processing delays.
 *
 * The data being filtered is kept in buffers while waiting for
 * a decision by the user space filter agent. This amount of pending data
 * needs to be subtracted from the amount of data available in the
 * corresponding INET/INET6 socket buffer. This is done by modifying
 * sbspace() and tcp_sbspace() to account for the amount of data pending
 * in the content filter.
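 *
 * Conceptually (a sketch, not the exact code):
 *
 *	effective space = sbspace(sb) - (cfi_pending_last - cfi_pending_first)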
 *
 *
 * LOCKING STRATEGY
 *
 * The global state of the content filter subsystem is protected by a single
 * read-write lock "cfil_lck_rw". The data flow can be done with the
 * cfil read-write lock held as shared so it can be re-entered from multiple
 * threads.
 *
 * The per INET/INET6 socket content filter state -- "struct cfil_info" -- is
 * protected by the socket lock.
 *
 * An INET/INET6 socket lock cannot be taken while the cfil read-write lock
 * is held. That's why we have some sequences where we drop the cfil read-write
 * lock before taking the INET/INET6 socket lock.
 *
 * It is also important to lock the INET/INET6 socket buffer while the content
 * filter is modifying the amount of pending data. Otherwise the calculations
 * in sbspace() and tcp_sbspace() could be wrong.
 *
 * The "cfil_lck_rw" protects "struct content_filter" and also the fields
 * "cfe_link" and "cfe_filter" of "struct cfil_entry".
 *
 * Actually "cfe_link" and "cfe_filter" are protected both by
 * "cfil_lck_rw" and the socket lock: they may be modified only when
 * "cfil_lck_rw" is held exclusive and the socket is locked.
 *
 * To read the other fields of "struct content_filter" we have to take
 * "cfil_lck_rw" in shared mode.
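 *
 * The resulting pattern looks like this sketch (see cfil_ctl_disconnect()
 * below for a real instance of dropping the global lock first):
 *
 *	cfil_rw_lock_shared(&cfil_lck_rw);
 *	// ... look up the flow, take a reference on its cfil_info ...
 *	cfil_rw_unlock_shared(&cfil_lck_rw);	// drop before the socket lock
 *	socket_lock(so, 1);
 *	// ... operate on the socket and its cfil_info ...
 *	socket_unlock(so, 1);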
 *
 * DATAGRAM SPECIFICS:
 *
 * The socket content filter supports all INET/INET6 protocols. However
 * the treatments for TCP sockets and for datagram (UDP, ICMP, etc.) sockets
 * are slightly different.
 *
 * Each datagram socket may have multiple flows. Each flow is identified
 * by the flow's source address/port and destination address/port tuple
 * and is represented as a "struct cfil_info" entry. For each socket,
 * a hash table is used to maintain the collection of flows under that socket.
 *
 * Each datagram flow is uniquely identified by its "struct cfil_info" cfi_sock_id.
 * The highest 32 bits of the cfi_sock_id contain the socket's so_gencnt. This portion
 * of the cfi_sock_id is used to locate the socket during socket lookup. The lowest 32 bits
 * of the cfi_sock_id contain a hash of the flow's 4-tuple. This portion of the cfi_sock_id
 * is used as the hash value for the flow hash table lookup within the parent socket.
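 *
 * Schematically (the CFI_MASK_GENCNT/CFI_MASK_FLOWHASH macros below
 * implement this split):
 *
 *	cfi_sock_id = ((uint64_t)so_gencnt << 32) | hash(4-tuple);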
 *
 * Since datagram sockets may not be connected, flow states may not be maintained in the
 * socket structures and thus have to be saved for each packet. These saved states will be
 * used for both outgoing and incoming reinjections. For outgoing packets, the destination
 * address/port as well as the current socket states will be saved. During reinjection,
 * these saved states will be used instead. For incoming packets, control and address
 * mbufs will be chained to the data. During reinjection, the whole chain will be queued
 * onto the incoming socket buffer.
 *
 * LIMITATIONS
 *
 * - Supports all INET/INET6 sockets, such as TCP, UDP, ICMP, etc.
 *
 * - Does not support TCP unordered messages
 */

/*
 * TO DO LIST
 *
 * Deal with OOB
 *
 */

#include <sys/types.h>
#include <sys/kern_control.h>
#include <sys/queue.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/mbuf.h>

#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/debug.h>

#include <net/content_filter.h>
#include <net/content_filter_crypto.h>

#define _IP_VHL
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <kern/socket_flows.h>

#include <string.h>
#include <libkern/libkern.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <mach/task_info.h>

#if !XNU_TARGET_OS_OSX
#define MAX_CONTENT_FILTER 2
#else
#define MAX_CONTENT_FILTER 8
#endif

extern struct inpcbinfo ripcbinfo;
struct cfil_entry;

/*
 * The structure content_filter represents a user space content filter.
 * It's created and associated with a kernel control socket instance.
 */
struct content_filter {
	kern_ctl_ref            cf_kcref;
	u_int32_t               cf_kcunit;
	u_int32_t               cf_flags;

	uint32_t                cf_necp_control_unit;

	uint32_t                cf_sock_count;
	TAILQ_HEAD(, cfil_entry) cf_sock_entries;

	cfil_crypto_state_t     cf_crypto_state;
};

#define CFF_ACTIVE                0x01
#define CFF_DETACHING             0x02
#define CFF_FLOW_CONTROLLED       0x04
#define CFF_PRESERVE_CONNECTIONS  0x08

struct content_filter **content_filters = NULL;
uint32_t cfil_active_count = 0; /* Number of active content filters */
uint32_t cfil_sock_attached_count = 0;  /* Number of socket attachments */
uint32_t cfil_sock_attached_stats_count = 0;    /* Number of sockets requesting periodic stats reports */
uint32_t cfil_close_wait_timeout = 1000;        /* in milliseconds */

static kern_ctl_ref cfil_kctlref = NULL;

static LCK_GRP_DECLARE(cfil_lck_grp, "content filter");
static LCK_RW_DECLARE(cfil_lck_rw, &cfil_lck_grp);

#define CFIL_RW_LCK_MAX 8

int cfil_rw_nxt_lck = 0;
void* cfil_rw_lock_history[CFIL_RW_LCK_MAX];

int cfil_rw_nxt_unlck = 0;
void* cfil_rw_unlock_history[CFIL_RW_LCK_MAX];

static ZONE_DECLARE(content_filter_zone, "content_filter",
    sizeof(struct content_filter), ZC_NONE);

MBUFQ_HEAD(cfil_mqhead);

struct cfil_queue {
	uint64_t                q_start; /* offset of first byte in queue */
	uint64_t                q_end;   /* offset of last byte in queue */
	struct cfil_mqhead      q_mq;
};

/*
 * struct cfil_entry
 *
 * There is one entry per content filter
 */
struct cfil_entry {
	TAILQ_ENTRY(cfil_entry) cfe_link;
	SLIST_ENTRY(cfil_entry) cfe_order_link;
	struct content_filter   *cfe_filter;

	struct cfil_info        *cfe_cfil_info;
	uint32_t                cfe_flags;
	uint32_t                cfe_necp_control_unit;
	struct timeval          cfe_last_event;  /* To user space */
	struct timeval          cfe_last_action; /* From user space */
	uint64_t                cfe_byte_inbound_count_reported;  /* stats already reported */
	uint64_t                cfe_byte_outbound_count_reported; /* stats already reported */
	struct timeval          cfe_stats_report_ts;        /* Timestamp of last stats report */
	uint32_t                cfe_stats_report_frequency; /* Interval for stats report in msecs */
	boolean_t               cfe_laddr_sent;

	struct cfe_buf {
		/*
		 * cfe_pending_q holds data that has been delivered to
		 * the filter and for which we are waiting for an action
		 */
		struct cfil_queue       cfe_pending_q;
		/*
		 * This queue is for data that has not been delivered to
		 * the content filter (new data, past the peek offset, or
		 * flow controlled)
		 */
		struct cfil_queue       cfe_ctl_q;

		uint64_t                cfe_pass_offset;
		uint64_t                cfe_peek_offset;
		uint64_t                cfe_peeked;
	} cfe_snd, cfe_rcv;
};

#define CFEF_CFIL_ATTACHED              0x0001  /* was attached to filter */
#define CFEF_SENT_SOCK_ATTACHED         0x0002  /* sock attach event was sent */
#define CFEF_DATA_START                 0x0004  /* can send data event */
#define CFEF_FLOW_CONTROLLED            0x0008  /* wait for flow control lift */
#define CFEF_SENT_DISCONNECT_IN         0x0010  /* event was sent */
#define CFEF_SENT_DISCONNECT_OUT        0x0020  /* event was sent */
#define CFEF_SENT_SOCK_CLOSED           0x0040  /* closed event was sent */
#define CFEF_CFIL_DETACHED              0x0080  /* filter was detached */


#define CFI_ADD_TIME_LOG(cfil, t1, t0, op)                                                              \
	struct timeval64 _tdiff;                                                                        \
	if ((cfil)->cfi_op_list_ctr < CFI_MAX_TIME_LOG_ENTRY) {                                         \
	        timersub(t1, t0, &_tdiff);                                                              \
	        (cfil)->cfi_op_time[(cfil)->cfi_op_list_ctr] = (uint32_t)(_tdiff.tv_sec * 1000 + _tdiff.tv_usec / 1000); \
	        (cfil)->cfi_op_list[(cfil)->cfi_op_list_ctr] = (unsigned char)op;                       \
	        (cfil)->cfi_op_list_ctr++;                                                              \
	}

/*
 * struct cfil_info
 *
 * There is a struct cfil_info per socket
 */
struct cfil_info {
	TAILQ_ENTRY(cfil_info)  cfi_link;
	TAILQ_ENTRY(cfil_info)  cfi_link_stats;
	struct socket           *cfi_so;
	uint64_t                cfi_flags;
	uint64_t                cfi_sock_id;
	struct timeval64        cfi_first_event;
	uint32_t                cfi_op_list_ctr;
	uint32_t                cfi_op_time[CFI_MAX_TIME_LOG_ENTRY]; /* time interval in milliseconds since first event */
	unsigned char           cfi_op_list[CFI_MAX_TIME_LOG_ENTRY];
	union sockaddr_in_4_6   cfi_so_attach_faddr; /* faddr at the time of attach */
	union sockaddr_in_4_6   cfi_so_attach_laddr; /* laddr at the time of attach */

	int                     cfi_dir;
	uint64_t                cfi_byte_inbound_count;
	uint64_t                cfi_byte_outbound_count;

	boolean_t               cfi_isSignatureLatest; /* Indicates if signature covers latest flow attributes */
	u_int32_t               cfi_filter_control_unit;
	u_int32_t               cfi_debug;
	struct cfi_buf {
		/*
		 * cfi_pending_first and cfi_pending_last describe the total
		 * amount of data outstanding for all the filters on
		 * this socket and data in the flow queue.
		 * cfi_pending_mbcnt counts in sballoc() "chars of mbufs used"
		 */
		uint64_t                cfi_pending_first;
		uint64_t                cfi_pending_last;
		uint32_t                cfi_pending_mbcnt;
		uint32_t                cfi_pending_mbnum;
		uint32_t                cfi_tail_drop_cnt;
		/*
		 * cfi_pass_offset is the minimum of all the filters
		 */
		uint64_t                cfi_pass_offset;
		/*
		 * cfi_inject_q holds data that needs to be re-injected
		 * into the socket after filtering and that can
		 * be queued because of flow control
		 */
		struct cfil_queue       cfi_inject_q;
	} cfi_snd, cfi_rcv;

	struct cfil_entry       cfi_entries[MAX_CONTENT_FILTER];
	struct soflow_hash_entry *cfi_hash_entry;
	SLIST_HEAD(, cfil_entry) cfi_ordered_entries;
	os_refcnt_t             cfi_ref_count;
} __attribute__((aligned(8)));

#define CFIF_DROP               0x0001  /* drop action applied */
#define CFIF_CLOSE_WAIT         0x0002  /* waiting for filter to close */
#define CFIF_SOCK_CLOSED        0x0004  /* socket is closed */
#define CFIF_RETRY_INJECT_IN    0x0010  /* inject in failed */
#define CFIF_RETRY_INJECT_OUT   0x0020  /* inject out failed */
#define CFIF_SHUT_WR            0x0040  /* shutdown write */
#define CFIF_SHUT_RD            0x0080  /* shutdown read */
#define CFIF_SOCKET_CONNECTED   0x0100  /* socket is connected */
#define CFIF_INITIAL_VERDICT    0x0200  /* received initial verdict */
#define CFIF_NO_CLOSE_WAIT      0x0400  /* do not wait to close */

#define CFI_MASK_GENCNT         0xFFFFFFFF00000000      /* upper 32 bits */
#define CFI_SHIFT_GENCNT        32
#define CFI_MASK_FLOWHASH       0x00000000FFFFFFFF      /* lower 32 bits */
#define CFI_SHIFT_FLOWHASH      0

#define CFI_ENTRY_KCUNIT(i, e) ((uint32_t)(((e) - &((i)->cfi_entries[0])) + 1))

static ZONE_DECLARE(cfil_info_zone, "cfil_info",
    sizeof(struct cfil_info), ZC_NONE);

TAILQ_HEAD(cfil_sock_head, cfil_info) cfil_sock_head;
TAILQ_HEAD(cfil_sock_head_stats, cfil_info) cfil_sock_head_stats;

#define CFIL_QUEUE_VERIFY(x) if (cfil_debug) cfil_queue_verify(x)
#define CFIL_INFO_VERIFY(x) if (cfil_debug) cfil_info_verify(x)

/*
 * UDP Socket Support
 */
#define IS_ICMP(so) (so && so->so_proto && (so->so_proto->pr_type == SOCK_RAW || so->so_proto->pr_type == SOCK_DGRAM) && \
    (so->so_proto->pr_protocol == IPPROTO_ICMP || so->so_proto->pr_protocol == IPPROTO_ICMPV6))
#define IS_RAW(so)  (so && so->so_proto && so->so_proto->pr_type == SOCK_RAW && so->so_proto->pr_protocol == IPPROTO_RAW)

#define OPTIONAL_IP_HEADER(so) (!IS_TCP(so) && !IS_UDP(so))
#define GET_SO_PROTO(so) ((so && so->so_proto) ? so->so_proto->pr_protocol : IPPROTO_MAX)
#define IS_INP_V6(inp) (inp && (inp->inp_vflag & INP_IPV6))

#define UNCONNECTED(inp) (inp && (((inp->inp_vflag & INP_IPV4) && (inp->inp_faddr.s_addr == INADDR_ANY)) || \
    ((inp->inp_vflag & INP_IPV6) && IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr))))
#define IS_ENTRY_ATTACHED(cfil_info, kcunit) (cfil_info != NULL && (kcunit <= MAX_CONTENT_FILTER) && \
    cfil_info->cfi_entries[kcunit - 1].cfe_filter != NULL)
#define IS_DNS(local, remote) (check_port(local, 53) || check_port(remote, 53) || check_port(local, 5353) || check_port(remote, 5353))
#define IS_INITIAL_TFO_DATA(so) (so && (so->so_flags1 & SOF1_PRECONNECT_DATA) && (so->so_state & SS_ISCONNECTING))
#define NULLADDRESS(addr) ((addr.sa.sa_len == 0) || \
    (addr.sa.sa_family == AF_INET && addr.sin.sin_addr.s_addr == 0) || \
    (addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&addr.sin6.sin6_addr)))

#define SKIP_FILTER_FOR_TCP_SOCKET(so) \
    (so == NULL || so->so_proto == NULL || so->so_proto->pr_domain == NULL || \
    (so->so_proto->pr_domain->dom_family != PF_INET && so->so_proto->pr_domain->dom_family != PF_INET6) || \
    so->so_proto->pr_type != SOCK_STREAM || \
    so->so_proto->pr_protocol != IPPROTO_TCP || \
    (so->so_flags & SOF_MP_SUBFLOW) != 0 || \
    (so->so_flags1 & SOF1_CONTENT_FILTER_SKIP) != 0)

/*
 * Special handling for 0.0.0.0-faddr TCP flows. These flows will be changed to the loopback addr by TCP and
 * may result in an immediate TCP RESET and socket close. This leads to CFIL blocking the owner thread for
 * 1 sec waiting for an ack from the user-space provider (the ack is received by CFIL but the socket has
 * already been removed from the global socket list). To avoid this, identify these flows and do not perform
 * the close-wait blocking. These flows are identified as destined to the loopback address and disconnected
 * shortly after connect (before the initial verdict is received).
 */
#define IS_LOOPBACK_FADDR(inp) \
    (inp && ((IS_INP_V6(inp) && IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr)) || (ntohl(inp->inp_faddr.s_addr) == INADDR_LOOPBACK)))

#define SET_NO_CLOSE_WAIT(inp, cfil_info) \
	if (inp && cfil_info && !(cfil_info->cfi_flags & CFIF_INITIAL_VERDICT) && IS_LOOPBACK_FADDR(inp)) { \
	        cfil_info->cfi_flags |= CFIF_NO_CLOSE_WAIT; \
	}

#define IS_NO_CLOSE_WAIT(cfil_info) (cfil_info && (cfil_info->cfi_flags & CFIF_NO_CLOSE_WAIT))

os_refgrp_decl(static, cfil_refgrp, "CFILRefGroup", NULL);

#define CFIL_INFO_FREE(cfil_info) \
	if (cfil_info && (os_ref_release(&cfil_info->cfi_ref_count) == 0)) { \
	        cfil_info_free(cfil_info); \
	}

#define SOCKET_PID(so) ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid)
#define MATCH_PID(so) (so && (cfil_log_pid == SOCKET_PID(so)))
#define MATCH_PORT(inp, local, remote) \
	((inp && ntohs(inp->inp_lport) == cfil_log_port) || (inp && ntohs(inp->inp_fport) == cfil_log_port) || \
	check_port(local, cfil_log_port) || check_port(remote, cfil_log_port))
#define MATCH_PROTO(so) (GET_SO_PROTO(so) == cfil_log_proto)

#define DEBUG_FLOW(inp, so, local, remote) \
	((cfil_log_port && MATCH_PORT(inp, local, remote)) || (cfil_log_pid && MATCH_PID(so)) || (cfil_log_proto && MATCH_PROTO(so)))

/*
 * Periodic Statistics Report:
 */
static struct thread *cfil_stats_report_thread;
#define CFIL_STATS_REPORT_INTERVAL_MIN_MSEC  500  // Highest report frequency
#define CFIL_STATS_REPORT_RUN_INTERVAL_NSEC  (CFIL_STATS_REPORT_INTERVAL_MIN_MSEC * NSEC_PER_MSEC)
#define CFIL_STATS_REPORT_MAX_COUNT          50   // Max stats to be reported per run

/* This buffer must have same layout as struct cfil_msg_stats_report */
struct cfil_stats_report_buffer {
	struct cfil_msg_hdr        msghdr;
	uint32_t                   count;
	struct cfil_msg_sock_stats stats[CFIL_STATS_REPORT_MAX_COUNT];
};
static struct cfil_stats_report_buffer *global_cfil_stats_report_buffers[MAX_CONTENT_FILTER];
static uint32_t global_cfil_stats_counts[MAX_CONTENT_FILTER];

/*
 * UDP Garbage Collection:
 */
#define UDP_FLOW_GC_ACTION_TO  10   // Flow Action Timeout (no action from user space) in seconds
#define UDP_FLOW_GC_MAX_COUNT  100  // Max UDP flows to be handled per run

/*
 * UDP flow queue thresholds
 */
#define UDP_FLOW_GC_MBUF_CNT_MAX (2 << MBSHIFT)                         // Max mbuf byte count in flow queue (2MB)
#define UDP_FLOW_GC_MBUF_NUM_MAX (UDP_FLOW_GC_MBUF_CNT_MAX >> MCLSHIFT) // Max mbuf count in flow queue (1K)
#define UDP_FLOW_GC_MBUF_SHIFT   5                                      // Shift to get 1/32 of platform limits
/*
 * UDP flow queue threshold globals:
 */
static unsigned int cfil_udp_gc_mbuf_num_max = UDP_FLOW_GC_MBUF_NUM_MAX;
static unsigned int cfil_udp_gc_mbuf_cnt_max = UDP_FLOW_GC_MBUF_CNT_MAX;

/*
 * CFIL specific mbuf tag:
 * Save state of socket at the point of data entry into cfil.
 * Use saved state for reinjection at protocol layer.
 */
struct cfil_tag {
	union sockaddr_in_4_6 cfil_faddr;
	uint32_t cfil_so_state_change_cnt;
	uint32_t cfil_so_options;
	int cfil_inp_flags;
};

/*
 * Global behavior flags:
 */
#define CFIL_BEHAVIOR_FLAG_PRESERVE_CONNECTIONS 0x00000001
static uint32_t cfil_behavior_flags = 0;

#define DO_PRESERVE_CONNECTIONS (cfil_behavior_flags & CFIL_BEHAVIOR_FLAG_PRESERVE_CONNECTIONS)

/*
 * Statistics
 */

struct cfil_stats cfil_stats;

/*
 * For troubleshooting
 */
int cfil_log_level = LOG_ERR;
int cfil_log_port = 0;
int cfil_log_pid = 0;
int cfil_log_proto = 0;
int cfil_log_data = 0;
int cfil_log_stats = 0;
int cfil_debug = 1;

/*
 * Sysctls for logs and statistics
 */
static int sysctl_cfil_filter_list(struct sysctl_oid *, void *, int,
    struct sysctl_req *);
static int sysctl_cfil_sock_list(struct sysctl_oid *, void *, int,
    struct sysctl_req *);

SYSCTL_NODE(_net, OID_AUTO, cfil, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "cfil");

SYSCTL_INT(_net_cfil, OID_AUTO, log, CTLFLAG_RW | CTLFLAG_LOCKED,
    &cfil_log_level, 0, "");

SYSCTL_INT(_net_cfil, OID_AUTO, log_port, CTLFLAG_RW | CTLFLAG_LOCKED,
    &cfil_log_port, 0, "");

SYSCTL_INT(_net_cfil, OID_AUTO, log_pid, CTLFLAG_RW | CTLFLAG_LOCKED,
    &cfil_log_pid, 0, "");

SYSCTL_INT(_net_cfil, OID_AUTO, log_proto, CTLFLAG_RW | CTLFLAG_LOCKED,
    &cfil_log_proto, 0, "");

SYSCTL_INT(_net_cfil, OID_AUTO, log_data, CTLFLAG_RW | CTLFLAG_LOCKED,
    &cfil_log_data, 0, "");

SYSCTL_INT(_net_cfil, OID_AUTO, log_stats, CTLFLAG_RW | CTLFLAG_LOCKED,
    &cfil_log_stats, 0, "");

SYSCTL_INT(_net_cfil, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &cfil_debug, 0, "");

SYSCTL_UINT(_net_cfil, OID_AUTO, sock_attached_count, CTLFLAG_RD | CTLFLAG_LOCKED,
    &cfil_sock_attached_count, 0, "");

SYSCTL_UINT(_net_cfil, OID_AUTO, active_count, CTLFLAG_RD | CTLFLAG_LOCKED,
    &cfil_active_count, 0, "");

SYSCTL_UINT(_net_cfil, OID_AUTO, close_wait_timeout, CTLFLAG_RW | CTLFLAG_LOCKED,
    &cfil_close_wait_timeout, 0, "");

SYSCTL_UINT(_net_cfil, OID_AUTO, behavior_flags, CTLFLAG_RW | CTLFLAG_LOCKED,
    &cfil_behavior_flags, 0, "");

static int cfil_sbtrim = 1;
SYSCTL_UINT(_net_cfil, OID_AUTO, sbtrim, CTLFLAG_RW | CTLFLAG_LOCKED,
    &cfil_sbtrim, 0, "");

SYSCTL_PROC(_net_cfil, OID_AUTO, filter_list, CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_cfil_filter_list, "S,cfil_filter_stat", "");

SYSCTL_PROC(_net_cfil, OID_AUTO, sock_list, CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_cfil_sock_list, "S,cfil_sock_stat", "");

SYSCTL_STRUCT(_net_cfil, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
    &cfil_stats, cfil_stats, "");

/*
 * Forward declarations to appease the compiler
 */
static int cfil_action_data_pass(struct socket *, struct cfil_info *, uint32_t, int,
    uint64_t, uint64_t);
static int cfil_action_drop(struct socket *, struct cfil_info *, uint32_t);
static int cfil_action_bless_client(uint32_t, struct cfil_msg_hdr *);
static int cfil_action_set_crypto_key(uint32_t, struct cfil_msg_hdr *);
static int cfil_dispatch_closed_event(struct socket *, struct cfil_info *, int);
static int cfil_data_common(struct socket *, struct cfil_info *, int, struct sockaddr *,
    struct mbuf *, struct mbuf *, uint32_t);
static int cfil_data_filter(struct socket *, struct cfil_info *, uint32_t, int,
    struct mbuf *, uint32_t);
static void fill_ip_sockaddr_4_6(union sockaddr_in_4_6 *,
    struct in_addr, u_int16_t);
static void fill_ip6_sockaddr_4_6(union sockaddr_in_4_6 *,
    struct in6_addr *, u_int16_t, uint32_t);

static int cfil_dispatch_attach_event(struct socket *, struct cfil_info *, uint32_t, int);
static void cfil_info_free(struct cfil_info *);
static struct cfil_info * cfil_info_alloc(struct socket *, struct soflow_hash_entry *);
static int cfil_info_attach_unit(struct socket *, uint32_t, struct cfil_info *);
static struct socket * cfil_socket_from_sock_id(cfil_sock_id_t, bool);
static struct socket * cfil_socket_from_client_uuid(uuid_t, bool *);
static int cfil_service_pending_queue(struct socket *, struct cfil_info *, uint32_t, int);
static int cfil_data_service_ctl_q(struct socket *, struct cfil_info *, uint32_t, int);
static void cfil_info_verify(struct cfil_info *);
static int cfil_update_data_offsets(struct socket *, struct cfil_info *, uint32_t, int,
    uint64_t, uint64_t);
static int cfil_acquire_sockbuf(struct socket *, struct cfil_info *, int);
static void cfil_release_sockbuf(struct socket *, int);
static int cfil_filters_attached(struct socket *);

static void cfil_rw_lock_exclusive(lck_rw_t *);
static void cfil_rw_unlock_exclusive(lck_rw_t *);
static void cfil_rw_lock_shared(lck_rw_t *);
static void cfil_rw_unlock_shared(lck_rw_t *);
static boolean_t cfil_rw_lock_shared_to_exclusive(lck_rw_t *);
static void cfil_rw_lock_exclusive_to_shared(lck_rw_t *);

static unsigned int cfil_data_length(struct mbuf *, int *, int *);
static struct cfil_info *cfil_sock_udp_get_info(struct socket *, uint32_t, bool, struct soflow_hash_entry *, struct sockaddr *, struct sockaddr *);
static errno_t cfil_sock_udp_handle_data(bool, struct socket *, struct sockaddr *, struct sockaddr *,
    struct mbuf *, struct mbuf *, uint32_t, struct soflow_hash_entry *);
static int32_t cfil_sock_udp_data_pending(struct sockbuf *, bool);
static void cfil_sock_udp_is_closed(struct socket *);
static int cfil_sock_udp_notify_shutdown(struct socket *, int, int, int);
static int cfil_sock_udp_shutdown(struct socket *, int *);
static void cfil_sock_udp_close_wait(struct socket *);
static void cfil_sock_udp_buf_update(struct sockbuf *);
static int cfil_filters_udp_attached(struct socket *, bool);
static void cfil_get_flow_address_v6(struct soflow_hash_entry *, struct inpcb *,
    struct in6_addr **, struct in6_addr **,
    u_int16_t *, u_int16_t *);
static void cfil_get_flow_address(struct soflow_hash_entry *, struct inpcb *,
    struct in_addr *, struct in_addr *,
    u_int16_t *, u_int16_t *);
static void cfil_info_log(int, struct cfil_info *, const char *);
void cfil_filter_show(u_int32_t);
void cfil_info_show(void);
bool cfil_info_action_timed_out(struct cfil_info *, int);
bool cfil_info_buffer_threshold_exceeded(struct cfil_info *);
struct m_tag *cfil_dgram_save_socket_state(struct cfil_info *, struct mbuf *);
boolean_t cfil_dgram_peek_socket_state(struct mbuf *m, int *inp_flags);
static void cfil_sock_received_verdict(struct socket *so);
static void cfil_fill_event_msg_addresses(struct soflow_hash_entry *, struct inpcb *,
    union sockaddr_in_4_6 *, union sockaddr_in_4_6 *,
    boolean_t, boolean_t);
static void cfil_stats_report_thread_func(void *, wait_result_t);
static void cfil_stats_report(void *v, wait_result_t w);
static bool cfil_dgram_gc_needed(struct socket *, struct soflow_hash_entry *, u_int64_t);
static bool cfil_dgram_gc_perform(struct socket *, struct soflow_hash_entry *);
static bool cfil_dgram_detach_entry(struct socket *, struct soflow_hash_entry *);
static bool cfil_dgram_detach_db(struct socket *, struct soflow_db *);
bool check_port(struct sockaddr *, u_short);

/*
 * Content filter global read-write lock
 */

static void
cfil_rw_lock_exclusive(lck_rw_t *lck)
{
	void *lr_saved;

	lr_saved = __builtin_return_address(0);

	lck_rw_lock_exclusive(lck);

	cfil_rw_lock_history[cfil_rw_nxt_lck] = lr_saved;
	cfil_rw_nxt_lck = (cfil_rw_nxt_lck + 1) % CFIL_RW_LCK_MAX;
}

static void
cfil_rw_unlock_exclusive(lck_rw_t *lck)
{
	void *lr_saved;

	lr_saved = __builtin_return_address(0);

	lck_rw_unlock_exclusive(lck);

	cfil_rw_unlock_history[cfil_rw_nxt_unlck] = lr_saved;
	cfil_rw_nxt_unlck = (cfil_rw_nxt_unlck + 1) % CFIL_RW_LCK_MAX;
}

static void
cfil_rw_lock_shared(lck_rw_t *lck)
{
	void *lr_saved;

	lr_saved = __builtin_return_address(0);

	lck_rw_lock_shared(lck);

	cfil_rw_lock_history[cfil_rw_nxt_lck] = lr_saved;
	cfil_rw_nxt_lck = (cfil_rw_nxt_lck + 1) % CFIL_RW_LCK_MAX;
}

static void
cfil_rw_unlock_shared(lck_rw_t *lck)
{
	void *lr_saved;

	lr_saved = __builtin_return_address(0);

	lck_rw_unlock_shared(lck);

	cfil_rw_unlock_history[cfil_rw_nxt_unlck] = lr_saved;
	cfil_rw_nxt_unlck = (cfil_rw_nxt_unlck + 1) % CFIL_RW_LCK_MAX;
}

static boolean_t
cfil_rw_lock_shared_to_exclusive(lck_rw_t *lck)
{
	void *lr_saved;
	boolean_t upgraded;

	lr_saved = __builtin_return_address(0);

	upgraded = lck_rw_lock_shared_to_exclusive(lck);
	if (upgraded) {
		cfil_rw_unlock_history[cfil_rw_nxt_unlck] = lr_saved;
		cfil_rw_nxt_unlck = (cfil_rw_nxt_unlck + 1) % CFIL_RW_LCK_MAX;
	}
	return upgraded;
}

static void
cfil_rw_lock_exclusive_to_shared(lck_rw_t *lck)
{
	void *lr_saved;

	lr_saved = __builtin_return_address(0);

	lck_rw_lock_exclusive_to_shared(lck);

	cfil_rw_lock_history[cfil_rw_nxt_lck] = lr_saved;
	cfil_rw_nxt_lck = (cfil_rw_nxt_lck + 1) % CFIL_RW_LCK_MAX;
}

static void
cfil_rw_lock_assert_held(lck_rw_t *lck, int exclusive)
{
#if !MACH_ASSERT
#pragma unused(lck, exclusive)
#endif
	LCK_RW_ASSERT(lck,
	    exclusive ? LCK_RW_ASSERT_EXCLUSIVE : LCK_RW_ASSERT_HELD);
}

/*
 * Return the number of bytes in the mbuf chain using the same
 * method as m_length() or sballoc()
 *
 * Returns data len - starting from PKT start
 * - retmbcnt - optional param to get total mbuf bytes in chain
 * - retmbnum - optional param to get number of mbufs in chain
 */
static unsigned int
cfil_data_length(struct mbuf *m, int *retmbcnt, int *retmbnum)
{
	struct mbuf *m0;
	unsigned int pktlen = 0;
	int mbcnt;
	int mbnum;

	// Locate M_PKTHDR and mark as start of data if present
	for (m0 = m; m0 != NULL; m0 = m0->m_next) {
		if (m0->m_flags & M_PKTHDR) {
			m = m0;
			break;
		}
	}

	if (retmbcnt == NULL && retmbnum == NULL) {
		return m_length(m);
	}

	pktlen = 0;
	mbcnt = 0;
	mbnum = 0;
	for (m0 = m; m0 != NULL; m0 = m0->m_next) {
		pktlen += m0->m_len;
		mbnum++;
		mbcnt += MSIZE;
		if (m0->m_flags & M_EXT) {
			mbcnt += m0->m_ext.ext_size;
		}
	}
	if (retmbcnt) {
		*retmbcnt = mbcnt;
	}
	if (retmbnum) {
		*retmbnum = mbnum;
	}
	return pktlen;
}

static struct mbuf *
cfil_data_start(struct mbuf *m)
{
	struct mbuf *m0;

	// Locate M_PKTHDR and use it as start of data if present
	for (m0 = m; m0 != NULL; m0 = m0->m_next) {
		if (m0->m_flags & M_PKTHDR) {
			return m0;
		}
	}
	return m;
}

/*
 * Common mbuf queue utilities
 */

static inline void
cfil_queue_init(struct cfil_queue *cfq)
{
	cfq->q_start = 0;
	cfq->q_end = 0;
	MBUFQ_INIT(&cfq->q_mq);
}

static inline uint64_t
cfil_queue_drain(struct cfil_queue *cfq)
{
	/* Bytes drained is the span held in the queue: end minus start */
	uint64_t drained = cfq->q_end - cfq->q_start;
	cfq->q_start = 0;
	cfq->q_end = 0;
	MBUFQ_DRAIN(&cfq->q_mq);

	return drained;
}

/* Return 1 when empty, 0 otherwise */
static inline int
cfil_queue_empty(struct cfil_queue *cfq)
{
	return MBUFQ_EMPTY(&cfq->q_mq);
}

static inline uint64_t
cfil_queue_offset_first(struct cfil_queue *cfq)
{
	return cfq->q_start;
}

static inline uint64_t
cfil_queue_offset_last(struct cfil_queue *cfq)
{
	return cfq->q_end;
}

static inline uint64_t
cfil_queue_len(struct cfil_queue *cfq)
{
	return cfq->q_end - cfq->q_start;
}

/*
 * Routines to verify some fundamental assumptions
 */

static void
cfil_queue_verify(struct cfil_queue *cfq)
{
	mbuf_t chain;
	mbuf_t m;
	mbuf_t n;
	uint64_t queuesize = 0;

	/* Verify the offsets are ordered */
	VERIFY(cfq->q_start <= cfq->q_end);

	/*
	 * When the queue is empty, the offsets are equal; otherwise the
	 * offsets are different
	 */
	VERIFY((MBUFQ_EMPTY(&cfq->q_mq) && cfq->q_start == cfq->q_end) ||
	    (!MBUFQ_EMPTY(&cfq->q_mq) &&
	    cfq->q_start != cfq->q_end));

	MBUFQ_FOREACH(chain, &cfq->q_mq) {
		size_t chainsize = 0;
		m = chain;
		unsigned int mlen = cfil_data_length(m, NULL, NULL);
		// skip the addr and control stuff if present
		m = cfil_data_start(m);

		if (m == NULL ||
		    m == (void *)M_TAG_FREE_PATTERN ||
		    m->m_next == (void *)M_TAG_FREE_PATTERN ||
		    m->m_nextpkt == (void *)M_TAG_FREE_PATTERN) {
			panic("%s - mq %p is free at %p", __func__,
			    &cfq->q_mq, m);
		}
		for (n = m; n != NULL; n = n->m_next) {
			if (n->m_type != MT_DATA &&
			    n->m_type != MT_HEADER &&
			    n->m_type != MT_OOBDATA) {
				panic("%s - %p unsupported type %u", __func__,
				    n, n->m_type);
			}
			chainsize += n->m_len;
		}
		if (mlen != chainsize) {
			panic("%s - %p m_length() %u != chainsize %lu",
			    __func__, m, mlen, chainsize);
		}
		queuesize += chainsize;
	}
	if (queuesize != cfq->q_end - cfq->q_start) {
		panic("%s - %p queuesize %llu != offsetdiffs %llu", __func__,
		    m, queuesize, cfq->q_end - cfq->q_start);
	}
}

static void
cfil_queue_enqueue(struct cfil_queue *cfq, mbuf_t m, size_t len)
{
	CFIL_QUEUE_VERIFY(cfq);

	MBUFQ_ENQUEUE(&cfq->q_mq, m);
	cfq->q_end += len;

	CFIL_QUEUE_VERIFY(cfq);
}

static void
cfil_queue_remove(struct cfil_queue *cfq, mbuf_t m, size_t len)
{
	CFIL_QUEUE_VERIFY(cfq);

	VERIFY(cfil_data_length(m, NULL, NULL) == len);

	MBUFQ_REMOVE(&cfq->q_mq, m);
	MBUFQ_NEXT(m) = NULL;
	cfq->q_start += len;

	CFIL_QUEUE_VERIFY(cfq);
}

static mbuf_t
cfil_queue_first(struct cfil_queue *cfq)
{
	return MBUFQ_FIRST(&cfq->q_mq);
}

static mbuf_t
cfil_queue_next(struct cfil_queue *cfq, mbuf_t m)
{
#pragma unused(cfq)
	return MBUFQ_NEXT(m);
}

static void
cfil_entry_buf_verify(struct cfe_buf *cfe_buf)
{
	CFIL_QUEUE_VERIFY(&cfe_buf->cfe_ctl_q);
	CFIL_QUEUE_VERIFY(&cfe_buf->cfe_pending_q);

	/* Verify the queues are ordered so that pending is before ctl */
	VERIFY(cfe_buf->cfe_ctl_q.q_start >= cfe_buf->cfe_pending_q.q_end);

	/* The peek offset cannot be less than the pass offset */
	VERIFY(cfe_buf->cfe_peek_offset >= cfe_buf->cfe_pass_offset);

	/* Make sure we've updated the offset we peeked at */
	VERIFY(cfe_buf->cfe_ctl_q.q_start <= cfe_buf->cfe_peeked);
}

static void
cfil_entry_verify(struct cfil_entry *entry)
{
	cfil_entry_buf_verify(&entry->cfe_snd);
	cfil_entry_buf_verify(&entry->cfe_rcv);
}

static void
cfil_info_buf_verify(struct cfi_buf *cfi_buf)
{
	CFIL_QUEUE_VERIFY(&cfi_buf->cfi_inject_q);

	VERIFY(cfi_buf->cfi_pending_first <= cfi_buf->cfi_pending_last);
}

static void
cfil_info_verify(struct cfil_info *cfil_info)
{
	int i;

	if (cfil_info == NULL) {
		return;
	}

	cfil_info_buf_verify(&cfil_info->cfi_snd);
	cfil_info_buf_verify(&cfil_info->cfi_rcv);

	for (i = 0; i < MAX_CONTENT_FILTER; i++) {
		cfil_entry_verify(&cfil_info->cfi_entries[i]);
	}
}

static void
verify_content_filter(struct content_filter *cfc)
{
	struct cfil_entry *entry;
	uint32_t count = 0;

	VERIFY(cfc->cf_sock_count >= 0);

	TAILQ_FOREACH(entry, &cfc->cf_sock_entries, cfe_link) {
		count++;
		VERIFY(cfc == entry->cfe_filter);
	}
	VERIFY(count == cfc->cf_sock_count);
}

/*
 * Kernel control socket callbacks
 */
static errno_t
cfil_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac,
    void **unitinfo)
{
	errno_t error = 0;
	struct content_filter *cfc = NULL;

	CFIL_LOG(LOG_NOTICE, "");

	cfc = zalloc_flags(content_filter_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	cfil_rw_lock_exclusive(&cfil_lck_rw);
	if (content_filters == NULL) {
		struct content_filter **tmp;

		cfil_rw_unlock_exclusive(&cfil_lck_rw);

		MALLOC(tmp,
		    struct content_filter **,
		    MAX_CONTENT_FILTER * sizeof(struct content_filter *),
		    M_TEMP,
		    M_WAITOK | M_ZERO);

		cfil_rw_lock_exclusive(&cfil_lck_rw);

		if (tmp == NULL && content_filters == NULL) {
			error = ENOMEM;
			cfil_rw_unlock_exclusive(&cfil_lck_rw);
			goto done;
		}
		/* Another thread may have won the race */
		if (content_filters != NULL) {
			FREE(tmp, M_TEMP);
		} else {
			content_filters = tmp;
		}
	}

	if (sac->sc_unit == 0 || sac->sc_unit > MAX_CONTENT_FILTER) {
		CFIL_LOG(LOG_ERR, "bad sc_unit %u", sac->sc_unit);
		error = EINVAL;
	} else if (content_filters[sac->sc_unit - 1] != NULL) {
		CFIL_LOG(LOG_ERR, "sc_unit %u in use", sac->sc_unit);
		error = EADDRINUSE;
	} else {
		/*
		 * kernel control socket kcunit numbers start at 1
		 */
		content_filters[sac->sc_unit - 1] = cfc;

		cfc->cf_kcref = kctlref;
		cfc->cf_kcunit = sac->sc_unit;
		TAILQ_INIT(&cfc->cf_sock_entries);

		*unitinfo = cfc;
		cfil_active_count++;

		if (cfil_active_count == 1) {
			soflow_feat_set_functions(cfil_dgram_gc_needed, cfil_dgram_gc_perform,
			    cfil_dgram_detach_entry, cfil_dgram_detach_db);
		}

		// Allocate periodic stats buffer for this filter
		if (global_cfil_stats_report_buffers[cfc->cf_kcunit - 1] == NULL) {
			cfil_rw_unlock_exclusive(&cfil_lck_rw);

			struct cfil_stats_report_buffer *buf;

			buf = kalloc_type(struct cfil_stats_report_buffer,
			    Z_WAITOK | Z_ZERO | Z_NOFAIL);

			cfil_rw_lock_exclusive(&cfil_lck_rw);

			/* Another thread may have won the race */
			if (global_cfil_stats_report_buffers[cfc->cf_kcunit - 1] != NULL) {
				kfree_type(struct cfil_stats_report_buffer, buf);
			} else {
				global_cfil_stats_report_buffers[cfc->cf_kcunit - 1] = buf;
			}
		}
	}
	cfil_rw_unlock_exclusive(&cfil_lck_rw);
done:
	if (error != 0 && cfc != NULL) {
		zfree(content_filter_zone, cfc);
	}

	if (error == 0) {
		OSIncrementAtomic(&cfil_stats.cfs_ctl_connect_ok);
	} else {
		OSIncrementAtomic(&cfil_stats.cfs_ctl_connect_fail);
	}

	CFIL_LOG(LOG_INFO, "return %d cfil_active_count %u kcunit %u",
	    error, cfil_active_count, sac->sc_unit);

	return error;
}

static void
cfil_update_behavior_flags(void)
{
	struct content_filter *cfc = NULL;

	if (content_filters == NULL) {
		return;
	}

	// Update global flag
	bool preserve_connections = false;
	for (int i = 0; i < MAX_CONTENT_FILTER; i++) {
		cfc = content_filters[i];
		if (cfc != NULL) {
			if (cfc->cf_flags & CFF_PRESERVE_CONNECTIONS) {
				preserve_connections = true;
			} else {
				preserve_connections = false;
				break;
			}
		}
	}
	if (preserve_connections == true) {
		cfil_behavior_flags |= CFIL_BEHAVIOR_FLAG_PRESERVE_CONNECTIONS;
	} else {
		cfil_behavior_flags &= ~CFIL_BEHAVIOR_FLAG_PRESERVE_CONNECTIONS;
	}
	CFIL_LOG(LOG_INFO, "CFIL Preserve Connections - %s",
	    (cfil_behavior_flags & CFIL_BEHAVIOR_FLAG_PRESERVE_CONNECTIONS) ? "On" : "Off");
}

static errno_t
cfil_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo)
{
#pragma unused(kctlref)
	errno_t error = 0;
	struct content_filter *cfc;
	struct cfil_entry *entry;
	uint64_t sock_flow_id = 0;

	CFIL_LOG(LOG_NOTICE, "");

	if (content_filters == NULL) {
		CFIL_LOG(LOG_ERR, "no content filter");
		error = EINVAL;
		goto done;
	}
	if (kcunit > MAX_CONTENT_FILTER) {
		CFIL_LOG(LOG_ERR, "kcunit %u > MAX_CONTENT_FILTER (%d)",
		    kcunit, MAX_CONTENT_FILTER);
		error = EINVAL;
		goto done;
	}

	cfc = (struct content_filter *)unitinfo;
	if (cfc == NULL) {
		goto done;
	}

	cfil_rw_lock_exclusive(&cfil_lck_rw);
	if (content_filters[kcunit - 1] != cfc || cfc->cf_kcunit != kcunit) {
		CFIL_LOG(LOG_ERR, "bad unit info %u",
		    kcunit);
		cfil_rw_unlock_exclusive(&cfil_lck_rw);
		goto done;
	}
	cfc->cf_flags |= CFF_DETACHING;
	/*
	 * Remove all sockets from the filter
	 */
	while ((entry = TAILQ_FIRST(&cfc->cf_sock_entries)) != NULL) {
		cfil_rw_lock_assert_held(&cfil_lck_rw, 1);

		verify_content_filter(cfc);
		/*
		 * Accept all outstanding data by pushing to next filter
		 * or back to socket
		 *
		 * TBD: Actually we should make sure all data has been pushed
		 * back to socket
		 */
		if (entry->cfe_cfil_info && entry->cfe_cfil_info->cfi_so) {
			struct cfil_info *cfil_info = entry->cfe_cfil_info;
			struct socket *so = cfil_info->cfi_so;
			sock_flow_id = cfil_info->cfi_sock_id;

			/* Need to let data flow immediately */
			entry->cfe_flags |= CFEF_SENT_SOCK_ATTACHED |
			    CFEF_DATA_START;

			// Before we release the global lock, retain the cfil_info -
			// we attempt to retain a valid cfil_info to prevent any deallocation until
			// we are done. Abort the retain if cfil_info has already entered the free code path.
			if (cfil_info == NULL || os_ref_retain_try(&cfil_info->cfi_ref_count) == false) {
				// Failing to retain cfil_info means detach is in progress already,
				// remove entry from filter list and move on.
				entry->cfe_filter = NULL;
				entry->cfe_necp_control_unit = 0;
				TAILQ_REMOVE(&cfc->cf_sock_entries, entry, cfe_link);
				cfc->cf_sock_count--;
				continue;
			}

			/*
			 * Respect locking hierarchy
			 */
			cfil_rw_unlock_exclusive(&cfil_lck_rw);

			// Search for the socket from the cfil_info sock_flow_id and lock so
			so = cfil_socket_from_sock_id(sock_flow_id, false);
			if (so == NULL || so != cfil_info->cfi_so) {
				cfil_rw_lock_exclusive(&cfil_lck_rw);

				// Socket has already been disconnected and removed from the socket list.
				// Remove entry from filter list and move on.
				if (entry == TAILQ_FIRST(&cfc->cf_sock_entries)) {
					entry->cfe_filter = NULL;
					entry->cfe_necp_control_unit = 0;
					TAILQ_REMOVE(&cfc->cf_sock_entries, entry, cfe_link);
					cfc->cf_sock_count--;
				}

				goto release_cfil_info;
			}

			/*
			 * When cfe_filter is NULL the filter is detached
			 * and the entry has been removed from cf_sock_entries
			 */
			if ((so->so_cfil == NULL && so->so_flow_db == NULL) || entry->cfe_filter == NULL) {
				cfil_rw_lock_exclusive(&cfil_lck_rw);
				goto release;
			}

			(void) cfil_action_data_pass(so, cfil_info, kcunit, 1,
			    CFM_MAX_OFFSET,
			    CFM_MAX_OFFSET);

			(void) cfil_action_data_pass(so, cfil_info, kcunit, 0,
			    CFM_MAX_OFFSET,
			    CFM_MAX_OFFSET);

			cfil_rw_lock_exclusive(&cfil_lck_rw);

			/*
			 * Check again to make sure the cfil_info is still valid,
			 * as the socket may have been unlocked when calling
			 * cfil_acquire_sockbuf()
			 */
			if (entry->cfe_filter == NULL ||
			    (so->so_cfil == NULL && soflow_db_get_feature_context(so->so_flow_db, sock_flow_id) == NULL)) {
				goto release;
			}

			/* The filter is now detached */
			entry->cfe_flags |= CFEF_CFIL_DETACHED;

			if (cfil_info->cfi_debug) {
				cfil_info_log(LOG_INFO, cfil_info, "CFIL: FILTER DISCONNECTED");
			}

			CFIL_LOG(LOG_NOTICE, "so %llx detached %u",
			    (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit);
			if ((cfil_info->cfi_flags & CFIF_CLOSE_WAIT) &&
			    cfil_filters_attached(so) == 0) {
				CFIL_LOG(LOG_NOTICE, "so %llx waking",
				    (uint64_t)VM_KERNEL_ADDRPERM(so));
				wakeup((caddr_t)cfil_info);
			}

			/*
			 * Remove the filter entry from the content filter
			 * but leave the rest of the state intact as the queues
			 * may not be empty yet
			 */
			entry->cfe_filter = NULL;
			entry->cfe_necp_control_unit = 0;

			TAILQ_REMOVE(&cfc->cf_sock_entries, entry, cfe_link);
			cfc->cf_sock_count--;

			// This is the last filter disconnecting, clear the cfil_info
			// saved control unit so we will be able to drop this flow if
			// a new filter gets installed.
			if (cfil_active_count == 1) {
				cfil_info->cfi_filter_control_unit = 0;
			}
release:
			socket_unlock(so, 1);

release_cfil_info:
			/*
			 * Release the reference on cfil_info. To avoid double locking,
			 * temporarily unlock in case it has been detached and we
			 * end up freeing it, which will take the global lock again.
			 */
			cfil_rw_unlock_exclusive(&cfil_lck_rw);
			CFIL_INFO_FREE(cfil_info);
			cfil_rw_lock_exclusive(&cfil_lck_rw);
		}
	}
	verify_content_filter(cfc);

	/* Free the stats buffer for this filter */
	if (global_cfil_stats_report_buffers[cfc->cf_kcunit - 1] != NULL) {
		kfree_type(struct cfil_stats_report_buffer,
		    global_cfil_stats_report_buffers[cfc->cf_kcunit - 1]);
		global_cfil_stats_report_buffers[cfc->cf_kcunit - 1] = NULL;
	}
	VERIFY(cfc->cf_sock_count == 0);

	/*
	 * Make filter inactive
	 */
	content_filters[kcunit - 1] = NULL;
	cfil_active_count--;
	cfil_update_behavior_flags();
	cfil_rw_unlock_exclusive(&cfil_lck_rw);

	if (cfc->cf_crypto_state != NULL) {
		cfil_crypto_cleanup_state(cfc->cf_crypto_state);
		cfc->cf_crypto_state = NULL;
	}

	zfree(content_filter_zone, cfc);
done:
	if (error == 0) {
		OSIncrementAtomic(&cfil_stats.cfs_ctl_disconnect_ok);
	} else {
		OSIncrementAtomic(&cfil_stats.cfs_ctl_disconnect_fail);
	}

	CFIL_LOG(LOG_INFO, "return %d cfil_active_count %u kcunit %u",
	    error, cfil_active_count, kcunit);

	return error;
}
1529
1530 /*
1531 * cfil_acquire_sockbuf()
1532 *
1533 * Prevent any other thread from acquiring the sockbuf.
1534 * We use sb_cfil_thread as a semaphore to prevent other threads from
1535 * messing with the sockbuf -- see sblock()
1536 * Note: We do not set SB_LOCK here because the thread may check or modify
1537 * SB_LOCK several times until it calls cfil_release_sockbuf() -- currently
1538 * sblock(), sbunlock() or sodefunct()
1539 */
1540 static int
1541 cfil_acquire_sockbuf(struct socket *so, struct cfil_info *cfil_info, int outgoing)
1542 {
1543 thread_t tp = current_thread();
1544 struct sockbuf *sb = outgoing ? &so->so_snd : &so->so_rcv;
1545 lck_mtx_t *mutex_held;
1546 int error = 0;
1547
1548 /*
1549 * Wait until no thread is holding the sockbuf and other content
1550 * filter threads have released the sockbuf
1551 */
1552 while ((sb->sb_flags & SB_LOCK) ||
1553 (sb->sb_cfil_thread != NULL && sb->sb_cfil_thread != tp)) {
1554 if (so->so_proto->pr_getlock != NULL) {
1555 mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK);
1556 } else {
1557 mutex_held = so->so_proto->pr_domain->dom_mtx;
1558 }
1559
1560 LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
1561
1562 sb->sb_wantlock++;
1563 VERIFY(sb->sb_wantlock != 0);
1564
1565 msleep(&sb->sb_flags, mutex_held, PSOCK, "cfil_acquire_sockbuf",
1566 NULL);
1567
1568 VERIFY(sb->sb_wantlock != 0);
1569 sb->sb_wantlock--;
1570 }
1571 /*
1572 * Use a reference count for repeated calls on the same thread
1573 */
1574 if (sb->sb_cfil_refs == 0) {
1575 VERIFY(sb->sb_cfil_thread == NULL);
1576 VERIFY((sb->sb_flags & SB_LOCK) == 0);
1577
1578 sb->sb_cfil_thread = tp;
1579 sb->sb_flags |= SB_LOCK;
1580 }
1581 sb->sb_cfil_refs++;
1582
1583 /* We acquire the socket buffer when we need to clean up */
1584 if (cfil_info == NULL) {
1585 CFIL_LOG(LOG_ERR, "so %llx cfil detached",
1586 (uint64_t)VM_KERNEL_ADDRPERM(so));
1587 error = 0;
1588 } else if (cfil_info->cfi_flags & CFIF_DROP) {
1589 CFIL_LOG(LOG_ERR, "so %llx drop set",
1590 (uint64_t)VM_KERNEL_ADDRPERM(so));
1591 error = EPIPE;
1592 }
1593
1594 return error;
1595 }
1596
1597 static void
1598 cfil_release_sockbuf(struct socket *so, int outgoing)
1599 {
1600 struct sockbuf *sb = outgoing ? &so->so_snd : &so->so_rcv;
1601 thread_t tp = current_thread();
1602
1603 socket_lock_assert_owned(so);
1604
1605 if (sb->sb_cfil_thread != NULL && sb->sb_cfil_thread != tp) {
1606 panic("%s sb_cfil_thread %p not current %p", __func__,
1607 sb->sb_cfil_thread, tp);
1608 }
1609 /*
1610 * Don't panic if we are defunct because SB_LOCK has
1611 * been cleared by sodefunct()
1612 */
1613 if (!(so->so_flags & SOF_DEFUNCT) && !(sb->sb_flags & SB_LOCK)) {
1614 panic("%s SB_LOCK not set on %p", __func__,
1615 sb);
1616 }
1617 /*
1618 * We can unlock when the thread unwinds to the last reference
1619 */
1620 sb->sb_cfil_refs--;
1621 if (sb->sb_cfil_refs == 0) {
1622 sb->sb_cfil_thread = NULL;
1623 sb->sb_flags &= ~SB_LOCK;
1624
1625 if (sb->sb_wantlock > 0) {
1626 wakeup(&sb->sb_flags);
1627 }
1628 }
1629 }
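/*
 * Typical usage of the two routines above, as seen later in
 * cfil_ctl_rcvd(): acquire the sockbuf, do the work that needs it,
 * and always release, even on error. A minimal sketch:
 */
#if 0
	error = cfil_acquire_sockbuf(so, cfil_info, 1);		/* 1 = outgoing */
	if (error == 0) {
		error = cfil_data_service_ctl_q(so, cfil_info, kcunit, 1);
	}
	cfil_release_sockbuf(so, 1);				/* release unconditionally */
#endif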
1630
1631 cfil_sock_id_t
1632 cfil_sock_id_from_socket(struct socket *so)
1633 {
1634 if ((so->so_flags & SOF_CONTENT_FILTER) && so->so_cfil) {
1635 return so->so_cfil->cfi_sock_id;
1636 } else {
1637 return CFIL_SOCK_ID_NONE;
1638 }
1639 }
1640
1641 /*
1642 * cfil_socket_safe_lock -
1643 * This routine attempts to lock the socket safely.
1644 *
1645 * The passed in pcbinfo is assumed to be locked and must be unlocked once the
1646 * inp state is safeguarded and before we attempt to lock/unlock the socket.
1647 * This is to prevent getting blocked by socket_lock() while holding the pcbinfo
1648 * lock, avoiding potential deadlock with other processes contending for the same
1649 * resources. This is also to avoid double locking the pcbinfo for rip sockets
1650 * since rip_unlock() will lock ripcbinfo if it needs to dispose of the inpcb when
1651 * so_usecount is 0.
1652 */
1653 static bool
1654 cfil_socket_safe_lock(struct inpcb *inp, struct inpcbinfo *pcbinfo)
1655 {
1656 struct socket *so = NULL;
1657
1658 VERIFY(pcbinfo != NULL);
1659
1660 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != WNT_STOPUSING) {
1661 // Safeguarded the inp state, unlock pcbinfo before locking socket.
1662 lck_rw_done(&pcbinfo->ipi_lock);
1663
1664 so = inp->inp_socket;
1665 socket_lock(so, 1);
1666 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) != WNT_STOPUSING) {
1667 return true;
1668 }
1669 } else {
1670 // Failed to safeguard the inp state; unlock pcbinfo and abort.
1671 lck_rw_done(&pcbinfo->ipi_lock);
1672 }
1673
1674 if (so) {
1675 socket_unlock(so, 1);
1676 }
1677 return false;
1678 }
1679
1680 static struct socket *
1681 cfil_socket_from_sock_id(cfil_sock_id_t cfil_sock_id, bool udp_only)
1682 {
1683 struct socket *so = NULL;
1684 u_int64_t gencnt = cfil_sock_id >> 32;
1685 u_int32_t flowhash = (u_int32_t)(cfil_sock_id & 0x0ffffffff);
1686 struct inpcb *inp = NULL;
1687 struct inpcbinfo *pcbinfo = NULL;
1688
1689 if (udp_only) {
1690 goto find_udp;
1691 }
1692
1693 pcbinfo = &tcbinfo;
1694 lck_rw_lock_shared(&pcbinfo->ipi_lock);
1695 LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
1696 if (inp->inp_state != INPCB_STATE_DEAD &&
1697 inp->inp_socket != NULL &&
1698 inp->inp_flowhash == flowhash &&
1699 (inp->inp_socket->so_gencnt & 0x0ffffffff) == gencnt &&
1700 inp->inp_socket->so_cfil != NULL) {
1701 if (cfil_socket_safe_lock(inp, pcbinfo)) {
1702 so = inp->inp_socket;
1703 }
1704 /* pcbinfo is already unlocked, we are done. */
1705 goto done;
1706 }
1707 }
1708 lck_rw_done(&pcbinfo->ipi_lock);
1709 if (so != NULL) {
1710 goto done;
1711 }
1712
1713 find_udp:
1714
1715 pcbinfo = &udbinfo;
1716 lck_rw_lock_shared(&pcbinfo->ipi_lock);
1717 LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
1718 if (inp->inp_state != INPCB_STATE_DEAD &&
1719 inp->inp_socket != NULL &&
1720 inp->inp_socket->so_flow_db != NULL &&
1721 (inp->inp_socket->so_gencnt & 0x0ffffffff) == gencnt) {
1722 if (cfil_socket_safe_lock(inp, pcbinfo)) {
1723 so = inp->inp_socket;
1724 }
1725 /* pcbinfo is already unlocked, we are done. */
1726 goto done;
1727 }
1728 }
1729 lck_rw_done(&pcbinfo->ipi_lock);
1730 if (so != NULL) {
1731 goto done;
1732 }
1733
1734 pcbinfo = &ripcbinfo;
1735 lck_rw_lock_shared(&pcbinfo->ipi_lock);
1736 LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
1737 if (inp->inp_state != INPCB_STATE_DEAD &&
1738 inp->inp_socket != NULL &&
1739 inp->inp_socket->so_flow_db != NULL &&
1740 (inp->inp_socket->so_gencnt & 0x0ffffffff) == gencnt) {
1741 if (cfil_socket_safe_lock(inp, pcbinfo)) {
1742 so = inp->inp_socket;
1743 }
1744 /* pcbinfo is already unlocked, we are done. */
1745 goto done;
1746 }
1747 }
1748 lck_rw_done(&pcbinfo->ipi_lock);
1749
1750 done:
1751 if (so == NULL) {
1752 OSIncrementAtomic(&cfil_stats.cfs_sock_id_not_found);
1753 CFIL_LOG(LOG_DEBUG,
1754 "no socket for sock_id %llx gencnt %llx flowhash %x",
1755 cfil_sock_id, gencnt, flowhash);
1756 }
1757
1758 return so;
1759 }
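/*
 * The cfil_sock_id decoded above is a simple 64-bit pack: the socket
 * generation count in the high 32 bits and the inp flow hash in the
 * low 32 bits (see cfil_info_alloc() below for the encoding side).
 * A sketch of the round trip:
 */
#if 0
	cfil_sock_id_t sock_id = ((u_int64_t)so->so_gencnt << 32) | inp->inp_flowhash;
	u_int64_t gencnt = sock_id >> 32;			/* matched against so_gencnt & 0x0ffffffff */
	u_int32_t flowhash = (u_int32_t)(sock_id & 0x0ffffffff);/* matched against inp_flowhash */
#endif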
1760
1761 static struct socket *
1762 cfil_socket_from_client_uuid(uuid_t necp_client_uuid, bool *cfil_attached)
1763 {
1764 struct socket *so = NULL;
1765 struct inpcb *inp = NULL;
1766 struct inpcbinfo *pcbinfo = &tcbinfo;
1767
1768 lck_rw_lock_shared(&pcbinfo->ipi_lock);
1769 LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
1770 if (inp->inp_state != INPCB_STATE_DEAD &&
1771 inp->inp_socket != NULL &&
1772 uuid_compare(inp->necp_client_uuid, necp_client_uuid) == 0) {
1773 *cfil_attached = (inp->inp_socket->so_cfil != NULL);
1774 if (cfil_socket_safe_lock(inp, pcbinfo)) {
1775 so = inp->inp_socket;
1776 }
1777 /* pcbinfo is already unlocked, we are done. */
1778 goto done;
1779 }
1780 }
1781 lck_rw_done(&pcbinfo->ipi_lock);
1782 if (so != NULL) {
1783 goto done;
1784 }
1785
1786 pcbinfo = &udbinfo;
1787 lck_rw_lock_shared(&pcbinfo->ipi_lock);
1788 LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
1789 if (inp->inp_state != INPCB_STATE_DEAD &&
1790 inp->inp_socket != NULL &&
1791 uuid_compare(inp->necp_client_uuid, necp_client_uuid) == 0) {
1792 *cfil_attached = (inp->inp_socket->so_flow_db != NULL);
1793 if (cfil_socket_safe_lock(inp, pcbinfo)) {
1794 so = inp->inp_socket;
1795 }
1796 /* pcbinfo is already unlocked, we are done. */
1797 goto done;
1798 }
1799 }
1800 lck_rw_done(&pcbinfo->ipi_lock);
1801
1802 done:
1803 return so;
1804 }
1805
1806 static void
1807 cfil_info_stats_toggle(struct cfil_info *cfil_info, struct cfil_entry *entry, uint32_t report_frequency)
1808 {
1809 struct cfil_info *cfil = NULL;
1810 Boolean found = FALSE;
1811 int kcunit;
1812
1813 if (cfil_info == NULL) {
1814 return;
1815 }
1816
1817 if (report_frequency) {
1818 if (entry == NULL) {
1819 return;
1820 }
1821
1822 // Update stats reporting frequency.
1823 if (entry->cfe_stats_report_frequency != report_frequency) {
1824 entry->cfe_stats_report_frequency = report_frequency;
1825 if (entry->cfe_stats_report_frequency < CFIL_STATS_REPORT_INTERVAL_MIN_MSEC) {
1826 entry->cfe_stats_report_frequency = CFIL_STATS_REPORT_INTERVAL_MIN_MSEC;
1827 }
1828 microuptime(&entry->cfe_stats_report_ts);
1829
1830 // Insert cfil_info into the list only if it is not already in it.
1831 TAILQ_FOREACH(cfil, &cfil_sock_head_stats, cfi_link_stats) {
1832 if (cfil == cfil_info) {
1833 return;
1834 }
1835 }
1836
1837 TAILQ_INSERT_TAIL(&cfil_sock_head_stats, cfil_info, cfi_link_stats);
1838
1839 // Wake up the stats thread if this is the first flow added
1840 if (cfil_sock_attached_stats_count == 0) {
1841 thread_wakeup((caddr_t)&cfil_sock_attached_stats_count);
1842 }
1843 cfil_sock_attached_stats_count++;
1844
1845 if (cfil_info->cfi_debug && cfil_log_stats) {
1846 CFIL_LOG(LOG_DEBUG, "CFIL: VERDICT RECEIVED - STATS FLOW INSERTED: <so %llx sockID %llu> stats frequency %d msecs",
1847 cfil_info->cfi_so ? (uint64_t)VM_KERNEL_ADDRPERM(cfil_info->cfi_so) : 0,
1848 cfil_info->cfi_sock_id,
1849 entry->cfe_stats_report_frequency);
1850 }
1851 }
1852 } else {
1853 // Turn off stats reporting for this filter.
1854 if (entry != NULL) {
1855 // Already off, no change.
1856 if (entry->cfe_stats_report_frequency == 0) {
1857 return;
1858 }
1859
1860 entry->cfe_stats_report_frequency = 0;
1861 // If cfil_info still has filter(s) asking for stats, no need to remove from list.
1862 for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
1863 if (cfil_info->cfi_entries[kcunit - 1].cfe_stats_report_frequency > 0) {
1864 return;
1865 }
1866 }
1867 }
1868
1869 // No more filters asking for stats for this cfil_info; remove it from the list.
1870 if (!TAILQ_EMPTY(&cfil_sock_head_stats)) {
1871 found = FALSE;
1872 TAILQ_FOREACH(cfil, &cfil_sock_head_stats, cfi_link_stats) {
1873 if (cfil == cfil_info) {
1874 found = TRUE;
1875 break;
1876 }
1877 }
1878 if (found) {
1879 cfil_sock_attached_stats_count--;
1880 TAILQ_REMOVE(&cfil_sock_head_stats, cfil_info, cfi_link_stats);
1881 if (cfil_info->cfi_debug && cfil_log_stats) {
1882 CFIL_LOG(LOG_DEBUG, "CFIL: VERDICT RECEIVED - STATS FLOW DELETED: <so %llx sockID %llu> stats frequency reset",
1883 cfil_info->cfi_so ? (uint64_t)VM_KERNEL_ADDRPERM(cfil_info->cfi_so) : 0,
1884 cfil_info->cfi_sock_id);
1885 }
1886 }
1887 }
1888 }
1889 }
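/*
 * Example of the clamping above: a request for a 100 msec reporting
 * interval when CFIL_STATS_REPORT_INTERVAL_MIN_MSEC is, say, 500 msec
 * is recorded as 500 msec; a frequency of 0 turns reporting off for
 * this entry and, once no entry on the cfil_info wants stats, unlinks
 * the flow from cfil_sock_head_stats.
 */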
1890
1891 static errno_t
1892 cfil_ctl_send(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, mbuf_t m,
1893 int flags)
1894 {
1895 #pragma unused(kctlref, flags)
1896 errno_t error = 0;
1897 struct cfil_msg_hdr *msghdr;
1898 struct content_filter *cfc = (struct content_filter *)unitinfo;
1899 struct socket *so;
1900 struct cfil_msg_action *action_msg;
1901 struct cfil_entry *entry;
1902 struct cfil_info *cfil_info = NULL;
1903 unsigned int data_len = 0;
1904
1905 CFIL_LOG(LOG_INFO, "");
1906
1907 if (cfc == NULL) {
1908 CFIL_LOG(LOG_ERR, "no unitinfo");
1909 error = EINVAL;
1910 goto done;
1911 }
1912
1913 if (content_filters == NULL) {
1914 CFIL_LOG(LOG_ERR, "no content filter");
1915 error = EINVAL;
1916 goto done;
1917 }
1918 if (kcunit > MAX_CONTENT_FILTER) {
1919 CFIL_LOG(LOG_ERR, "kcunit %u > MAX_CONTENT_FILTER (%d)",
1920 kcunit, MAX_CONTENT_FILTER);
1921 error = EINVAL;
1922 goto done;
1923 }
1924 if (m == NULL) {
1925 CFIL_LOG(LOG_ERR, "null mbuf");
1926 error = EINVAL;
1927 goto done;
1928 }
1929 data_len = m_length(m);
1930
1931 if (data_len < sizeof(struct cfil_msg_hdr)) {
1932 CFIL_LOG(LOG_ERR, "too short %u", data_len);
1933 error = EINVAL;
1934 goto done;
1935 }
1936 msghdr = (struct cfil_msg_hdr *)mbuf_data(m);
1937 if (msghdr->cfm_version != CFM_VERSION_CURRENT) {
1938 CFIL_LOG(LOG_ERR, "bad version %u", msghdr->cfm_version);
1939 error = EINVAL;
1940 goto done;
1941 }
1942 if (msghdr->cfm_type != CFM_TYPE_ACTION) {
1943 CFIL_LOG(LOG_ERR, "bad type %u", msghdr->cfm_type);
1944 error = EINVAL;
1945 goto done;
1946 }
1947 if (msghdr->cfm_len > data_len) {
1948 CFIL_LOG(LOG_ERR, "bad length %u", msghdr->cfm_len);
1949 error = EINVAL;
1950 goto done;
1951 }
1952
1953 /* Validate action operation */
1954 switch (msghdr->cfm_op) {
1955 case CFM_OP_DATA_UPDATE:
1956 OSIncrementAtomic(
1957 &cfil_stats.cfs_ctl_action_data_update);
1958 break;
1959 case CFM_OP_DROP:
1960 OSIncrementAtomic(&cfil_stats.cfs_ctl_action_drop);
1961 break;
1962 case CFM_OP_BLESS_CLIENT:
1963 if (msghdr->cfm_len != sizeof(struct cfil_msg_bless_client)) {
1964 OSIncrementAtomic(&cfil_stats.cfs_ctl_action_bad_len);
1965 error = EINVAL;
1966 CFIL_LOG(LOG_ERR, "bad len: %u for op %u",
1967 msghdr->cfm_len,
1968 msghdr->cfm_op);
1969 goto done;
1970 }
1971 error = cfil_action_bless_client(kcunit, msghdr);
1972 goto done;
1973 case CFM_OP_SET_CRYPTO_KEY:
1974 if (msghdr->cfm_len != sizeof(struct cfil_msg_set_crypto_key)) {
1975 OSIncrementAtomic(&cfil_stats.cfs_ctl_action_bad_len);
1976 error = EINVAL;
1977 CFIL_LOG(LOG_ERR, "bad len: %u for op %u",
1978 msghdr->cfm_len,
1979 msghdr->cfm_op);
1980 goto done;
1981 }
1982 error = cfil_action_set_crypto_key(kcunit, msghdr);
1983 goto done;
1984 default:
1985 OSIncrementAtomic(&cfil_stats.cfs_ctl_action_bad_op);
1986 CFIL_LOG(LOG_ERR, "bad op %u", msghdr->cfm_op);
1987 error = EINVAL;
1988 goto done;
1989 }
1990 if (msghdr->cfm_len != sizeof(struct cfil_msg_action)) {
1991 OSIncrementAtomic(&cfil_stats.cfs_ctl_action_bad_len);
1992 error = EINVAL;
1993 CFIL_LOG(LOG_ERR, "bad len: %u for op %u",
1994 msghdr->cfm_len,
1995 msghdr->cfm_op);
1996 goto done;
1997 }
1998 cfil_rw_lock_shared(&cfil_lck_rw);
1999 if (cfc != (void *)content_filters[kcunit - 1]) {
2000 CFIL_LOG(LOG_ERR, "unitinfo does not match for kcunit %u",
2001 kcunit);
2002 error = EINVAL;
2003 cfil_rw_unlock_shared(&cfil_lck_rw);
2004 goto done;
2005 }
2006 cfil_rw_unlock_shared(&cfil_lck_rw);
2007
2008 // Search for the socket (TCP+UDP) and lock it
2009 so = cfil_socket_from_sock_id(msghdr->cfm_sock_id, false);
2010 if (so == NULL) {
2011 CFIL_LOG(LOG_NOTICE, "bad sock_id %llx",
2012 msghdr->cfm_sock_id);
2013 error = EINVAL;
2014 goto done;
2015 }
2016
2017 cfil_info = so->so_flow_db != NULL ?
2018 soflow_db_get_feature_context(so->so_flow_db, msghdr->cfm_sock_id) : so->so_cfil;
2019
2020 // We must not take the global lock here, to avoid deadlock further down the path.
2021 // Instead, attempt to retain a valid cfil_info to prevent any deallocation until
2022 // we are done. Abort the retain if cfil_info has already entered the free code path.
2023 if (cfil_info && os_ref_retain_try(&cfil_info->cfi_ref_count) == false) {
2024 socket_unlock(so, 1);
2025 goto done;
2026 }
2027
2028 if (cfil_info == NULL) {
2029 CFIL_LOG(LOG_NOTICE, "so %llx <id %llu> not attached",
2030 (uint64_t)VM_KERNEL_ADDRPERM(so), msghdr->cfm_sock_id);
2031 error = EINVAL;
2032 goto unlock;
2033 } else if (cfil_info->cfi_flags & CFIF_DROP) {
2034 CFIL_LOG(LOG_NOTICE, "so %llx drop set",
2035 (uint64_t)VM_KERNEL_ADDRPERM(so));
2036 error = EINVAL;
2037 goto unlock;
2038 }
2039
2040 if (cfil_info->cfi_debug) {
2041 cfil_info_log(LOG_DEBUG, cfil_info, "CFIL: RECEIVED MSG FROM FILTER");
2042 }
2043
2044 entry = &cfil_info->cfi_entries[kcunit - 1];
2045 if (entry->cfe_filter == NULL) {
2046 CFIL_LOG(LOG_NOTICE, "so %llx no filter",
2047 (uint64_t)VM_KERNEL_ADDRPERM(so));
2048 error = EINVAL;
2049 goto unlock;
2050 }
2051
2052 if (entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED) {
2053 entry->cfe_flags |= CFEF_DATA_START;
2054 } else {
2055 CFIL_LOG(LOG_ERR,
2056 "so %llx attached not sent for %u",
2057 (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit);
2058 error = EINVAL;
2059 goto unlock;
2060 }
2061
2062 microuptime(&entry->cfe_last_action);
2063 CFI_ADD_TIME_LOG(cfil_info, &entry->cfe_last_action, &cfil_info->cfi_first_event, msghdr->cfm_op);
2064
2065 action_msg = (struct cfil_msg_action *)msghdr;
2066
2067 switch (msghdr->cfm_op) {
2068 case CFM_OP_DATA_UPDATE:
2069
2070 if (cfil_info->cfi_debug) {
2071 cfil_info_log(LOG_DEBUG, cfil_info, "CFIL: RECEIVED CFM_OP_DATA_UPDATE");
2072 CFIL_LOG(LOG_DEBUG, "CFIL: VERDICT RECEIVED: <so %llx sockID %llu> <IN peek:%llu pass:%llu, OUT peek:%llu pass:%llu>",
2073 (uint64_t)VM_KERNEL_ADDRPERM(so),
2074 cfil_info->cfi_sock_id,
2075 action_msg->cfa_in_peek_offset, action_msg->cfa_in_pass_offset,
2076 action_msg->cfa_out_peek_offset, action_msg->cfa_out_pass_offset);
2077 }
2078
2079 /*
2080 * Received verdict, at this point we know this
2081 * socket connection is allowed. Unblock thread
2082 * immediately before proceeding to process the verdict.
2083 */
2084 cfil_sock_received_verdict(so);
2085
2086 if (action_msg->cfa_out_peek_offset != 0 ||
2087 action_msg->cfa_out_pass_offset != 0) {
2088 error = cfil_action_data_pass(so, cfil_info, kcunit, 1,
2089 action_msg->cfa_out_pass_offset,
2090 action_msg->cfa_out_peek_offset);
2091 }
2092 if (error == EJUSTRETURN) {
2093 error = 0;
2094 }
2095 if (error != 0) {
2096 break;
2097 }
2098 if (action_msg->cfa_in_peek_offset != 0 ||
2099 action_msg->cfa_in_pass_offset != 0) {
2100 error = cfil_action_data_pass(so, cfil_info, kcunit, 0,
2101 action_msg->cfa_in_pass_offset,
2102 action_msg->cfa_in_peek_offset);
2103 }
2104 if (error == EJUSTRETURN) {
2105 error = 0;
2106 }
2107
2108 // Toggle stats reporting according to received verdict.
2109 cfil_rw_lock_exclusive(&cfil_lck_rw);
2110 cfil_info_stats_toggle(cfil_info, entry, action_msg->cfa_stats_frequency);
2111 cfil_rw_unlock_exclusive(&cfil_lck_rw);
2112
2113 break;
2114
2115 case CFM_OP_DROP:
2116 if (cfil_info->cfi_debug) {
2117 cfil_info_log(LOG_DEBUG, cfil_info, "CFIL: RECEIVED CFM_OP_DROP");
2118 CFIL_LOG(LOG_DEBUG, "CFIL: VERDICT DROP RECEIVED: <so %llx sockID %llu> <IN peek:%llu pass:%llu, OUT peek:%llu pass:%llu>",
2119 (uint64_t)VM_KERNEL_ADDRPERM(so),
2120 cfil_info->cfi_sock_id,
2121 action_msg->cfa_in_peek_offset, action_msg->cfa_in_pass_offset,
2122 action_msg->cfa_out_peek_offset, action_msg->cfa_out_pass_offset);
2123 }
2124
2125 error = cfil_action_drop(so, cfil_info, kcunit);
2126 cfil_sock_received_verdict(so);
2127 break;
2128
2129 default:
2130 error = EINVAL;
2131 break;
2132 }
2133 unlock:
2134 CFIL_INFO_FREE(cfil_info);
2135 socket_unlock(so, 1);
2136 done:
2137 mbuf_freem(m);
2138
2139 if (error == 0) {
2140 OSIncrementAtomic(&cfil_stats.cfs_ctl_send_ok);
2141 } else {
2142 OSIncrementAtomic(&cfil_stats.cfs_ctl_send_bad);
2143 }
2144
2145 return error;
2146 }
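/*
 * From the agent's side, a "pass everything" verdict is a single
 * CFM_OP_DATA_UPDATE action message written to the kernel control
 * socket. A minimal user space sketch (kctl_fd is the agent's
 * connected control socket; the header member of struct
 * cfil_msg_action is assumed to be named cfa_msghdr, mirroring the
 * cfs_/cfd_/cfc_ msghdr naming of the event structures in this file):
 */
#if 0
	struct cfil_msg_action action = { 0 };

	action.cfa_msghdr.cfm_len = sizeof(action);	/* must equal sizeof(struct cfil_msg_action) */
	action.cfa_msghdr.cfm_version = CFM_VERSION_CURRENT;
	action.cfa_msghdr.cfm_type = CFM_TYPE_ACTION;
	action.cfa_msghdr.cfm_op = CFM_OP_DATA_UPDATE;
	action.cfa_msghdr.cfm_sock_id = sock_id;	/* from a prior attached event */
	action.cfa_out_pass_offset = CFM_MAX_OFFSET;	/* allow all outgoing data */
	action.cfa_out_peek_offset = CFM_MAX_OFFSET;
	action.cfa_in_pass_offset = CFM_MAX_OFFSET;	/* allow all incoming data */
	action.cfa_in_peek_offset = CFM_MAX_OFFSET;

	if (send(kctl_fd, &action, sizeof(action), 0) < 0) {
		/* EINVAL typically means one of the validation checks above failed */
	}
#endif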
2147
2148 static errno_t
2149 cfil_ctl_getopt(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo,
2150 int opt, void *data, size_t *len)
2151 {
2152 #pragma unused(kctlref, opt)
2153 struct cfil_info *cfil_info = NULL;
2154 errno_t error = 0;
2155 struct content_filter *cfc = (struct content_filter *)unitinfo;
2156
2157 CFIL_LOG(LOG_NOTICE, "");
2158
2159 if (cfc == NULL) {
2160 CFIL_LOG(LOG_ERR, "no unitinfo");
2161 return EINVAL;
2162 }
2163
2164 cfil_rw_lock_shared(&cfil_lck_rw);
2165
2166 if (content_filters == NULL) {
2167 CFIL_LOG(LOG_ERR, "no content filter");
2168 error = EINVAL;
2169 goto done;
2170 }
2171 if (kcunit > MAX_CONTENT_FILTER) {
2172 CFIL_LOG(LOG_ERR, "kcunit %u > MAX_CONTENT_FILTER (%d)",
2173 kcunit, MAX_CONTENT_FILTER);
2174 error = EINVAL;
2175 goto done;
2176 }
2177 if (cfc != (void *)content_filters[kcunit - 1]) {
2178 CFIL_LOG(LOG_ERR, "unitinfo does not match for kcunit %u",
2179 kcunit);
2180 error = EINVAL;
2181 goto done;
2182 }
2183 switch (opt) {
2184 case CFIL_OPT_NECP_CONTROL_UNIT:
2185 if (*len < sizeof(uint32_t)) {
2186 CFIL_LOG(LOG_ERR, "len too small %lu", *len);
2187 error = EINVAL;
2188 goto done;
2189 }
2190 if (data != NULL) {
2191 *(uint32_t *)data = cfc->cf_necp_control_unit;
2192 }
2193 break;
2194 case CFIL_OPT_PRESERVE_CONNECTIONS:
2195 if (*len < sizeof(uint32_t)) {
2196 CFIL_LOG(LOG_ERR, "CFIL_OPT_PRESERVE_CONNECTIONS len too small %lu", *len);
2197 error = EINVAL;
2198 goto done;
2199 }
2200 if (data != NULL) {
2201 *(uint32_t *)data = (cfc->cf_flags & CFF_PRESERVE_CONNECTIONS) ? true : false;
2202 }
2203 break;
2204 case CFIL_OPT_GET_SOCKET_INFO:
2205 if (*len != sizeof(struct cfil_opt_sock_info)) {
2206 CFIL_LOG(LOG_ERR, "len does not match %lu", *len);
2207 error = EINVAL;
2208 goto done;
2209 }
2210 if (data == NULL) {
2211 CFIL_LOG(LOG_ERR, "data not passed");
2212 error = EINVAL;
2213 goto done;
2214 }
2215
2216 struct cfil_opt_sock_info *sock_info =
2217 (struct cfil_opt_sock_info *) data;
2218
2219 // Unlock here so that we never hold both cfil_lck_rw and the
2220 // socket_lock at the same time. Otherwise, this can deadlock
2221 // because soclose() takes the socket_lock and then exclusive
2222 // cfil_lck_rw and we require the opposite order.
2223
2224 // WARNING: Be sure to never use anything protected
2225 // by cfil_lck_rw beyond this point.
2226 // WARNING: Be sure to avoid fallthrough and
2227 // goto return_already_unlocked from this branch.
2228 cfil_rw_unlock_shared(&cfil_lck_rw);
2229
2230 // Search (TCP+UDP) and lock socket
2231 struct socket *sock =
2232 cfil_socket_from_sock_id(sock_info->cfs_sock_id, false);
2233 if (sock == NULL) {
2234 CFIL_LOG(LOG_ERR, "CFIL: GET_SOCKET_INFO failed: bad sock_id %llu",
2235 sock_info->cfs_sock_id);
2236 error = ENOENT;
2237 goto return_already_unlocked;
2238 }
2239
2240 cfil_info = (sock->so_flow_db != NULL) ?
2241 soflow_db_get_feature_context(sock->so_flow_db, sock_info->cfs_sock_id) : sock->so_cfil;
2242
2243 if (cfil_info == NULL) {
2244 CFIL_LOG(LOG_INFO, "CFIL: GET_SOCKET_INFO failed: so %llx not attached, cannot fetch info",
2245 (uint64_t)VM_KERNEL_ADDRPERM(sock));
2246 error = EINVAL;
2247 socket_unlock(sock, 1);
2248 goto return_already_unlocked;
2249 }
2250
2251 // Fill out family, type, and protocol
2252 sock_info->cfs_sock_family = sock->so_proto->pr_domain->dom_family;
2253 sock_info->cfs_sock_type = sock->so_proto->pr_type;
2254 sock_info->cfs_sock_protocol = sock->so_proto->pr_protocol;
2255
2256 // Source and destination addresses
2257 struct inpcb *inp = sotoinpcb(sock);
2258 if (inp->inp_vflag & INP_IPV6) {
2259 struct in6_addr *laddr = NULL, *faddr = NULL;
2260 u_int16_t lport = 0, fport = 0;
2261
2262 cfil_get_flow_address_v6(cfil_info->cfi_hash_entry, inp,
2263 &laddr, &faddr, &lport, &fport);
2264 fill_ip6_sockaddr_4_6(&sock_info->cfs_local, laddr, lport, inp->inp_lifscope);
2265 fill_ip6_sockaddr_4_6(&sock_info->cfs_remote, faddr, fport, inp->inp_fifscope);
2266 } else if (inp->inp_vflag & INP_IPV4) {
2267 struct in_addr laddr = {.s_addr = 0}, faddr = {.s_addr = 0};
2268 u_int16_t lport = 0, fport = 0;
2269
2270 cfil_get_flow_address(cfil_info->cfi_hash_entry, inp,
2271 &laddr, &faddr, &lport, &fport);
2272 fill_ip_sockaddr_4_6(&sock_info->cfs_local, laddr, lport);
2273 fill_ip_sockaddr_4_6(&sock_info->cfs_remote, faddr, fport);
2274 }
2275
2276 // Set the pid info
2277 sock_info->cfs_pid = sock->last_pid;
2278 memcpy(sock_info->cfs_uuid, sock->last_uuid, sizeof(uuid_t));
2279
2280 if (sock->so_flags & SOF_DELEGATED) {
2281 sock_info->cfs_e_pid = sock->e_pid;
2282 memcpy(sock_info->cfs_e_uuid, sock->e_uuid, sizeof(uuid_t));
2283 } else {
2284 sock_info->cfs_e_pid = sock->last_pid;
2285 memcpy(sock_info->cfs_e_uuid, sock->last_uuid, sizeof(uuid_t));
2286 }
2287
2288 socket_unlock(sock, 1);
2289
2290 goto return_already_unlocked;
2291 default:
2292 error = ENOPROTOOPT;
2293 break;
2294 }
2295 done:
2296 cfil_rw_unlock_shared(&cfil_lck_rw);
2297
2298 return error;
2299
2300 return_already_unlocked:
2301
2302 return error;
2303 }
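/*
 * User space side of CFIL_OPT_GET_SOCKET_INFO: the agent supplies the
 * sock id it learned from an event and the kernel fills in the rest.
 * Minimal sketch (kctl_fd is the agent's connected control socket):
 */
#if 0
	struct cfil_opt_sock_info info = { 0 };
	socklen_t optlen = sizeof(info);

	info.cfs_sock_id = sock_id;	/* the flow to look up */
	if (getsockopt(kctl_fd, SYSPROTO_CONTROL, CFIL_OPT_GET_SOCKET_INFO,
	    &info, &optlen) == 0) {
		/* cfs_sock_family/type/protocol, cfs_local/cfs_remote and
		 * cfs_pid/cfs_e_pid (plus uuids) are now populated */
	}
#endif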
2304
2305 static errno_t
2306 cfil_ctl_setopt(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo,
2307 int opt, void *data, size_t len)
2308 {
2309 #pragma unused(kctlref, opt)
2310 errno_t error = 0;
2311 struct content_filter *cfc = (struct content_filter *)unitinfo;
2312
2313 CFIL_LOG(LOG_NOTICE, "");
2314
2315 if (cfc == NULL) {
2316 CFIL_LOG(LOG_ERR, "no unitinfo");
2317 return EINVAL;
2318 }
2319
2320 cfil_rw_lock_exclusive(&cfil_lck_rw);
2321
2322 if (content_filters == NULL) {
2323 CFIL_LOG(LOG_ERR, "no content filter");
2324 error = EINVAL;
2325 goto done;
2326 }
2327 if (kcunit > MAX_CONTENT_FILTER) {
2328 CFIL_LOG(LOG_ERR, "kcunit %u > MAX_CONTENT_FILTER (%d)",
2329 kcunit, MAX_CONTENT_FILTER);
2330 error = EINVAL;
2331 goto done;
2332 }
2333 if (cfc != (void *)content_filters[kcunit - 1]) {
2334 CFIL_LOG(LOG_ERR, "unitinfo does not match for kcunit %u",
2335 kcunit);
2336 error = EINVAL;
2337 goto done;
2338 }
2339 switch (opt) {
2340 case CFIL_OPT_NECP_CONTROL_UNIT:
2341 if (len < sizeof(uint32_t)) {
2342 CFIL_LOG(LOG_ERR, "CFIL_OPT_NECP_CONTROL_UNIT "
2343 "len too small %lu", len);
2344 error = EINVAL;
2345 goto done;
2346 }
2347 if (cfc->cf_necp_control_unit != 0) {
2348 CFIL_LOG(LOG_ERR, "CFIL_OPT_NECP_CONTROL_UNIT "
2349 "already set %u",
2350 cfc->cf_necp_control_unit);
2351 error = EINVAL;
2352 goto done;
2353 }
2354 cfc->cf_necp_control_unit = *(uint32_t *)data;
2355 break;
2356 case CFIL_OPT_PRESERVE_CONNECTIONS:
2357 if (len < sizeof(uint32_t)) {
2358 CFIL_LOG(LOG_ERR, "CFIL_OPT_PRESERVE_CONNECTIONS "
2359 "len too small %lu", len);
2360 error = EINVAL;
2361 goto done;
2362 }
2363 uint32_t preserve_connections = *((uint32_t *)data);
2364 CFIL_LOG(LOG_INFO, "CFIL_OPT_PRESERVE_CONNECTIONS got %d (kcunit %d)", preserve_connections, kcunit);
2365 if (preserve_connections) {
2366 cfc->cf_flags |= CFF_PRESERVE_CONNECTIONS;
2367 } else {
2368 cfc->cf_flags &= ~CFF_PRESERVE_CONNECTIONS;
2369 }
2370
2371 cfil_update_behavior_flags();
2372 break;
2373 default:
2374 error = ENOPROTOOPT;
2375 break;
2376 }
2377 done:
2378 cfil_rw_unlock_exclusive(&cfil_lck_rw);
2379
2380 return error;
2381 }
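/*
 * User space side of CFIL_OPT_NECP_CONTROL_UNIT: set exactly once per
 * control socket, before any flows attach (a second attempt fails
 * with EINVAL, per the check above). Sketch:
 */
#if 0
	uint32_t control_unit = 1;	/* must match the unit in the agent's NECP rules */

	if (setsockopt(kctl_fd, SYSPROTO_CONTROL, CFIL_OPT_NECP_CONTROL_UNIT,
	    &control_unit, sizeof(control_unit)) != 0) {
		/* EINVAL: bad length, or the control unit was already set */
	}
#endif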
2382
2383
2384 static void
2385 cfil_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, int flags)
2386 {
2387 #pragma unused(kctlref, flags)
2388 struct content_filter *cfc = (struct content_filter *)unitinfo;
2389 struct socket *so = NULL;
2390 int error;
2391 struct cfil_entry *entry;
2392 struct cfil_info *cfil_info = NULL;
2393
2394 CFIL_LOG(LOG_INFO, "");
2395
2396 if (cfc == NULL) {
2397 CFIL_LOG(LOG_ERR, "no unitinfo");
2398 OSIncrementAtomic(&cfil_stats.cfs_ctl_rcvd_bad);
2399 return;
2400 }
2401
2402 if (content_filters == NULL) {
2403 CFIL_LOG(LOG_ERR, "no content filter");
2404 OSIncrementAtomic(&cfil_stats.cfs_ctl_rcvd_bad);
2405 return;
2406 }
2407 if (kcunit > MAX_CONTENT_FILTER) {
2408 CFIL_LOG(LOG_ERR, "kcunit %u > MAX_CONTENT_FILTER (%d)",
2409 kcunit, MAX_CONTENT_FILTER);
2410 OSIncrementAtomic(&cfil_stats.cfs_ctl_rcvd_bad);
2411 return;
2412 }
2413 cfil_rw_lock_shared(&cfil_lck_rw);
2414 if (cfc != (void *)content_filters[kcunit - 1]) {
2415 CFIL_LOG(LOG_ERR, "unitinfo does not match for kcunit %u",
2416 kcunit);
2417 OSIncrementAtomic(&cfil_stats.cfs_ctl_rcvd_bad);
2418 goto done;
2419 }
2420 /* Let's assume the flow control is lifted */
2421 if (cfc->cf_flags & CFF_FLOW_CONTROLLED) {
2422 if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) {
2423 cfil_rw_lock_exclusive(&cfil_lck_rw);
2424 }
2425
2426 cfc->cf_flags &= ~CFF_FLOW_CONTROLLED;
2427
2428 cfil_rw_lock_exclusive_to_shared(&cfil_lck_rw);
2429 LCK_RW_ASSERT(&cfil_lck_rw, LCK_RW_ASSERT_SHARED);
2430 }
2431 /*
2432 * Flow control will be raised again as soon as an entry cannot enqueue
2433 * to the kernel control socket
2434 */
2435 while ((cfc->cf_flags & CFF_FLOW_CONTROLLED) == 0) {
2436 verify_content_filter(cfc);
2437
2438 cfil_rw_lock_assert_held(&cfil_lck_rw, 0);
2439
2440 /* Find an entry that is flow controlled */
2441 TAILQ_FOREACH(entry, &cfc->cf_sock_entries, cfe_link) {
2442 if (entry->cfe_cfil_info == NULL ||
2443 entry->cfe_cfil_info->cfi_so == NULL) {
2444 continue;
2445 }
2446 if ((entry->cfe_flags & CFEF_FLOW_CONTROLLED) != 0) {
2447 break;
2448 }
2449 }
2450 if (entry == NULL) {
2451 break;
2452 }
2453
2454 OSIncrementAtomic(&cfil_stats.cfs_ctl_rcvd_flow_lift);
2455
2456 cfil_info = entry->cfe_cfil_info;
2457 so = cfil_info->cfi_so;
2458
2459 cfil_rw_unlock_shared(&cfil_lck_rw);
2460 socket_lock(so, 1);
2461
2462 do {
2463 error = cfil_acquire_sockbuf(so, cfil_info, 1);
2464 if (error == 0) {
2465 error = cfil_data_service_ctl_q(so, cfil_info, kcunit, 1);
2466 }
2467 cfil_release_sockbuf(so, 1);
2468 if (error != 0) {
2469 break;
2470 }
2471
2472 error = cfil_acquire_sockbuf(so, cfil_info, 0);
2473 if (error == 0) {
2474 error = cfil_data_service_ctl_q(so, cfil_info, kcunit, 0);
2475 }
2476 cfil_release_sockbuf(so, 0);
2477 } while (0);
2478
2479 socket_lock_assert_owned(so);
2480 socket_unlock(so, 1);
2481
2482 cfil_rw_lock_shared(&cfil_lck_rw);
2483 }
2484 done:
2485 cfil_rw_unlock_shared(&cfil_lck_rw);
2486 }
2487
2488 void
2489 cfil_init(void)
2490 {
2491 struct kern_ctl_reg kern_ctl;
2492 errno_t error = 0;
2493 unsigned int mbuf_limit = 0;
2494
2495 CFIL_LOG(LOG_NOTICE, "");
2496
2497 /*
2498 * Compile time verifications
2499 */
2500 _CASSERT(CFIL_MAX_FILTER_COUNT == MAX_CONTENT_FILTER);
2501 _CASSERT(sizeof(struct cfil_filter_stat) % sizeof(uint32_t) == 0);
2502 _CASSERT(sizeof(struct cfil_entry_stat) % sizeof(uint32_t) == 0);
2503 _CASSERT(sizeof(struct cfil_sock_stat) % sizeof(uint32_t) == 0);
2504
2505 /*
2506 * Runtime verifications
2507 */
2508 VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_ctl_q_in_enqueued,
2509 sizeof(uint32_t)));
2510 VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_ctl_q_out_enqueued,
2511 sizeof(uint32_t)));
2512 VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_ctl_q_in_peeked,
2513 sizeof(uint32_t)));
2514 VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_ctl_q_out_peeked,
2515 sizeof(uint32_t)));
2516
2517 VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_pending_q_in_enqueued,
2518 sizeof(uint32_t)));
2519 VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_pending_q_out_enqueued,
2520 sizeof(uint32_t)));
2521
2522 VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_inject_q_in_enqueued,
2523 sizeof(uint32_t)));
2524 VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_inject_q_out_enqueued,
2525 sizeof(uint32_t)));
2526 VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_inject_q_in_passed,
2527 sizeof(uint32_t)));
2528 VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_inject_q_out_passed,
2529 sizeof(uint32_t)));
2530
2531 /*
2532 * Initialize the lists of attached sockets
2533 */
2534 TAILQ_INIT(&cfil_sock_head);
2535 TAILQ_INIT(&cfil_sock_head_stats);
2536
2537 /*
2538 * Register kernel control
2539 */
2540 bzero(&kern_ctl, sizeof(kern_ctl));
2541 strlcpy(kern_ctl.ctl_name, CONTENT_FILTER_CONTROL_NAME,
2542 sizeof(kern_ctl.ctl_name));
2543 kern_ctl.ctl_flags = CTL_FLAG_PRIVILEGED | CTL_FLAG_REG_EXTENDED;
2544 kern_ctl.ctl_sendsize = 512 * 1024; /* enough? */
2545 kern_ctl.ctl_recvsize = 512 * 1024; /* enough? */
2546 kern_ctl.ctl_connect = cfil_ctl_connect;
2547 kern_ctl.ctl_disconnect = cfil_ctl_disconnect;
2548 kern_ctl.ctl_send = cfil_ctl_send;
2549 kern_ctl.ctl_getopt = cfil_ctl_getopt;
2550 kern_ctl.ctl_setopt = cfil_ctl_setopt;
2551 kern_ctl.ctl_rcvd = cfil_ctl_rcvd;
2552 error = ctl_register(&kern_ctl, &cfil_kctlref);
2553 if (error != 0) {
2554 CFIL_LOG(LOG_ERR, "ctl_register failed: %d", error);
2555 return;
2556 }
2557
2558 // Spawn thread for statistics reporting
2559 if (kernel_thread_start(cfil_stats_report_thread_func, NULL,
2560 &cfil_stats_report_thread) != KERN_SUCCESS) {
2561 panic_plain("%s: Can't create statistics report thread", __func__);
2562 /* NOTREACHED */
2563 }
2564 /* this must not fail */
2565 VERIFY(cfil_stats_report_thread != NULL);
2566
2567 // Set UDP per-flow mbuf thresholds to 1/32 of platform max
2568 mbuf_limit = MAX(UDP_FLOW_GC_MBUF_CNT_MAX, (nmbclusters << MCLSHIFT) >> UDP_FLOW_GC_MBUF_SHIFT);
2569 cfil_udp_gc_mbuf_num_max = (mbuf_limit >> MCLSHIFT);
2570 cfil_udp_gc_mbuf_cnt_max = mbuf_limit;
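	/*
	 * Worked example of the arithmetic above (values are illustrative:
	 * assume 2 KiB clusters so MCLSHIFT == 11, UDP_FLOW_GC_MBUF_SHIFT == 5
	 * for the 1/32 share, and nmbclusters == 65536):
	 *
	 *	platform mbuf bytes      = 65536 << 11 = 128 MiB
	 *	cfil_udp_gc_mbuf_cnt_max = 128 MiB >> 5 = 4 MiB (byte threshold)
	 *	cfil_udp_gc_mbuf_num_max = 4 MiB >> 11  = 2048  (cluster threshold)
	 *
	 * UDP_FLOW_GC_MBUF_CNT_MAX acts as a floor via the MAX() above.
	 */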
2571
2572 memset(&global_cfil_stats_report_buffers, 0, sizeof(global_cfil_stats_report_buffers));
2573 }
2574
2575 struct cfil_info *
2576 cfil_info_alloc(struct socket *so, struct soflow_hash_entry *hash_entry)
2577 {
2578 int kcunit;
2579 struct cfil_info *cfil_info = NULL;
2580 struct inpcb *inp = sotoinpcb(so);
2581
2582 CFIL_LOG(LOG_INFO, "");
2583
2584 socket_lock_assert_owned(so);
2585
2586 cfil_info = zalloc_flags(cfil_info_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
2587 os_ref_init(&cfil_info->cfi_ref_count, &cfil_refgrp);
2588
2589 cfil_queue_init(&cfil_info->cfi_snd.cfi_inject_q);
2590 cfil_queue_init(&cfil_info->cfi_rcv.cfi_inject_q);
2591
2592 for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
2593 struct cfil_entry *entry;
2594
2595 entry = &cfil_info->cfi_entries[kcunit - 1];
2596 entry->cfe_cfil_info = cfil_info;
2597
2598 /* Initialize the filter entry */
2599 entry->cfe_filter = NULL;
2600 entry->cfe_flags = 0;
2601 entry->cfe_necp_control_unit = 0;
2602 entry->cfe_snd.cfe_pass_offset = 0;
2603 entry->cfe_snd.cfe_peek_offset = 0;
2604 entry->cfe_snd.cfe_peeked = 0;
2605 entry->cfe_rcv.cfe_pass_offset = 0;
2606 entry->cfe_rcv.cfe_peek_offset = 0;
2607 entry->cfe_rcv.cfe_peeked = 0;
2608 /*
2609 * Timestamp the last action to avoid prematurely
2610 * triggering garbage collection
2611 */
2612 microuptime(&entry->cfe_last_action);
2613
2614 cfil_queue_init(&entry->cfe_snd.cfe_pending_q);
2615 cfil_queue_init(&entry->cfe_rcv.cfe_pending_q);
2616 cfil_queue_init(&entry->cfe_snd.cfe_ctl_q);
2617 cfil_queue_init(&entry->cfe_rcv.cfe_ctl_q);
2618 }
2619
2620 cfil_rw_lock_exclusive(&cfil_lck_rw);
2621
2622 /*
2623 * Create a cfi_sock_id that's not the socket pointer!
2624 */
2625
2626 if (hash_entry == NULL) {
2627 // This is the TCP case, cfil_info is tracked per socket
2628 if (inp->inp_flowhash == 0) {
2629 inp->inp_flowhash = inp_calc_flowhash(inp);
2630 }
2631
2632 so->so_cfil = cfil_info;
2633 cfil_info->cfi_so = so;
2634 cfil_info->cfi_sock_id =
2635 ((so->so_gencnt << 32) | inp->inp_flowhash);
2636 } else {
2637 // This is the UDP case, cfil_info is tracked in per-socket hash
2638 cfil_info->cfi_so = so;
2639 cfil_info->cfi_hash_entry = hash_entry;
2640 cfil_info->cfi_sock_id = ((so->so_gencnt << 32) | (hash_entry->soflow_flowhash & 0xffffffff));
2641 }
2642
2643 TAILQ_INSERT_TAIL(&cfil_sock_head, cfil_info, cfi_link);
2644 SLIST_INIT(&cfil_info->cfi_ordered_entries);
2645
2646 cfil_sock_attached_count++;
2647
2648 cfil_rw_unlock_exclusive(&cfil_lck_rw);
2649
2650 if (cfil_info != NULL) {
2651 OSIncrementAtomic(&cfil_stats.cfs_cfi_alloc_ok);
2652 } else {
2653 OSIncrementAtomic(&cfil_stats.cfs_cfi_alloc_fail);
2654 }
2655
2656 return cfil_info;
2657 }
2658
2659 int
2660 cfil_info_attach_unit(struct socket *so, uint32_t filter_control_unit, struct cfil_info *cfil_info)
2661 {
2662 int kcunit;
2663 int attached = 0;
2664
2665 CFIL_LOG(LOG_INFO, "");
2666
2667 socket_lock_assert_owned(so);
2668
2669 cfil_rw_lock_exclusive(&cfil_lck_rw);
2670
2671 for (kcunit = 1;
2672 content_filters != NULL && kcunit <= MAX_CONTENT_FILTER;
2673 kcunit++) {
2674 struct content_filter *cfc = content_filters[kcunit - 1];
2675 struct cfil_entry *entry;
2676 struct cfil_entry *iter_entry;
2677 struct cfil_entry *iter_prev;
2678
2679 if (cfc == NULL) {
2680 continue;
2681 }
2682 if (!(cfc->cf_necp_control_unit & filter_control_unit)) {
2683 continue;
2684 }
2685
2686 entry = &cfil_info->cfi_entries[kcunit - 1];
2687
2688 entry->cfe_filter = cfc;
2689 entry->cfe_necp_control_unit = cfc->cf_necp_control_unit;
2690 TAILQ_INSERT_TAIL(&cfc->cf_sock_entries, entry, cfe_link);
2691 cfc->cf_sock_count++;
2692
2693 /* Insert the entry into the list ordered by control unit */
2694 iter_prev = NULL;
2695 SLIST_FOREACH(iter_entry, &cfil_info->cfi_ordered_entries, cfe_order_link) {
2696 if (entry->cfe_necp_control_unit < iter_entry->cfe_necp_control_unit) {
2697 break;
2698 }
2699 iter_prev = iter_entry;
2700 }
2701
2702 if (iter_prev == NULL) {
2703 SLIST_INSERT_HEAD(&cfil_info->cfi_ordered_entries, entry, cfe_order_link);
2704 } else {
2705 SLIST_INSERT_AFTER(iter_prev, entry, cfe_order_link);
2706 }
2707
2708 verify_content_filter(cfc);
2709 attached = 1;
2710 entry->cfe_flags |= CFEF_CFIL_ATTACHED;
2711 }
2712
2713 cfil_rw_unlock_exclusive(&cfil_lck_rw);
2714
2715 return attached;
2716 }
2717
2718 static void
2719 cfil_info_free(struct cfil_info *cfil_info)
2720 {
2721 int kcunit;
2722 uint64_t in_drain = 0;
2723 uint64_t out_drained = 0;
2724
2725 if (cfil_info == NULL) {
2726 return;
2727 }
2728
2729 CFIL_LOG(LOG_INFO, "");
2730
2731 cfil_rw_lock_exclusive(&cfil_lck_rw);
2732
2733 if (cfil_info->cfi_debug) {
2734 cfil_info_log(LOG_INFO, cfil_info, "CFIL: FREEING CFIL_INFO");
2735 }
2736
2737 for (kcunit = 1;
2738 content_filters != NULL && kcunit <= MAX_CONTENT_FILTER;
2739 kcunit++) {
2740 struct cfil_entry *entry;
2741 struct content_filter *cfc;
2742
2743 entry = &cfil_info->cfi_entries[kcunit - 1];
2744
2745 /* Don't be silly and try to detach twice */
2746 if (entry->cfe_filter == NULL) {
2747 continue;
2748 }
2749
2750 cfc = content_filters[kcunit - 1];
2751
2752 VERIFY(cfc == entry->cfe_filter);
2753
2754 entry->cfe_filter = NULL;
2755 entry->cfe_necp_control_unit = 0;
2756 TAILQ_REMOVE(&cfc->cf_sock_entries, entry, cfe_link);
2757 cfc->cf_sock_count--;
2758
2759 verify_content_filter(cfc);
2760 }
2761
2762 cfil_sock_attached_count--;
2763 TAILQ_REMOVE(&cfil_sock_head, cfil_info, cfi_link);
2764
2765 // Turn off stats reporting for cfil_info.
2766 cfil_info_stats_toggle(cfil_info, NULL, 0);
2767
2768 out_drained += cfil_queue_drain(&cfil_info->cfi_snd.cfi_inject_q);
2769 in_drain += cfil_queue_drain(&cfil_info->cfi_rcv.cfi_inject_q);
2770
2771 for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
2772 struct cfil_entry *entry;
2773
2774 entry = &cfil_info->cfi_entries[kcunit - 1];
2775 out_drained += cfil_queue_drain(&entry->cfe_snd.cfe_pending_q);
2776 in_drain += cfil_queue_drain(&entry->cfe_rcv.cfe_pending_q);
2777 out_drained += cfil_queue_drain(&entry->cfe_snd.cfe_ctl_q);
2778 in_drain += cfil_queue_drain(&entry->cfe_rcv.cfe_ctl_q);
2779 }
2780 cfil_rw_unlock_exclusive(&cfil_lck_rw);
2781
2782 if (out_drained) {
2783 OSIncrementAtomic(&cfil_stats.cfs_flush_out_free);
2784 }
2785 if (in_drain) {
2786 OSIncrementAtomic(&cfil_stats.cfs_flush_in_free);
2787 }
2788
2789 zfree(cfil_info_zone, cfil_info);
2790 }
2791
2792 /*
2793 * Received a verdict from userspace for a socket.
2794 * Perform any delayed operation if needed.
2795 */
2796 static void
2797 cfil_sock_received_verdict(struct socket *so)
2798 {
2799 if (so == NULL || so->so_cfil == NULL) {
2800 return;
2801 }
2802
2803 so->so_cfil->cfi_flags |= CFIF_INITIAL_VERDICT;
2804
2805 /*
2806 * If socket has already been connected, trigger
2807 * soisconnected now.
2808 */
2809 if (so->so_cfil->cfi_flags & CFIF_SOCKET_CONNECTED) {
2810 so->so_cfil->cfi_flags &= ~CFIF_SOCKET_CONNECTED;
2811 soisconnected(so);
2812 return;
2813 }
2814 }
2815
2816 /*
2817 * Entry point from Sockets layer
2818 * The socket is locked.
2819 *
2820 * Checks if a connected socket is subject to filter and
2821 * pending the initial verdict.
2822 */
2823 boolean_t
2824 cfil_sock_connected_pending_verdict(struct socket *so)
2825 {
2826 if (so == NULL || so->so_cfil == NULL) {
2827 return false;
2828 }
2829
2830 if (so->so_cfil->cfi_flags & CFIF_INITIAL_VERDICT) {
2831 return false;
2832 } else {
2833 /*
2834 * Remember that this protocol is already connected, so
2835 * we will trigger soisconnected() upon receipt of
2836 * the initial verdict later.
2837 */
2838 so->so_cfil->cfi_flags |= CFIF_SOCKET_CONNECTED;
2839 return true;
2840 }
2841 }
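/*
 * Sketch of how a protocol layer is expected to use the check above
 * when a connection completes (the real call sites are in the TCP
 * input path):
 */
#if 0
	if (!cfil_sock_connected_pending_verdict(so)) {
		soisconnected(so);	/* no filter, or initial verdict already received */
	}
	/* otherwise soisconnected() is triggered later by cfil_sock_received_verdict() */
#endif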
2842
2843 boolean_t
2844 cfil_filter_present(void)
2845 {
2846 return cfil_active_count > 0;
2847 }
2848
2849 /*
2850 * Entry point from Sockets layer
2851 * The socket is locked.
2852 */
2853 errno_t
2854 cfil_sock_attach(struct socket *so, struct sockaddr *local, struct sockaddr *remote, int dir)
2855 {
2856 errno_t error = 0;
2857 uint32_t filter_control_unit;
2858 int debug = 0;
2859
2860 socket_lock_assert_owned(so);
2861
2862 if (so->so_flags1 & SOF1_FLOW_DIVERT_SKIP) {
2863 /*
2864 * This socket has already been evaluated (and ultimately skipped) by
2865 * flow divert, so it has also already been through the content filter, if there
2866 * is one.
2867 */
2868 goto done;
2869 }
2870
2871 /* Limit ourselves to TCP that are not MPTCP subflows */
2872 if (SKIP_FILTER_FOR_TCP_SOCKET(so)) {
2873 goto done;
2874 }
2875
2876 debug = DEBUG_FLOW(sotoinpcb(so), so, local, remote);
2877 if (debug) {
2878 CFIL_LOG(LOG_INFO, "CFIL: TCP (dir %d) - debug flow with port %d", dir, cfil_log_port);
2879 }
2880
2881 filter_control_unit = necp_socket_get_content_filter_control_unit(so);
2882 if (filter_control_unit == 0) {
2883 goto done;
2884 }
2885
2886 if (filter_control_unit == NECP_FILTER_UNIT_NO_FILTER) {
2887 goto done;
2888 }
2889 if ((filter_control_unit & NECP_MASK_USERSPACE_ONLY) != 0) {
2890 OSIncrementAtomic(&cfil_stats.cfs_sock_userspace_only);
2891 goto done;
2892 }
2893 if (cfil_active_count == 0) {
2894 OSIncrementAtomic(&cfil_stats.cfs_sock_attach_in_vain);
2895 goto done;
2896 }
2897 if (so->so_cfil != NULL) {
2898 OSIncrementAtomic(&cfil_stats.cfs_sock_attach_already);
2899 CFIL_LOG(LOG_ERR, "already attached");
2900 goto done;
2901 } else {
2902 cfil_info_alloc(so, NULL);
2903 if (so->so_cfil == NULL) {
2904 error = ENOMEM;
2905 OSIncrementAtomic(&cfil_stats.cfs_sock_attach_no_mem);
2906 goto done;
2907 }
2908 so->so_cfil->cfi_dir = dir;
2909 so->so_cfil->cfi_filter_control_unit = filter_control_unit;
2910 so->so_cfil->cfi_debug = debug;
2911 }
2912 if (cfil_info_attach_unit(so, filter_control_unit, so->so_cfil) == 0) {
2913 CFIL_LOG(LOG_ERR, "cfil_info_attach_unit(%u) failed",
2914 filter_control_unit);
2915 OSIncrementAtomic(&cfil_stats.cfs_sock_attach_failed);
2916 goto done;
2917 }
2918 CFIL_LOG(LOG_INFO, "so %llx filter_control_unit %u sockID %llx",
2919 (uint64_t)VM_KERNEL_ADDRPERM(so),
2920 filter_control_unit, so->so_cfil->cfi_sock_id);
2921
2922 so->so_flags |= SOF_CONTENT_FILTER;
2923 OSIncrementAtomic(&cfil_stats.cfs_sock_attached);
2924
2925 /* Hold a reference on the socket */
2926 so->so_usecount++;
2927
2928 /*
2929 * Save passed addresses for attach event msg (in case resend
2930 * is needed).
2931 */
2932 if (remote != NULL && (remote->sa_len <= sizeof(union sockaddr_in_4_6))) {
2933 memcpy(&so->so_cfil->cfi_so_attach_faddr, remote, remote->sa_len);
2934 }
2935 if (local != NULL && (local->sa_len <= sizeof(union sockaddr_in_4_6))) {
2936 memcpy(&so->so_cfil->cfi_so_attach_laddr, local, local->sa_len);
2937 }
2938
2939 error = cfil_dispatch_attach_event(so, so->so_cfil, 0, dir);
2940 /* We can recover from flow control or out of memory errors */
2941 if (error == ENOBUFS || error == ENOMEM) {
2942 error = 0;
2943 } else if (error != 0) {
2944 goto done;
2945 }
2946
2947 CFIL_INFO_VERIFY(so->so_cfil);
2948 done:
2949 return error;
2950 }
2951
2952 /*
2953 * Entry point from Sockets layer
2954 * The socket is locked.
2955 */
2956 errno_t
2957 cfil_sock_detach(struct socket *so)
2958 {
2959 if (NEED_DGRAM_FLOW_TRACKING(so)) {
2960 return 0;
2961 }
2962
2963 if (so->so_cfil) {
2964 if (so->so_flags & SOF_CONTENT_FILTER) {
2965 so->so_flags &= ~SOF_CONTENT_FILTER;
2966 VERIFY(so->so_usecount > 0);
2967 so->so_usecount--;
2968 }
2969 CFIL_INFO_FREE(so->so_cfil);
2970 so->so_cfil = NULL;
2971 OSIncrementAtomic(&cfil_stats.cfs_sock_detached);
2972 }
2973 return 0;
2974 }
2975
2976 /*
2977 * Fill in the address info of an event message from either
2978 * the socket or passed in address info.
2979 */
2980 static void
2981 cfil_fill_event_msg_addresses(struct soflow_hash_entry *entry, struct inpcb *inp,
2982 union sockaddr_in_4_6 *sin_src, union sockaddr_in_4_6 *sin_dst,
2983 boolean_t isIPv4, boolean_t outgoing)
2984 {
2985 if (isIPv4) {
2986 struct in_addr laddr = {0}, faddr = {0};
2987 u_int16_t lport = 0, fport = 0;
2988
2989 cfil_get_flow_address(entry, inp, &laddr, &faddr, &lport, &fport);
2990
2991 if (outgoing) {
2992 fill_ip_sockaddr_4_6(sin_src, laddr, lport);
2993 fill_ip_sockaddr_4_6(sin_dst, faddr, fport);
2994 } else {
2995 fill_ip_sockaddr_4_6(sin_src, faddr, fport);
2996 fill_ip_sockaddr_4_6(sin_dst, laddr, lport);
2997 }
2998 } else {
2999 struct in6_addr *laddr = NULL, *faddr = NULL;
3000 u_int16_t lport = 0, fport = 0;
3001 const u_int32_t lifscope = inp ? inp->inp_lifscope : IFSCOPE_UNKNOWN;
3002 const u_int32_t fifscope = inp ? inp->inp_fifscope : IFSCOPE_UNKNOWN;
3003
3004 cfil_get_flow_address_v6(entry, inp, &laddr, &faddr, &lport, &fport);
3005 if (outgoing) {
3006 fill_ip6_sockaddr_4_6(sin_src, laddr, lport, lifscope);
3007 fill_ip6_sockaddr_4_6(sin_dst, faddr, fport, fifscope);
3008 } else {
3009 fill_ip6_sockaddr_4_6(sin_src, faddr, fport, fifscope);
3010 fill_ip6_sockaddr_4_6(sin_dst, laddr, lport, lifscope);
3011 }
3012 }
3013 }
3014
3015 static boolean_t
3016 cfil_dispatch_attach_event_sign(cfil_crypto_state_t crypto_state,
3017 struct cfil_info *cfil_info,
3018 struct cfil_msg_sock_attached *msg)
3019 {
3020 struct cfil_crypto_data data = {};
3021
3022 if (crypto_state == NULL || msg == NULL || cfil_info == NULL) {
3023 return false;
3024 }
3025
3026 data.sock_id = msg->cfs_msghdr.cfm_sock_id;
3027 data.direction = msg->cfs_conn_dir;
3028
3029 data.pid = msg->cfs_pid;
3030 data.effective_pid = msg->cfs_e_pid;
3031 uuid_copy(data.uuid, msg->cfs_uuid);
3032 uuid_copy(data.effective_uuid, msg->cfs_e_uuid);
3033 data.socketProtocol = msg->cfs_sock_protocol;
3034 if (data.direction == CFS_CONNECTION_DIR_OUT) {
3035 data.remote.sin6 = msg->cfs_dst.sin6;
3036 data.local.sin6 = msg->cfs_src.sin6;
3037 } else {
3038 data.remote.sin6 = msg->cfs_src.sin6;
3039 data.local.sin6 = msg->cfs_dst.sin6;
3040 }
3041
3042 // At attach time, if the local address is already present, there is no need to re-sign subsequent data messages.
3043 if (!NULLADDRESS(data.local)) {
3044 cfil_info->cfi_isSignatureLatest = true;
3045 }
3046
3047 msg->cfs_signature_length = sizeof(cfil_crypto_signature);
3048 if (cfil_crypto_sign_data(crypto_state, &data, msg->cfs_signature, &msg->cfs_signature_length) != 0) {
3049 msg->cfs_signature_length = 0;
3050 CFIL_LOG(LOG_ERR, "CFIL: Failed to sign attached msg <sockID %llu>",
3051 msg->cfs_msghdr.cfm_sock_id);
3052 return false;
3053 }
3054
3055 return true;
3056 }
3057
3058 static boolean_t
3059 cfil_dispatch_data_event_sign(cfil_crypto_state_t crypto_state,
3060 struct socket *so, struct cfil_info *cfil_info,
3061 struct cfil_msg_data_event *msg)
3062 {
3063 struct cfil_crypto_data data = {};
3064
3065 if (crypto_state == NULL || msg == NULL ||
3066 so == NULL || cfil_info == NULL) {
3067 return false;
3068 }
3069
3070 data.sock_id = cfil_info->cfi_sock_id;
3071 data.direction = cfil_info->cfi_dir;
3072 data.pid = so->last_pid;
3073 memcpy(data.uuid, so->last_uuid, sizeof(uuid_t));
3074 if (so->so_flags & SOF_DELEGATED) {
3075 data.effective_pid = so->e_pid;
3076 memcpy(data.effective_uuid, so->e_uuid, sizeof(uuid_t));
3077 } else {
3078 data.effective_pid = so->last_pid;
3079 memcpy(data.effective_uuid, so->last_uuid, sizeof(uuid_t));
3080 }
3081 data.socketProtocol = so->so_proto->pr_protocol;
3082
3083 if (data.direction == CFS_CONNECTION_DIR_OUT) {
3084 data.remote.sin6 = msg->cfc_dst.sin6;
3085 data.local.sin6 = msg->cfc_src.sin6;
3086 } else {
3087 data.remote.sin6 = msg->cfc_src.sin6;
3088 data.local.sin6 = msg->cfc_dst.sin6;
3089 }
3090
3091 // At the first data message, the local address may show up for the first time; update the
3092 // address cache, after which subsequent data messages no longer need re-signing.
3093 if (!NULLADDRESS(data.local)) {
3094 memcpy(&cfil_info->cfi_so_attach_laddr, &data.local, data.local.sa.sa_len);
3095 cfil_info->cfi_isSignatureLatest = true;
3096 }
3097
3098 msg->cfd_signature_length = sizeof(cfil_crypto_signature);
3099 if (cfil_crypto_sign_data(crypto_state, &data, msg->cfd_signature, &msg->cfd_signature_length) != 0) {
3100 msg->cfd_signature_length = 0;
3101 CFIL_LOG(LOG_ERR, "CFIL: Failed to sign data msg <sockID %llu>",
3102 msg->cfd_msghdr.cfm_sock_id);
3103 return false;
3104 }
3105
3106 return true;
3107 }
3108
3109 static boolean_t
3110 cfil_dispatch_closed_event_sign(cfil_crypto_state_t crypto_state,
3111 struct socket *so, struct cfil_info *cfil_info,
3112 struct cfil_msg_sock_closed *msg)
3113 {
3114 struct cfil_crypto_data data = {};
3115 struct soflow_hash_entry hash_entry = {};
3116 struct soflow_hash_entry *hash_entry_ptr = NULL;
3117 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3118
3119 if (crypto_state == NULL || msg == NULL ||
3120 so == NULL || inp == NULL || cfil_info == NULL) {
3121 return false;
3122 }
3123
3124 data.sock_id = cfil_info->cfi_sock_id;
3125 data.direction = cfil_info->cfi_dir;
3126
3127 data.pid = so->last_pid;
3128 memcpy(data.uuid, so->last_uuid, sizeof(uuid_t));
3129 if (so->so_flags & SOF_DELEGATED) {
3130 data.effective_pid = so->e_pid;
3131 memcpy(data.effective_uuid, so->e_uuid, sizeof(uuid_t));
3132 } else {
3133 data.effective_pid = so->last_pid;
3134 memcpy(data.effective_uuid, so->last_uuid, sizeof(uuid_t));
3135 }
3136 data.socketProtocol = so->so_proto->pr_protocol;
3137
3138 /*
3139 * Fill in address info:
3140 * For UDP, use the cfil_info hash entry directly.
3141 * For TCP, compose a hash entry with the saved addresses.
3142 */
3143 if (cfil_info->cfi_hash_entry != NULL) {
3144 hash_entry_ptr = cfil_info->cfi_hash_entry;
3145 } else if (cfil_info->cfi_so_attach_faddr.sa.sa_len > 0 ||
3146 cfil_info->cfi_so_attach_laddr.sa.sa_len > 0) {
3147 soflow_fill_hash_entry_from_address(&hash_entry, TRUE, &cfil_info->cfi_so_attach_laddr.sa, FALSE);
3148 soflow_fill_hash_entry_from_address(&hash_entry, FALSE, &cfil_info->cfi_so_attach_faddr.sa, FALSE);
3149 hash_entry_ptr = &hash_entry;
3150 }
3151 if (hash_entry_ptr != NULL) {
3152 boolean_t outgoing = (cfil_info->cfi_dir == CFS_CONNECTION_DIR_OUT);
3153 union sockaddr_in_4_6 *src = outgoing ? &data.local : &data.remote;
3154 union sockaddr_in_4_6 *dst = outgoing ? &data.remote : &data.local;
3155 cfil_fill_event_msg_addresses(hash_entry_ptr, inp, src, dst, !IS_INP_V6(inp), outgoing);
3156 }
3157
3158 data.byte_count_in = cfil_info->cfi_byte_inbound_count;
3159 data.byte_count_out = cfil_info->cfi_byte_outbound_count;
3160
3161 msg->cfc_signature_length = sizeof(cfil_crypto_signature);
3162 if (cfil_crypto_sign_data(crypto_state, &data, msg->cfc_signature, &msg->cfc_signature_length) != 0) {
3163 msg->cfc_signature_length = 0;
3164 CFIL_LOG(LOG_ERR, "CFIL: Failed to sign closed msg <sockID %llu>",
3165 msg->cfc_msghdr.cfm_sock_id);
3166 return false;
3167 }
3168
3169 return true;
3170 }
3171
3172 static int
3173 cfil_dispatch_attach_event(struct socket *so, struct cfil_info *cfil_info,
3174 uint32_t kcunit, int conn_dir)
3175 {
3176 errno_t error = 0;
3177 struct cfil_entry *entry = NULL;
3178 struct cfil_msg_sock_attached msg_attached;
3179 struct content_filter *cfc = NULL;
3180 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3181 struct soflow_hash_entry *hash_entry_ptr = NULL;
3182 struct soflow_hash_entry hash_entry;
3183
3184 memset(&hash_entry, 0, sizeof(struct soflow_hash_entry));
3185 proc_t p = PROC_NULL;
3186 task_t t = TASK_NULL;
3187
3188 socket_lock_assert_owned(so);
3189
3190 cfil_rw_lock_shared(&cfil_lck_rw);
3191
3192 if (so->so_proto == NULL || so->so_proto->pr_domain == NULL) {
3193 error = EINVAL;
3194 goto done;
3195 }
3196
3197 if (kcunit == 0) {
3198 entry = SLIST_FIRST(&cfil_info->cfi_ordered_entries);
3199 } else {
3200 entry = &cfil_info->cfi_entries[kcunit - 1];
3201 }
3202
3203 if (entry == NULL) {
3204 goto done;
3205 }
3206
3207 cfc = entry->cfe_filter;
3208 if (cfc == NULL) {
3209 goto done;
3210 }
3211
3212 if ((entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED)) {
3213 goto done;
3214 }
3215
3216 if (kcunit == 0) {
3217 kcunit = CFI_ENTRY_KCUNIT(cfil_info, entry);
3218 }
3219
3220 CFIL_LOG(LOG_INFO, "so %llx filter_control_unit %u kcunit %u",
3221 (uint64_t)VM_KERNEL_ADDRPERM(so), entry->cfe_necp_control_unit, kcunit);
3222
3223 /* Would be wasteful to try when flow controlled */
3224 if (cfc->cf_flags & CFF_FLOW_CONTROLLED) {
3225 error = ENOBUFS;
3226 goto done;
3227 }
3228
3229 bzero(&msg_attached, sizeof(struct cfil_msg_sock_attached));
3230 msg_attached.cfs_msghdr.cfm_len = sizeof(struct cfil_msg_sock_attached);
3231 msg_attached.cfs_msghdr.cfm_version = CFM_VERSION_CURRENT;
3232 msg_attached.cfs_msghdr.cfm_type = CFM_TYPE_EVENT;
3233 msg_attached.cfs_msghdr.cfm_op = CFM_OP_SOCKET_ATTACHED;
3234 msg_attached.cfs_msghdr.cfm_sock_id = entry->cfe_cfil_info->cfi_sock_id;
3235
3236 msg_attached.cfs_sock_family = so->so_proto->pr_domain->dom_family;
3237 msg_attached.cfs_sock_type = so->so_proto->pr_type;
3238 msg_attached.cfs_sock_protocol = so->so_proto->pr_protocol;
3239 msg_attached.cfs_pid = so->last_pid;
3240 memcpy(msg_attached.cfs_uuid, so->last_uuid, sizeof(uuid_t));
3241 if (so->so_flags & SOF_DELEGATED) {
3242 msg_attached.cfs_e_pid = so->e_pid;
3243 memcpy(msg_attached.cfs_e_uuid, so->e_uuid, sizeof(uuid_t));
3244 } else {
3245 msg_attached.cfs_e_pid = so->last_pid;
3246 memcpy(msg_attached.cfs_e_uuid, so->last_uuid, sizeof(uuid_t));
3247 }
3248
3249 /*
3250 * Fill in address info:
3251 * For UDP, use the cfil_info hash entry directly.
3252 * For TCP, compose a hash entry with the saved addresses.
3253 */
3254 if (cfil_info->cfi_hash_entry != NULL) {
3255 hash_entry_ptr = cfil_info->cfi_hash_entry;
3256 } else if (cfil_info->cfi_so_attach_faddr.sa.sa_len > 0 ||
3257 cfil_info->cfi_so_attach_laddr.sa.sa_len > 0) {
3258 soflow_fill_hash_entry_from_address(&hash_entry, TRUE, &cfil_info->cfi_so_attach_laddr.sa, FALSE);
3259 soflow_fill_hash_entry_from_address(&hash_entry, FALSE, &cfil_info->cfi_so_attach_faddr.sa, FALSE);
3260 hash_entry_ptr = &hash_entry;
3261 }
3262 if (hash_entry_ptr != NULL) {
3263 cfil_fill_event_msg_addresses(hash_entry_ptr, inp,
3264 &msg_attached.cfs_src, &msg_attached.cfs_dst,
3265 !IS_INP_V6(inp), conn_dir == CFS_CONNECTION_DIR_OUT);
3266 }
3267 msg_attached.cfs_conn_dir = conn_dir;
3268
3269 if (msg_attached.cfs_e_pid != 0) {
3270 p = proc_find(msg_attached.cfs_e_pid);
3271 if (p != PROC_NULL) {
3272 t = proc_task(p);
3273 if (t != TASK_NULL) {
3274 audit_token_t audit_token;
3275 mach_msg_type_number_t count = TASK_AUDIT_TOKEN_COUNT;
3276 if (task_info(t, TASK_AUDIT_TOKEN, (task_info_t)&audit_token, &count) == KERN_SUCCESS) {
3277 memcpy(&msg_attached.cfs_audit_token, &audit_token, sizeof(msg_attached.cfs_audit_token));
3278 } else {
3279 CFIL_LOG(LOG_ERR, "CFIL: Failed to get process audit token <sockID %llu> ",
3280 entry->cfe_cfil_info->cfi_sock_id);
3281 }
3282 }
3283 proc_rele(p);
3284 }
3285 }
3286
3287 if (cfil_info->cfi_debug) {
3288 cfil_info_log(LOG_INFO, cfil_info, "CFIL: SENDING ATTACH UP");
3289 }
3290
3291 cfil_dispatch_attach_event_sign(entry->cfe_filter->cf_crypto_state, cfil_info, &msg_attached);
3292
3293 error = ctl_enqueuedata(entry->cfe_filter->cf_kcref,
3294 entry->cfe_filter->cf_kcunit,
3295 &msg_attached,
3296 sizeof(struct cfil_msg_sock_attached),
3297 CTL_DATA_EOR);
3298 if (error != 0) {
3299 CFIL_LOG(LOG_ERR, "ctl_enqueuedata() failed: %d", error);
3300 goto done;
3301 }
3302 microuptime(&entry->cfe_last_event);
3303 cfil_info->cfi_first_event.tv_sec = entry->cfe_last_event.tv_sec;
3304 cfil_info->cfi_first_event.tv_usec = entry->cfe_last_event.tv_usec;
3305
3306 entry->cfe_flags |= CFEF_SENT_SOCK_ATTACHED;
3307 OSIncrementAtomic(&cfil_stats.cfs_attach_event_ok);
3308 done:
3309
3310 /* We can recover from flow control */
3311 if (error == ENOBUFS) {
3312 entry->cfe_flags |= CFEF_FLOW_CONTROLLED;
3313 OSIncrementAtomic(&cfil_stats.cfs_attach_event_flow_control);
3314
3315 if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) {
3316 cfil_rw_lock_exclusive(&cfil_lck_rw);
3317 }
3318
3319 cfc->cf_flags |= CFF_FLOW_CONTROLLED;
3320
3321 cfil_rw_unlock_exclusive(&cfil_lck_rw);
3322 } else {
3323 if (error != 0) {
3324 OSIncrementAtomic(&cfil_stats.cfs_attach_event_fail);
3325 }
3326
3327 cfil_rw_unlock_shared(&cfil_lck_rw);
3328 }
3329 return error;
3330 }
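
/*
 * Receiving side sketch (illustrative, user space, compiled out): a filter
 * agent reads CFM_OP_SOCKET_ATTACHED events like the one built above off its
 * kernel control socket. Assumes access to the cfil message definitions
 * (struct cfil_msg_hdr / struct cfil_msg_sock_attached); error handling is
 * elided.
 */
#if 0
#include <stdio.h>
#include <sys/socket.h>

static void
example_read_attach_events(int kctl_fd)
{
	unsigned char buf[8192];
	ssize_t n;

	while ((n = recv(kctl_fd, buf, sizeof(buf), 0)) >=
	    (ssize_t)sizeof(struct cfil_msg_hdr)) {
		struct cfil_msg_hdr *hdr = (struct cfil_msg_hdr *)buf;

		if (hdr->cfm_type != CFM_TYPE_EVENT ||
		    hdr->cfm_op != CFM_OP_SOCKET_ATTACHED) {
			continue;
		}
		struct cfil_msg_sock_attached *att =
		    (struct cfil_msg_sock_attached *)buf;
		/* cfs_e_pid/cfs_e_uuid are the effective values set above */
		printf("attach: sock_id %llu family %d pid %d\n",
		    (unsigned long long)att->cfs_msghdr.cfm_sock_id,
		    att->cfs_sock_family, att->cfs_e_pid);
	}
}
#endif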
3331
3332 static int
3333 cfil_dispatch_disconnect_event(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit, int outgoing)
3334 {
3335 errno_t error = 0;
3337 struct cfil_entry *entry;
3338 struct cfe_buf *entrybuf;
3339 struct cfil_msg_hdr msg_disconnected;
3340 struct content_filter *cfc;
3341
3342 socket_lock_assert_owned(so);
3343
3344 cfil_rw_lock_shared(&cfil_lck_rw);
3345
3346 entry = &cfil_info->cfi_entries[kcunit - 1];
3347 if (outgoing) {
3348 entrybuf = &entry->cfe_snd;
3349 } else {
3350 entrybuf = &entry->cfe_rcv;
3351 }
3352
3353 cfc = entry->cfe_filter;
3354 if (cfc == NULL) {
3355 goto done;
3356 }
3357
3358 // Mark if this flow qualifies for immediate close.
3359 SET_NO_CLOSE_WAIT(sotoinpcb(so), cfil_info);
3360
3361 CFIL_LOG(LOG_INFO, "so %llx kcunit %u outgoing %d",
3362 (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit, outgoing);
3363
3364 /*
3365 * Send the disconnection event once
3366 */
3367 if ((outgoing && (entry->cfe_flags & CFEF_SENT_DISCONNECT_OUT)) ||
3368 (!outgoing && (entry->cfe_flags & CFEF_SENT_DISCONNECT_IN))) {
3369 CFIL_LOG(LOG_INFO, "so %llx disconnect already sent",
3370 (uint64_t)VM_KERNEL_ADDRPERM(so));
3371 goto done;
3372 }
3373
3374 /*
3375 * We're not disconnected as long as some data is waiting
3376 * to be delivered to the filter
3377 */
3378 if (outgoing && cfil_queue_empty(&entrybuf->cfe_ctl_q) == 0) {
3379 CFIL_LOG(LOG_INFO, "so %llx control queue not empty",
3380 (uint64_t)VM_KERNEL_ADDRPERM(so));
3381 error = EBUSY;
3382 goto done;
3383 }
3384 /* Would be wasteful to try when flow controlled */
3385 if (cfc->cf_flags & CFF_FLOW_CONTROLLED) {
3386 error = ENOBUFS;
3387 goto done;
3388 }
3389
3390 if (cfil_info->cfi_debug) {
3391 cfil_info_log(LOG_INFO, cfil_info, outgoing ?
3392 "CFIL: OUT - SENDING DISCONNECT UP":
3393 "CFIL: IN - SENDING DISCONNECT UP");
3394 }
3395
3396 bzero(&msg_disconnected, sizeof(struct cfil_msg_hdr));
3397 msg_disconnected.cfm_len = sizeof(struct cfil_msg_hdr);
3398 msg_disconnected.cfm_version = CFM_VERSION_CURRENT;
3399 msg_disconnected.cfm_type = CFM_TYPE_EVENT;
3400 msg_disconnected.cfm_op = outgoing ? CFM_OP_DISCONNECT_OUT :
3401 CFM_OP_DISCONNECT_IN;
3402 msg_disconnected.cfm_sock_id = entry->cfe_cfil_info->cfi_sock_id;
3403 error = ctl_enqueuedata(entry->cfe_filter->cf_kcref,
3404 entry->cfe_filter->cf_kcunit,
3405 &msg_disconnected,
3406 sizeof(struct cfil_msg_hdr),
3407 CTL_DATA_EOR);
3408 if (error != 0) {
3409 CFIL_LOG(LOG_ERR, "ctl_enqueuembuf() failed: %d", error);
3410 mbuf_freem(msg);
3411 goto done;
3412 }
3413 microuptime(&entry->cfe_last_event);
3414 CFI_ADD_TIME_LOG(cfil_info, &entry->cfe_last_event, &cfil_info->cfi_first_event, msg_disconnected.cfm_op);
3415
3416 /* Remember we have sent the disconnection message */
3417 if (outgoing) {
3418 entry->cfe_flags |= CFEF_SENT_DISCONNECT_OUT;
3419 OSIncrementAtomic(&cfil_stats.cfs_disconnect_out_event_ok);
3420 } else {
3421 entry->cfe_flags |= CFEF_SENT_DISCONNECT_IN;
3422 OSIncrementAtomic(&cfil_stats.cfs_disconnect_in_event_ok);
3423 }
3424 done:
3425 if (error == ENOBUFS) {
3426 entry->cfe_flags |= CFEF_FLOW_CONTROLLED;
3427 OSIncrementAtomic(
3428 &cfil_stats.cfs_disconnect_event_flow_control);
3429
3430 if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) {
3431 cfil_rw_lock_exclusive(&cfil_lck_rw);
3432 }
3433
3434 cfc->cf_flags |= CFF_FLOW_CONTROLLED;
3435
3436 cfil_rw_unlock_exclusive(&cfil_lck_rw);
3437 } else {
3438 if (error != 0) {
3439 OSIncrementAtomic(
3440 &cfil_stats.cfs_disconnect_event_fail);
3441 }
3442
3443 cfil_rw_unlock_shared(&cfil_lck_rw);
3444 }
3445 return error;
3446 }
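
/*
 * The flow-control recovery path above relies on a try-upgrade idiom for
 * cfil_lck_rw. A minimal sketch of the pattern (illustrative, compiled out),
 * assuming cfil_rw_lock_shared_to_exclusive() returns false after dropping
 * the shared hold when the upgrade cannot be granted:
 */
#if 0
static void
example_mark_flow_controlled(struct content_filter *cfc)
{
	/* entered with cfil_lck_rw held shared */
	if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) {
		/*
		 * Upgrade failed: the shared hold was dropped, so take the
		 * lock exclusive from scratch; anything observed under the
		 * shared hold may have changed in the window.
		 */
		cfil_rw_lock_exclusive(&cfil_lck_rw);
	}
	cfc->cf_flags |= CFF_FLOW_CONTROLLED;	/* safe: exclusive hold */
	cfil_rw_unlock_exclusive(&cfil_lck_rw);
}
#endif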
3447
3448 int
3449 cfil_dispatch_closed_event(struct socket *so, struct cfil_info *cfil_info, int kcunit)
3450 {
3451 struct cfil_entry *entry;
3452 struct cfil_msg_sock_closed msg_closed;
3453 errno_t error = 0;
3454 struct content_filter *cfc;
3455
3456 socket_lock_assert_owned(so);
3457
3458 cfil_rw_lock_shared(&cfil_lck_rw);
3459
3460 entry = &cfil_info->cfi_entries[kcunit - 1];
3461 cfc = entry->cfe_filter;
3462 if (cfc == NULL) {
3463 goto done;
3464 }
3465
3466 CFIL_LOG(LOG_INFO, "so %llx kcunit %d",
3467 (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit);
3468
3469 /* Would be wasteful to try when flow controlled */
3470 if (cfc->cf_flags & CFF_FLOW_CONTROLLED) {
3471 error = ENOBUFS;
3472 goto done;
3473 }
3474 /*
3475 * Send a single closed message per filter
3476 */
3477 if ((entry->cfe_flags & CFEF_SENT_SOCK_CLOSED) != 0) {
3478 goto done;
3479 }
3480 if ((entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED) == 0) {
3481 goto done;
3482 }
3483
3484 microuptime(&entry->cfe_last_event);
3485 CFI_ADD_TIME_LOG(cfil_info, &entry->cfe_last_event, &cfil_info->cfi_first_event, CFM_OP_SOCKET_CLOSED);
3486
3487 bzero(&msg_closed, sizeof(struct cfil_msg_sock_closed));
3488 msg_closed.cfc_msghdr.cfm_len = sizeof(struct cfil_msg_sock_closed);
3489 msg_closed.cfc_msghdr.cfm_version = CFM_VERSION_CURRENT;
3490 msg_closed.cfc_msghdr.cfm_type = CFM_TYPE_EVENT;
3491 msg_closed.cfc_msghdr.cfm_op = CFM_OP_SOCKET_CLOSED;
3492 msg_closed.cfc_msghdr.cfm_sock_id = entry->cfe_cfil_info->cfi_sock_id;
3493 msg_closed.cfc_first_event.tv_sec = cfil_info->cfi_first_event.tv_sec;
3494 msg_closed.cfc_first_event.tv_usec = cfil_info->cfi_first_event.tv_usec;
3495 memcpy(msg_closed.cfc_op_time, cfil_info->cfi_op_time, sizeof(uint32_t) * CFI_MAX_TIME_LOG_ENTRY);
3496 memcpy(msg_closed.cfc_op_list, cfil_info->cfi_op_list, sizeof(unsigned char) * CFI_MAX_TIME_LOG_ENTRY);
3497 msg_closed.cfc_op_list_ctr = cfil_info->cfi_op_list_ctr;
3498 msg_closed.cfc_byte_inbound_count = cfil_info->cfi_byte_inbound_count;
3499 msg_closed.cfc_byte_outbound_count = cfil_info->cfi_byte_outbound_count;
3500
3501 cfil_dispatch_closed_event_sign(entry->cfe_filter->cf_crypto_state, so, cfil_info, &msg_closed);
3502
3503 if (cfil_info->cfi_debug) {
3504 cfil_info_log(LOG_INFO, cfil_info, "CFIL: SENDING CLOSED UP");
3505 }
3506
3507 /* for debugging
3508 * if (msg_closed.cfc_op_list_ctr > CFI_MAX_TIME_LOG_ENTRY) {
3509 * msg_closed.cfc_op_list_ctr = CFI_MAX_TIME_LOG_ENTRY; // just in case
3510 * }
3511 * for (unsigned int i = 0; i < msg_closed.cfc_op_list_ctr ; i++) {
3512 * CFIL_LOG(LOG_ERR, "MD: socket %llu event %2u, time + %u msec", msg_closed.cfc_msghdr.cfm_sock_id, (unsigned short)msg_closed.cfc_op_list[i], msg_closed.cfc_op_time[i]);
3513 * }
3514 */
3515
3516 error = ctl_enqueuedata(entry->cfe_filter->cf_kcref,
3517 entry->cfe_filter->cf_kcunit,
3518 &msg_closed,
3519 sizeof(struct cfil_msg_sock_closed),
3520 CTL_DATA_EOR);
3521 if (error != 0) {
3522 CFIL_LOG(LOG_ERR, "ctl_enqueuedata() failed: %d",
3523 error);
3524 goto done;
3525 }
3526
3527 entry->cfe_flags |= CFEF_SENT_SOCK_CLOSED;
3528 OSIncrementAtomic(&cfil_stats.cfs_closed_event_ok);
3529 done:
3530 /* We can recover from flow control */
3531 if (error == ENOBUFS) {
3532 entry->cfe_flags |= CFEF_FLOW_CONTROLLED;
3533 OSIncrementAtomic(&cfil_stats.cfs_closed_event_flow_control);
3534
3535 if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) {
3536 cfil_rw_lock_exclusive(&cfil_lck_rw);
3537 }
3538
3539 cfc->cf_flags |= CFF_FLOW_CONTROLLED;
3540
3541 cfil_rw_unlock_exclusive(&cfil_lck_rw);
3542 } else {
3543 if (error != 0) {
3544 OSIncrementAtomic(&cfil_stats.cfs_closed_event_fail);
3545 }
3546
3547 cfil_rw_unlock_shared(&cfil_lck_rw);
3548 }
3549
3550 return error;
3551 }
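
/*
 * Receiving side sketch (illustrative, user space, compiled out): a filter
 * agent can mine the closed event for per-flow accounting. Field names as
 * used above; the counter is clamped like the commented-out debug block
 * above does. Error handling elided.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void
example_log_closed_event(const struct cfil_msg_sock_closed *closed)
{
	uint32_t n = closed->cfc_op_list_ctr;

	if (n > CFI_MAX_TIME_LOG_ENTRY) {
		n = CFI_MAX_TIME_LOG_ENTRY;
	}
	printf("closed: sock_id %llu in %llu bytes out %llu bytes\n",
	    (unsigned long long)closed->cfc_msghdr.cfm_sock_id,
	    (unsigned long long)closed->cfc_byte_inbound_count,
	    (unsigned long long)closed->cfc_byte_outbound_count);
	for (uint32_t i = 0; i < n; i++) {
		printf("  op %2u at +%u msec\n",
		    (unsigned int)closed->cfc_op_list[i],
		    closed->cfc_op_time[i]);
	}
}
#endif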
3552
3553 static void
3554 fill_ip6_sockaddr_4_6(union sockaddr_in_4_6 *sin46,
3555 struct in6_addr *ip6, u_int16_t port, uint32_t ifscope)
3556 {
3557 if (sin46 == NULL) {
3558 return;
3559 }
3560
3561 struct sockaddr_in6 *sin6 = &sin46->sin6;
3562
3563 sin6->sin6_family = AF_INET6;
3564 sin6->sin6_len = sizeof(*sin6);
3565 sin6->sin6_port = port;
3566 sin6->sin6_addr = *ip6;
3567 if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) {
3568 sin6->sin6_scope_id = ifscope;
3569 if (in6_embedded_scope) {
3570 in6_verify_ifscope(&sin6->sin6_addr, sin6->sin6_scope_id);
3571 sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
3572 sin6->sin6_addr.s6_addr16[1] = 0;
3573 }
3574 }
3575 }
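
/*
 * fill_ip6_sockaddr_4_6() above un-embeds a KAME-style scope id: when
 * in6_embedded_scope is set, the scope lives (network byte order) in the
 * second 16-bit word of a link-local address. Minimal sketch of just the
 * arithmetic (illustrative, compiled out; s6_addr16 as used in this file):
 */
#if 0
static uint32_t
example_unembed_scope(struct in6_addr *a)
{
	/* read the embedded scope, then restore the canonical address */
	uint32_t scope = ntohs(a->s6_addr16[1]);
	a->s6_addr16[1] = 0;
	return scope;
}
#endif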
3576
3577 static void
3578 fill_ip_sockaddr_4_6(union sockaddr_in_4_6 *sin46,
3579 struct in_addr ip, u_int16_t port)
3580 {
3581 if (sin46 == NULL) {
3582 return;
3583 }
3584
3585 struct sockaddr_in *sin = &sin46->sin;
3586
3587 sin->sin_family = AF_INET;
3588 sin->sin_len = sizeof(*sin);
3589 sin->sin_port = port;
3590 sin->sin_addr.s_addr = ip.s_addr;
3591 }
3592
3593 static void
3594 cfil_get_flow_address_v6(struct soflow_hash_entry *entry, struct inpcb *inp,
3595 struct in6_addr **laddr, struct in6_addr **faddr,
3596 u_int16_t *lport, u_int16_t *fport)
3597 {
3598 if (entry != NULL) {
3599 *laddr = &entry->soflow_laddr.addr6;
3600 *faddr = &entry->soflow_faddr.addr6;
3601 *lport = entry->soflow_lport;
3602 *fport = entry->soflow_fport;
3603 } else {
3604 *laddr = &inp->in6p_laddr;
3605 *faddr = &inp->in6p_faddr;
3606 *lport = inp->inp_lport;
3607 *fport = inp->inp_fport;
3608 }
3609 }
3610
3611 static void
3612 cfil_get_flow_address(struct soflow_hash_entry *entry, struct inpcb *inp,
3613 struct in_addr *laddr, struct in_addr *faddr,
3614 u_int16_t *lport, u_int16_t *fport)
3615 {
3616 if (entry != NULL) {
3617 *laddr = entry->soflow_laddr.addr46.ia46_addr4;
3618 *faddr = entry->soflow_faddr.addr46.ia46_addr4;
3619 *lport = entry->soflow_lport;
3620 *fport = entry->soflow_fport;
3621 } else {
3622 *laddr = inp->inp_laddr;
3623 *faddr = inp->inp_faddr;
3624 *lport = inp->inp_lport;
3625 *fport = inp->inp_fport;
3626 }
3627 }
3628
3629 static int
3630 cfil_dispatch_data_event(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit, int outgoing,
3631 struct mbuf *data, unsigned int copyoffset, unsigned int copylen)
3632 {
3633 errno_t error = 0;
3634 struct mbuf *copy = NULL;
3635 struct mbuf *msg = NULL;
3636 unsigned int one = 1;
3637 struct cfil_msg_data_event *data_req;
3638 size_t hdrsize;
3639 struct inpcb *inp = (struct inpcb *)so->so_pcb;
3640 struct cfil_entry *entry;
3641 struct cfe_buf *entrybuf;
3642 struct content_filter *cfc;
3643 struct timeval tv;
3644 int inp_flags = 0;
3645
3646 cfil_rw_lock_shared(&cfil_lck_rw);
3647
3648 entry = &cfil_info->cfi_entries[kcunit - 1];
3649 if (outgoing) {
3650 entrybuf = &entry->cfe_snd;
3651 } else {
3652 entrybuf = &entry->cfe_rcv;
3653 }
3654
3655 cfc = entry->cfe_filter;
3656 if (cfc == NULL) {
3657 goto done;
3658 }
3659
3660 data = cfil_data_start(data);
3661 if (data == NULL) {
3662 CFIL_LOG(LOG_ERR, "No data start");
3663 goto done;
3664 }
3665
3666 CFIL_LOG(LOG_INFO, "so %llx kcunit %u outgoing %d",
3667 (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit, outgoing);
3668
3669 socket_lock_assert_owned(so);
3670
3671 /* Would be wasteful to try when flow controlled */
3672 if (cfc->cf_flags & CFF_FLOW_CONTROLLED) {
3673 error = ENOBUFS;
3674 goto done;
3675 }
3676
3677 /* Make a copy of the data to pass to kernel control socket */
3678 copy = m_copym_mode(data, copyoffset, copylen, M_DONTWAIT,
3679 M_COPYM_NOOP_HDR);
3680 if (copy == NULL) {
3681 CFIL_LOG(LOG_ERR, "m_copym_mode() failed");
3682 error = ENOMEM;
3683 goto done;
3684 }
3685
3686 /* We need an mbuf packet for the message header */
3687 hdrsize = sizeof(struct cfil_msg_data_event);
3688 error = mbuf_allocpacket(MBUF_DONTWAIT, hdrsize, &one, &msg);
3689 if (error != 0) {
3690 CFIL_LOG(LOG_ERR, "mbuf_allocpacket() failed");
3691 m_freem(copy);
3692 /*
3693 * Use ENOMEM here; ENOBUFS is reserved to indicate flow control
3694 */
3695 error = ENOMEM;
3696 goto done;
3697 }
3698 mbuf_setlen(msg, hdrsize);
3699 mbuf_pkthdr_setlen(msg, hdrsize + copylen);
3700 msg->m_next = copy;
3701 data_req = (struct cfil_msg_data_event *)mbuf_data(msg);
3702 bzero(data_req, hdrsize);
3703 data_req->cfd_msghdr.cfm_len = (uint32_t)hdrsize + copylen;
3704 data_req->cfd_msghdr.cfm_version = 1;
3705 data_req->cfd_msghdr.cfm_type = CFM_TYPE_EVENT;
3706 data_req->cfd_msghdr.cfm_op =
3707 outgoing ? CFM_OP_DATA_OUT : CFM_OP_DATA_IN;
3708 data_req->cfd_msghdr.cfm_sock_id =
3709 entry->cfe_cfil_info->cfi_sock_id;
3710 data_req->cfd_start_offset = entrybuf->cfe_peeked;
3711 data_req->cfd_end_offset = entrybuf->cfe_peeked + copylen;
3712
3713 data_req->cfd_flags = 0;
3714 if (OPTIONAL_IP_HEADER(so)) {
3715 /*
3716 * For non-UDP/TCP traffic, indicate to filters whether the
3717 * optional IP header is present:
3718 * outgoing - indicated according to the INP_HDRINCL flag
3719 * incoming - for IPv4 only, stripping of the IP header is
3720 * optional, but CFIL delays the stripping done
3721 * at rip_input, so CFIL always sees IP frames.
3722 * The IP header will be stripped according to
3723 * the INP_STRIPHDR flag later, at reinjection.
3724 */
3725 if ((!outgoing && !IS_INP_V6(inp)) ||
3726 (outgoing && cfil_dgram_peek_socket_state(data, &inp_flags) && (inp_flags & INP_HDRINCL))) {
3727 data_req->cfd_flags |= CFD_DATA_FLAG_IP_HEADER;
3728 }
3729 }
3730
3731 /*
3732 * Copy address/port into event msg.
3733 * For non-connected sockets, the addresses must be copied from
3734 * the passed-in parameters
3735 */
3736 cfil_fill_event_msg_addresses(cfil_info->cfi_hash_entry, inp,
3737 &data_req->cfc_src, &data_req->cfc_dst,
3738 !IS_INP_V6(inp), outgoing);
3739
3740 if (cfil_info->cfi_debug && cfil_log_data) {
3741 cfil_info_log(LOG_DEBUG, cfil_info, "CFIL: SENDING DATA UP");
3742 }
3743
3744 if (cfil_info->cfi_isSignatureLatest == false) {
3745 cfil_dispatch_data_event_sign(entry->cfe_filter->cf_crypto_state, so, cfil_info, data_req);
3746 }
3747
3748 microuptime(&tv);
3749 CFI_ADD_TIME_LOG(cfil_info, &tv, &cfil_info->cfi_first_event, data_req->cfd_msghdr.cfm_op);
3750
3751 /* Pass the message to the content filter */
3752 error = ctl_enqueuembuf(entry->cfe_filter->cf_kcref,
3753 entry->cfe_filter->cf_kcunit,
3754 msg, CTL_DATA_EOR);
3755 if (error != 0) {
3756 CFIL_LOG(LOG_ERR, "ctl_enqueuembuf() failed: %d", error);
3757 mbuf_freem(msg);
3758 goto done;
3759 }
3760 entry->cfe_flags &= ~CFEF_FLOW_CONTROLLED;
3761 OSIncrementAtomic(&cfil_stats.cfs_data_event_ok);
3762
3763 if (cfil_info->cfi_debug && cfil_log_data) {
3764 CFIL_LOG(LOG_DEBUG, "CFIL: VERDICT ACTION: so %llx sockID %llu outgoing %d: mbuf %llx copyoffset %u copylen %u (%s)",
3765 (uint64_t)VM_KERNEL_ADDRPERM(so), cfil_info->cfi_sock_id, outgoing, (uint64_t)VM_KERNEL_ADDRPERM(data), copyoffset, copylen,
3766 data_req->cfd_flags & CFD_DATA_FLAG_IP_HEADER ? "IP HDR" : "NO IP HDR");
3767 }
3768
3769 done:
3770 if (error == ENOBUFS) {
3771 entry->cfe_flags |= CFEF_FLOW_CONTROLLED;
3772 OSIncrementAtomic(
3773 &cfil_stats.cfs_data_event_flow_control);
3774
3775 if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) {
3776 cfil_rw_lock_exclusive(&cfil_lck_rw);
3777 }
3778
3779 cfc->cf_flags |= CFF_FLOW_CONTROLLED;
3780
3781 cfil_rw_unlock_exclusive(&cfil_lck_rw);
3782 } else {
3783 if (error != 0) {
3784 OSIncrementAtomic(&cfil_stats.cfs_data_event_fail);
3785 }
3786
3787 cfil_rw_unlock_shared(&cfil_lck_rw);
3788 }
3789 return error;
3790 }
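
/*
 * Reply side sketch (illustrative, user space, compiled out): after
 * inspecting a data event, the agent advances the flow with a
 * CFM_OP_DATA_UPDATE action. The cfil_msg_action field names are assumed
 * from the subsystem's header; here everything seen so far is passed and a
 * peek at the next 4 KB is requested.
 */
#if 0
#include <string.h>
#include <sys/socket.h>

static void
example_reply_data_update(int kctl_fd,
    const struct cfil_msg_data_event *ev, int outgoing)
{
	struct cfil_msg_action act;

	memset(&act, 0, sizeof(act));
	act.cfa_msghdr.cfm_len = sizeof(act);
	act.cfa_msghdr.cfm_version = CFM_VERSION_CURRENT;
	act.cfa_msghdr.cfm_type = CFM_TYPE_ACTION;
	act.cfa_msghdr.cfm_op = CFM_OP_DATA_UPDATE;
	act.cfa_msghdr.cfm_sock_id = ev->cfd_msghdr.cfm_sock_id;
	if (outgoing) {
		act.cfa_out_pass_offset = ev->cfd_end_offset;
		act.cfa_out_peek_offset = ev->cfd_end_offset + 4096;
	} else {
		act.cfa_in_pass_offset = ev->cfd_end_offset;
		act.cfa_in_peek_offset = ev->cfd_end_offset + 4096;
	}
	(void)send(kctl_fd, &act, sizeof(act), 0);
}
#endif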
3791
3792 /*
3793 * Process the queue of data waiting to be delivered to content filter
3794 */
3795 static int
3796 cfil_data_service_ctl_q(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit, int outgoing)
3797 {
3798 errno_t error = 0;
3799 struct mbuf *data, *tmp = NULL;
3800 unsigned int datalen = 0, copylen = 0, copyoffset = 0;
3801 struct cfil_entry *entry;
3802 struct cfe_buf *entrybuf;
3803 uint64_t currentoffset = 0;
3804
3805 if (cfil_info == NULL) {
3806 return 0;
3807 }
3808
3809 CFIL_LOG(LOG_INFO, "so %llx kcunit %u outgoing %d",
3810 (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit, outgoing);
3811
3812 socket_lock_assert_owned(so);
3813
3814 entry = &cfil_info->cfi_entries[kcunit - 1];
3815 if (outgoing) {
3816 entrybuf = &entry->cfe_snd;
3817 } else {
3818 entrybuf = &entry->cfe_rcv;
3819 }
3820
3821 /* Send attached message if not yet done */
3822 if ((entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED) == 0) {
3823 error = cfil_dispatch_attach_event(so, cfil_info, CFI_ENTRY_KCUNIT(cfil_info, entry),
3824 cfil_info->cfi_dir);
3825 if (error != 0) {
3826 /* We can recover from flow control */
3827 if (error == ENOBUFS || error == ENOMEM) {
3828 error = 0;
3829 }
3830 goto done;
3831 }
3832 } else if ((entry->cfe_flags & CFEF_DATA_START) == 0) {
3833 OSIncrementAtomic(&cfil_stats.cfs_ctl_q_not_started);
3834 goto done;
3835 }
3836
3837 if (cfil_info->cfi_debug && cfil_log_data) {
3838 CFIL_LOG(LOG_DEBUG, "CFIL: SERVICE CTL-Q: pass_offset %llu peeked %llu peek_offset %llu",
3839 entrybuf->cfe_pass_offset,
3840 entrybuf->cfe_peeked,
3841 entrybuf->cfe_peek_offset);
3842 }
3843
3844 /* Move all data that can pass */
3845 while ((data = cfil_queue_first(&entrybuf->cfe_ctl_q)) != NULL &&
3846 entrybuf->cfe_ctl_q.q_start < entrybuf->cfe_pass_offset) {
3847 datalen = cfil_data_length(data, NULL, NULL);
3848 tmp = data;
3849
3850 if (entrybuf->cfe_ctl_q.q_start + datalen <=
3851 entrybuf->cfe_pass_offset) {
3852 /*
3853 * The first mbuf can fully pass
3854 */
3855 copylen = datalen;
3856 } else {
3857 /*
3858 * The first mbuf can partially pass
3859 */
3860 copylen = (unsigned int)(entrybuf->cfe_pass_offset - entrybuf->cfe_ctl_q.q_start);
3861 }
3862 VERIFY(copylen <= datalen);
3863
3864 if (cfil_info->cfi_debug && cfil_log_data) {
3865 CFIL_LOG(LOG_DEBUG,
3866 "CFIL: SERVICE CTL-Q PASSING: %llx first %llu peeked %llu pass %llu peek %llu"
3867 "datalen %u copylen %u",
3868 (uint64_t)VM_KERNEL_ADDRPERM(tmp),
3869 entrybuf->cfe_ctl_q.q_start,
3870 entrybuf->cfe_peeked,
3871 entrybuf->cfe_pass_offset,
3872 entrybuf->cfe_peek_offset,
3873 datalen, copylen);
3874 }
3875
3876 /*
3877 * Data that passes has been peeked at explicitly or
3878 * implicitly
3879 */
3880 if (entrybuf->cfe_ctl_q.q_start + copylen >
3881 entrybuf->cfe_peeked) {
3882 entrybuf->cfe_peeked =
3883 entrybuf->cfe_ctl_q.q_start + copylen;
3884 }
3885 /*
3886 * Stop on partial pass
3887 */
3888 if (copylen < datalen) {
3889 break;
3890 }
3891
3892 /* All good, move full data from ctl queue to pending queue */
3893 cfil_queue_remove(&entrybuf->cfe_ctl_q, data, datalen);
3894
3895 cfil_queue_enqueue(&entrybuf->cfe_pending_q, data, datalen);
3896 if (outgoing) {
3897 OSAddAtomic64(datalen,
3898 &cfil_stats.cfs_pending_q_out_enqueued);
3899 } else {
3900 OSAddAtomic64(datalen,
3901 &cfil_stats.cfs_pending_q_in_enqueued);
3902 }
3903 }
3904 CFIL_INFO_VERIFY(cfil_info);
3905 if (tmp != NULL) {
3906 CFIL_LOG(LOG_DEBUG,
3907 "%llx first %llu peeked %llu pass %llu peek %llu"
3908 "datalen %u copylen %u",
3909 (uint64_t)VM_KERNEL_ADDRPERM(tmp),
3910 entrybuf->cfe_ctl_q.q_start,
3911 entrybuf->cfe_peeked,
3912 entrybuf->cfe_pass_offset,
3913 entrybuf->cfe_peek_offset,
3914 datalen, copylen);
3915 }
3916 tmp = NULL;
3917
3918 /* Now deal with remaining data the filter wants to peek at */
3919 for (data = cfil_queue_first(&entrybuf->cfe_ctl_q),
3920 currentoffset = entrybuf->cfe_ctl_q.q_start;
3921 data != NULL && currentoffset < entrybuf->cfe_peek_offset;
3922 data = cfil_queue_next(&entrybuf->cfe_ctl_q, data),
3923 currentoffset += datalen) {
3924 datalen = cfil_data_length(data, NULL, NULL);
3925 tmp = data;
3926
3927 /* We've already peeked at this mbuf */
3928 if (currentoffset + datalen <= entrybuf->cfe_peeked) {
3929 continue;
3930 }
3931 /*
3932 * The data in the first mbuf may have been
3933 * partially peeked at
3934 */
3935 copyoffset = (unsigned int)(entrybuf->cfe_peeked - currentoffset);
3936 VERIFY(copyoffset < datalen);
3937 copylen = datalen - copyoffset;
3938 VERIFY(copylen <= datalen);
3939 /*
3940 * Do not copy more than needed
3941 */
3942 if (currentoffset + copyoffset + copylen >
3943 entrybuf->cfe_peek_offset) {
3944 copylen = (unsigned int)(entrybuf->cfe_peek_offset -
3945 (currentoffset + copyoffset));
3946 }
3947
3948 if (cfil_info->cfi_debug && cfil_log_data) {
3949 CFIL_LOG(LOG_DEBUG,
3950 "CFIL: SERVICE CTL-Q PEEKING: %llx current %llu peeked %llu pass %llu peek %llu "
3951 "datalen %u copylen %u copyoffset %u",
3952 (uint64_t)VM_KERNEL_ADDRPERM(tmp),
3953 currentoffset,
3954 entrybuf->cfe_peeked,
3955 entrybuf->cfe_pass_offset,
3956 entrybuf->cfe_peek_offset,
3957 datalen, copylen, copyoffset);
3958 }
3959
3960 /*
3961 * Stop if there is nothing more to peek at
3962 */
3963 if (copylen == 0) {
3964 break;
3965 }
3966 /*
3967 * Let the filter get a peek at this span of data
3968 */
3969 error = cfil_dispatch_data_event(so, cfil_info, kcunit,
3970 outgoing, data, copyoffset, copylen);
3971 if (error != 0) {
3972 /* On error, leave data in ctl_q */
3973 break;
3974 }
3975 entrybuf->cfe_peeked += copylen;
3976 if (outgoing) {
3977 OSAddAtomic64(copylen,
3978 &cfil_stats.cfs_ctl_q_out_peeked);
3979 } else {
3980 OSAddAtomic64(copylen,
3981 &cfil_stats.cfs_ctl_q_in_peeked);
3982 }
3983
3984 /* Stop when data could not be fully peeked at */
3985 if (copylen + copyoffset < datalen) {
3986 break;
3987 }
3988 }
3989 CFIL_INFO_VERIFY(cfil_info);
3990 if (tmp != NULL) {
3991 CFIL_LOG(LOG_DEBUG,
3992 "%llx first %llu peeked %llu pass %llu peek %llu"
3993 "datalen %u copylen %u copyoffset %u",
3994 (uint64_t)VM_KERNEL_ADDRPERM(tmp),
3995 currentoffset,
3996 entrybuf->cfe_peeked,
3997 entrybuf->cfe_pass_offset,
3998 entrybuf->cfe_peek_offset,
3999 datalen, copylen, copyoffset);
4000 }
4001
4002 /*
4003 * Process data that has passed the filter
4004 */
4005 error = cfil_service_pending_queue(so, cfil_info, kcunit, outgoing);
4006 if (error != 0) {
4007 CFIL_LOG(LOG_ERR, "cfil_service_pending_queue() error %d",
4008 error);
4009 goto done;
4010 }
4011
4012 /*
4013 * Dispatch disconnect events that could not be sent
4014 */
4015 if (cfil_info == NULL) {
4016 goto done;
4017 } else if (outgoing) {
4018 if ((cfil_info->cfi_flags & CFIF_SHUT_WR) &&
4019 !(entry->cfe_flags & CFEF_SENT_DISCONNECT_OUT)) {
4020 cfil_dispatch_disconnect_event(so, cfil_info, kcunit, 1);
4021 }
4022 } else {
4023 if ((cfil_info->cfi_flags & CFIF_SHUT_RD) &&
4024 !(entry->cfe_flags & CFEF_SENT_DISCONNECT_IN)) {
4025 cfil_dispatch_disconnect_event(so, cfil_info, kcunit, 0);
4026 }
4027 }
4028
4029 done:
4030 CFIL_LOG(LOG_DEBUG,
4031 "first %llu peeked %llu pass %llu peek %llu",
4032 entrybuf->cfe_ctl_q.q_start,
4033 entrybuf->cfe_peeked,
4034 entrybuf->cfe_pass_offset,
4035 entrybuf->cfe_peek_offset);
4036
4037 CFIL_INFO_VERIFY(cfil_info);
4038 return error;
4039 }
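
/*
 * Worked example of the ctl queue arithmetic above (illustrative, compiled
 * out, plain integers): one 100-byte mbuf at q_start 0, with the filter
 * having granted pass_offset 60 and peek_offset 80.
 */
#if 0
static void
example_ctl_q_arithmetic(void)
{
	uint64_t q_start = 0, peeked = 0;
	uint64_t pass_offset = 60, peek_offset = 80;
	unsigned int datalen = 100, copylen, copyoffset;

	/* Pass loop: 0 + 100 > 60, so only a partial pass; the mbuf stays
	 * on the ctl queue and peeked advances to the pass mark. */
	copylen = (unsigned int)(pass_offset - q_start);	/* 60 */
	if (q_start + copylen > peeked) {
		peeked = q_start + copylen;			/* 60 */
	}

	/* Peek loop: offer the filter the span [peeked, peek_offset). */
	copyoffset = (unsigned int)(peeked - q_start);		/* 60 */
	copylen = datalen - copyoffset;				/* 40 */
	if (q_start + copyoffset + copylen > peek_offset) {
		copylen = (unsigned int)(peek_offset -
		    (q_start + copyoffset));			/* 20 */
	}
	/* a data event for bytes [60, 80) would be dispatched here */
}
#endif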
4040
4041 /*
4042 * cfil_data_filter()
4043 *
4044 * Process data for a content filter installed on a socket
4045 */
4046 int
4047 cfil_data_filter(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit, int outgoing,
4048 struct mbuf *data, uint32_t datalen)
4049 {
4050 errno_t error = 0;
4051 struct cfil_entry *entry;
4052 struct cfe_buf *entrybuf;
4053
4054 CFIL_LOG(LOG_INFO, "so %llx kcunit %u outgoing %d",
4055 (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit, outgoing);
4056
4057 socket_lock_assert_owned(so);
4058
4059 entry = &cfil_info->cfi_entries[kcunit - 1];
4060 if (outgoing) {
4061 entrybuf = &entry->cfe_snd;
4062 } else {
4063 entrybuf = &entry->cfe_rcv;
4064 }
4065
4066 /* Are we attached to the filter? */
4067 if (entry->cfe_filter == NULL) {
4068 error = 0;
4069 goto done;
4070 }
4071
4072 /* Dispatch to filters */
4073 cfil_queue_enqueue(&entrybuf->cfe_ctl_q, data, datalen);
4074 if (outgoing) {
4075 OSAddAtomic64(datalen,
4076 &cfil_stats.cfs_ctl_q_out_enqueued);
4077 } else {
4078 OSAddAtomic64(datalen,
4079 &cfil_stats.cfs_ctl_q_in_enqueued);
4080 }
4081
4082 error = cfil_data_service_ctl_q(so, cfil_info, kcunit, outgoing);
4083 if (error != 0) {
4084 CFIL_LOG(LOG_ERR, "cfil_data_service_ctl_q() error %d",
4085 error);
4086 }
4087 /*
4088 * We have to return EJUSTRETURN in all cases to avoid a double
4089 * free by the socket layer
4090 */
4091 error = EJUSTRETURN;
4092 done:
4093 CFIL_INFO_VERIFY(cfil_info);
4094
4095 CFIL_LOG(LOG_INFO, "return %d", error);
4096 return error;
4097 }
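
/*
 * Hypothetical caller pattern (illustrative, compiled out): EJUSTRETURN
 * from the filter path tells the socket layer that CFIL now owns the
 * mbufs, so they must be neither freed nor appended again.
 */
#if 0
static int
example_consume_filter_error(int error, struct mbuf **data)
{
	if (error == EJUSTRETURN) {
		*data = NULL;	/* ownership moved to the ctl queue */
		return 0;	/* not a failure */
	}
	return error;		/* real error: caller keeps ownership */
}
#endif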
4098
4099 /*
4100 * cfil_service_inject_queue() re-inject data that passed the
4101 * content filters
4102 */
4103 static int
4104 cfil_service_inject_queue(struct socket *so, struct cfil_info *cfil_info, int outgoing)
4105 {
4106 mbuf_t data;
4107 unsigned int datalen;
4108 int mbcnt = 0;
4109 int mbnum = 0;
4110 errno_t error = 0;
4111 struct cfi_buf *cfi_buf;
4112 struct cfil_queue *inject_q;
4113 int need_rwakeup = 0;
4114 int count = 0;
4115 struct inpcb *inp = NULL;
4116 struct ip *ip = NULL;
4117 unsigned int hlen;
4118
4119 if (cfil_info == NULL) {
4120 return 0;
4121 }
4122
4123 socket_lock_assert_owned(so);
4124
4125 if (so->so_state & SS_DEFUNCT) {
4126 return 0;
4127 }
4128
4129 if (outgoing) {
4130 cfi_buf = &cfil_info->cfi_snd;
4131 cfil_info->cfi_flags &= ~CFIF_RETRY_INJECT_OUT;
4132 } else {
4133 cfi_buf = &cfil_info->cfi_rcv;
4134 cfil_info->cfi_flags &= ~CFIF_RETRY_INJECT_IN;
4135 }
4136 inject_q = &cfi_buf->cfi_inject_q;
4137
4138 if (cfil_queue_empty(inject_q)) {
4139 return 0;
4140 }
4141
4142 if (cfil_info->cfi_debug && cfil_log_data) {
4143 CFIL_LOG(LOG_DEBUG, "CFIL: SERVICE INJECT-Q: <so %llx> outgoing %d queue len %llu",
4144 (uint64_t)VM_KERNEL_ADDRPERM(so), outgoing, cfil_queue_len(inject_q));
4145 }
4146
4147 while ((data = cfil_queue_first(inject_q)) != NULL) {
4148 datalen = cfil_data_length(data, &mbcnt, &mbnum);
4149
4150 if (cfil_info->cfi_debug && cfil_log_data) {
4151 CFIL_LOG(LOG_DEBUG, "CFIL: SERVICE INJECT-Q: <so %llx> data %llx datalen %u (mbcnt %u)",
4152 (uint64_t)VM_KERNEL_ADDRPERM(so), (uint64_t)VM_KERNEL_ADDRPERM(data), datalen, mbcnt);
4153 }
4154
4155 /* Remove data from queue and adjust stats */
4156 cfil_queue_remove(inject_q, data, datalen);
4157 cfi_buf->cfi_pending_first += datalen;
4158 cfi_buf->cfi_pending_mbcnt -= mbcnt;
4159 cfi_buf->cfi_pending_mbnum -= mbnum;
4160 cfil_info_buf_verify(cfi_buf);
4161
4162 if (outgoing) {
4163 error = sosend_reinject(so, NULL, data, NULL, 0);
4164 if (error != 0) {
4165 cfil_info_log(LOG_ERR, cfil_info, "CFIL: Error: sosend_reinject() failed");
4166 CFIL_LOG(LOG_ERR, "CFIL: sosend() failed %d", error);
4167 break;
4168 }
4169 // At least one injection succeeded, need to wake up pending threads.
4170 need_rwakeup = 1;
4171 } else {
4172 data->m_flags |= M_SKIPCFIL;
4173
4174 /*
4175 * NOTE: We currently only support TCP, UDP, ICMP,
4176 * ICMPv6 and RAWIP. For MPTCP and message TCP we'll
4177 * need to call the appropriate sbappendxxx()
4178 * or fix sock_inject_data_in()
4179 */
4180 if (NEED_DGRAM_FLOW_TRACKING(so)) {
4181 if (OPTIONAL_IP_HEADER(so)) {
4182 inp = sotoinpcb(so);
4183 if (inp && (inp->inp_flags & INP_STRIPHDR)) {
4184 mbuf_t data_start = cfil_data_start(data);
4185 if (data_start != NULL && (data_start->m_flags & M_PKTHDR)) {
4186 ip = mtod(data_start, struct ip *);
4187 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
4188 data_start->m_len -= hlen;
4189 data_start->m_pkthdr.len -= hlen;
4190 data_start->m_data += hlen;
4191 }
4192 }
4193 }
4194
4195 if (sbappendchain(&so->so_rcv, data, 0)) {
4196 need_rwakeup = 1;
4197 }
4198 } else {
4199 if (sbappendstream(&so->so_rcv, data)) {
4200 need_rwakeup = 1;
4201 }
4202 }
4203 }
4204
4205 if (outgoing) {
4206 OSAddAtomic64(datalen,
4207 &cfil_stats.cfs_inject_q_out_passed);
4208 } else {
4209 OSAddAtomic64(datalen,
4210 &cfil_stats.cfs_inject_q_in_passed);
4211 }
4212
4213 count++;
4214 }
4215
4216 if (cfil_info->cfi_debug && cfil_log_data) {
4217 CFIL_LOG(LOG_DEBUG, "CFIL: SERVICE INJECT-Q: <so %llx> injected %d",
4218 (uint64_t)VM_KERNEL_ADDRPERM(so), count);
4219 }
4220
4221 /* A single wakeup for several packets is more efficient */
4222 if (need_rwakeup) {
4223 if (outgoing == TRUE) {
4224 sowwakeup(so);
4225 } else {
4226 sorwakeup(so);
4227 }
4228 }
4229
4230 if (error != 0 && cfil_info) {
4231 if (error == ENOBUFS) {
4232 OSIncrementAtomic(&cfil_stats.cfs_inject_q_nobufs);
4233 }
4234 if (error == ENOMEM) {
4235 OSIncrementAtomic(&cfil_stats.cfs_inject_q_nomem);
4236 }
4237
4238 if (outgoing) {
4239 cfil_info->cfi_flags |= CFIF_RETRY_INJECT_OUT;
4240 OSIncrementAtomic(&cfil_stats.cfs_inject_q_out_fail);
4241 } else {
4242 cfil_info->cfi_flags |= CFIF_RETRY_INJECT_IN;
4243 OSIncrementAtomic(&cfil_stats.cfs_inject_q_in_fail);
4244 }
4245 }
4246
4247 /*
4248 * Notify
4249 */
4250 if (cfil_info && (cfil_info->cfi_flags & CFIF_SHUT_WR)) {
4251 cfil_sock_notify_shutdown(so, SHUT_WR);
4252 if (cfil_sock_data_pending(&so->so_snd) == 0) {
4253 soshutdownlock_final(so, SHUT_WR);
4254 }
4255 }
4256 if (cfil_info && (cfil_info->cfi_flags & CFIF_CLOSE_WAIT)) {
4257 if (cfil_filters_attached(so) == 0) {
4258 CFIL_LOG(LOG_INFO, "so %llx waking",
4259 (uint64_t)VM_KERNEL_ADDRPERM(so));
4260 wakeup((caddr_t)cfil_info);
4261 }
4262 }
4263
4264 CFIL_INFO_VERIFY(cfil_info);
4265
4266 return error;
4267 }
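
/*
 * The header-stripping arithmetic above turns the low nibble of ip_vhl
 * (header length in 32-bit words) into bytes. Minimal sketch (illustrative,
 * compiled out):
 */
#if 0
static unsigned int
example_ip_header_bytes(uint8_t ip_vhl)
{
	return (unsigned int)(ip_vhl & 0x0f) << 2;
}
/* example_ip_header_bytes(0x45) == 20, a no-options IPv4 header */
#endif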
4268
4269 static int
4270 cfil_service_pending_queue(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit, int outgoing)
4271 {
4272 uint64_t passlen, curlen;
4273 mbuf_t data;
4274 unsigned int datalen;
4275 errno_t error = 0;
4276 struct cfil_entry *entry;
4277 struct cfe_buf *entrybuf;
4278 struct cfil_queue *pending_q;
4279 struct cfil_entry *iter_entry = NULL;
4280
4281 CFIL_LOG(LOG_INFO, "so %llx kcunit %u outgoing %d",
4282 (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit, outgoing);
4283
4284 socket_lock_assert_owned(so);
4285
4286 entry = &cfil_info->cfi_entries[kcunit - 1];
4287 if (outgoing) {
4288 entrybuf = &entry->cfe_snd;
4289 } else {
4290 entrybuf = &entry->cfe_rcv;
4291 }
4292
4293 pending_q = &entrybuf->cfe_pending_q;
4294
4295 passlen = entrybuf->cfe_pass_offset - pending_q->q_start;
4296
4297 if (cfil_queue_empty(pending_q)) {
4298 for (iter_entry = SLIST_NEXT(entry, cfe_order_link);
4299 iter_entry != NULL;
4300 iter_entry = SLIST_NEXT(iter_entry, cfe_order_link)) {
4301 error = cfil_data_service_ctl_q(so, cfil_info, CFI_ENTRY_KCUNIT(cfil_info, iter_entry), outgoing);
4302 /* 0 means passed so we can continue */
4303 if (error != 0) {
4304 break;
4305 }
4306 }
4307 goto done;
4308 }
4309
4310 /*
4311 * Locate the chunks of data that we can pass to the next filter
4312 * A data chunk must be on mbuf boundaries
4313 */
4314 curlen = 0;
4315 while ((data = cfil_queue_first(pending_q)) != NULL) {
4316 datalen = cfil_data_length(data, NULL, NULL);
4317
4318 if (cfil_info->cfi_debug && cfil_log_data) {
4319 CFIL_LOG(LOG_DEBUG,
4320 "CFIL: SERVICE PENDING-Q: data %llx datalen %u passlen %llu curlen %llu",
4321 (uint64_t)VM_KERNEL_ADDRPERM(data), datalen,
4322 passlen, curlen);
4323 }
4324
4325 if (curlen + datalen > passlen) {
4326 break;
4327 }
4328
4329 cfil_queue_remove(pending_q, data, datalen);
4330
4331 curlen += datalen;
4332
4333 for (iter_entry = SLIST_NEXT(entry, cfe_order_link);
4334 iter_entry != NULL;
4335 iter_entry = SLIST_NEXT(iter_entry, cfe_order_link)) {
4336 error = cfil_data_filter(so, cfil_info, CFI_ENTRY_KCUNIT(cfil_info, iter_entry), outgoing,
4337 data, datalen);
4338 /* 0 means passed so we can continue */
4339 if (error != 0) {
4340 break;
4341 }
4342 }
4343 /* When data has passed all filters, re-inject */
4344 if (error == 0) {
4345 if (outgoing) {
4346 cfil_queue_enqueue(
4347 &cfil_info->cfi_snd.cfi_inject_q,
4348 data, datalen);
4349 OSAddAtomic64(datalen,
4350 &cfil_stats.cfs_inject_q_out_enqueued);
4351 } else {
4352 cfil_queue_enqueue(
4353 &cfil_info->cfi_rcv.cfi_inject_q,
4354 data, datalen);
4355 OSAddAtomic64(datalen,
4356 &cfil_stats.cfs_inject_q_in_enqueued);
4357 }
4358 }
4359 }
4360
4361 done:
4362 CFIL_INFO_VERIFY(cfil_info);
4363
4364 return error;
4365 }
4366
4367 int
4368 cfil_update_data_offsets(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit, int outgoing,
4369 uint64_t pass_offset, uint64_t peek_offset)
4370 {
4371 errno_t error = 0;
4372 struct cfil_entry *entry = NULL;
4373 struct cfe_buf *entrybuf;
4374 int updated = 0;
4375
4376 CFIL_LOG(LOG_INFO, "pass %llu peek %llu", pass_offset, peek_offset);
4377
4378 socket_lock_assert_owned(so);
4379
4380 if (cfil_info == NULL) {
4381 CFIL_LOG(LOG_ERR, "so %llx cfil detached",
4382 (uint64_t)VM_KERNEL_ADDRPERM(so));
4383 error = 0;
4384 goto done;
4385 } else if (cfil_info->cfi_flags & CFIF_DROP) {
4386 CFIL_LOG(LOG_ERR, "so %llx drop set",
4387 (uint64_t)VM_KERNEL_ADDRPERM(so));
4388 error = EPIPE;
4389 goto done;
4390 }
4391
4392 entry = &cfil_info->cfi_entries[kcunit - 1];
4393 if (outgoing) {
4394 entrybuf = &entry->cfe_snd;
4395 } else {
4396 entrybuf = &entry->cfe_rcv;
4397 }
4398
4399 /* Record updated offsets for this content filter */
4400 if (pass_offset > entrybuf->cfe_pass_offset) {
4401 entrybuf->cfe_pass_offset = pass_offset;
4402
4403 if (entrybuf->cfe_peek_offset < entrybuf->cfe_pass_offset) {
4404 entrybuf->cfe_peek_offset = entrybuf->cfe_pass_offset;
4405 }
4406 updated = 1;
4407 } else {
4408 CFIL_LOG(LOG_INFO, "pass_offset %llu <= cfe_pass_offset %llu",
4409 pass_offset, entrybuf->cfe_pass_offset);
4410 }
4411 /* Filter does not want or need to see data that's allowed to pass */
4412 if (peek_offset > entrybuf->cfe_pass_offset &&
4413 peek_offset > entrybuf->cfe_peek_offset) {
4414 entrybuf->cfe_peek_offset = peek_offset;
4415 updated = 1;
4416 }
4417 /* Nothing to do */
4418 if (updated == 0) {
4419 goto done;
4420 }
4421
4422 /* Move data held in control queue to pending queue if needed */
4423 error = cfil_data_service_ctl_q(so, cfil_info, kcunit, outgoing);
4424 if (error != 0) {
4425 CFIL_LOG(LOG_ERR, "cfil_data_service_ctl_q() error %d",
4426 error);
4427 goto done;
4428 }
4429 error = EJUSTRETURN;
4430
4431 done:
4432 /*
4433 * The filter is effectively detached when it has passed all data
4434 * from both sides, or when the socket is closed and no more data
4435 * is waiting to be delivered to the filter
4436 */
4437 if (entry != NULL &&
4438 ((entry->cfe_snd.cfe_pass_offset == CFM_MAX_OFFSET &&
4439 entry->cfe_rcv.cfe_pass_offset == CFM_MAX_OFFSET) ||
4440 ((cfil_info->cfi_flags & CFIF_CLOSE_WAIT) &&
4441 cfil_queue_empty(&entry->cfe_snd.cfe_ctl_q) &&
4442 cfil_queue_empty(&entry->cfe_rcv.cfe_ctl_q)))) {
4443 entry->cfe_flags |= CFEF_CFIL_DETACHED;
4444
4445 if (cfil_info->cfi_debug) {
4446 cfil_info_log(LOG_INFO, cfil_info, outgoing ?
4447 "CFIL: OUT - PASSED ALL - DETACH":
4448 "CFIL: IN - PASSED ALL - DETACH");
4449 }
4450
4451 CFIL_LOG(LOG_INFO, "so %llx detached %u",
4452 (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit);
4453 if ((cfil_info->cfi_flags & CFIF_CLOSE_WAIT) &&
4454 cfil_filters_attached(so) == 0) {
4455 if (cfil_info->cfi_debug) {
4456 cfil_info_log(LOG_INFO, cfil_info, "CFIL: WAKING");
4457 }
4458 CFIL_LOG(LOG_INFO, "so %llx waking",
4459 (uint64_t)VM_KERNEL_ADDRPERM(so));
4460 wakeup((caddr_t)cfil_info);
4461 }
4462 }
4463 CFIL_INFO_VERIFY(cfil_info);
4464 CFIL_LOG(LOG_INFO, "return %d", error);
4465 return error;
4466 }
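
/*
 * Agent side sketch (illustrative, user space, compiled out): a final
 * "allow" verdict passes CFM_MAX_OFFSET in both directions, which
 * satisfies the detach condition in cfil_update_data_offsets() above.
 * The cfil_msg_action field names are assumed from the subsystem's header.
 */
#if 0
#include <string.h>
#include <sys/socket.h>

static void
example_send_pass_all(int kctl_fd, uint64_t sock_id)
{
	struct cfil_msg_action act;

	memset(&act, 0, sizeof(act));
	act.cfa_msghdr.cfm_len = sizeof(act);
	act.cfa_msghdr.cfm_version = CFM_VERSION_CURRENT;
	act.cfa_msghdr.cfm_type = CFM_TYPE_ACTION;
	act.cfa_msghdr.cfm_op = CFM_OP_DATA_UPDATE;
	act.cfa_msghdr.cfm_sock_id = sock_id;
	act.cfa_out_pass_offset = act.cfa_out_peek_offset = CFM_MAX_OFFSET;
	act.cfa_in_pass_offset = act.cfa_in_peek_offset = CFM_MAX_OFFSET;
	(void)send(kctl_fd, &act, sizeof(act), 0);
}
#endif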
4467
4468 /*
4469 * Update pass offset for socket when no data is pending
4470 */
4471 static int
4472 cfil_set_socket_pass_offset(struct socket *so, struct cfil_info *cfil_info, int outgoing)
4473 {
4474 struct cfi_buf *cfi_buf;
4475 struct cfil_entry *entry;
4476 struct cfe_buf *entrybuf;
4477 uint32_t kcunit;
4478 uint64_t pass_offset = 0;
4479 boolean_t first = true;
4480
4481 if (cfil_info == NULL) {
4482 return 0;
4483 }
4484
4485 if (cfil_info->cfi_debug && cfil_log_data) {
4486 CFIL_LOG(LOG_DEBUG, "so %llx outgoing %d",
4487 (uint64_t)VM_KERNEL_ADDRPERM(so), outgoing);
4488 }
4489
4490 socket_lock_assert_owned(so);
4491
4492 if (outgoing) {
4493 cfi_buf = &cfil_info->cfi_snd;
4494 } else {
4495 cfi_buf = &cfil_info->cfi_rcv;
4496 }
4497
4498 if (cfil_info->cfi_debug && cfil_log_data) {
4499 CFIL_LOG(LOG_DEBUG, "CFIL: <so %llx, sockID %llu> outgoing %d cfi_pending_first %llu cfi_pending_last %llu",
4500 (uint64_t)VM_KERNEL_ADDRPERM(so), cfil_info->cfi_sock_id, outgoing,
4501 cfi_buf->cfi_pending_first, cfi_buf->cfi_pending_last);
4502 }
4503
4504 if (cfi_buf->cfi_pending_last - cfi_buf->cfi_pending_first == 0) {
4505 for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
4506 entry = &cfil_info->cfi_entries[kcunit - 1];
4507
4508 /* Are we attached to a filter? */
4509 if (entry->cfe_filter == NULL) {
4510 continue;
4511 }
4512
4513 if (outgoing) {
4514 entrybuf = &entry->cfe_snd;
4515 } else {
4516 entrybuf = &entry->cfe_rcv;
4517 }
4518
4519 // Keep track of the smallest pass_offset among filters.
4520 if (first == true ||
4521 entrybuf->cfe_pass_offset < pass_offset) {
4522 pass_offset = entrybuf->cfe_pass_offset;
4523 first = false;
4524 }
4525 }
4526 cfi_buf->cfi_pass_offset = pass_offset;
4527 }
4528
4529 if (cfil_info->cfi_debug && cfil_log_data) {
4530 CFIL_LOG(LOG_DEBUG, "CFIL: <so %llx, sockID %llu>, cfi_pass_offset %llu",
4531 (uint64_t)VM_KERNEL_ADDRPERM(so), cfil_info->cfi_sock_id, cfi_buf->cfi_pass_offset);
4532 }
4533
4534 return 0;
4535 }
4536
4537 int
4538 cfil_action_data_pass(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit, int outgoing,
4539 uint64_t pass_offset, uint64_t peek_offset)
4540 {
4541 errno_t error = 0;
4542
4543 CFIL_LOG(LOG_INFO, "");
4544
4545 socket_lock_assert_owned(so);
4546
4547 error = cfil_acquire_sockbuf(so, cfil_info, outgoing);
4548 if (error != 0) {
4549 CFIL_LOG(LOG_INFO, "so %llx %s dropped",
4550 (uint64_t)VM_KERNEL_ADDRPERM(so),
4551 outgoing ? "out" : "in");
4552 goto release;
4553 }
4554
4555 error = cfil_update_data_offsets(so, cfil_info, kcunit, outgoing,
4556 pass_offset, peek_offset);
4557
4558 cfil_service_inject_queue(so, cfil_info, outgoing);
4559
4560 cfil_set_socket_pass_offset(so, cfil_info, outgoing);
4561 release:
4562 CFIL_INFO_VERIFY(cfil_info);
4563 cfil_release_sockbuf(so, outgoing);
4564
4565 return error;
4566 }
4567
4568
4569 static void
4570 cfil_flush_queues(struct socket *so, struct cfil_info *cfil_info)
4571 {
4572 struct cfil_entry *entry;
4573 int kcunit;
4574 uint64_t drained;
4575
4576 if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || cfil_info == NULL) {
4577 goto done;
4578 }
4579
4580 socket_lock_assert_owned(so);
4581
4582 /*
4583 * Flush the output queues and ignore errors as long as
4584 * we are attached
4585 */
4586 (void) cfil_acquire_sockbuf(so, cfil_info, 1);
4587 if (cfil_info != NULL) {
4588 drained = 0;
4589 for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
4590 entry = &cfil_info->cfi_entries[kcunit - 1];
4591
4592 drained += cfil_queue_drain(&entry->cfe_snd.cfe_ctl_q);
4593 drained += cfil_queue_drain(&entry->cfe_snd.cfe_pending_q);
4594 }
4595 drained += cfil_queue_drain(&cfil_info->cfi_snd.cfi_inject_q);
4596
4597 if (drained) {
4598 if (cfil_info->cfi_flags & CFIF_DROP) {
4599 OSIncrementAtomic(
4600 &cfil_stats.cfs_flush_out_drop);
4601 } else {
4602 OSIncrementAtomic(
4603 &cfil_stats.cfs_flush_out_close);
4604 }
4605 }
4606 }
4607 cfil_release_sockbuf(so, 1);
4608
4609 /*
4610 * Flush the input queues
4611 */
4612 (void) cfil_acquire_sockbuf(so, cfil_info, 0);
4613 if (cfil_info != NULL) {
4614 drained = 0;
4615 for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
4616 entry = &cfil_info->cfi_entries[kcunit - 1];
4617
4618 drained += cfil_queue_drain(
4619 &entry->cfe_rcv.cfe_ctl_q);
4620 drained += cfil_queue_drain(
4621 &entry->cfe_rcv.cfe_pending_q);
4622 }
4623 drained += cfil_queue_drain(&cfil_info->cfi_rcv.cfi_inject_q);
4624
4625 if (drained) {
4626 if (cfil_info->cfi_flags & CFIF_DROP) {
4627 OSIncrementAtomic(
4628 &cfil_stats.cfs_flush_in_drop);
4629 } else {
4630 OSIncrementAtomic(
4631 &cfil_stats.cfs_flush_in_close);
4632 }
4633 }
4634 }
4635 cfil_release_sockbuf(so, 0);
4636 done:
4637 CFIL_INFO_VERIFY(cfil_info);
4638 }
4639
4640 int
4641 cfil_action_drop(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit)
4642 {
4643 errno_t error = 0;
4644 struct cfil_entry *entry;
4645 struct proc *p;
4646
4647 if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || cfil_info == NULL) {
4648 goto done;
4649 }
4650
4651 socket_lock_assert_owned(so);
4652
4653 entry = &cfil_info->cfi_entries[kcunit - 1];
4654
4655 /* Are we attached to the filter? */
4656 if (entry->cfe_filter == NULL) {
4657 goto done;
4658 }
4659
4660 cfil_info->cfi_flags |= CFIF_DROP;
4661
4662 p = current_proc();
4663
4664 /*
4665 * Force the socket to be marked defunct
4666 * (forcing fixed along with rdar://19391339)
4667 */
4668 if (so->so_flow_db == NULL) {
4669 error = sosetdefunct(p, so,
4670 SHUTDOWN_SOCKET_LEVEL_CONTENT_FILTER | SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL,
4671 FALSE);
4672
4673 /* Flush the socket buffer and disconnect */
4674 if (error == 0) {
4675 error = sodefunct(p, so,
4676 SHUTDOWN_SOCKET_LEVEL_CONTENT_FILTER | SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL);
4677 }
4678 }
4679
4680 /* The filter is done, mark as detached */
4681 entry->cfe_flags |= CFEF_CFIL_DETACHED;
4682
4683 if (cfil_info->cfi_debug) {
4684 cfil_info_log(LOG_INFO, cfil_info, "CFIL: DROP - DETACH");
4685 }
4686
4687 CFIL_LOG(LOG_INFO, "so %llx detached %u",
4688 (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit);
4689
4690 /* Pending data needs to go */
4691 cfil_flush_queues(so, cfil_info);
4692
4693 if (cfil_info && (cfil_info->cfi_flags & CFIF_CLOSE_WAIT)) {
4694 if (cfil_filters_attached(so) == 0) {
4695 CFIL_LOG(LOG_INFO, "so %llx waking",
4696 (uint64_t)VM_KERNEL_ADDRPERM(so));
4697 wakeup((caddr_t)cfil_info);
4698 }
4699 }
4700 done:
4701 return error;
4702 }
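
/*
 * Agent side sketch (illustrative, user space, compiled out): the "deny"
 * verdict that lands in cfil_action_drop() above is a bare action header
 * carrying CFM_OP_DROP; message layout assumed from the subsystem's header.
 */
#if 0
#include <string.h>
#include <sys/socket.h>

static void
example_send_drop(int kctl_fd, uint64_t sock_id)
{
	struct cfil_msg_action act;

	memset(&act, 0, sizeof(act));
	act.cfa_msghdr.cfm_len = sizeof(act);
	act.cfa_msghdr.cfm_version = CFM_VERSION_CURRENT;
	act.cfa_msghdr.cfm_type = CFM_TYPE_ACTION;
	act.cfa_msghdr.cfm_op = CFM_OP_DROP;
	act.cfa_msghdr.cfm_sock_id = sock_id;
	(void)send(kctl_fd, &act, sizeof(act), 0);
}
#endif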
4703
4704 int
4705 cfil_action_bless_client(uint32_t kcunit, struct cfil_msg_hdr *msghdr)
4706 {
4707 errno_t error = 0;
4708 struct cfil_info *cfil_info = NULL;
4709
4710 bool cfil_attached = false;
4711 struct cfil_msg_bless_client *blessmsg = (struct cfil_msg_bless_client *)msghdr;
4712
4713 // Search and lock socket
4714 struct socket *so = cfil_socket_from_client_uuid(blessmsg->cfb_client_uuid, &cfil_attached);
4715 if (so == NULL) {
4716 error = ENOENT;
4717 } else {
4718 // The client gets a pass automatically
4719 cfil_info = (so->so_flow_db != NULL) ?
4720 soflow_db_get_feature_context(so->so_flow_db, msghdr->cfm_sock_id) : so->so_cfil;
4721
4722 if (cfil_attached) {
4723 if (cfil_info != NULL && cfil_info->cfi_debug) {
4724 cfil_info_log(LOG_INFO, cfil_info, "CFIL: VERDICT RECEIVED: BLESS");
4725 }
4726 cfil_sock_received_verdict(so);
4727 (void)cfil_action_data_pass(so, cfil_info, kcunit, 1, CFM_MAX_OFFSET, CFM_MAX_OFFSET);
4728 (void)cfil_action_data_pass(so, cfil_info, kcunit, 0, CFM_MAX_OFFSET, CFM_MAX_OFFSET);
4729 } else {
4730 so->so_flags1 |= SOF1_CONTENT_FILTER_SKIP;
4731 }
4732 socket_unlock(so, 1);
4733 }
4734
4735 return error;
4736 }
4737
4738 int
4739 cfil_action_set_crypto_key(uint32_t kcunit, struct cfil_msg_hdr *msghdr)
4740 {
4741 struct content_filter *cfc = NULL;
4742 cfil_crypto_state_t crypto_state = NULL;
4743 struct cfil_msg_set_crypto_key *keymsg = (struct cfil_msg_set_crypto_key *)msghdr;
4744
4745 CFIL_LOG(LOG_NOTICE, "");
4746
4747 if (content_filters == NULL) {
4748 CFIL_LOG(LOG_ERR, "no content filter");
4749 return EINVAL;
4750 }
4751 if (kcunit > MAX_CONTENT_FILTER) {
4752 CFIL_LOG(LOG_ERR, "kcunit %u > MAX_CONTENT_FILTER (%d)",
4753 kcunit, MAX_CONTENT_FILTER);
4754 return EINVAL;
4755 }
4756 crypto_state = cfil_crypto_init_client((uint8_t *)keymsg->crypto_key);
4757 if (crypto_state == NULL) {
4758 CFIL_LOG(LOG_ERR, "failed to initialize crypto state for unit %u)",
4759 kcunit);
4760 return EINVAL;
4761 }
4762
4763 cfil_rw_lock_exclusive(&cfil_lck_rw);
4764
4765 cfc = content_filters[kcunit - 1];
4766 if (cfc == NULL || cfc->cf_kcunit != kcunit) {
4767 CFIL_LOG(LOG_ERR, "bad unit info %u",
4768 kcunit);
4769 cfil_rw_unlock_exclusive(&cfil_lck_rw);
4770 cfil_crypto_cleanup_state(crypto_state);
4771 return EINVAL;
4772 }
4773 if (cfc->cf_crypto_state != NULL) {
4774 cfil_crypto_cleanup_state(cfc->cf_crypto_state);
4775 cfc->cf_crypto_state = NULL;
4776 }
4777 cfc->cf_crypto_state = crypto_state;
4778
4779 cfil_rw_unlock_exclusive(&cfil_lck_rw);
4780 return 0;
4781 }
4782
4783 static int
4784 cfil_update_entry_offsets(struct socket *so, struct cfil_info *cfil_info, int outgoing, unsigned int datalen)
4785 {
4786 struct cfil_entry *entry;
4787 struct cfe_buf *entrybuf;
4788 uint32_t kcunit;
4789
4790 CFIL_LOG(LOG_INFO, "so %llx outgoing %d datalen %u",
4791 (uint64_t)VM_KERNEL_ADDRPERM(so), outgoing, datalen);
4792
4793 for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
4794 entry = &cfil_info->cfi_entries[kcunit - 1];
4795
4796 /* Are we attached to the filter? */
4797 if (entry->cfe_filter == NULL) {
4798 continue;
4799 }
4800
4801 if (outgoing) {
4802 entrybuf = &entry->cfe_snd;
4803 } else {
4804 entrybuf = &entry->cfe_rcv;
4805 }
4806
4807 entrybuf->cfe_ctl_q.q_start += datalen;
4808 if (entrybuf->cfe_pass_offset < entrybuf->cfe_ctl_q.q_start) {
4809 entrybuf->cfe_pass_offset = entrybuf->cfe_ctl_q.q_start;
4810 }
4811 entrybuf->cfe_peeked = entrybuf->cfe_ctl_q.q_start;
4812 if (entrybuf->cfe_peek_offset < entrybuf->cfe_pass_offset) {
4813 entrybuf->cfe_peek_offset = entrybuf->cfe_pass_offset;
4814 }
4815
4816 entrybuf->cfe_ctl_q.q_end += datalen;
4817
4818 entrybuf->cfe_pending_q.q_start += datalen;
4819 entrybuf->cfe_pending_q.q_end += datalen;
4820 }
4821 CFIL_INFO_VERIFY(cfil_info);
4822 return 0;
4823 }
4824
4825 int
4826 cfil_data_common(struct socket *so, struct cfil_info *cfil_info, int outgoing, struct sockaddr *to,
4827 struct mbuf *data, struct mbuf *control, uint32_t flags)
4828 {
4829 #pragma unused(to, control, flags)
4830 errno_t error = 0;
4831 unsigned int datalen;
4832 int mbcnt = 0;
4833 int mbnum = 0;
4834 int kcunit;
4835 struct cfi_buf *cfi_buf;
4836 struct mbuf *chain = NULL;
4837
4838 if (cfil_info == NULL) {
4839 CFIL_LOG(LOG_ERR, "so %llx cfil detached",
4840 (uint64_t)VM_KERNEL_ADDRPERM(so));
4841 error = 0;
4842 goto done;
4843 } else if (cfil_info->cfi_flags & CFIF_DROP) {
4844 CFIL_LOG(LOG_ERR, "so %llx drop set",
4845 (uint64_t)VM_KERNEL_ADDRPERM(so));
4846 error = EPIPE;
4847 goto done;
4848 }
4849
4850 datalen = cfil_data_length(data, &mbcnt, &mbnum);
4851
4852 if (datalen == 0) {
4853 error = 0;
4854 goto done;
4855 }
4856
4857 if (outgoing) {
4858 cfi_buf = &cfil_info->cfi_snd;
4859 cfil_info->cfi_byte_outbound_count += datalen;
4860 } else {
4861 cfi_buf = &cfil_info->cfi_rcv;
4862 cfil_info->cfi_byte_inbound_count += datalen;
4863 }
4864
4865 cfi_buf->cfi_pending_last += datalen;
4866 cfi_buf->cfi_pending_mbcnt += mbcnt;
4867 cfi_buf->cfi_pending_mbnum += mbnum;
4868
4869 if (NEED_DGRAM_FLOW_TRACKING(so)) {
4870 if (cfi_buf->cfi_pending_mbnum > cfil_udp_gc_mbuf_num_max ||
4871 cfi_buf->cfi_pending_mbcnt > cfil_udp_gc_mbuf_cnt_max) {
4872 cfi_buf->cfi_tail_drop_cnt++;
4873 cfi_buf->cfi_pending_mbcnt -= mbcnt;
4874 cfi_buf->cfi_pending_mbnum -= mbnum;
4875 return EPIPE;
4876 }
4877 }
4878
4879 cfil_info_buf_verify(cfi_buf);
4880
4881 if (cfil_info->cfi_debug && cfil_log_data) {
4882 CFIL_LOG(LOG_DEBUG, "CFIL: QUEUEING DATA: <so %llx> %s: data %llx len %u flags 0x%x nextpkt %llx - cfi_pending_last %llu cfi_pending_mbcnt %u cfi_pass_offset %llu",
4883 (uint64_t)VM_KERNEL_ADDRPERM(so),
4884 outgoing ? "OUT" : "IN",
4885 (uint64_t)VM_KERNEL_ADDRPERM(data), datalen, data->m_flags,
4886 (uint64_t)VM_KERNEL_ADDRPERM(data->m_nextpkt),
4887 cfi_buf->cfi_pending_last,
4888 cfi_buf->cfi_pending_mbcnt,
4889 cfi_buf->cfi_pass_offset);
4890 }
4891
4892 /* Fast path when below pass offset */
4893 if (cfi_buf->cfi_pending_last <= cfi_buf->cfi_pass_offset) {
4894 cfil_update_entry_offsets(so, cfil_info, outgoing, datalen);
4895 if (cfil_info->cfi_debug && cfil_log_data) {
4896 CFIL_LOG(LOG_DEBUG, "CFIL: QUEUEING DATA: FAST PATH");
4897 }
4898 } else {
4899 struct cfil_entry *iter_entry;
4900 SLIST_FOREACH(iter_entry, &cfil_info->cfi_ordered_entries, cfe_order_link) {
4901 // Is cfil attached to this filter?
4902 kcunit = CFI_ENTRY_KCUNIT(cfil_info, iter_entry);
4903 if (IS_ENTRY_ATTACHED(cfil_info, kcunit)) {
4904 if (NEED_DGRAM_FLOW_TRACKING(so) && chain == NULL) {
4905 /* Datagrams only:
4906 * Chain addr (incoming only, TBD), control (optional) and data into one chain.
4907 * This full chain will be reinjected into the socket after receiving the verdict.
4908 */
4909 (void) cfil_dgram_save_socket_state(cfil_info, data);
4910 chain = sbconcat_mbufs(NULL, outgoing ? NULL : to, data, control);
4911 if (chain == NULL) {
4912 return ENOBUFS;
4913 }
4914 data = chain;
4915 }
4916 error = cfil_data_filter(so, cfil_info, kcunit, outgoing, data,
4917 datalen);
4918 }
4919 /* 0 means passed so continue with next filter */
4920 if (error != 0) {
4921 break;
4922 }
4923 }
4924 }
4925
4926 /* Move cursor if no filter claimed the data */
4927 if (error == 0) {
4928 cfi_buf->cfi_pending_first += datalen;
4929 cfi_buf->cfi_pending_mbcnt -= mbcnt;
4930 cfi_buf->cfi_pending_mbnum -= mbnum;
4931 cfil_info_buf_verify(cfi_buf);
4932 }
4933 done:
4934 CFIL_INFO_VERIFY(cfil_info);
4935
4936 return error;
4937 }
4938
4939 /*
4940 * Callback from socket layer sosendxxx()
4941 */
4942 int
4943 cfil_sock_data_out(struct socket *so, struct sockaddr *to,
4944 struct mbuf *data, struct mbuf *control, uint32_t flags, struct soflow_hash_entry *flow_entry)
4945 {
4946 int error = 0;
4947 int new_filter_control_unit = 0;
4948
4949 if (NEED_DGRAM_FLOW_TRACKING(so)) {
4950 return cfil_sock_udp_handle_data(TRUE, so, NULL, to, data, control, flags, flow_entry);
4951 }
4952
4953 if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) {
4954 /* Drop pre-existing TCP sockets if filter is enabled now */
4955 if (!DO_PRESERVE_CONNECTIONS && cfil_active_count > 0 && !SKIP_FILTER_FOR_TCP_SOCKET(so)) {
4956 new_filter_control_unit = necp_socket_get_content_filter_control_unit(so);
4957 if (new_filter_control_unit > 0) {
4958 CFIL_LOG(LOG_NOTICE, "CFIL: TCP(OUT) <so %llx> - filter state changed - dropped pre-existing flow", (uint64_t)VM_KERNEL_ADDRPERM(so));
4959 return EPIPE;
4960 }
4961 }
4962 return 0;
4963 }
4964
4965 /* Drop pre-existing TCP sockets when filter state changed */
4966 new_filter_control_unit = necp_socket_get_content_filter_control_unit(so);
4967 if (new_filter_control_unit > 0 && new_filter_control_unit != so->so_cfil->cfi_filter_control_unit && !SKIP_FILTER_FOR_TCP_SOCKET(so)) {
4968 if (DO_PRESERVE_CONNECTIONS) {
4969 so->so_cfil->cfi_filter_control_unit = new_filter_control_unit;
4970 } else {
4971 CFIL_LOG(LOG_NOTICE, "CFIL: TCP(OUT) <so %llx> - filter state changed - dropped pre-existing flow (old state 0x%x new state 0x%x)",
4972 (uint64_t)VM_KERNEL_ADDRPERM(so),
4973 so->so_cfil->cfi_filter_control_unit, new_filter_control_unit);
4974 return EPIPE;
4975 }
4976 }
4977
4978 /*
4979 * Pass initial data for TFO.
4980 */
4981 if (IS_INITIAL_TFO_DATA(so)) {
4982 return 0;
4983 }
4984
4985 socket_lock_assert_owned(so);
4986
4987 if (so->so_cfil->cfi_flags & CFIF_DROP) {
4988 CFIL_LOG(LOG_ERR, "so %llx drop set",
4989 (uint64_t)VM_KERNEL_ADDRPERM(so));
4990 return EPIPE;
4991 }
4992 if (control != NULL) {
4993 CFIL_LOG(LOG_ERR, "so %llx control",
4994 (uint64_t)VM_KERNEL_ADDRPERM(so));
4995 OSIncrementAtomic(&cfil_stats.cfs_data_out_control);
4996 }
4997 if ((flags & MSG_OOB)) {
4998 CFIL_LOG(LOG_ERR, "so %llx MSG_OOB",
4999 (uint64_t)VM_KERNEL_ADDRPERM(so));
5000 OSIncrementAtomic(&cfil_stats.cfs_data_out_oob);
5001 }
5002 if ((so->so_snd.sb_flags & SB_LOCK) == 0) {
5003 panic("so %p SB_LOCK not set", so);
5004 }
5005
5006 if (so->so_snd.sb_cfil_thread != NULL) {
5007 panic("%s sb_cfil_thread %p not NULL", __func__,
5008 so->so_snd.sb_cfil_thread);
5009 }
5010
5011 error = cfil_data_common(so, so->so_cfil, 1, to, data, control, flags);
5012
5013 return error;
5014 }
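
/*
 * Setup sketch for the agent whose filter control unit is consulted above
 * (illustrative, user space, compiled out): open the content filter kernel
 * control socket and register the NECP filter control unit. Uses the
 * standard Darwin kernel-control APIs; error handling elided.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/kern_control.h>
#include <sys/socket.h>
#include <sys/sys_domain.h>

static int
example_open_cfil_control(uint32_t necp_control_unit)
{
	struct ctl_info info;
	struct sockaddr_ctl addr;
	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);

	memset(&info, 0, sizeof(info));
	strlcpy(info.ctl_name, CONTENT_FILTER_CONTROL_NAME,
	    sizeof(info.ctl_name));
	ioctl(fd, CTLIOCGINFO, &info);	/* resolve name -> ctl_id */

	memset(&addr, 0, sizeof(addr));
	addr.sc_len = sizeof(addr);
	addr.sc_family = AF_SYSTEM;
	addr.ss_sysaddr = AF_SYS_CONTROL;
	addr.sc_id = info.ctl_id;
	addr.sc_unit = 0;		/* let the kernel pick the kcunit */
	connect(fd, (struct sockaddr *)&addr, sizeof(addr));

	/* bind this agent to its NECP filter control unit */
	setsockopt(fd, SYSPROTO_CONTROL, CFIL_OPT_NECP_CONTROL_UNIT,
	    &necp_control_unit, sizeof(necp_control_unit));
	return fd;
}
#endif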
5015
5016 /*
5017 * Callback from socket layer sbappendxxx()
5018 */
5019 int
5020 cfil_sock_data_in(struct socket *so, struct sockaddr *from,
5021 struct mbuf *data, struct mbuf *control, uint32_t flags, struct soflow_hash_entry *flow_entry)
5022 {
5023 int error = 0;
5024 int new_filter_control_unit = 0;
5025
5026 if (NEED_DGRAM_FLOW_TRACKING(so)) {
5027 return cfil_sock_udp_handle_data(FALSE, so, NULL, from, data, control, flags, flow_entry);
5028 }
5029
5030 if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) {
5031 /* Drop pre-existing TCP sockets if filter is enabled now */
5032 if (!DO_PRESERVE_CONNECTIONS && cfil_active_count > 0 && !SKIP_FILTER_FOR_TCP_SOCKET(so)) {
5033 new_filter_control_unit = necp_socket_get_content_filter_control_unit(so);
5034 if (new_filter_control_unit > 0) {
5035 CFIL_LOG(LOG_NOTICE, "CFIL: TCP(IN) <so %llx> - filter state changed - dropped pre-existing flow", (uint64_t)VM_KERNEL_ADDRPERM(so));
5036 return EPIPE;
5037 }
5038 }
5039 return 0;
5040 }
5041
5042 /* Drop pre-existing TCP sockets when filter state changed */
5043 new_filter_control_unit = necp_socket_get_content_filter_control_unit(so);
5044 if (new_filter_control_unit > 0 && new_filter_control_unit != so->so_cfil->cfi_filter_control_unit && !SKIP_FILTER_FOR_TCP_SOCKET(so)) {
5045 if (DO_PRESERVE_CONNECTIONS) {
5046 so->so_cfil->cfi_filter_control_unit = new_filter_control_unit;
5047 } else {
5048 CFIL_LOG(LOG_NOTICE, "CFIL: TCP(IN) <so %llx> - filter state changed - dropped pre-existing flow (old state 0x%x new state 0x%x)",
5049 (uint64_t)VM_KERNEL_ADDRPERM(so),
5050 so->so_cfil->cfi_filter_control_unit, new_filter_control_unit);
5051 return EPIPE;
5052 }
5053 }
5054
5055 /*
5056 * Pass initial data for TFO.
5057 */
5058 if (IS_INITIAL_TFO_DATA(so)) {
5059 return 0;
5060 }
5061
5062 socket_lock_assert_owned(so);
5063
5064 if (so->so_cfil->cfi_flags & CFIF_DROP) {
5065 CFIL_LOG(LOG_ERR, "so %llx drop set",
5066 (uint64_t)VM_KERNEL_ADDRPERM(so));
5067 return EPIPE;
5068 }
5069 if (control != NULL) {
5070 CFIL_LOG(LOG_ERR, "so %llx control",
5071 (uint64_t)VM_KERNEL_ADDRPERM(so));
5072 OSIncrementAtomic(&cfil_stats.cfs_data_in_control);
5073 }
5074 if (data->m_type == MT_OOBDATA) {
5075 CFIL_LOG(LOG_ERR, "so %llx MSG_OOB",
5076 (uint64_t)VM_KERNEL_ADDRPERM(so));
5077 OSIncrementAtomic(&cfil_stats.cfs_data_in_oob);
5078 }
5079 error = cfil_data_common(so, so->so_cfil, 0, from, data, control, flags);
5080
5081 return error;
5082 }

/*
 * Callback from socket layer soshutdownxxx()
 *
 * We may delay the shutdown write if there is outgoing data still in progress.
 *
 * There is no point in delaying the shutdown read because the process
 * indicated that it does not want to read any more data.
 */
int
cfil_sock_shutdown(struct socket *so, int *how)
{
	int error = 0;

	if (NEED_DGRAM_FLOW_TRACKING(so)) {
		return cfil_sock_udp_shutdown(so, how);
	}

	if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) {
		goto done;
	}

	socket_lock_assert_owned(so);

	CFIL_LOG(LOG_INFO, "so %llx how %d",
	    (uint64_t)VM_KERNEL_ADDRPERM(so), *how);

	/*
	 * Check the state of the socket before the content filter
	 */
	if (*how != SHUT_WR && (so->so_state & SS_CANTRCVMORE) != 0) {
		/* read already shut down */
		error = ENOTCONN;
		goto done;
	}
	if (*how != SHUT_RD && (so->so_state & SS_CANTSENDMORE) != 0) {
		/* write already shut down */
		error = ENOTCONN;
		goto done;
	}

	if ((so->so_cfil->cfi_flags & CFIF_DROP) != 0) {
		CFIL_LOG(LOG_ERR, "so %llx drop set",
		    (uint64_t)VM_KERNEL_ADDRPERM(so));
		goto done;
	}

	/*
	 * shutdown read: SHUT_RD or SHUT_RDWR
	 */
	if (*how != SHUT_WR) {
		if (so->so_cfil->cfi_flags & CFIF_SHUT_RD) {
			error = ENOTCONN;
			goto done;
		}
		so->so_cfil->cfi_flags |= CFIF_SHUT_RD;
		cfil_sock_notify_shutdown(so, SHUT_RD);
	}
	/*
	 * shutdown write: SHUT_WR or SHUT_RDWR
	 */
	if (*how != SHUT_RD) {
		if (so->so_cfil->cfi_flags & CFIF_SHUT_WR) {
			error = ENOTCONN;
			goto done;
		}
		so->so_cfil->cfi_flags |= CFIF_SHUT_WR;
		cfil_sock_notify_shutdown(so, SHUT_WR);
		/*
		 * When outgoing data is pending, we delay the shutdown at the
		 * protocol level until the content filters give the final
		 * verdict on the pending data.
		 */
		if (cfil_sock_data_pending(&so->so_snd) != 0) {
			/*
			 * When shutting down the read and write sides at once
			 * we can proceed to the final shutdown of the read
			 * side. Otherwise, we just return.
			 */
			if (*how == SHUT_WR) {
				error = EJUSTRETURN;
			} else if (*how == SHUT_RDWR) {
				*how = SHUT_RD;
			}
		}
	}
done:
	return error;
}
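
/*
 * Caller-side sketch (illustrative only, not from this file): the
 * soshutdownxxx() path is expected to interpret the results above roughly
 * as follows. EJUSTRETURN means "pending outgoing data is still being
 * filtered, skip the protocol-level shutdown for now"; a downgraded *how of
 * SHUT_RD means "only shut down the read side immediately".
 *
 *	error = cfil_sock_shutdown(so, &how);
 *	if (error == EJUSTRETURN) {
 *		return 0;       // delayed; cfil finishes the shutdown later
 *	} else if (error != 0) {
 *		return error;   // e.g. ENOTCONN
 *	}
 *	// proceed with the protocol shutdown using the (possibly
 *	// downgraded) value of how
 */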

/*
 * This is called when the socket is closed and there is no more
 * opportunity for filtering
 */
void
cfil_sock_is_closed(struct socket *so)
{
	errno_t error = 0;
	int kcunit;

	if (NEED_DGRAM_FLOW_TRACKING(so)) {
		cfil_sock_udp_is_closed(so);
		return;
	}

	if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) {
		return;
	}

	CFIL_LOG(LOG_INFO, "so %llx", (uint64_t)VM_KERNEL_ADDRPERM(so));

	socket_lock_assert_owned(so);

	for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
		/* Let the filters know of the closing */
		error = cfil_dispatch_closed_event(so, so->so_cfil, kcunit);
	}

	/* Last chance to push passed data out */
	error = cfil_acquire_sockbuf(so, so->so_cfil, 1);
	if (error == 0) {
		cfil_service_inject_queue(so, so->so_cfil, 1);
	}
	cfil_release_sockbuf(so, 1);

	so->so_cfil->cfi_flags |= CFIF_SOCK_CLOSED;

	/* Pending data needs to go */
	cfil_flush_queues(so, so->so_cfil);

	CFIL_INFO_VERIFY(so->so_cfil);
}

/*
 * This is called when the socket is disconnected, so let the filters
 * know about the disconnection and that no more data will come
 *
 * The how parameter has the same values as soshutdown()
 */
void
cfil_sock_notify_shutdown(struct socket *so, int how)
{
	errno_t error = 0;
	int kcunit;

	if (NEED_DGRAM_FLOW_TRACKING(so)) {
		cfil_sock_udp_notify_shutdown(so, how, 0, 0);
		return;
	}

	if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) {
		return;
	}

	CFIL_LOG(LOG_INFO, "so %llx how %d",
	    (uint64_t)VM_KERNEL_ADDRPERM(so), how);

	socket_lock_assert_owned(so);

	for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
		/* Disconnect incoming side */
		if (how != SHUT_WR) {
			error = cfil_dispatch_disconnect_event(so, so->so_cfil, kcunit, 0);
		}
		/* Disconnect outgoing side */
		if (how != SHUT_RD) {
			error = cfil_dispatch_disconnect_event(so, so->so_cfil, kcunit, 1);
		}
	}
}

static int
cfil_filters_attached(struct socket *so)
{
	struct cfil_entry *entry;
	uint32_t kcunit;
	int attached = 0;

	if (NEED_DGRAM_FLOW_TRACKING(so)) {
		return cfil_filters_udp_attached(so, FALSE);
	}

	if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) {
		return 0;
	}

	socket_lock_assert_owned(so);

	for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
		entry = &so->so_cfil->cfi_entries[kcunit - 1];

		/* Are we attached to the filter? */
		if (entry->cfe_filter == NULL) {
			continue;
		}
		if ((entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED) == 0) {
			continue;
		}
		if ((entry->cfe_flags & CFEF_CFIL_DETACHED) != 0) {
			continue;
		}
		attached = 1;
		break;
	}

	return attached;
}

/*
 * This is called when the socket is closed and we are waiting for
 * the filters to give the final pass or drop verdict
 */
void
cfil_sock_close_wait(struct socket *so)
{
	lck_mtx_t *mutex_held;
	struct timespec ts;
	int error;

	if (NEED_DGRAM_FLOW_TRACKING(so)) {
		cfil_sock_udp_close_wait(so);
		return;
	}

	if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) {
		return;
	}

	// This flow does not need to wait for close ack from user-space
	if (IS_NO_CLOSE_WAIT(so->so_cfil)) {
		if (so->so_cfil->cfi_debug) {
			cfil_info_log(LOG_INFO, so->so_cfil, "CFIL: SKIP CLOSE WAIT");
		}
		return;
	}

	CFIL_LOG(LOG_INFO, "so %llx", (uint64_t)VM_KERNEL_ADDRPERM(so));

	if (so->so_proto->pr_getlock != NULL) {
		mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK);
	} else {
		mutex_held = so->so_proto->pr_domain->dom_mtx;
	}
	LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);

	while (cfil_filters_attached(so)) {
		/*
		 * Notify the filters we are going away so they can detach
		 */
		cfil_sock_notify_shutdown(so, SHUT_RDWR);

		/*
		 * Check whether we still need to wait after the filters
		 * are notified of the disconnection
		 */
		if (cfil_filters_attached(so) == 0) {
			break;
		}

		CFIL_LOG(LOG_INFO, "so %llx waiting",
		    (uint64_t)VM_KERNEL_ADDRPERM(so));

		ts.tv_sec = cfil_close_wait_timeout / 1000;
		ts.tv_nsec = (cfil_close_wait_timeout % 1000) *
		    NSEC_PER_USEC * 1000;

		OSIncrementAtomic(&cfil_stats.cfs_close_wait);
		so->so_cfil->cfi_flags |= CFIF_CLOSE_WAIT;
		error = msleep((caddr_t)so->so_cfil, mutex_held,
		    PSOCK | PCATCH, "cfil_sock_close_wait", &ts);
		so->so_cfil->cfi_flags &= ~CFIF_CLOSE_WAIT;

		CFIL_LOG(LOG_NOTICE, "so %llx timed out %d",
		    (uint64_t)VM_KERNEL_ADDRPERM(so), (error != 0));

		/*
		 * Force close in case of timeout
		 */
		if (error != 0) {
			OSIncrementAtomic(&cfil_stats.cfs_close_wait_timeout);
			break;
		}
	}
}
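
/*
 * Note on the wait interval above: cfil_close_wait_timeout is expressed in
 * milliseconds, so the conversion to a timespec splits it into whole seconds
 * plus a nanosecond remainder. A minimal sketch of the same conversion:
 *
 *	ts.tv_sec  = timeout_ms / 1000;                          // whole seconds
 *	ts.tv_nsec = (timeout_ms % 1000) * NSEC_PER_USEC * 1000; // ms -> ns
 */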

/*
 * Return the size of the data held by the content filter
 * for the given socket buffer.
 */
int32_t
cfil_sock_data_pending(struct sockbuf *sb)
{
	struct socket *so = sb->sb_so;
	uint64_t pending = 0;

	if (NEED_DGRAM_FLOW_TRACKING(so)) {
		return cfil_sock_udp_data_pending(sb, FALSE);
	}

	if ((so->so_flags & SOF_CONTENT_FILTER) != 0 && so->so_cfil != NULL) {
		struct cfi_buf *cfi_buf;

		socket_lock_assert_owned(so);

		if ((sb->sb_flags & SB_RECV) == 0) {
			cfi_buf = &so->so_cfil->cfi_snd;
		} else {
			cfi_buf = &so->so_cfil->cfi_rcv;
		}

		pending = cfi_buf->cfi_pending_last -
		    cfi_buf->cfi_pending_first;

		/*
		 * If we are limited by the "chars of mbufs used" roughly
		 * adjust so we won't overcommit
		 */
		if (pending > (uint64_t)cfi_buf->cfi_pending_mbcnt) {
			pending = cfi_buf->cfi_pending_mbcnt;
		}
	}

	VERIFY(pending < INT32_MAX);

	return (int32_t)(pending);
}

/*
 * Return the socket buffer space used by data being held by content filters
 * so processes won't clog the socket buffer
 */
int32_t
cfil_sock_data_space(struct sockbuf *sb)
{
	struct socket *so = sb->sb_so;
	uint64_t pending = 0;

	if (NEED_DGRAM_FLOW_TRACKING(so)) {
		return cfil_sock_udp_data_pending(sb, TRUE);
	}

	if ((so->so_flags & SOF_CONTENT_FILTER) != 0 && so->so_cfil != NULL &&
	    so->so_snd.sb_cfil_thread != current_thread()) {
		struct cfi_buf *cfi_buf;

		socket_lock_assert_owned(so);

		if ((sb->sb_flags & SB_RECV) == 0) {
			cfi_buf = &so->so_cfil->cfi_snd;
		} else {
			cfi_buf = &so->so_cfil->cfi_rcv;
		}

		pending = cfi_buf->cfi_pending_last -
		    cfi_buf->cfi_pending_first;

		/*
		 * If we are limited by the "chars of mbufs used" roughly
		 * adjust so we won't overcommit
		 */
		if ((uint64_t)cfi_buf->cfi_pending_mbcnt > pending) {
			pending = cfi_buf->cfi_pending_mbcnt;
		}
	}

	VERIFY(pending < INT32_MAX);

	return (int32_t)(pending);
}
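
/*
 * cfil_sock_data_pending() and cfil_sock_data_space() compute the same
 * quantity; the difference is that cfil_sock_data_space() reports zero when
 * called from the thread currently filtering the send buffer
 * (sb_cfil_thread == current_thread()), so that the filter's own injection
 * path is not throttled by the very data it is holding.
 */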

/*
 * A callback from the socket and protocol layer when data becomes
 * available in the socket buffer to give a chance for the content filter
 * to re-inject data that was held back
 */
void
cfil_sock_buf_update(struct sockbuf *sb)
{
	int outgoing;
	int error;
	struct socket *so = sb->sb_so;

	if (NEED_DGRAM_FLOW_TRACKING(so)) {
		cfil_sock_udp_buf_update(sb);
		return;
	}

	if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) {
		return;
	}

	if (!cfil_sbtrim) {
		return;
	}

	socket_lock_assert_owned(so);

	if ((sb->sb_flags & SB_RECV) == 0) {
		if ((so->so_cfil->cfi_flags & CFIF_RETRY_INJECT_OUT) == 0) {
			return;
		}
		outgoing = 1;
		OSIncrementAtomic(&cfil_stats.cfs_inject_q_out_retry);
	} else {
		if ((so->so_cfil->cfi_flags & CFIF_RETRY_INJECT_IN) == 0) {
			return;
		}
		outgoing = 0;
		OSIncrementAtomic(&cfil_stats.cfs_inject_q_in_retry);
	}

	CFIL_LOG(LOG_NOTICE, "so %llx outgoing %d",
	    (uint64_t)VM_KERNEL_ADDRPERM(so), outgoing);

	error = cfil_acquire_sockbuf(so, so->so_cfil, outgoing);
	if (error == 0) {
		cfil_service_inject_queue(so, so->so_cfil, outgoing);
	}
	cfil_release_sockbuf(so, outgoing);
}

int
sysctl_cfil_filter_list(struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	size_t len = 0;
	u_int32_t i;

	/* Read only */
	if (req->newptr != USER_ADDR_NULL) {
		return EPERM;
	}

	cfil_rw_lock_shared(&cfil_lck_rw);

	for (i = 0; content_filters != NULL && i < MAX_CONTENT_FILTER; i++) {
		struct cfil_filter_stat filter_stat;
		struct content_filter *cfc = content_filters[i];

		if (cfc == NULL) {
			continue;
		}

		/* If just asking for the size */
		if (req->oldptr == USER_ADDR_NULL) {
			len += sizeof(struct cfil_filter_stat);
			continue;
		}

		bzero(&filter_stat, sizeof(struct cfil_filter_stat));
		filter_stat.cfs_len = sizeof(struct cfil_filter_stat);
		filter_stat.cfs_filter_id = cfc->cf_kcunit;
		filter_stat.cfs_flags = cfc->cf_flags;
		filter_stat.cfs_sock_count = cfc->cf_sock_count;
		filter_stat.cfs_necp_control_unit = cfc->cf_necp_control_unit;

		error = SYSCTL_OUT(req, &filter_stat,
		    sizeof(struct cfil_filter_stat));
		if (error != 0) {
			break;
		}
	}
	/* If just asking for the size */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = len;
	}

	cfil_rw_unlock_shared(&cfil_lck_rw);

	if (cfil_log_level >= LOG_DEBUG) {
		if (req->oldptr != USER_ADDR_NULL) {
			for (i = 1; content_filters != NULL && i <= MAX_CONTENT_FILTER; i++) {
				cfil_filter_show(i);
			}
		}
	}

	return error;
}
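
/*
 * Userspace sketch (illustrative only; it assumes this handler is registered
 * as the "net.cfil.filter_list" sysctl -- the exact OID name comes from the
 * SYSCTL_PROC registration elsewhere in this file). The two-pass protocol is
 * the standard sysctl one: a NULL buffer returns the required size.
 *
 *	size_t len = 0;
 *	if (sysctlbyname("net.cfil.filter_list", NULL, &len, NULL, 0) == 0) {
 *		struct cfil_filter_stat *stats = malloc(len);
 *		if (stats != NULL &&
 *		    sysctlbyname("net.cfil.filter_list", stats, &len, NULL, 0) == 0) {
 *			// len / sizeof(*stats) entries are now valid
 *		}
 *		free(stats);
 *	}
 */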

static int
sysctl_cfil_sock_list(struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	u_int32_t i;
	struct cfil_info *cfi;

	/* Read only */
	if (req->newptr != USER_ADDR_NULL) {
		return EPERM;
	}

	cfil_rw_lock_shared(&cfil_lck_rw);

	/*
	 * If just asking for the size
	 */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = cfil_sock_attached_count *
		    sizeof(struct cfil_sock_stat);
		/* Bump the length in case new sockets get attached */
		req->oldidx += req->oldidx >> 3;
		goto done;
	}

	TAILQ_FOREACH(cfi, &cfil_sock_head, cfi_link) {
		struct cfil_entry *entry;
		struct cfil_sock_stat stat;
		struct socket *so = cfi->cfi_so;

		bzero(&stat, sizeof(struct cfil_sock_stat));
		stat.cfs_len = sizeof(struct cfil_sock_stat);
		stat.cfs_sock_id = cfi->cfi_sock_id;
		stat.cfs_flags = cfi->cfi_flags;

		if (so != NULL) {
			stat.cfs_pid = so->last_pid;
			memcpy(stat.cfs_uuid, so->last_uuid,
			    sizeof(uuid_t));
			if (so->so_flags & SOF_DELEGATED) {
				stat.cfs_e_pid = so->e_pid;
				memcpy(stat.cfs_e_uuid, so->e_uuid,
				    sizeof(uuid_t));
			} else {
				stat.cfs_e_pid = so->last_pid;
				memcpy(stat.cfs_e_uuid, so->last_uuid,
				    sizeof(uuid_t));
			}

			stat.cfs_sock_family = so->so_proto->pr_domain->dom_family;
			stat.cfs_sock_type = so->so_proto->pr_type;
			stat.cfs_sock_protocol = so->so_proto->pr_protocol;
		}

		stat.cfs_snd.cbs_pending_first =
		    cfi->cfi_snd.cfi_pending_first;
		stat.cfs_snd.cbs_pending_last =
		    cfi->cfi_snd.cfi_pending_last;
		stat.cfs_snd.cbs_inject_q_len =
		    cfil_queue_len(&cfi->cfi_snd.cfi_inject_q);
		stat.cfs_snd.cbs_pass_offset =
		    cfi->cfi_snd.cfi_pass_offset;

		stat.cfs_rcv.cbs_pending_first =
		    cfi->cfi_rcv.cfi_pending_first;
		stat.cfs_rcv.cbs_pending_last =
		    cfi->cfi_rcv.cfi_pending_last;
		stat.cfs_rcv.cbs_inject_q_len =
		    cfil_queue_len(&cfi->cfi_rcv.cfi_inject_q);
		stat.cfs_rcv.cbs_pass_offset =
		    cfi->cfi_rcv.cfi_pass_offset;

		for (i = 0; i < MAX_CONTENT_FILTER; i++) {
			struct cfil_entry_stat *estat;
			struct cfe_buf *ebuf;
			struct cfe_buf_stat *sbuf;

			entry = &cfi->cfi_entries[i];

			estat = &stat.ces_entries[i];

			estat->ces_len = sizeof(struct cfil_entry_stat);
			estat->ces_filter_id = entry->cfe_filter ?
			    entry->cfe_filter->cf_kcunit : 0;
			estat->ces_flags = entry->cfe_flags;
			estat->ces_necp_control_unit =
			    entry->cfe_necp_control_unit;

			estat->ces_last_event.tv_sec =
			    (int64_t)entry->cfe_last_event.tv_sec;
			estat->ces_last_event.tv_usec =
			    (int64_t)entry->cfe_last_event.tv_usec;

			estat->ces_last_action.tv_sec =
			    (int64_t)entry->cfe_last_action.tv_sec;
			estat->ces_last_action.tv_usec =
			    (int64_t)entry->cfe_last_action.tv_usec;

			ebuf = &entry->cfe_snd;
			sbuf = &estat->ces_snd;
			sbuf->cbs_pending_first =
			    cfil_queue_offset_first(&ebuf->cfe_pending_q);
			sbuf->cbs_pending_last =
			    cfil_queue_offset_last(&ebuf->cfe_pending_q);
			sbuf->cbs_ctl_first =
			    cfil_queue_offset_first(&ebuf->cfe_ctl_q);
			sbuf->cbs_ctl_last =
			    cfil_queue_offset_last(&ebuf->cfe_ctl_q);
			sbuf->cbs_pass_offset = ebuf->cfe_pass_offset;
			sbuf->cbs_peek_offset = ebuf->cfe_peek_offset;
			sbuf->cbs_peeked = ebuf->cfe_peeked;

			ebuf = &entry->cfe_rcv;
			sbuf = &estat->ces_rcv;
			sbuf->cbs_pending_first =
			    cfil_queue_offset_first(&ebuf->cfe_pending_q);
			sbuf->cbs_pending_last =
			    cfil_queue_offset_last(&ebuf->cfe_pending_q);
			sbuf->cbs_ctl_first =
			    cfil_queue_offset_first(&ebuf->cfe_ctl_q);
			sbuf->cbs_ctl_last =
			    cfil_queue_offset_last(&ebuf->cfe_ctl_q);
			sbuf->cbs_pass_offset = ebuf->cfe_pass_offset;
			sbuf->cbs_peek_offset = ebuf->cfe_peek_offset;
			sbuf->cbs_peeked = ebuf->cfe_peeked;
		}
		error = SYSCTL_OUT(req, &stat,
		    sizeof(struct cfil_sock_stat));
		if (error != 0) {
			break;
		}
	}
done:
	cfil_rw_unlock_shared(&cfil_lck_rw);

	if (cfil_log_level >= LOG_DEBUG) {
		if (req->oldptr != USER_ADDR_NULL) {
			cfil_info_show();
		}
	}

	return error;
}
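
/*
 * Sizing note: for the size-only pass the estimate is padded by one eighth
 * (oldidx += oldidx >> 3) so that sockets attached between the sizing call
 * and the data call still fit in the buffer the caller allocates.
 */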

/*
 * UDP Socket Support
 */
static void
cfil_hash_entry_log(int level, struct socket *so, struct soflow_hash_entry *entry, uint64_t sockId, const char* msg)
{
	char local[MAX_IPv6_STR_LEN + 6];
	char remote[MAX_IPv6_STR_LEN + 6];
	const void *addr;

	// No socket or no flow entry, no-op
	if (so == NULL || entry == NULL) {
		return;
	}

	local[0] = remote[0] = 0x0;

	switch (entry->soflow_family) {
	case AF_INET6:
		addr = &entry->soflow_laddr.addr6;
		inet_ntop(AF_INET6, addr, local, sizeof(local));
		addr = &entry->soflow_faddr.addr6;
		inet_ntop(AF_INET6, addr, remote, sizeof(remote));
		break;
	case AF_INET:
		addr = &entry->soflow_laddr.addr46.ia46_addr4.s_addr;
		inet_ntop(AF_INET, addr, local, sizeof(local));
		addr = &entry->soflow_faddr.addr46.ia46_addr4.s_addr;
		inet_ntop(AF_INET, addr, remote, sizeof(remote));
		break;
	default:
		return;
	}

	CFIL_LOG(level, "<%s>: <%s(%d) so %llx cfil %p, entry %p, sockID %llu <%llu>> lport %d fport %d laddr %s faddr %s hash %X",
	    msg,
	    IS_UDP(so) ? "UDP" : "proto", GET_SO_PROTO(so),
	    (uint64_t)VM_KERNEL_ADDRPERM(so), entry->soflow_feat_ctxt, entry, sockId, entry->soflow_feat_ctxt_id,
	    ntohs(entry->soflow_lport), ntohs(entry->soflow_fport), local, remote,
	    entry->soflow_flowhash);
}

static void
cfil_inp_log(int level, struct socket *so, const char* msg)
{
	struct inpcb *inp = NULL;
	char local[MAX_IPv6_STR_LEN + 6];
	char remote[MAX_IPv6_STR_LEN + 6];
	const void *addr;

	if (so == NULL) {
		return;
	}

	inp = sotoinpcb(so);
	if (inp == NULL) {
		return;
	}

	local[0] = remote[0] = 0x0;

	if (inp->inp_vflag & INP_IPV6) {
		addr = &inp->in6p_laddr.s6_addr32;
		inet_ntop(AF_INET6, addr, local, sizeof(local));
		addr = &inp->in6p_faddr.s6_addr32;
		inet_ntop(AF_INET6, addr, remote, sizeof(remote));
	} else {
		addr = &inp->inp_laddr.s_addr;
		inet_ntop(AF_INET, addr, local, sizeof(local));
		addr = &inp->inp_faddr.s_addr;
		inet_ntop(AF_INET, addr, remote, sizeof(remote));
	}

	if (so->so_cfil != NULL) {
		CFIL_LOG(level, "<%s>: <%s so %llx cfil %p - flags 0x%x 0x%x, sockID %llu> lport %d fport %d laddr %s faddr %s",
		    msg, IS_UDP(so) ? "UDP" : "TCP",
		    (uint64_t)VM_KERNEL_ADDRPERM(so), so->so_cfil, inp->inp_flags, inp->inp_socket->so_flags, so->so_cfil->cfi_sock_id,
		    ntohs(inp->inp_lport), ntohs(inp->inp_fport), local, remote);
	} else {
		CFIL_LOG(level, "<%s>: <%s so %llx - flags 0x%x 0x%x> lport %d fport %d laddr %s faddr %s",
		    msg, IS_UDP(so) ? "UDP" : "TCP",
		    (uint64_t)VM_KERNEL_ADDRPERM(so), inp->inp_flags, inp->inp_socket->so_flags,
		    ntohs(inp->inp_lport), ntohs(inp->inp_fport), local, remote);
	}
}

static void
cfil_info_log(int level, struct cfil_info *cfil_info, const char* msg)
{
	if (cfil_info == NULL) {
		return;
	}

	if (cfil_info->cfi_hash_entry != NULL) {
		cfil_hash_entry_log(level, cfil_info->cfi_so, cfil_info->cfi_hash_entry, cfil_info->cfi_sock_id, msg);
	} else {
		cfil_inp_log(level, cfil_info->cfi_so, msg);
	}
}

static void
cfil_sock_udp_unlink_flow(struct socket *so, struct soflow_hash_entry *hash_entry, struct cfil_info *cfil_info)
{
	if (so == NULL || hash_entry == NULL || cfil_info == NULL) {
		return;
	}

	if (so->so_flags & SOF_CONTENT_FILTER) {
		VERIFY(so->so_usecount > 0);
		so->so_usecount--;
	}

	// Hold exclusive lock before clearing cfil_info hash entry link
	cfil_rw_lock_exclusive(&cfil_lck_rw);

	cfil_info->cfi_hash_entry = NULL;

	if (cfil_info->cfi_debug) {
		CFIL_LOG(LOG_INFO, "CFIL <%s>: <so %llx> - use count %d",
		    IS_UDP(so) ? "UDP" : "TCP", (uint64_t)VM_KERNEL_ADDRPERM(so), so->so_usecount);
	}

	cfil_rw_unlock_exclusive(&cfil_lck_rw);
}

bool
check_port(struct sockaddr *addr, u_short port)
{
	struct sockaddr_in *sin = NULL;
	struct sockaddr_in6 *sin6 = NULL;

	if (addr == NULL || port == 0) {
		return FALSE;
	}

	switch (addr->sa_family) {
	case AF_INET:
		sin = satosin(addr);
		if (sin->sin_len != sizeof(*sin)) {
			return FALSE;
		}
		if (port == ntohs(sin->sin_port)) {
			return TRUE;
		}
		break;
	case AF_INET6:
		sin6 = satosin6(addr);
		if (sin6->sin6_len != sizeof(*sin6)) {
			return FALSE;
		}
		if (port == ntohs(sin6->sin6_port)) {
			return TRUE;
		}
		break;
	default:
		break;
	}
	return FALSE;
}
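
/*
 * check_port() compares a host-byte-order port against the (network-byte-
 * order) port inside a sockaddr, for both AF_INET and AF_INET6. Illustrative
 * use (hypothetical caller, not from this file): skip a flow by port number.
 *
 *	if (check_port(remote, 53)) {
 *		return false;   // e.g. leave DNS traffic alone
 *	}
 */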

cfil_sock_id_t
cfil_sock_id_from_datagram_socket(struct socket *so, struct sockaddr *local, struct sockaddr *remote)
{
	socket_lock_assert_owned(so);

	if (so->so_flow_db == NULL) {
		return CFIL_SOCK_ID_NONE;
	}
	return (cfil_sock_id_t)soflow_db_get_feature_context_id(so->so_flow_db, local, remote);
}

static struct cfil_info *
cfil_sock_udp_get_info(struct socket *so, uint32_t filter_control_unit, bool outgoing, struct soflow_hash_entry *hash_entry,
    struct sockaddr *local, struct sockaddr *remote)
{
	int new_filter_control_unit = 0;
	struct cfil_info *cfil_info = NULL;

	errno_t error = 0;
	socket_lock_assert_owned(so);

	if (hash_entry == NULL || hash_entry->soflow_db == NULL) {
		return NULL;
	}

	if (hash_entry->soflow_feat_ctxt != NULL && hash_entry->soflow_feat_ctxt_id != 0) {
		/* Drop pre-existing UDP flow if filter state changed */
		cfil_info = (struct cfil_info *) hash_entry->soflow_feat_ctxt;
		new_filter_control_unit = necp_socket_get_content_filter_control_unit(so);
		if (new_filter_control_unit > 0 &&
		    new_filter_control_unit != cfil_info->cfi_filter_control_unit) {
			if (DO_PRESERVE_CONNECTIONS) {
				cfil_info->cfi_filter_control_unit = new_filter_control_unit;
			} else {
				CFIL_LOG(LOG_NOTICE, "CFIL: UDP(%s) <so %llx> - filter state changed - dropped pre-existing flow (old state 0x%x new state 0x%x)",
				    outgoing ? "OUT" : "IN", (uint64_t)VM_KERNEL_ADDRPERM(so),
				    cfil_info->cfi_filter_control_unit, new_filter_control_unit);
				return NULL;
			}
		}
		return cfil_info;
	}

	cfil_info = cfil_info_alloc(so, hash_entry);
	if (cfil_info == NULL) {
		CFIL_LOG(LOG_ERR, "CFIL: UDP failed to alloc cfil_info");
		OSIncrementAtomic(&cfil_stats.cfs_sock_attach_no_mem);
		return NULL;
	}
	cfil_info->cfi_filter_control_unit = filter_control_unit;
	cfil_info->cfi_dir = outgoing ? CFS_CONNECTION_DIR_OUT : CFS_CONNECTION_DIR_IN;
	cfil_info->cfi_debug = DEBUG_FLOW(sotoinpcb(so), so, local, remote);
	if (cfil_info->cfi_debug) {
		CFIL_LOG(LOG_INFO, "CFIL: UDP (outgoing %d) - debug flow with port %d", outgoing, cfil_log_port);
		CFIL_LOG(LOG_INFO, "CFIL: UDP so_gencnt %llx entry flowhash %x cfil %p sockID %llx",
		    so->so_gencnt, hash_entry->soflow_flowhash, cfil_info, cfil_info->cfi_sock_id);
	}

	if (cfil_info_attach_unit(so, filter_control_unit, cfil_info) == 0) {
		CFIL_INFO_FREE(cfil_info);
		CFIL_LOG(LOG_ERR, "CFIL: UDP cfil_info_attach_unit(%u) failed",
		    filter_control_unit);
		OSIncrementAtomic(&cfil_stats.cfs_sock_attach_failed);
		return NULL;
	}

	if (cfil_info->cfi_debug) {
		CFIL_LOG(LOG_DEBUG, "CFIL: UDP <so %llx> filter_control_unit %u sockID %llu attached",
		    (uint64_t)VM_KERNEL_ADDRPERM(so),
		    filter_control_unit, cfil_info->cfi_sock_id);
	}

	so->so_flags |= SOF_CONTENT_FILTER;
	OSIncrementAtomic(&cfil_stats.cfs_sock_attached);

	/* Hold a reference on the socket for each flow */
	so->so_usecount++;

	/* link cfil_info to flow */
	hash_entry->soflow_feat_ctxt = cfil_info;
	hash_entry->soflow_feat_ctxt_id = cfil_info->cfi_sock_id;

	if (cfil_info->cfi_debug) {
		cfil_info_log(LOG_INFO, cfil_info, "CFIL: ADDED");
	}

	error = cfil_dispatch_attach_event(so, cfil_info, 0,
	    outgoing ? CFS_CONNECTION_DIR_OUT : CFS_CONNECTION_DIR_IN);
	/* We can recover from flow control or out of memory errors */
	if (error != 0 && error != ENOBUFS && error != ENOMEM) {
		return NULL;
	}

	CFIL_INFO_VERIFY(cfil_info);
	return cfil_info;
}
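
/*
 * Flow lifecycle summary: cfil_sock_udp_get_info() either returns the
 * cfil_info already linked to the soflow hash entry (possibly dropping or
 * re-tagging it on a filter state change), or allocates a new one, attaches
 * the filter unit, takes a socket use-count reference and dispatches the
 * attach event. The reference and the soflow_feat_ctxt link are undone in
 * cfil_sock_udp_unlink_flow() when the flow is garbage collected or detached.
 */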

errno_t
cfil_sock_udp_handle_data(bool outgoing, struct socket *so,
    struct sockaddr *local, struct sockaddr *remote,
    struct mbuf *data, struct mbuf *control, uint32_t flags,
    struct soflow_hash_entry *hash_entry)
{
#pragma unused(outgoing, so, local, remote, data, control, flags)
	errno_t error = 0;
	uint32_t filter_control_unit;
	struct cfil_info *cfil_info = NULL;

	socket_lock_assert_owned(so);

	if (cfil_active_count == 0) {
		CFIL_LOG(LOG_DEBUG, "CFIL: UDP no active filter");
		OSIncrementAtomic(&cfil_stats.cfs_sock_attach_in_vain);
		return error;
	}

	// Socket has been blessed
	if ((so->so_flags1 & SOF1_CONTENT_FILTER_SKIP) != 0) {
		return error;
	}

	filter_control_unit = necp_socket_get_content_filter_control_unit(so);
	if (filter_control_unit == 0) {
		CFIL_LOG(LOG_DEBUG, "CFIL: UDP failed to get control unit");
		return error;
	}

	if (filter_control_unit == NECP_FILTER_UNIT_NO_FILTER) {
		return error;
	}

	if ((filter_control_unit & NECP_MASK_USERSPACE_ONLY) != 0) {
		CFIL_LOG(LOG_DEBUG, "CFIL: UDP user space only");
		OSIncrementAtomic(&cfil_stats.cfs_sock_userspace_only);
		return error;
	}

	cfil_info = cfil_sock_udp_get_info(so, filter_control_unit, outgoing, hash_entry, local, remote);
	if (cfil_info == NULL) {
		CFIL_LOG(LOG_ERR, "CFIL: <so %llx> Failed to get UDP cfil_info", (uint64_t)VM_KERNEL_ADDRPERM(so));
		return EPIPE;
	}
	// Update last used timestamp, this is for flow idle timeout

	if (cfil_info->cfi_debug) {
		cfil_info_log(LOG_DEBUG, cfil_info, "CFIL: Got flow");
	}

	if (cfil_info->cfi_flags & CFIF_DROP) {
		if (cfil_info->cfi_debug) {
			cfil_info_log(LOG_INFO, cfil_info, "CFIL: UDP DROP");
		}
		return EPIPE;
	}
	if (control != NULL) {
		OSIncrementAtomic(&cfil_stats.cfs_data_in_control);
	}
	if (data->m_type == MT_OOBDATA) {
		CFIL_LOG(LOG_ERR, "so %llx MSG_OOB",
		    (uint64_t)VM_KERNEL_ADDRPERM(so));
		OSIncrementAtomic(&cfil_stats.cfs_data_in_oob);
	}

	error = cfil_data_common(so, cfil_info, outgoing, remote, data, control, flags);

	return error;
}
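
/*
 * Note on the EPIPE returns above: for datagram flows the subsystem fails
 * closed. If a flow should be filtered (a filter control unit is set) but a
 * cfil_info cannot be obtained, or the flow is already marked CFIF_DROP, the
 * datagram is dropped rather than passed through unfiltered; data reaches
 * cfil_data_common() only once a valid cfil_info exists.
 */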

struct cfil_udp_attached_context {
	bool need_wait;
	lck_mtx_t *mutex_held;
	int attached;
};

static bool
cfil_filters_udp_attached_per_flow(struct socket *so,
    struct soflow_hash_entry *hash_entry,
    void *context)
{
	struct cfil_udp_attached_context *apply_context = NULL;
	struct cfil_info *cfil_info = NULL;
	struct cfil_entry *entry = NULL;
	uint64_t sock_flow_id = 0;
	struct timespec ts;
	errno_t error = 0;
	int kcunit;

	if (hash_entry->soflow_feat_ctxt == NULL || context == NULL) {
		return true;
	}

	cfil_info = hash_entry->soflow_feat_ctxt;
	apply_context = (struct cfil_udp_attached_context *)context;

	for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
		entry = &cfil_info->cfi_entries[kcunit - 1];

		/* Are we attached to the filter? */
		if (entry->cfe_filter == NULL) {
			continue;
		}

		if ((entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED) == 0) {
			continue;
		}
		if ((entry->cfe_flags & CFEF_CFIL_DETACHED) != 0) {
			continue;
		}

		apply_context->attached = 1;

		if (apply_context->need_wait == TRUE) {
			if (cfil_info->cfi_debug) {
				cfil_info_log(LOG_INFO, cfil_info, "CFIL: UDP PER-FLOW WAIT FOR FLOW TO FINISH");
			}

			ts.tv_sec = cfil_close_wait_timeout / 1000;
			ts.tv_nsec = (cfil_close_wait_timeout % 1000) * NSEC_PER_USEC * 1000;

			OSIncrementAtomic(&cfil_stats.cfs_close_wait);
			cfil_info->cfi_flags |= CFIF_CLOSE_WAIT;
			sock_flow_id = cfil_info->cfi_sock_id;

			error = msleep((caddr_t)cfil_info, apply_context->mutex_held,
			    PSOCK | PCATCH, "cfil_filters_udp_attached_per_flow", &ts);

			// Woke up from sleep, check that cfil_info is still valid
			if (so->so_flow_db == NULL ||
			    (cfil_info != soflow_db_get_feature_context(so->so_flow_db, sock_flow_id))) {
				// cfil_info is not valid, do not continue
				return false;
			}

			cfil_info->cfi_flags &= ~CFIF_CLOSE_WAIT;

			if (cfil_info->cfi_debug) {
				cfil_info_log(LOG_INFO, cfil_info, "CFIL: UDP PER-FLOW WAIT FOR FLOW DONE");
			}

			/*
			 * Force close in case of timeout
			 */
			if (error != 0) {
				OSIncrementAtomic(&cfil_stats.cfs_close_wait_timeout);

				if (cfil_info->cfi_debug) {
					cfil_info_log(LOG_INFO, cfil_info, "CFIL: UDP PER-FLOW WAIT FOR FLOW TIMED OUT, FORCE DETACH");
				}

				entry->cfe_flags |= CFEF_CFIL_DETACHED;
			}
		}
		return false;
	}
	return true;
}

/*
 * Go through all UDP flows for the specified socket and return TRUE if
 * any flow is still attached. If need_wait is TRUE, wait on the first
 * attached flow.
 */
static int
cfil_filters_udp_attached(struct socket *so, bool need_wait)
{
	struct cfil_udp_attached_context apply_context = { 0 };
	lck_mtx_t *mutex_held;

	socket_lock_assert_owned(so);

	if ((so->so_flags & SOF_CONTENT_FILTER) != 0 && so->so_flow_db != NULL) {
		if (so->so_proto->pr_getlock != NULL) {
			mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK);
		} else {
			mutex_held = so->so_proto->pr_domain->dom_mtx;
		}
		LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);

		apply_context.need_wait = need_wait;
		apply_context.mutex_held = mutex_held;
		soflow_db_apply(so->so_flow_db, cfil_filters_udp_attached_per_flow, (void *)&apply_context);
	}

	return apply_context.attached;
}
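
/*
 * The per-flow helpers in this file all follow the same soflow_db_apply()
 * pattern: a small context struct carries inputs and accumulated outputs,
 * and the callback returns true to keep iterating or false to stop early.
 * A minimal sketch of a new walker under that convention (names below are
 * hypothetical, for illustration only):
 *
 *	struct my_context { int count; };
 *
 *	static bool
 *	my_per_flow(struct socket *so, struct soflow_hash_entry *entry, void *ctx)
 *	{
 *		struct my_context *c = ctx;
 *		if (entry->soflow_feat_ctxt != NULL) {
 *			c->count++;     // count flows with a cfil_info attached
 *		}
 *		return true;            // continue with the next flow
 *	}
 *
 *	// usage: struct my_context c = { 0 };
 *	//        soflow_db_apply(so->so_flow_db, my_per_flow, &c);
 */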

struct cfil_udp_data_pending_context {
	struct sockbuf *sb;
	uint64_t total_pending;
};

static bool
cfil_sock_udp_data_pending_per_flow(struct socket *so,
    struct soflow_hash_entry *hash_entry,
    void *context)
{
#pragma unused(so)
	struct cfil_udp_data_pending_context *apply_context = NULL;
	struct cfil_info *cfil_info = NULL;
	struct cfi_buf *cfi_buf;

	uint64_t pending = 0;

	if (hash_entry->soflow_feat_ctxt == NULL || context == NULL) {
		return true;
	}

	cfil_info = hash_entry->soflow_feat_ctxt;
	apply_context = (struct cfil_udp_data_pending_context *)context;

	if (apply_context->sb == NULL) {
		return true;
	}

	if ((apply_context->sb->sb_flags & SB_RECV) == 0) {
		cfi_buf = &cfil_info->cfi_snd;
	} else {
		cfi_buf = &cfil_info->cfi_rcv;
	}

	pending = cfi_buf->cfi_pending_last - cfi_buf->cfi_pending_first;
	/*
	 * If we are limited by the "chars of mbufs used" roughly
	 * adjust so we won't overcommit
	 */
	if ((uint64_t)cfi_buf->cfi_pending_mbcnt > pending) {
		pending = cfi_buf->cfi_pending_mbcnt;
	}

	apply_context->total_pending += pending;
	return true;
}

int32_t
cfil_sock_udp_data_pending(struct sockbuf *sb, bool check_thread)
{
	struct cfil_udp_data_pending_context apply_context = { 0 };
	struct socket *so = sb->sb_so;

	socket_lock_assert_owned(so);

	if ((so->so_flags & SOF_CONTENT_FILTER) != 0 && so->so_flow_db != NULL &&
	    (check_thread == FALSE || so->so_snd.sb_cfil_thread != current_thread())) {
		apply_context.sb = sb;
		soflow_db_apply(so->so_flow_db, cfil_sock_udp_data_pending_per_flow, (void *)&apply_context);

		VERIFY(apply_context.total_pending < INT32_MAX);
	}

	return (int32_t)(apply_context.total_pending);
}

struct cfil_udp_notify_shutdown_context {
	int how;
	int drop_flag;
	int shut_flag;
	int done_count;
};

static bool
cfil_sock_udp_notify_shutdown_per_flow(struct socket *so,
    struct soflow_hash_entry *hash_entry,
    void *context)
{
	struct cfil_udp_notify_shutdown_context *apply_context = NULL;
	struct cfil_info *cfil_info = NULL;
	errno_t error = 0;
	int kcunit;

	if (hash_entry->soflow_feat_ctxt == NULL || context == NULL) {
		return true;
	}

	cfil_info = hash_entry->soflow_feat_ctxt;
	apply_context = (struct cfil_udp_notify_shutdown_context *)context;

	// This flow is marked as DROP
	if (cfil_info->cfi_flags & apply_context->drop_flag) {
		apply_context->done_count++;
		return true;
	}

	// This flow has been shut already, skip
	if (cfil_info->cfi_flags & apply_context->shut_flag) {
		return true;
	}
	// Mark flow as shut
	cfil_info->cfi_flags |= apply_context->shut_flag;
	apply_context->done_count++;

	for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
		/* Disconnect incoming side */
		if (apply_context->how != SHUT_WR) {
			error = cfil_dispatch_disconnect_event(so, cfil_info, kcunit, 0);
		}
		/* Disconnect outgoing side */
		if (apply_context->how != SHUT_RD) {
			error = cfil_dispatch_disconnect_event(so, cfil_info, kcunit, 1);
		}
	}

	if (cfil_info->cfi_debug) {
		cfil_info_log(LOG_INFO, cfil_info, "CFIL: UDP PER-FLOW NOTIFY_SHUTDOWN");
	}

	return true;
}

int
cfil_sock_udp_notify_shutdown(struct socket *so, int how, int drop_flag, int shut_flag)
{
	struct cfil_udp_notify_shutdown_context apply_context = { 0 };
	errno_t error = 0;

	socket_lock_assert_owned(so);

	if ((so->so_flags & SOF_CONTENT_FILTER) != 0 && so->so_flow_db != NULL) {
		apply_context.how = how;
		apply_context.drop_flag = drop_flag;
		apply_context.shut_flag = shut_flag;

		soflow_db_apply(so->so_flow_db, cfil_sock_udp_notify_shutdown_per_flow, (void *)&apply_context);
	}

	if (apply_context.done_count == 0) {
		error = ENOTCONN;
	}
	return error;
}

int
cfil_sock_udp_shutdown(struct socket *so, int *how)
{
	int error = 0;

	if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || (so->so_flow_db == NULL)) {
		goto done;
	}

	socket_lock_assert_owned(so);

	CFIL_LOG(LOG_INFO, "so %llx how %d",
	    (uint64_t)VM_KERNEL_ADDRPERM(so), *how);

	/*
	 * Check the state of the socket before the content filter
	 */
	if (*how != SHUT_WR && (so->so_state & SS_CANTRCVMORE) != 0) {
		/* read already shut down */
		error = ENOTCONN;
		goto done;
	}
	if (*how != SHUT_RD && (so->so_state & SS_CANTSENDMORE) != 0) {
		/* write already shut down */
		error = ENOTCONN;
		goto done;
	}

	/*
	 * shutdown read: SHUT_RD or SHUT_RDWR
	 */
	if (*how != SHUT_WR) {
		error = cfil_sock_udp_notify_shutdown(so, SHUT_RD, CFIF_DROP, CFIF_SHUT_RD);
		if (error != 0) {
			goto done;
		}
	}
	/*
	 * shutdown write: SHUT_WR or SHUT_RDWR
	 */
	if (*how != SHUT_RD) {
		error = cfil_sock_udp_notify_shutdown(so, SHUT_WR, CFIF_DROP, CFIF_SHUT_WR);
		if (error != 0) {
			goto done;
		}

		/*
		 * When outgoing data is pending, we delay the shutdown at the
		 * protocol level until the content filters give the final
		 * verdict on the pending data.
		 */
		if (cfil_sock_data_pending(&so->so_snd) != 0) {
			/*
			 * When shutting down the read and write sides at once
			 * we can proceed to the final shutdown of the read
			 * side. Otherwise, we just return.
			 */
			if (*how == SHUT_WR) {
				error = EJUSTRETURN;
			} else if (*how == SHUT_RDWR) {
				*how = SHUT_RD;
			}
		}
	}
done:
	return error;
}

void
cfil_sock_udp_close_wait(struct socket *so)
{
	socket_lock_assert_owned(so);

	while (cfil_filters_udp_attached(so, FALSE)) {
		/*
		 * Notify the filters we are going away so they can detach
		 */
		cfil_sock_udp_notify_shutdown(so, SHUT_RDWR, 0, 0);

		/*
		 * Check whether we still need to wait after the filters
		 * are notified of the disconnection
		 */
		if (cfil_filters_udp_attached(so, TRUE) == 0) {
			break;
		}
	}
}

static bool
cfil_sock_udp_is_closed_per_flow(struct socket *so,
    struct soflow_hash_entry *hash_entry,
    void *context)
{
#pragma unused(context)
	struct cfil_info *cfil_info = NULL;
	errno_t error = 0;
	int kcunit;

	if (hash_entry->soflow_feat_ctxt == NULL) {
		return true;
	}

	cfil_info = hash_entry->soflow_feat_ctxt;

	for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
		/* Let the filters know of the closing */
		error = cfil_dispatch_closed_event(so, cfil_info, kcunit);
	}

	/* Last chance to push passed data out */
	error = cfil_acquire_sockbuf(so, cfil_info, 1);
	if (error == 0) {
		cfil_service_inject_queue(so, cfil_info, 1);
	}
	cfil_release_sockbuf(so, 1);

	cfil_info->cfi_flags |= CFIF_SOCK_CLOSED;

	/* Pending data needs to go */
	cfil_flush_queues(so, cfil_info);

	CFIL_INFO_VERIFY(cfil_info);

	if (cfil_info->cfi_debug) {
		cfil_info_log(LOG_INFO, cfil_info, "CFIL: UDP PER-FLOW IS_CLOSED");
	}

	return true;
}

void
cfil_sock_udp_is_closed(struct socket *so)
{
	socket_lock_assert_owned(so);

	if ((so->so_flags & SOF_CONTENT_FILTER) != 0 && so->so_flow_db != NULL) {
		soflow_db_apply(so->so_flow_db, cfil_sock_udp_is_closed_per_flow, NULL);
	}
}

static bool
cfil_sock_udp_buf_update_per_flow(struct socket *so,
    struct soflow_hash_entry *hash_entry,
    void *context)
{
	struct cfil_info *cfil_info = NULL;
	struct sockbuf *sb = NULL;
	errno_t error = 0;
	int outgoing;

	if (hash_entry->soflow_feat_ctxt == NULL || context == NULL) {
		return true;
	}

	cfil_info = hash_entry->soflow_feat_ctxt;
	sb = (struct sockbuf *) context;

	if ((sb->sb_flags & SB_RECV) == 0) {
		if ((cfil_info->cfi_flags & CFIF_RETRY_INJECT_OUT) == 0) {
			return true;
		}
		outgoing = 1;
		OSIncrementAtomic(&cfil_stats.cfs_inject_q_out_retry);
	} else {
		if ((cfil_info->cfi_flags & CFIF_RETRY_INJECT_IN) == 0) {
			return true;
		}
		outgoing = 0;
		OSIncrementAtomic(&cfil_stats.cfs_inject_q_in_retry);
	}

	CFIL_LOG(LOG_NOTICE, "so %llx outgoing %d",
	    (uint64_t)VM_KERNEL_ADDRPERM(so), outgoing);

	error = cfil_acquire_sockbuf(so, cfil_info, outgoing);
	if (error == 0) {
		cfil_service_inject_queue(so, cfil_info, outgoing);
	}
	cfil_release_sockbuf(so, outgoing);
	return true;
}

void
cfil_sock_udp_buf_update(struct sockbuf *sb)
{
	struct socket *so = sb->sb_so;

	socket_lock_assert_owned(so);

	if ((so->so_flags & SOF_CONTENT_FILTER) != 0 && so->so_flow_db != NULL) {
		if (!cfil_sbtrim) {
			return;
		}
		soflow_db_apply(so->so_flow_db, cfil_sock_udp_buf_update_per_flow, (void *)sb);
	}
}

void
cfil_filter_show(u_int32_t kcunit)
{
	struct content_filter *cfc = NULL;
	struct cfil_entry *entry;
	int count = 0;

	if (content_filters == NULL) {
		return;
	}
	if (kcunit > MAX_CONTENT_FILTER) {
		return;
	}

	cfil_rw_lock_shared(&cfil_lck_rw);

	if (content_filters[kcunit - 1] == NULL) {
		cfil_rw_unlock_shared(&cfil_lck_rw);
		return;
	}
	cfc = content_filters[kcunit - 1];

	CFIL_LOG(LOG_DEBUG, "CFIL: FILTER SHOW: Filter <unit %d, entry count %d> flags <%lx>:",
	    kcunit, cfc->cf_sock_count, (unsigned long)cfc->cf_flags);
	if (cfc->cf_flags & CFF_DETACHING) {
		CFIL_LOG(LOG_DEBUG, "CFIL: FILTER SHOW:-DETACHING");
	}
	if (cfc->cf_flags & CFF_ACTIVE) {
		CFIL_LOG(LOG_DEBUG, "CFIL: FILTER SHOW:-ACTIVE");
	}
	if (cfc->cf_flags & CFF_FLOW_CONTROLLED) {
		CFIL_LOG(LOG_DEBUG, "CFIL: FILTER SHOW:-FLOW CONTROLLED");
	}

	TAILQ_FOREACH(entry, &cfc->cf_sock_entries, cfe_link) {
		if (entry->cfe_cfil_info && entry->cfe_cfil_info->cfi_so) {
			struct cfil_info *cfil_info = entry->cfe_cfil_info;

			count++;

			if (entry->cfe_flags & CFEF_CFIL_DETACHED) {
				cfil_info_log(LOG_DEBUG, cfil_info, "CFIL: FILTER SHOW:-DETACHED");
			} else {
				cfil_info_log(LOG_DEBUG, cfil_info, "CFIL: FILTER SHOW:-ATTACHED");
			}
		}
	}

	CFIL_LOG(LOG_DEBUG, "CFIL: FILTER SHOW:Filter - total entries shown: %d", count);

	cfil_rw_unlock_shared(&cfil_lck_rw);
}

void
cfil_info_show(void)
{
	struct cfil_info *cfil_info;
	int count = 0;

	cfil_rw_lock_shared(&cfil_lck_rw);

	CFIL_LOG(LOG_DEBUG, "CFIL: INFO SHOW:count %d", cfil_sock_attached_count);

	TAILQ_FOREACH(cfil_info, &cfil_sock_head, cfi_link) {
		count++;

		cfil_info_log(LOG_DEBUG, cfil_info, "CFIL: INFO SHOW");

		if (cfil_info->cfi_flags & CFIF_DROP) {
			CFIL_LOG(LOG_DEBUG, "CFIL: INFO FLAG - DROP");
		}
		if (cfil_info->cfi_flags & CFIF_CLOSE_WAIT) {
			CFIL_LOG(LOG_DEBUG, "CFIL: INFO FLAG - CLOSE_WAIT");
		}
		if (cfil_info->cfi_flags & CFIF_SOCK_CLOSED) {
			CFIL_LOG(LOG_DEBUG, "CFIL: INFO FLAG - SOCK_CLOSED");
		}
		if (cfil_info->cfi_flags & CFIF_RETRY_INJECT_IN) {
			CFIL_LOG(LOG_DEBUG, "CFIL: INFO FLAG - RETRY_INJECT_IN");
		}
		if (cfil_info->cfi_flags & CFIF_RETRY_INJECT_OUT) {
			CFIL_LOG(LOG_DEBUG, "CFIL: INFO FLAG - RETRY_INJECT_OUT");
		}
		if (cfil_info->cfi_flags & CFIF_SHUT_WR) {
			CFIL_LOG(LOG_DEBUG, "CFIL: INFO FLAG - SHUT_WR");
		}
		if (cfil_info->cfi_flags & CFIF_SHUT_RD) {
			CFIL_LOG(LOG_DEBUG, "CFIL: INFO FLAG - SHUT_RD");
		}
	}

	CFIL_LOG(LOG_DEBUG, "CFIL: INFO SHOW:total cfil_info shown: %d", count);

	cfil_rw_unlock_shared(&cfil_lck_rw);
}

bool
cfil_info_action_timed_out(struct cfil_info *cfil_info, int timeout)
{
	struct cfil_entry *entry;
	struct timeval current_tv;
	struct timeval diff_time;

	if (cfil_info == NULL) {
		return false;
	}

	/*
	 * If we have queued up more data than the passed offset and we haven't
	 * received an action from user space for a while (the user space filter
	 * might have crashed), return action timed out.
	 */
	if (cfil_info->cfi_snd.cfi_pending_last > cfil_info->cfi_snd.cfi_pass_offset ||
	    cfil_info->cfi_rcv.cfi_pending_last > cfil_info->cfi_rcv.cfi_pass_offset) {
		microuptime(&current_tv);

		for (int kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
			entry = &cfil_info->cfi_entries[kcunit - 1];

			if (entry->cfe_filter == NULL) {
				continue;
			}

			if (cfil_info->cfi_snd.cfi_pending_last > entry->cfe_snd.cfe_pass_offset ||
			    cfil_info->cfi_rcv.cfi_pending_last > entry->cfe_rcv.cfe_pass_offset) {
				// haven't gotten an action from this filter, check timeout
				timersub(&current_tv, &entry->cfe_last_action, &diff_time);
				if (diff_time.tv_sec >= timeout) {
					if (cfil_info->cfi_debug) {
						cfil_info_log(LOG_INFO, cfil_info, "CFIL: flow ACTION timeout expired");
					}
					return true;
				}
			}
		}
	}
	return false;
}
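
/*
 * In other words: a flow is considered "action timed out" only when some
 * filter still owes a verdict (pending data beyond its pass offset) and that
 * filter's last action is older than `timeout` seconds. Flows with no
 * outstanding data never time out here.
 */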

bool
cfil_info_buffer_threshold_exceeded(struct cfil_info *cfil_info)
{
	if (cfil_info == NULL) {
		return false;
	}

	/*
	 * Clean up flow if it exceeded queue thresholds
	 */
	if (cfil_info->cfi_snd.cfi_tail_drop_cnt ||
	    cfil_info->cfi_rcv.cfi_tail_drop_cnt) {
		if (cfil_info->cfi_debug) {
			CFIL_LOG(LOG_INFO, "CFIL: queue threshold exceeded:mbuf max < count: %d bytes: %d > tail drop count < OUT: %d IN: %d > ",
			    cfil_udp_gc_mbuf_num_max,
			    cfil_udp_gc_mbuf_cnt_max,
			    cfil_info->cfi_snd.cfi_tail_drop_cnt,
			    cfil_info->cfi_rcv.cfi_tail_drop_cnt);
			cfil_info_log(LOG_INFO, cfil_info, "CFIL: queue threshold exceeded");
		}
		return true;
	}

	return false;
}

static bool
cfil_dgram_gc_needed(struct socket *so, struct soflow_hash_entry *hash_entry, u_int64_t current_time)
{
#pragma unused(current_time)
	struct cfil_info *cfil_info = NULL;

	if (so == NULL || hash_entry == NULL || hash_entry->soflow_feat_ctxt == NULL) {
		return false;
	}
	cfil_info = (struct cfil_info *) hash_entry->soflow_feat_ctxt;

	cfil_rw_lock_shared(&cfil_lck_rw);

	if (cfil_info_action_timed_out(cfil_info, UDP_FLOW_GC_ACTION_TO) ||
	    cfil_info_buffer_threshold_exceeded(cfil_info)) {
		if (cfil_info->cfi_debug) {
			cfil_info_log(LOG_INFO, cfil_info, "CFIL: UDP PER-FLOW GC NEEDED");
		}
		cfil_rw_unlock_shared(&cfil_lck_rw);
		return true;
	}

	cfil_rw_unlock_shared(&cfil_lck_rw);
	return false;
}

static bool
cfil_dgram_gc_perform(struct socket *so, struct soflow_hash_entry *hash_entry)
{
	struct cfil_info *cfil_info = NULL;

	if (so == NULL || hash_entry == NULL || hash_entry->soflow_feat_ctxt == NULL) {
		return false;
	}
	cfil_info = (struct cfil_info *) hash_entry->soflow_feat_ctxt;

	if (cfil_info->cfi_debug) {
		cfil_info_log(LOG_INFO, cfil_info, "CFIL: UDP PER-FLOW GC PERFORM");
	}

	for (int kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
		/* Let the filters know of the closing */
		cfil_dispatch_closed_event(so, cfil_info, kcunit);
	}
	cfil_sock_udp_unlink_flow(so, hash_entry, cfil_info);
	CFIL_INFO_FREE(cfil_info);
	OSIncrementAtomic(&cfil_stats.cfs_sock_detached);
	return true;
}

static bool
cfil_dgram_detach_entry(struct socket *so, struct soflow_hash_entry *hash_entry)
{
	struct cfil_info *cfil_info = NULL;

	if (hash_entry == NULL || hash_entry->soflow_feat_ctxt == NULL) {
		return true;
	}
	cfil_info = (struct cfil_info *) hash_entry->soflow_feat_ctxt;

	if (cfil_info->cfi_debug) {
		cfil_info_log(LOG_INFO, cfil_info, "CFIL: DGRAM DETACH ENTRY");
	}

	cfil_sock_udp_unlink_flow(so, hash_entry, cfil_info);
	CFIL_INFO_FREE(cfil_info);
	OSIncrementAtomic(&cfil_stats.cfs_sock_detached);

	return true;
}

static bool
cfil_dgram_detach_db(struct socket *so, struct soflow_db *db)
{
#pragma unused(db)
	if (so && so->so_flags & SOF_CONTENT_FILTER) {
		so->so_flags &= ~SOF_CONTENT_FILTER;
		CFIL_LOG(LOG_DEBUG, "CFIL: DGRAM DETACH DB <so %llx>", (uint64_t)VM_KERNEL_ADDRPERM(so));
	}
	return true;
}

struct m_tag *
cfil_dgram_save_socket_state(struct cfil_info *cfil_info, struct mbuf *m)
{
	struct m_tag *tag = NULL;
	struct cfil_tag *ctag = NULL;
	struct soflow_hash_entry *hash_entry = NULL;
	struct inpcb *inp = NULL;

	if (cfil_info == NULL || cfil_info->cfi_so == NULL ||
	    cfil_info->cfi_hash_entry == NULL || m == NULL || !(m->m_flags & M_PKTHDR)) {
		return NULL;
	}

	inp = sotoinpcb(cfil_info->cfi_so);

	/* Allocate a tag */
	tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_CFIL_UDP,
	    sizeof(struct cfil_tag), M_DONTWAIT, m);

	if (tag) {
		ctag = (struct cfil_tag*)(tag + 1);
		ctag->cfil_so_state_change_cnt = cfil_info->cfi_so->so_state_change_cnt;
		ctag->cfil_so_options = cfil_info->cfi_so->so_options;
		ctag->cfil_inp_flags = inp ? inp->inp_flags : 0;

		hash_entry = cfil_info->cfi_hash_entry;
		if (hash_entry->soflow_family == AF_INET6) {
			fill_ip6_sockaddr_4_6(&ctag->cfil_faddr,
			    &hash_entry->soflow_faddr.addr6,
			    hash_entry->soflow_fport, hash_entry->soflow_faddr6_ifscope);
		} else if (hash_entry->soflow_family == AF_INET) {
			fill_ip_sockaddr_4_6(&ctag->cfil_faddr,
			    hash_entry->soflow_faddr.addr46.ia46_addr4,
			    hash_entry->soflow_fport);
		}
		m_tag_prepend(m, tag);
		return tag;
	}
	return NULL;
}
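
/*
 * cfil_dgram_save_socket_state() and cfil_dgram_get_socket_state() form a
 * pair: the first tags an mbuf with the socket and flow state at the time
 * the datagram was held, the second unlinks that tag on the way out so the
 * original faddr, socket options and inp flags can be restored. Sketch of
 * the consuming side (the caller owns and must free the returned tag):
 *
 *	struct sockaddr *faddr = NULL;
 *	uint32_t cnt, opts;
 *	int inp_flags;
 *	struct m_tag *tag = cfil_dgram_get_socket_state(m, &cnt, &opts,
 *	    &faddr, &inp_flags);
 *	if (tag != NULL) {
 *		// ... use faddr/opts/inp_flags for the deferred send ...
 *		m_tag_free(tag);
 *	}
 */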

struct m_tag *
cfil_dgram_get_socket_state(struct mbuf *m, uint32_t *state_change_cnt, uint32_t *options,
    struct sockaddr **faddr, int *inp_flags)
{
	struct m_tag *tag = NULL;
	struct cfil_tag *ctag = NULL;

	tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_CFIL_UDP, NULL);
	if (tag) {
		ctag = (struct cfil_tag *)(tag + 1);
		if (state_change_cnt) {
			*state_change_cnt = ctag->cfil_so_state_change_cnt;
		}
		if (options) {
			*options = ctag->cfil_so_options;
		}
		if (faddr) {
			*faddr = (struct sockaddr *) &ctag->cfil_faddr;
		}
		if (inp_flags) {
			*inp_flags = ctag->cfil_inp_flags;
		}

		/*
		 * Unlink the tag and hand it over to the caller.
		 * Note that the caller is responsible for freeing it.
		 */
		m_tag_unlink(m, tag);
		return tag;
	}
	return NULL;
}

boolean_t
cfil_dgram_peek_socket_state(struct mbuf *m, int *inp_flags)
{
	struct m_tag *tag = NULL;
	struct cfil_tag *ctag = NULL;

	tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_CFIL_UDP, NULL);
	if (tag) {
		ctag = (struct cfil_tag *)(tag + 1);
		if (inp_flags) {
			*inp_flags = ctag->cfil_inp_flags;
		}
		return true;
	}
	return false;
}

static int
cfil_dispatch_stats_event_locked(int kcunit, struct cfil_stats_report_buffer *buffer, uint32_t stats_count)
{
	struct content_filter *cfc = NULL;
	errno_t error = 0;
	size_t msgsize = 0;

	if (buffer == NULL || stats_count == 0) {
		return error;
	}

	if (content_filters == NULL || kcunit > MAX_CONTENT_FILTER) {
		return error;
	}

	cfc = content_filters[kcunit - 1];
	if (cfc == NULL) {
		return error;
	}

	/* Would be wasteful to try */
	if (cfc->cf_flags & CFF_FLOW_CONTROLLED) {
		error = ENOBUFS;
		goto done;
	}

	msgsize = sizeof(struct cfil_msg_stats_report) + (sizeof(struct cfil_msg_sock_stats) * stats_count);
	buffer->msghdr.cfm_len = (uint32_t)msgsize;
	buffer->msghdr.cfm_version = 1;
	buffer->msghdr.cfm_type = CFM_TYPE_EVENT;
	buffer->msghdr.cfm_op = CFM_OP_STATS;
	buffer->msghdr.cfm_sock_id = 0;
	buffer->count = stats_count;

	if (cfil_log_stats) {
		CFIL_LOG(LOG_DEBUG, "STATS (kcunit %d): msg size %lu - %lu %lu %lu",
		    kcunit,
		    (unsigned long)msgsize,
		    (unsigned long)sizeof(struct cfil_msg_stats_report),
		    (unsigned long)sizeof(struct cfil_msg_sock_stats),
		    (unsigned long)stats_count);
	}

	error = ctl_enqueuedata(cfc->cf_kcref, cfc->cf_kcunit,
	    buffer,
	    msgsize,
	    CTL_DATA_EOR);
	if (error != 0) {
		CFIL_LOG(LOG_ERR, "ctl_enqueuedata() failed:%d", error);
		goto done;
	}
	OSIncrementAtomic(&cfil_stats.cfs_stats_event_ok);

	if (cfil_log_stats) {
		CFIL_LOG(LOG_DEBUG, "CFIL: STATS REPORT:send msg to %d", kcunit);
	}
done:

	if (error == ENOBUFS) {
		OSIncrementAtomic(
			&cfil_stats.cfs_stats_event_flow_control);

		if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) {
			cfil_rw_lock_exclusive(&cfil_lck_rw);
		}

		cfc->cf_flags |= CFF_FLOW_CONTROLLED;

		cfil_rw_lock_exclusive_to_shared(&cfil_lck_rw);
	} else if (error != 0) {
		OSIncrementAtomic(&cfil_stats.cfs_stats_event_fail);
	}

	return error;
}
6911
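/*
 * cfil_stats_report_thread_sleep
 *
 * Put the stats report thread to sleep: indefinitely when "forever" is set
 * (no flow currently needs reporting), otherwise until the next periodic
 * run, CFIL_STATS_REPORT_RUN_INTERVAL_NSEC from now.
 */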
static void
cfil_stats_report_thread_sleep(bool forever)
{
	if (cfil_log_stats) {
		CFIL_LOG(LOG_DEBUG, "CFIL: STATS COLLECTION SLEEP");
	}

	if (forever) {
		(void) assert_wait((event_t) &cfil_sock_attached_stats_count,
		    THREAD_INTERRUPTIBLE);
	} else {
		uint64_t deadline = 0;
		nanoseconds_to_absolutetime(CFIL_STATS_REPORT_RUN_INTERVAL_NSEC, &deadline);
		clock_absolutetime_interval_to_deadline(deadline, &deadline);

		(void) assert_wait_deadline(&cfil_sock_attached_stats_count,
		    THREAD_INTERRUPTIBLE, deadline);
	}
}

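/*
 * cfil_stats_report_thread_func
 *
 * Entry point of the stats report thread: name the thread, schedule the
 * first run and block with cfil_stats_report() as the continuation.
 */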
static void
cfil_stats_report_thread_func(void *v, wait_result_t w)
{
#pragma unused(v, w)

	ASSERT(cfil_stats_report_thread == current_thread());
	thread_set_thread_name(current_thread(), "CFIL_STATS_REPORT");

	// Kick off the first stats report run shortly
	cfil_stats_report_thread_sleep(false);
	thread_block_parameter((thread_continue_t) cfil_stats_report, NULL);
	/* NOTREACHED */
}

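/*
 * cfil_stats_collect_flow_stats_for_filter
 *
 * Collect stats for one flow on behalf of one content filter, provided the
 * filter requested stats reports and its report interval has elapsed.  An
 * entry is appended to the filter's global report buffer only when the flow
 * has byte counts not yet reported to that filter.  The local address is
 * included once, the first time it is known.  Returns true if an entry was
 * added for this flow.
 */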
static bool
cfil_stats_collect_flow_stats_for_filter(int kcunit,
    struct cfil_info *cfil_info,
    struct cfil_entry *entry,
    struct timeval current_tv)
{
	struct cfil_stats_report_buffer *buffer = NULL;
	struct cfil_msg_sock_stats *flow_array = NULL;
	struct cfil_msg_sock_stats *stats = NULL;
	struct inpcb *inp = NULL;
	struct timeval diff_time;
	uint64_t diff_time_usecs;
	int index = 0;

	if (entry->cfe_stats_report_frequency == 0) {
		return false;
	}

	buffer = global_cfil_stats_report_buffers[kcunit - 1];
	if (buffer == NULL) {
		CFIL_LOG(LOG_ERR, "CFIL: STATS: no buffer");
		return false;
	}

	timersub(&current_tv, &entry->cfe_stats_report_ts, &diff_time);
	diff_time_usecs = (diff_time.tv_sec * USEC_PER_SEC) + diff_time.tv_usec;

	if (cfil_info->cfi_debug && cfil_log_stats) {
		CFIL_LOG(LOG_DEBUG, "CFIL: STATS REPORT - elapsed time - ts %llu %llu cur ts %llu %llu diff %llu %llu(usecs %llu) @freq %llu usecs sockID %llu",
		    (unsigned long long)entry->cfe_stats_report_ts.tv_sec,
		    (unsigned long long)entry->cfe_stats_report_ts.tv_usec,
		    (unsigned long long)current_tv.tv_sec,
		    (unsigned long long)current_tv.tv_usec,
		    (unsigned long long)diff_time.tv_sec,
		    (unsigned long long)diff_time.tv_usec,
		    (unsigned long long)diff_time_usecs,
		    (unsigned long long)((entry->cfe_stats_report_frequency * NSEC_PER_MSEC) / NSEC_PER_USEC),
		    cfil_info->cfi_sock_id);
	}

	// Compare elapsed time in usecs
	if (diff_time_usecs >= (entry->cfe_stats_report_frequency * NSEC_PER_MSEC) / NSEC_PER_USEC) {
		if (cfil_info->cfi_debug && cfil_log_stats) {
			CFIL_LOG(LOG_DEBUG, "CFIL: STATS REPORT - in %llu reported %llu",
			    cfil_info->cfi_byte_inbound_count,
			    entry->cfe_byte_inbound_count_reported);
			CFIL_LOG(LOG_DEBUG, "CFIL: STATS REPORT - out %llu reported %llu",
			    cfil_info->cfi_byte_outbound_count,
			    entry->cfe_byte_outbound_count_reported);
		}
		// Check if the flow has new bytes that have not been reported
		if (entry->cfe_byte_inbound_count_reported < cfil_info->cfi_byte_inbound_count ||
		    entry->cfe_byte_outbound_count_reported < cfil_info->cfi_byte_outbound_count) {
			flow_array = (struct cfil_msg_sock_stats *)&buffer->stats;
			index = global_cfil_stats_counts[kcunit - 1];

			stats = &flow_array[index];
			stats->cfs_sock_id = cfil_info->cfi_sock_id;
			stats->cfs_byte_inbound_count = cfil_info->cfi_byte_inbound_count;
			stats->cfs_byte_outbound_count = cfil_info->cfi_byte_outbound_count;

			if (entry->cfe_laddr_sent == false) {
				/* cache the local address if necessary */
				if (cfil_info->cfi_so_attach_laddr.sa.sa_len == 0) {
					inp = cfil_info->cfi_so ? sotoinpcb(cfil_info->cfi_so) : NULL;
					if (inp != NULL) {
						boolean_t outgoing = (cfil_info->cfi_dir == CFS_CONNECTION_DIR_OUT);
						union sockaddr_in_4_6 *src = outgoing ? &cfil_info->cfi_so_attach_laddr : NULL;
						union sockaddr_in_4_6 *dst = outgoing ? NULL : &cfil_info->cfi_so_attach_laddr;
						cfil_fill_event_msg_addresses(cfil_info->cfi_hash_entry, inp,
						    src, dst, !IS_INP_V6(inp), outgoing);
					}
				}

				if (cfil_info->cfi_so_attach_laddr.sa.sa_len != 0) {
					stats->cfs_laddr.sin6 = cfil_info->cfi_so_attach_laddr.sin6;
					entry->cfe_laddr_sent = true;
				}
			}

			global_cfil_stats_counts[kcunit - 1]++;

			entry->cfe_stats_report_ts = current_tv;
			entry->cfe_byte_inbound_count_reported = cfil_info->cfi_byte_inbound_count;
			entry->cfe_byte_outbound_count_reported = cfil_info->cfi_byte_outbound_count;
			if (cfil_info->cfi_debug && cfil_log_stats) {
				cfil_info_log(LOG_DEBUG, cfil_info, "CFIL: STATS COLLECTED");
			}
			CFI_ADD_TIME_LOG(cfil_info, &current_tv, &cfil_info->cfi_first_event, CFM_OP_STATS);
			return true;
		}
	}
	return false;
}

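/*
 * cfil_stats_report
 *
 * Continuation of the stats report thread.  Walks the list of flows with
 * stats reporting enabled, fills the per-filter report buffers and
 * dispatches one stats event per filter unit that collected entries.  At
 * most CFIL_STATS_REPORT_MAX_COUNT flows are reported per round; the walk
 * resumes where it left off until the whole list has been covered, then
 * the thread goes back to sleep.
 */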
static void
cfil_stats_report(void *v, wait_result_t w)
{
#pragma unused(v, w)

	struct cfil_info *cfil_info = NULL;
	struct cfil_entry *entry = NULL;
	struct timeval current_tv;
	uint32_t flow_count = 0;
	uint64_t saved_next_sock_id = 0; // Next sock id to be reported on the next loop
	bool flow_reported = false;

	if (cfil_log_stats) {
		CFIL_LOG(LOG_DEBUG, "CFIL: STATS COLLECTION RUNNING");
	}

	do {
		// Collect the sock ids of all flows that have new stats
		cfil_rw_lock_shared(&cfil_lck_rw);

		if (cfil_sock_attached_stats_count == 0) {
			if (cfil_log_stats) {
				CFIL_LOG(LOG_DEBUG, "CFIL: STATS: no flow");
			}
			cfil_rw_unlock_shared(&cfil_lck_rw);
			goto go_sleep;
		}

		for (int kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
			if (global_cfil_stats_report_buffers[kcunit - 1] != NULL) {
				memset(global_cfil_stats_report_buffers[kcunit - 1], 0, sizeof(struct cfil_stats_report_buffer));
			}
			global_cfil_stats_counts[kcunit - 1] = 0;
		}

		microuptime(&current_tv);
		flow_count = 0;

		TAILQ_FOREACH(cfil_info, &cfil_sock_head_stats, cfi_link_stats) {
			if (saved_next_sock_id != 0 &&
			    saved_next_sock_id == cfil_info->cfi_sock_id) {
				// Here is where we left off previously, start accumulating
				saved_next_sock_id = 0;
			}

			if (saved_next_sock_id == 0) {
				if (flow_count >= CFIL_STATS_REPORT_MAX_COUNT) {
					// Examine a fixed number of flows each round. Remember the current flow
					// so we can resume from here on the next loop
					saved_next_sock_id = cfil_info->cfi_sock_id;
					break;
				}

				flow_reported = false;
				for (int kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
					entry = &cfil_info->cfi_entries[kcunit - 1];
					if (entry->cfe_filter == NULL) {
						if (cfil_info->cfi_debug && cfil_log_stats) {
							CFIL_LOG(LOG_DEBUG, "CFIL: STATS REPORT - so %llx no filter",
							    cfil_info->cfi_so ? (uint64_t)VM_KERNEL_ADDRPERM(cfil_info->cfi_so) : 0);
						}
						continue;
					}

					if ((entry->cfe_stats_report_frequency > 0) &&
					    cfil_stats_collect_flow_stats_for_filter(kcunit, cfil_info, entry, current_tv) == true) {
						flow_reported = true;
					}
				}
				if (flow_reported == true) {
					flow_count++;
				}
			}
		}

		if (flow_count > 0) {
			if (cfil_log_stats) {
				CFIL_LOG(LOG_DEBUG, "CFIL: STATS reporting for %d flows", flow_count);
			}
			for (int kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) {
				if (global_cfil_stats_report_buffers[kcunit - 1] != NULL &&
				    global_cfil_stats_counts[kcunit - 1] > 0) {
					cfil_dispatch_stats_event_locked(kcunit,
					    global_cfil_stats_report_buffers[kcunit - 1],
					    global_cfil_stats_counts[kcunit - 1]);
				}
			}
		} else {
			cfil_rw_unlock_shared(&cfil_lck_rw);
			goto go_sleep;
		}

		cfil_rw_unlock_shared(&cfil_lck_rw);

		// Loop again if we haven't finished walking the whole cfil_info list
	} while (saved_next_sock_id != 0);

go_sleep:

	// Sleep forever (until woken up) if there are no more flows to report
	cfil_rw_lock_shared(&cfil_lck_rw);
	cfil_stats_report_thread_sleep(cfil_sock_attached_stats_count == 0 ? true : false);
	cfil_rw_unlock_shared(&cfil_lck_rw);
	thread_block_parameter((thread_continue_t) cfil_stats_report, NULL);
	/* NOTREACHED */
}
