xref: /xnu-12377.41.6/bsd/skywalk/nexus/flowswitch/fsw_flow.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <skywalk/os_skywalk_private.h>
30 #include <skywalk/nexus/flowswitch/nx_flowswitch.h>
31 #include <skywalk/nexus/flowswitch/fsw_var.h>
32 #include <skywalk/nexus/flowswitch/flow/flow_var.h>
33 
34 static void fsw_flow_route_ctor(void *, struct flow_route *);
35 static int fsw_flow_route_resolve(void *, struct flow_route *,
36     struct __kern_packet *);
37 
/*
 * fsw_flow_add: add a flow on behalf of the process in req0->nfr_pid.
 *
 * Binds a nexus port (ephemeral when the request carries NEXUS_PORT_ANY,
 * otherwise the caller-specified port), creates a flow owner for the
 * (pid, context, low-latency) tuple if one does not yet exist, and
 * registers the flow entry with the flow manager.
 *
 * Returns the flow owner on success with *error set to 0; returns NULL
 * on failure with *error holding the reason.  The caller's request
 * (req0) is updated only on success — it receives the assigned nexus
 * port, the QoS-marking flag, and (for ephemeral binds) the bind key.
 *
 * Locking: the flowswitch RW lock is NOT held here (we may block in
 * msleep and read_random); the fsw detach barrier plus the per-bucket
 * FOBF_OPEN_BUSY flag keep the interface and bucket stable instead.
 */
struct flow_owner *
fsw_flow_add(struct nx_flowswitch *fsw, struct nx_flow_req *req0, int *error)
{
	struct kern_nexus *nx = fsw->fsw_nx;
	struct flow_mgr *fm = fsw->fsw_flow_mgr;
	nexus_port_t nx_port = req0->nfr_nx_port;
	struct flow_owner_bucket *fob;
	struct flow_owner *fo = NULL;
	void *__single fo_context = req0->nfr_context;
	boolean_t nx_bound = FALSE;     /* we bound an ephemeral nexus port */
	boolean_t new_mapping = FALSE;  /* we allocated the flow owner */
	struct nx_flow_req req;
	uuid_t uuid_key;
	bool nx_port_pid_bound;
	uint32_t max_flowadv = nx->nx_prov->nxprov_params->nxp_flowadv_max;
	struct proc *p;
	int pid = req0->nfr_pid;
	bool low_latency = ((req0->nfr_flags & NXFLOWREQF_LOW_LATENCY) != 0);
	struct flow_entry *__single aop_fe = NULL;
#if SK_LOG
	uuid_string_t uuidstr;
#endif /* SK_LOG */

	*error = 0;

	/*
	 * Make a local copy of the original request; we'll modify the
	 * local copy and write it back to the original upon success.
	 */
	bcopy(req0, &req, sizeof(*req0));
	ASSERT(!uuid_is_null(req.nfr_flow_uuid));

	/*
	 * Interface attach and detach involve holding the flowswitch lock
	 * held as writer.  Given that we might block in msleep() below,
	 * holding the flowswitch RW lock is not an option.  Instead, we
	 * utilize the detach barrier prevent things from going away while
	 * we are here.
	 */
	if (!fsw_detach_barrier_add(fsw)) {
		SK_ERR("netagent detached");
		*error = ENXIO;
		return NULL;
	}

	/*
	 * We insist that PID resolves to a process for flow add, but not for
	 * delete. That's because those events may be posted (to us) after the
	 * corresponding process has exited, and so we still need to be able to
	 * cleanup.
	 */
	p = proc_find(pid);
	if (p == PROC_NULL) {
		SK_ERR("process for pid %d doesn't exist", pid);
		*error = EINVAL;
		fsw_detach_barrier_remove(fsw);
		return NULL;
	}
	req.nfr_proc = p;

	/*
	 * If interface is currently attached, indicate that a bind is in
	 * progress, so that upon releasing the lock any threads attempting
	 * to detach the interface will wait until we're done.
	 */
	fob = flow_mgr_get_fob_by_pid(fm, pid);
	FOB_LOCK_SPIN(fob);
	/* serialize against concurrent open/close on this bucket */
	while (fob->fob_busy_flags & (FOBF_OPEN_BUSY | FOBF_CLOSE_BUSY)) {
		if (++(fob->fob_open_waiters) == 0) {   /* wraparound */
			fob->fob_open_waiters++;
		}
		/* priority above PZERO: msleep is interruptible (EINTR) */
		if ((*error = msleep(&fob->fob_open_waiters, &fob->fob_lock,
		    (PZERO + 1) | PSPIN, __FUNCTION__, NULL)) == EINTR) {
			SK_ERR("%s(%d) binding for uuid %s was interrupted",
			    sk_proc_name(p), pid,
			    sk_uuid_unparse(req.nfr_flow_uuid, uuidstr));
			ASSERT(fob->fob_open_waiters > 0);
			fob->fob_open_waiters--;
			FOB_UNLOCK(fob);
			ASSERT(fo == NULL);
			goto unbusy;
		}
	}
	if (__improbable((fob->fob_busy_flags & FOBF_DEAD) != 0)) {
		SK_ERR("%s(%d) binding for flow_uuid %s aborted due to "
		    "dead owner", sk_proc_name(p), pid,
		    sk_uuid_unparse(req.nfr_flow_uuid, uuidstr));
		*error = ENXIO;
		goto done;
	}
	ASSERT(!(fob->fob_busy_flags & FOBF_OPEN_BUSY));
	fob->fob_busy_flags |= FOBF_OPEN_BUSY;

	/*
	 * Single-pass do/while with three cases:
	 *  1. no owner yet, no port specified  -> bind an ephemeral port;
	 *  2. no owner yet, port specified     -> use the caller's port;
	 *  3. owner exists                     -> reuse its existing port.
	 */
	do {
		fo = flow_owner_find_by_pid(fob, pid, fo_context, low_latency);
		if (fo == NULL && nx_port == NEXUS_PORT_ANY) {
			struct nxbind nxb;

			/*
			 * Release lock to maintain ordering with the
			 * flowswitch lock; busy flag is set above.
			 * Also read_random() may block.
			 */
			FOB_UNLOCK(fob);

			uuid_generate_random(uuid_key);

			bzero(&nxb, sizeof(nxb));
			nxb.nxb_flags |= NXBF_MATCH_UNIQUEID;
			nxb.nxb_uniqueid = proc_uniqueid(p);
			nxb.nxb_pid = pid;
			nxb.nxb_flags |= NXBF_MATCH_KEY;
			nxb.nxb_key = sk_alloc_data(sizeof(uuid_key),
			    Z_WAITOK | Z_NOFAIL, skmem_tag_nx_key);
			nxb.nxb_key_len = sizeof(uuid_key);
			bcopy(uuid_key, nxb.nxb_key, nxb.nxb_key_len);

			/*
			 * Bind a new nexus port.  Directly invoke the
			 * nxdom_bind_port() callback of the nexus since
			 * the nexus instance is already known.  Free
			 * the UUID key upon failure; otherwise callee
			 * will attach it to the nexus port and clean
			 * it up during nxdom_unbind_port().
			 */
			if ((*error = NX_DOM(nx)->nxdom_bind_port(nx,
			    &nx_port, &nxb, NULL)) != 0) {
				sk_free_data_sized_by(nxb.nxb_key, nxb.nxb_key_len);
				SK_ERR("%s(%d) failed to bind flow_uuid %s to a "
				    "nx_port (err %d)", sk_proc_name(p),
				    pid, sk_uuid_unparse(req.nfr_flow_uuid,
				    uuidstr), *error);
				nx_port = NEXUS_PORT_ANY;
				FOB_LOCK_SPIN(fob);
				break;
			}
			ASSERT(nx_port != NEXUS_PORT_ANY);
			nx_bound = TRUE;

			SK_DF(SK_VERB_FLOW, "%s(%d) flow_uuid %s associated with "
			    "ephemeral nx_port %d", sk_proc_name(p),
			    pid, sk_uuid_unparse(req.nfr_flow_uuid, uuidstr),
			    (int)nx_port);

			FOB_LOCK_SPIN(fob);
			/*
			 * if there's no interface associated with this,
			 * then bail; we may have lost a race with a
			 * detach while the lock was dropped above.
			 */
			if (__improbable((fob->fob_busy_flags & FOBF_DEAD) !=
			    0 || fsw->fsw_ifp == NULL ||
			    fsw->fsw_agent_session == NULL)) {
				SK_ERR("%s(%d) binding for flow_uuid %s aborted "
				    "(lost race)", sk_proc_name(p),
				    pid, sk_uuid_unparse(req.nfr_flow_uuid,
				    uuidstr));
				*error = ENXIO;
				break;
			}
			nx_port_pid_bound = true;
			uuid_copy(req.nfr_bind_key, uuid_key);
		} else if (fo == NULL) {
			/* make sure request has valid nx_port */
			ASSERT(nx_port != NEXUS_PORT_ANY);
			/*
			 * XXX
			 * Why is this path supported? Normal flows are not
			 * added with a specified port and this check does
			 * nothing to verify if the port is used.
			 *
			 * Using nx_port_is_valid() is wrong because that
			 * assumes the array already has non-zero ports.
			 */
			if (__improbable(nx_port >= NX_PORT_CHUNK)) {
				*error = EINVAL;
				break;
			}
			/* read_random() may block */
			FOB_LOCK_CONVERT(fob);

			nx_port_pid_bound = false;
			uuid_generate_random(uuid_key);

			SK_DF(SK_VERB_FLOW, "%s(%d) flow_uuid %s associated "
			    "with nx_port %d", sk_proc_name(p),
			    pid, sk_uuid_unparse(req.nfr_flow_uuid, uuidstr),
			    (int)nx_port);
		} else {
			/* subsequent request should reuse existing port */
			ASSERT(fo->fo_nx_port != NEXUS_PORT_ANY);
			if (nx_port != NEXUS_PORT_ANY &&
			    nx_port != fo->fo_nx_port) {
				*error = EINVAL;
				break;
			}
			/* fillout info for nexus port */
			nx_port = fo->fo_nx_port;
			uuid_copy(uuid_key, fo->fo_key);
			break;
		}

		FOB_LOCK_CONVERT(fob);

		/* cases 1 and 2 reach here: create the flow owner */
		ASSERT(nx_port != NEXUS_PORT_ANY);
		ASSERT(fo == NULL);
		fo = flow_owner_alloc(fob, p, nx_port, nx_port_pid_bound,
		    (max_flowadv != 0), fsw, NULL, fo_context, low_latency);
		if (fo == NULL) {
			*error = ENOMEM;
			break;
		}
		ASSERT(!uuid_is_null(uuid_key));
		uuid_copy(fo->fo_key, uuid_key);
		new_mapping = TRUE;
	} while (0);

	if (*error != 0) {
		goto done;
	}

	/* make sure rule ID isn't already being used */
	struct flow_entry *__single fe;
	if ((fe = flow_entry_find_by_uuid(fo, req.nfr_flow_uuid)) != NULL) {
#if SK_LOG
		char dbgbuf[FLOWENTRY_DBGBUF_SIZE];
		SK_PERR(p, "flow uuid collision with fe \"%s\"",
		    fe2str(fe, dbgbuf, sizeof(dbgbuf)));
#endif /* SK_LOG */
		*error = EEXIST;
		flow_entry_release(&fe);
		goto done;
	}

	/* return assigned nexus port to caller */
	req.nfr_nx_port = nx_port;
	if (__probable(!fsw_qos_default_restricted())) {
		req.nfr_flags |= NXFLOWREQF_QOS_MARKING;
	} else {
		req.nfr_flags &= ~NXFLOWREQF_QOS_MARKING;
	}

	FOB_LOCK_CONVERT(fob);

	*error = flow_mgr_flow_add(nx, fm, fo, fsw->fsw_ifp, &req,
	    fsw_flow_route_ctor, fsw_flow_route_resolve, fsw);


	if (*error == 0) {
		SK_DF(SK_VERB_FLOW, "%s(%d) flow_uuid %s is now on "
		    "nx_port %d", sk_proc_name(p), pid,
		    sk_uuid_unparse(req.nfr_flow_uuid, uuidstr),
		    (int)nx_port);

		/* Lookup flow entry for RX steering if needed (before FOB unlock) */
		if (req.nfr_flags & NXFLOWREQF_AOP_OFFLOAD) {
			aop_fe = flow_entry_find_by_uuid(fo, req.nfr_flow_uuid);
			ASSERT(aop_fe);
		} else {
			/* replace original request with our (modified) local copy */
			bcopy(&req, req0, sizeof(*req0));
		}
	}

done:
	/* error path: undo owner allocation and/or ephemeral port binding */
	if (__improbable(*error != 0)) {
		SK_ERR("%s(%d) failed to add flow_uuid %s (err %d)",
		    sk_proc_name(p), pid,
		    sk_uuid_unparse(req.nfr_flow_uuid, uuidstr), *error);
		if (fo != NULL) {
			if (new_mapping) {
				FOB_LOCK_CONVERT(fob);
				flow_owner_free(fob, fo);
			}
			fo = NULL;
		}
		if (nx_bound) {
			ASSERT(nx_port != NEXUS_PORT_ANY);
			FOB_LOCK_ASSERT_HELD(fob);
			/*
			 * Release lock to maintain ordering with the
			 * flowswitch lock; busy flag is set above.
			 */
			FOB_UNLOCK(fob);
			(void) NX_DOM(nx)->nxdom_unbind_port(nx, nx_port);
			nx_port = NEXUS_PORT_ANY;
			FOB_LOCK_SPIN(fob);
		}
	}
	/* clear busy state and wake up any open/close waiters */
	fob->fob_busy_flags &= ~FOBF_OPEN_BUSY;
	if (__improbable(fob->fob_open_waiters > 0)) {
		fob->fob_open_waiters = 0;
		wakeup(&fob->fob_open_waiters);
	}
	if (__improbable(fob->fob_close_waiters > 0)) {
		fob->fob_close_waiters = 0;
		wakeup(&fob->fob_close_waiters);
	}
	FOB_UNLOCK(fob);

	/* Configure RX flow steering if flow was added successfully and AOP offload is requested */
	if (aop_fe != NULL) {
		int rx_steering_err = flow_entry_add_rx_steering_rule(fsw, aop_fe);
		if (rx_steering_err != 0) {
			SK_ERR("%s(%d) failed to add RX steering rule for "
			    "flow_uuid %s (err %d)", sk_proc_name(p), pid,
			    sk_uuid_unparse(req.nfr_flow_uuid, uuidstr),
			    rx_steering_err);
			flow_entry_release(&aop_fe);
			aop_fe = NULL;
			/* Clean up the flow since RX steering failed */
			fsw_flow_del(fsw, &req, true, NULL);
			/*
			 * Release flow stats reference count for the additional reference
			 * that would be passed back to NECP client in successful flow creation.
			 * Since flow creation succeeded and stats were assigned to the request
			 * at flow_entry_alloc(), but we're now cleaning up the flow due to
			 * RX steering failure, we must release this reference as the caller
			 * should not receive flow stats for a flow that was cleaned up.
			 */
			if (req.nfr_flow_stats != NULL) {
				flow_stats_release(req.nfr_flow_stats);
				req.nfr_flow_stats = NULL;
			}
			*error = rx_steering_err;
			fo = NULL;
		} else {
			/* replace original request with our (modified) local copy */
			bcopy(&req, req0, sizeof(*req0));
			flow_entry_release(&aop_fe);
			aop_fe = NULL;
		}
	}

unbusy:
	proc_rele(p);
	p = PROC_NULL;
	ASSERT(aop_fe == NULL);
	/* allow any pending detach to proceed */
	fsw_detach_barrier_remove(fsw);

	return fo;
}
380 
/*
 * fsw_flow_del: remove the flow identified by req->nfr_flow_uuid from
 * the owner matching (req->nfr_pid, req->nfr_context, low-latency).
 *
 * Unlike fsw_flow_add(), the PID is not required to resolve to a live
 * process, since delete events may arrive after the process has exited.
 * If this was the owner's last flow, its nexus port was destroyed, and
 * the port was PID-bound, the nexus binding is released and the owner
 * freed as well.
 *
 * Returns 0 on success (ENOENT from entry destruction is treated as
 * non-fatal), or an errno on failure.
 */
int
fsw_flow_del(struct nx_flowswitch *fsw, struct nx_flow_req *req, bool nolinger,
    void *params)
{
	struct flow_mgr *fm = fsw->fsw_flow_mgr;
	struct kern_nexus *nx = fsw->fsw_nx;
	struct flow_owner_bucket *fob;
	struct flow_owner *fo;
	void *__single fo_context = req->nfr_context;
	pid_t pid = req->nfr_pid;
	bool low_latency = ((req->nfr_flags & NXFLOWREQF_LOW_LATENCY) != 0);
	int error;

	ASSERT(!uuid_is_null(req->nfr_flow_uuid));

	/*
	 * we use the detach barrier to prevent flowswith instance from
	 * going away while we are here.
	 */
	if (!fsw_detach_barrier_add(fsw)) {
		SK_ERR("netagent detached");
		return ENXIO;
	}

	/* find mapping */
	fob = flow_mgr_get_fob_by_pid(fm, pid);
	FOB_LOCK_SPIN(fob);
	/*
	 * Serialize against concurrent open/close; unlike the open path,
	 * this msleep (priority below PZERO) is not interruptible — close
	 * must be able to proceed for cleanup.
	 */
	while (fob->fob_busy_flags & (FOBF_OPEN_BUSY | FOBF_CLOSE_BUSY)) {
		if (++(fob->fob_close_waiters) == 0) {  /* wraparound */
			fob->fob_close_waiters++;
		}
		(void) msleep(&fob->fob_close_waiters, &fob->fob_lock,
		    (PZERO - 1) | PSPIN, __FUNCTION__, NULL);
	}
	fob->fob_busy_flags |= FOBF_CLOSE_BUSY;

	fo = flow_owner_find_by_pid(fob, pid, fo_context, low_latency);
	if (fo == NULL) {
		error = ENOENT;
		goto done;
	}

	FOB_LOCK_CONVERT(fob);

	/*
	 * Unbind flow.  Note that if "auto close" is enabled, the flows
	 * associated with this fo would have been removed when the channel
	 * opened to the nexus port gets closed.  If we get ENOENT just
	 * treat as as non-fatal and proceed further down.
	 */
	error = flow_owner_destroy_entry(fo, req->nfr_flow_uuid, nolinger,
	    params);
	if (error != 0 && error != ENOENT) {
		goto done;
	}

	/*
	 * If the channel that was connected to the nexus port is no longer
	 * around, i.e. fsw_port_dtor() has been called, and there are no
	 * more flows on the owner, and the owner was bound to PID on the
	 * nexus port in fsw_flow_bind(), remove the nexus binding now to make
	 * this port available.
	 */
	if (RB_EMPTY(&fo->fo_flow_entry_id_head) &&
	    fo->fo_nx_port_destroyed && fo->fo_nx_port_pid_bound) {
		nexus_port_t nx_port = fo->fo_nx_port;
		ASSERT(nx_port != NEXUS_PORT_ANY);
		/*
		 * Release lock to maintain ordering with the
		 * flowswitch lock; busy flag is set above.
		 */
		FOB_UNLOCK(fob);
		(void) NX_DOM(nx)->nxdom_unbind_port(nx, nx_port);
		FOB_LOCK(fob);
		flow_owner_free(fob, fo);
		fo = NULL;
	}
	error = 0;

done:
#if SK_LOG
	if (__improbable((sk_verbose & SK_VERB_FLOW) != 0)) {
		uuid_string_t uuidstr;
		if (fo != NULL) {
			SK_DF(SK_VERB_FLOW, "%s(%d) flow_uuid %s (err %d)",
			    fo->fo_name, fo->fo_pid,
			    sk_uuid_unparse(req->nfr_flow_uuid, uuidstr), error);
		} else {
			SK_DF(SK_VERB_FLOW, "pid %d flow_uuid %s (err %d)", pid,
			    sk_uuid_unparse(req->nfr_flow_uuid, uuidstr), error);
		}
	}
#endif /* SK_LOG */

	/* clear busy state and wake up any open/close waiters */
	fob->fob_busy_flags &= ~FOBF_CLOSE_BUSY;
	if (__improbable(fob->fob_open_waiters > 0)) {
		fob->fob_open_waiters = 0;
		wakeup(&fob->fob_open_waiters);
	}
	if (__improbable(fob->fob_close_waiters > 0)) {
		fob->fob_close_waiters = 0;
		wakeup(&fob->fob_close_waiters);
	}
	FOB_UNLOCK(fob);

	/* allow any pending detach to proceed */
	fsw_detach_barrier_remove(fsw);

	return error;
}
491 
492 int
fsw_flow_config(struct nx_flowswitch * fsw,struct nx_flow_req * req)493 fsw_flow_config(struct nx_flowswitch *fsw, struct nx_flow_req *req)
494 {
495 	struct flow_mgr *fm = fsw->fsw_flow_mgr;
496 	struct flow_entry *__single fe = NULL;
497 	struct ns_token *__single nt = NULL;
498 	int error = 0;
499 
500 	FSW_RLOCK(fsw);
501 	fe = flow_mgr_get_fe_by_uuid_rlock(fm, req->nfr_flow_uuid);
502 	if (fe == NULL) {
503 		SK_ERR("can't find flow");
504 		error = ENOENT;
505 		goto done;
506 	}
507 
508 	if (fe->fe_pid != req->nfr_pid) {
509 		SK_ERR("flow ownership error");
510 		error = EPERM;
511 		goto done;
512 	}
513 
514 	nt = fe->fe_port_reservation;
515 
516 	/*
517 	 * First handle the idle/reused connection flags
518 	 *
519 	 * Note: That we expect either connection idle/reused to be set or
520 	 * no wake from sleep to be set/cleared
521 	 */
522 	if (req->nfr_flags & (NXFLOWREQF_CONNECTION_IDLE | NXFLOWREQF_CONNECTION_REUSED)) {
523 		if (req->nfr_flags & NXFLOWREQF_CONNECTION_IDLE) {
524 			os_atomic_or(&fe->fe_flags, FLOWENTF_CONNECTION_IDLE, relaxed);
525 			netns_change_flags(&nt, NETNS_CONNECTION_IDLE, 0);
526 		}
527 		if (req->nfr_flags & NXFLOWREQF_CONNECTION_REUSED) {
528 			os_atomic_andnot(&fe->fe_flags, FLOWENTF_CONNECTION_IDLE, relaxed);
529 			netns_change_flags(&nt, 0, NETNS_CONNECTION_IDLE);
530 		}
531 #if SK_LOG
532 		char dbgbuf[256];
533 		SK_DF(SK_VERB_FLOW, "%s: CONNECTION_IDLE %d CONNECTION_REUSE %d",
534 		    fe2str(fe, dbgbuf, sizeof(dbgbuf)),
535 		    req->nfr_flags & NXFLOWREQF_CONNECTION_IDLE ? 1 : 0,
536 		    req->nfr_flags & NXFLOWREQF_CONNECTION_REUSED ? 1 : 0);
537 #endif /* SK_LOG */
538 	} else {
539 		/* right now only support NXFLOWREQF_NOWAKEFROMSLEEP config */
540 		if (req->nfr_flags & NXFLOWREQF_NOWAKEFROMSLEEP) {
541 			os_atomic_or(&fe->fe_flags, FLOWENTF_NOWAKEFROMSLEEP, relaxed);
542 			netns_change_flags(&nt, NETNS_NOWAKEFROMSLEEP, 0);
543 		} else {
544 			os_atomic_andnot(&fe->fe_flags, FLOWENTF_NOWAKEFROMSLEEP, relaxed);
545 			netns_change_flags(&nt, 0, NETNS_NOWAKEFROMSLEEP);
546 		}
547 #if SK_LOG
548 		char dbgbuf[FLOWENTRY_DBGBUF_SIZE];
549 		SK_DF(SK_VERB_FLOW, "%s: NOWAKEFROMSLEEP %d",
550 		    fe2str(fe, dbgbuf, sizeof(dbgbuf)),
551 		    req->nfr_flags & NXFLOWREQF_NOWAKEFROMSLEEP ? 1 : 0);
552 #endif /* SK_LOG */
553 	}
554 done:
555 	if (fe != NULL) {
556 		fe_stats_update(fe);
557 		flow_entry_release(&fe);
558 	}
559 	FSW_RUNLOCK(fsw);
560 	return error;
561 }
562 
563 static void
fsw_flow_route_ctor(void * arg,struct flow_route * fr)564 fsw_flow_route_ctor(void *arg, struct flow_route *fr)
565 {
566 	struct nx_flowswitch *__single fsw = arg;
567 	if (fsw->fsw_ctor != NULL) {
568 		fsw->fsw_ctor(fsw, fr);
569 	}
570 }
571 
572 static int
fsw_flow_route_resolve(void * arg,struct flow_route * fr,struct __kern_packet * pkt)573 fsw_flow_route_resolve(void *arg, struct flow_route *fr,
574     struct __kern_packet *pkt)
575 {
576 	struct nx_flowswitch *__single fsw = arg;
577 	return (fsw->fsw_resolve != NULL) ? fsw->fsw_resolve(fsw, fr, pkt) : 0;
578 }
579