1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1991, 1993, 1995
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Rick Macklem at The University of Guelph.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
65 * FreeBSD-Id: nfs_socket.c,v 1.30 1997/10/28 15:59:07 bde Exp $
66 */
67
68 #include <nfs/nfs_conf.h>
69 #if CONFIG_NFS
70
71 /*
72 * Socket operations for use by nfs
73 */
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/proc.h>
78 #include <sys/signalvar.h>
79 #include <sys/kauth.h>
80 #include <sys/mount_internal.h>
81 #include <sys/kernel.h>
82 #include <sys/kpi_mbuf.h>
83 #include <sys/malloc.h>
84 #include <sys/vnode.h>
85 #include <sys/domain.h>
86 #include <sys/protosw.h>
87 #include <sys/socket.h>
88 #include <sys/un.h>
89 #include <sys/syslog.h>
90 #include <sys/tprintf.h>
91 #include <libkern/OSAtomic.h>
92 #include <IOKit/IOPlatformExpert.h>
93
94 #include <sys/reboot.h>
95 #include <sys/time.h>
96 #include <kern/clock.h>
97 #include <kern/task.h>
98 #include <kern/thread.h>
99 #include <kern/thread_call.h>
100 #include <sys/user.h>
101 #include <sys/acct.h>
102
103 #include <netinet/in.h>
104 #include <netinet/tcp.h>
105
106 #include <nfs/rpcv2.h>
107 #include <nfs/krpc.h>
108 #include <nfs/nfsproto.h>
109 #include <nfs/nfs.h>
110 #include <nfs/xdr_subs.h>
111 #include <nfs/nfsm_subs.h>
112 #include <nfs/nfs_gss.h>
113 #include <nfs/nfsmount.h>
114 #include <nfs/nfsnode.h>
115
/* Client socket-layer debug logging: level 7 for messages, level 15 also dumps mbufs. */
#define NFS_SOCK_DBG(...) NFSCLNT_DBG(NFSCLNT_FAC_SOCK, 7, ## __VA_ARGS__)
/* NOTE(review): bare `if` macro — a dangling-else hazard at call sites; consider do{}while(0). */
#define NFS_SOCK_DUMP_MBUF(msg, mb) if (NFSCLNT_IS_DBG(NFSCLNT_FAC_SOCK, 15)) nfs_dump_mbuf(__func__, __LINE__, (msg), (mb))

/*
 * Length of a struct sockaddr_un up to the end of its path.
 * strnlen() bounds the scan in case sun_path is not NUL-terminated.
 */
#ifndef SUN_LEN
#define SUN_LEN(su) \
	(sizeof(*(su)) - sizeof((su)->sun_path) + strnlen((su)->sun_path, sizeof((su)->sun_path)))
#endif /* SUN_LEN */

/* XXX prototype not exported by the Mach headers we include; declared locally */
kern_return_t thread_terminate(thread_t);

/* Zones for the fixed-size NFS allocations made by this file */
ZONE_DECLARE(nfs_fhandle_zone, "fhandle", sizeof(struct fhandle), ZC_NONE);
ZONE_DECLARE(nfs_req_zone, "NFS req", sizeof(struct nfsreq), ZC_NONE);
ZONE_DECLARE(nfsrv_descript_zone, "NFSV3 srvdesc",
    sizeof(struct nfsrv_descript), ZC_NONE);


#if CONFIG_NFS_SERVER
int nfsrv_sock_max_rec_queue_length = 128; /* max # RPC records queued on (UDP) socket */

int nfsrv_getstream(struct nfsrv_sock *, int);
int nfsrv_getreq(struct nfsrv_descript *);
extern int nfsv3_procid[NFS_NPROCS];
#endif /* CONFIG_NFS_SERVER */
140
141 /*
142 * compare two sockaddr structures
143 */
144 int
nfs_sockaddr_cmp(struct sockaddr * sa1,struct sockaddr * sa2)145 nfs_sockaddr_cmp(struct sockaddr *sa1, struct sockaddr *sa2)
146 {
147 if (!sa1) {
148 return -1;
149 }
150 if (!sa2) {
151 return 1;
152 }
153 if (sa1->sa_family != sa2->sa_family) {
154 return (sa1->sa_family < sa2->sa_family) ? -1 : 1;
155 }
156 if (sa1->sa_len != sa2->sa_len) {
157 return (sa1->sa_len < sa2->sa_len) ? -1 : 1;
158 }
159 if (sa1->sa_family == AF_INET) {
160 return bcmp(&((struct sockaddr_in*)sa1)->sin_addr,
161 &((struct sockaddr_in*)sa2)->sin_addr, sizeof(((struct sockaddr_in*)sa1)->sin_addr));
162 }
163 if (sa1->sa_family == AF_INET6) {
164 return bcmp(&((struct sockaddr_in6*)sa1)->sin6_addr,
165 &((struct sockaddr_in6*)sa2)->sin6_addr, sizeof(((struct sockaddr_in6*)sa1)->sin6_addr));
166 }
167 return -1;
168 }
169
170 #if CONFIG_NFS_CLIENT
171
172 int nfs_connect_search_new_socket(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
173 int nfs_connect_search_socket_connect(struct nfsmount *, struct nfs_socket *, int);
174 int nfs_connect_search_ping(struct nfsmount *, struct nfs_socket *, struct timeval *);
175 void nfs_connect_search_socket_found(struct nfsmount *, struct nfs_socket_search *, struct nfs_socket *);
176 void nfs_connect_search_socket_reap(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
177 int nfs_connect_search_check(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
178 int nfs_reconnect(struct nfsmount *);
179 int nfs_connect_setup(struct nfsmount *);
180 void nfs_mount_sock_thread(void *, wait_result_t);
181 void nfs_udp_rcv(socket_t, void*, int);
182 void nfs_tcp_rcv(socket_t, void*, int);
183 void nfs_sock_poke(struct nfsmount *);
184 void nfs_request_match_reply(struct nfsmount *, mbuf_t);
185 void nfs_reqdequeue(struct nfsreq *);
186 void nfs_reqbusy(struct nfsreq *);
187 struct nfsreq *nfs_reqnext(struct nfsreq *);
188 int nfs_wait_reply(struct nfsreq *);
189 void nfs_softterm(struct nfsreq *);
190 int nfs_can_squish(struct nfsmount *);
191 int nfs_is_squishy(struct nfsmount *);
192 int nfs_is_dead(int, struct nfsmount *);
193
194 /*
195 * Estimate rto for an nfs rpc sent via. an unreliable datagram.
196 * Use the mean and mean deviation of rtt for the appropriate type of rpc
197 * for the frequent rpcs and a default for the others.
198 * The justification for doing "other" this way is that these rpcs
199 * happen so infrequently that timer est. would probably be stale.
200 * Also, since many of these rpcs are
201 * non-idempotent, a conservative timeout is desired.
202 * getattr, lookup - A+2D
203 * read, write - A+4D
204 * other - nm_timeo
205 */
206 #define NFS_RTO(n, t) \
207 ((t) == 0 ? (n)->nm_timeo : \
208 ((t) < 3 ? \
209 (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
210 ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
211 #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
212 #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
213
214 /*
215 * Defines which timer to use for the procnum.
216 * 0 - default
217 * 1 - getattr
218 * 2 - lookup
219 * 3 - read
220 * 4 - write
221 */
/* Procedures not listed (or listed as 0) use the default timer. */
static const int proct[] = {
	[NFSPROC_NULL] = 0,
	[NFSPROC_GETATTR] = 1,          /* timer 1: getattr */
	[NFSPROC_SETATTR] = 0,
	[NFSPROC_LOOKUP] = 2,           /* timer 2: lookup */
	[NFSPROC_ACCESS] = 1,
	[NFSPROC_READLINK] = 3,         /* timer 3: read-like */
	[NFSPROC_READ] = 3,
	[NFSPROC_WRITE] = 4,            /* timer 4: write */
	[NFSPROC_CREATE] = 0,
	[NFSPROC_MKDIR] = 0,
	[NFSPROC_SYMLINK] = 0,
	[NFSPROC_MKNOD] = 0,
	[NFSPROC_REMOVE] = 0,
	[NFSPROC_RMDIR] = 0,
	[NFSPROC_RENAME] = 0,
	[NFSPROC_LINK] = 0,
	[NFSPROC_READDIR] = 3,
	[NFSPROC_READDIRPLUS] = 3,
	[NFSPROC_FSSTAT] = 0,
	[NFSPROC_FSINFO] = 0,
	[NFSPROC_PATHCONF] = 0,
	[NFSPROC_COMMIT] = 0,
	[NFSPROC_NOOP] = 0,
};
247
248 /*
249 * There is a congestion window for outstanding rpcs maintained per mount
250 * point. The cwnd size is adjusted in roughly the way that:
251 * Van Jacobson, Congestion avoidance and Control, In "Proceedings of
252 * SIGCOMM '88". ACM, August 1988.
253 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
254 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
255 * of rpcs is in progress.
256 * (The sent count and cwnd are scaled for integer arith.)
257 * Variants of "slow start" were tried and were found to be too much of a
258 * performance hit (ave. rtt 3 times larger),
259 * I suspect due to the large rtt that nfs rpcs have.
260 */
#define NFS_CWNDSCALE 256 /* fixed-point scale for the congestion window arithmetic */
#define NFS_MAXCWND (NFS_CWNDSCALE * 32) /* cap: a window of 32 outstanding requests */
/* Retransmit backoff multipliers, indexed by (capped) retransmit count: powers of two */
static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
264
265 /*
266 * Increment location index to next address/server/location.
267 */
void
nfs_location_next(struct nfs_fs_locations *nlp, struct nfs_location_index *nlip)
{
	/* work on local copies; nlip is only updated once a usable index is found */
	uint8_t loc = nlip->nli_loc;
	uint8_t serv = nlip->nli_serv;
	uint8_t addr = nlip->nli_addr;

	/* move to next address */
	addr++;
	if (addr >= nlp->nl_locations[loc]->nl_servers[serv]->ns_addrcount) {
		/* no more addresses on current server, go to first address of next server */
next_server:
		addr = 0;
		serv++;
		if (serv >= nlp->nl_locations[loc]->nl_servcount) {
			/* no more servers on current location, go to first server of next location */
			serv = 0;
			loc++;
			if (loc >= nlp->nl_numlocs) {
				loc = 0; /* after last location, wrap back around to first location */
			}
		}
	}
	/*
	 * It's possible for this next server to not have any addresses.
	 * Check for that here and go to the next server.
	 * But bail out if we've managed to come back around to the original
	 * location that was passed in. (That would mean no servers had any
	 * addresses. And we don't want to spin here forever.)
	 */
	if ((loc == nlip->nli_loc) && (serv == nlip->nli_serv) && (addr == nlip->nli_addr)) {
		return;
	}
	/* the goto below jumps back into the if-body above to advance past empty servers */
	if (addr >= nlp->nl_locations[loc]->nl_servers[serv]->ns_addrcount) {
		goto next_server;
	}

	/* commit the new index */
	nlip->nli_loc = loc;
	nlip->nli_serv = serv;
	nlip->nli_addr = addr;
}
309
310 /*
311 * Compare two location indices.
312 */
313 int
nfs_location_index_cmp(struct nfs_location_index * nlip1,struct nfs_location_index * nlip2)314 nfs_location_index_cmp(struct nfs_location_index *nlip1, struct nfs_location_index *nlip2)
315 {
316 if (nlip1->nli_loc != nlip2->nli_loc) {
317 return nlip1->nli_loc - nlip2->nli_loc;
318 }
319 if (nlip1->nli_serv != nlip2->nli_serv) {
320 return nlip1->nli_serv - nlip2->nli_serv;
321 }
322 return nlip1->nli_addr - nlip2->nli_addr;
323 }
324
325 /*
326 * Get the mntfromname (or path portion only) for a given location.
327 */
328 void
nfs_location_mntfromname(struct nfs_fs_locations * locs,struct nfs_location_index idx,char * s,size_t size,int pathonly)329 nfs_location_mntfromname(struct nfs_fs_locations *locs, struct nfs_location_index idx, char *s, size_t size, int pathonly)
330 {
331 struct nfs_fs_location *fsl = locs->nl_locations[idx.nli_loc];
332 char *p;
333 int cnt, i;
334
335 p = s;
336 if (!pathonly) {
337 char *name = fsl->nl_servers[idx.nli_serv]->ns_name;
338 if (name == NULL) {
339 name = "";
340 }
341 if (*name == '\0') {
342 if (*fsl->nl_servers[idx.nli_serv]->ns_addresses[idx.nli_addr]) {
343 name = fsl->nl_servers[idx.nli_serv]->ns_addresses[idx.nli_addr];
344 }
345 cnt = scnprintf(p, size, "<%s>:", name);
346 } else {
347 cnt = scnprintf(p, size, "%s:", name);
348 }
349 p += cnt;
350 size -= cnt;
351 }
352 if (fsl->nl_path.np_compcount == 0) {
353 /* mounting root export on server */
354 if (size > 0) {
355 *p++ = '/';
356 *p++ = '\0';
357 }
358 return;
359 }
360 /* append each server path component */
361 for (i = 0; (size > 0) && (i < (int)fsl->nl_path.np_compcount); i++) {
362 cnt = scnprintf(p, size, "/%s", fsl->nl_path.np_components[i]);
363 p += cnt;
364 size -= cnt;
365 }
366 }
367
368 /*
369 * NFS client connect socket upcall.
370 * (Used only during socket connect/search.)
371 */
void
nfs_connect_upcall(socket_t so, void *arg, __unused int waitflag)
{
	struct nfs_socket *nso = arg;
	size_t rcvlen;
	mbuf_t m;
	int error = 0, recv = 1;

	/* While a connect is in progress, just poke the waiting connect path. */
	if (nso->nso_flags & NSO_CONNECTING) {
		NFS_SOCK_DBG("nfs connect - socket %p upcall - connecting flags = %8.8x\n", nso, nso->nso_flags);
		wakeup(nso->nso_wake);
		return;
	}

	lck_mtx_lock(&nso->nso_lock);
	/* Only proceed if a ping is outstanding and no other upcall/teardown is active. */
	if ((nso->nso_flags & (NSO_UPCALL | NSO_DISCONNECTING | NSO_DEAD)) || !(nso->nso_flags & NSO_PINGING)) {
		NFS_SOCK_DBG("nfs connect - socket %p upcall - nevermind\n", nso);
		lck_mtx_unlock(&nso->nso_lock);
		return;
	}
	NFS_SOCK_DBG("nfs connect - socket %p upcall %8.8x\n", nso, nso->nso_flags);
	nso->nso_flags |= NSO_UPCALL;

	/* loop while we make error-free progress */
	while (!error && recv) {
		/* make sure we're still interested in this socket */
		if (nso->nso_flags & (NSO_DISCONNECTING | NSO_DEAD)) {
			break;
		}
		/* drop the lock across the (possibly sleeping) receive */
		lck_mtx_unlock(&nso->nso_lock);
		m = NULL;
		if (nso->nso_sotype == SOCK_STREAM) {
			/* TCP: reassemble one RPC record from the stream */
			error = nfs_rpc_record_read(so, &nso->nso_rrs, MSG_DONTWAIT, &recv, &m);
			NFS_SOCK_DBG("nfs_rpc_record_read returned %d recv = %d\n", error, recv);
		} else {
			/* UDP: one datagram per receive */
			rcvlen = 1000000;
			error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
			recv = m ? 1 : 0;
		}
		lck_mtx_lock(&nso->nso_lock);
		if (m) {
			/* match response with request */
			struct nfsm_chain nmrep;
			uint32_t reply = 0, rxid = 0, verf_type, verf_len;
			uint32_t reply_status, rejected_status, accepted_status;

			NFS_SOCK_DUMP_MBUF("Got mbuf from ping", m);
			/* dissect the RPC reply header: xid, msg type, reply status */
			nfsm_chain_dissect_init(error, &nmrep, m);
			nfsm_chain_get_32(error, &nmrep, rxid);
			nfsm_chain_get_32(error, &nmrep, reply);
			if (!error && ((reply != RPC_REPLY) || (rxid != nso->nso_pingxid))) {
				error = EBADRPC;
			}
			nfsm_chain_get_32(error, &nmrep, reply_status);
			if (!error && (reply_status == RPC_MSGDENIED)) {
				nfsm_chain_get_32(error, &nmrep, rejected_status);
				if (!error) {
					error = (rejected_status == RPC_MISMATCH) ? ERPCMISMATCH : EACCES;
				}
			}
			nfsm_chain_get_32(error, &nmrep, verf_type); /* verifier flavor */
			nfsm_chain_get_32(error, &nmrep, verf_len); /* verifier length */
			nfsmout_if(error);
			if (verf_len) {
				/* skip the verifier body (padded to a 32-bit boundary) */
				nfsm_chain_adv(error, &nmrep, nfsm_rndup(verf_len));
			}
			nfsm_chain_get_32(error, &nmrep, accepted_status);
			nfsmout_if(error);
			NFS_SOCK_DBG("Recevied accepted_status of %d nso_version = %d\n", accepted_status, nso->nso_version);
			/*
			 * Program/version mismatch with no version pinned yet: use the
			 * min/max versions the server advertised to pick one we support.
			 */
			if ((accepted_status == RPC_PROGMISMATCH) && !nso->nso_version) {
				uint32_t minvers, maxvers;
				nfsm_chain_get_32(error, &nmrep, minvers);
				nfsm_chain_get_32(error, &nmrep, maxvers);
				nfsmout_if(error);
				if (nso->nso_protocol == PMAPPROG) {
					/* portmap for IPv4; rpcbind v3/v4 for IPv6 */
					if ((minvers > RPCBVERS4) || (maxvers < PMAPVERS)) {
						error = EPROGMISMATCH;
					} else if ((nso->nso_saddr->sa_family == AF_INET) &&
					    (PMAPVERS >= minvers) && (PMAPVERS <= maxvers)) {
						nso->nso_version = PMAPVERS;
					} else if (nso->nso_saddr->sa_family == AF_INET6) {
						if ((RPCBVERS4 >= minvers) && (RPCBVERS4 <= maxvers)) {
							nso->nso_version = RPCBVERS4;
						} else if ((RPCBVERS3 >= minvers) && (RPCBVERS3 <= maxvers)) {
							nso->nso_version = RPCBVERS3;
						}
					}
				} else if (nso->nso_protocol == NFS_PROG) {
					int vers;

					/*
					 * N.B. Both portmapper and rpcbind V3 are happy to return
					 * addresses for other versions than the one you ask (getport or
					 * getaddr) and thus we may have fallen to this code path. So if
					 * we get a version that we support, use highest supported
					 * version. This assumes that the server supports all versions
					 * between minvers and maxvers. Note for IPv6 we will try and
					 * use rpcbind V4 which has getversaddr and we should not get
					 * here if that was successful.
					 */
					for (vers = nso->nso_nfs_max_vers; vers >= (int)nso->nso_nfs_min_vers; vers--) {
						if (vers >= (int)minvers && vers <= (int)maxvers) {
							break;
						}
					}
					nso->nso_version = (vers < (int)nso->nso_nfs_min_vers) ? 0 : vers;
				}
				/* negotiated a usable version: treat the reply as success */
				if (!error && nso->nso_version) {
					accepted_status = RPC_SUCCESS;
				}
			}
			if (!error) {
				/* map the RPC accept status to an errno */
				switch (accepted_status) {
				case RPC_SUCCESS:
					error = 0;
					break;
				case RPC_PROGUNAVAIL:
					error = EPROGUNAVAIL;
					break;
				case RPC_PROGMISMATCH:
					error = EPROGMISMATCH;
					break;
				case RPC_PROCUNAVAIL:
					error = EPROCUNAVAIL;
					break;
				case RPC_GARBAGE:
					error = EBADRPC;
					break;
				case RPC_SYSTEM_ERR:
				default:
					error = EIO;
					break;
				}
			}
nfsmout:
			/* the ping is answered (or failed to parse); record the verdict */
			nso->nso_flags &= ~NSO_PINGING;
			if (error) {
				NFS_SOCK_DBG("nfs upcalled failed for %d program %d vers error = %d\n",
				    nso->nso_protocol, nso->nso_version, error);
				nso->nso_error = error;
				nso->nso_flags |= NSO_DEAD;
			} else {
				nso->nso_flags |= NSO_VERIFIED;
			}
			mbuf_freem(m);
			/* wake up search thread */
			wakeup(nso->nso_wake);
			break;
		}
	}

	nso->nso_flags &= ~NSO_UPCALL;
	/* EWOULDBLOCK just means "no more data now"; anything else kills the socket */
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/* problems with the socket... */
		NFS_SOCK_DBG("connect upcall failed %d\n", error);
		nso->nso_error = error ? error : EPIPE;
		nso->nso_flags |= NSO_DEAD;
		wakeup(nso->nso_wake);
	}
	/* a waiter in nfs_socket_destroy() sleeps on nso_flags */
	if (nso->nso_flags & NSO_DISCONNECTING) {
		wakeup(&nso->nso_flags);
	}
	lck_mtx_unlock(&nso->nso_lock);
}
536
537 /*
538 * Create/initialize an nfs_socket structure.
539 */
int
nfs_socket_create(
	struct nfsmount *nmp,
	struct sockaddr *sa,
	uint8_t sotype,
	in_port_t port,
	uint32_t protocol,
	uint32_t vers,
	int resvport,
	struct nfs_socket **nsop)
{
	struct nfs_socket *nso;
	struct timeval now;
	int error;
#define NFS_SOCKET_DEBUGGING
#ifdef NFS_SOCKET_DEBUGGING
	/* naddr sized for an AF_LOCAL path, the largest printable address form */
	char naddr[sizeof((struct sockaddr_un *)0)->sun_path];
	void *sinaddr;

	/* Validate sa_len for the family and render the address for debug logs. */
	switch (sa->sa_family) {
	case AF_INET:
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			return EINVAL;
		}
		sinaddr = &((struct sockaddr_in*)sa)->sin_addr;
		if (inet_ntop(sa->sa_family, sinaddr, naddr, sizeof(naddr)) != naddr) {
			strlcpy(naddr, "<unknown>", sizeof(naddr));
		}
		break;
	case AF_INET6:
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			return EINVAL;
		}
		sinaddr = &((struct sockaddr_in6*)sa)->sin6_addr;
		if (inet_ntop(sa->sa_family, sinaddr, naddr, sizeof(naddr)) != naddr) {
			strlcpy(naddr, "<unknown>", sizeof(naddr));
		}
		break;
	case AF_LOCAL:
		/* accept either a full-size sockaddr_un or one trimmed to SUN_LEN */
		if (sa->sa_len != sizeof(struct sockaddr_un) && sa->sa_len != SUN_LEN((struct sockaddr_un *)sa)) {
			return EINVAL;
		}
		strlcpy(naddr, ((struct sockaddr_un *)sa)->sun_path, sizeof(naddr));
		break;
	default:
		strlcpy(naddr, "<unsupported address family>", sizeof(naddr));
		break;
	}
#else
	char naddr[1] = { 0 };
#endif

	*nsop = NULL;

	/* Create the socket. */
	nso = kalloc_type(struct nfs_socket, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	/* Z_NOFAIL: both allocations are guaranteed to succeed */
	nso->nso_saddr = (struct sockaddr *)alloc_sockaddr(sa->sa_len, Z_WAITOK | Z_NOFAIL);

	lck_mtx_init(&nso->nso_lock, &nfs_request_grp, LCK_ATTR_NULL);
	nso->nso_sotype = sotype;
	if (nso->nso_sotype == SOCK_STREAM) {
		/* stream sockets need RPC record-marking state */
		nfs_rpc_record_state_init(&nso->nso_rrs);
	}
	microuptime(&now);
	nso->nso_timestamp = now.tv_sec;
	bcopy(sa, nso->nso_saddr, sa->sa_len);
	/* stamp the requested port into our copy of the address (no port for AF_LOCAL) */
	switch (sa->sa_family) {
	case AF_INET:
	case AF_INET6:
		if (sa->sa_family == AF_INET) {
			((struct sockaddr_in*)nso->nso_saddr)->sin_port = htons(port);
		} else if (sa->sa_family == AF_INET6) {
			((struct sockaddr_in6*)nso->nso_saddr)->sin6_port = htons(port);
		}
		break;
	case AF_LOCAL:
		break;
	}
	nso->nso_protocol = protocol;
	nso->nso_version = vers;
	nso->nso_nfs_min_vers = PVER2MAJOR(nmp->nm_min_vers);
	nso->nso_nfs_max_vers = PVER2MAJOR(nmp->nm_max_vers);

	error = sock_socket(sa->sa_family, nso->nso_sotype, 0, NULL, NULL, &nso->nso_so);

	/* Some servers require that the client port be a reserved port number. */
	if (!error && resvport && ((sa->sa_family == AF_INET) || (sa->sa_family == AF_INET6))) {
		struct sockaddr_storage ss;
		int level = (sa->sa_family == AF_INET) ? IPPROTO_IP : IPPROTO_IPV6;
		int optname = (sa->sa_family == AF_INET) ? IP_PORTRANGE : IPV6_PORTRANGE;
		int portrange = IP_PORTRANGE_LOW;

		/* ask the stack for a low (reserved) port, then bind to verify we got one */
		error = sock_setsockopt(nso->nso_so, level, optname, &portrange, sizeof(portrange));
		if (!error) { /* bind now to check for failure */
			ss.ss_len = sa->sa_len;
			ss.ss_family = sa->sa_family;
			if (ss.ss_family == AF_INET) {
				((struct sockaddr_in*)&ss)->sin_addr.s_addr = INADDR_ANY;
				((struct sockaddr_in*)&ss)->sin_port = htons(0);
			} else if (ss.ss_family == AF_INET6) {
				((struct sockaddr_in6*)&ss)->sin6_addr = in6addr_any;
				((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
			} else {
				error = EINVAL;
			}
			if (!error) {
				error = sock_bind(nso->nso_so, (struct sockaddr*)&ss);
			}
		}
	}

	if (error) {
		/* nfs_socket_destroy() also closes nso_so if it was opened */
		NFS_SOCK_DBG("nfs connect %s error %d creating socket %p %s type %d%s port %d prot %d %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nso, naddr, sotype,
		    resvport ? "r" : "", port, protocol, vers);
		nfs_socket_destroy(nso);
	} else {
		NFS_SOCK_DBG("nfs connect %s created socket %p <%s> type %d%s port %d prot %d %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, naddr,
		    sotype, resvport ? "r" : "", port, protocol, vers);
		*nsop = nso;
	}
	return error;
}
664
665 /*
666 * Destroy an nfs_socket structure.
667 */
void
nfs_socket_destroy(struct nfs_socket *nso)
{
	/* bounded wait (4s) for any in-flight upcall to notice NSO_DISCONNECTING */
	struct timespec ts = { .tv_sec = 4, .tv_nsec = 0 };

	NFS_SOCK_DBG("Destoring socket %p flags = %8.8x error = %d\n", nso, nso->nso_flags, nso->nso_error); /* (sic) typo preserved: runtime log string */
	lck_mtx_lock(&nso->nso_lock);
	nso->nso_flags |= NSO_DISCONNECTING;
	if (nso->nso_flags & NSO_UPCALL) { /* give upcall a chance to complete */
		msleep(&nso->nso_flags, &nso->nso_lock, PZERO - 1, "nfswaitupcall", &ts);
	}
	lck_mtx_unlock(&nso->nso_lock);
	/* shut down and close the kernel socket, then release per-socket state */
	sock_shutdown(nso->nso_so, SHUT_RDWR);
	sock_close(nso->nso_so);
	if (nso->nso_sotype == SOCK_STREAM) {
		nfs_rpc_record_state_cleanup(&nso->nso_rrs);
	}
	lck_mtx_destroy(&nso->nso_lock, &nfs_request_grp);

	/* free_sockaddr handles NULL, so nso_saddr2 may legitimately be unset */
	free_sockaddr(nso->nso_saddr);
	free_sockaddr(nso->nso_saddr2);

	NFS_SOCK_DBG("nfs connect - socket %p destroyed\n", nso);
	kfree_type(struct nfs_socket, nso);
}
693
694 /*
695 * Set common socket options on an nfs_socket.
696 */
697 void
nfs_socket_options(struct nfsmount * nmp,struct nfs_socket * nso)698 nfs_socket_options(struct nfsmount *nmp, struct nfs_socket *nso)
699 {
700 /*
701 * Set socket send/receive timeouts
702 * - Receive timeout shouldn't matter because most receives are performed
703 * in the socket upcall non-blocking.
704 * - Send timeout should allow us to react to a blocked socket.
705 * Soft mounts will want to abort sooner.
706 */
707 struct timeval timeo;
708 int on = 1, proto, reserve, error;
709
710 timeo.tv_usec = 0;
711 timeo.tv_sec = (NMFLAG(nmp, SOFT) || nfs_can_squish(nmp)) ? 5 : 60;
712 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
713 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
714 if (nso->nso_sotype == SOCK_STREAM) {
715 /* Assume that SOCK_STREAM always requires a connection */
716 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
717 /* set nodelay for TCP */
718 sock_gettype(nso->nso_so, NULL, NULL, &proto);
719 if (proto == IPPROTO_TCP) {
720 sock_setsockopt(nso->nso_so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
721 }
722 }
723
724 /* set socket buffer sizes for UDP/TCP */
725 reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : MAX(nfs_tcp_sockbuf, nmp->nm_wsize * 2);
726 {
727 error = sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDBUF, &reserve, sizeof(reserve));
728 }
729
730 if (error) {
731 log(LOG_INFO, "nfs_socket_options: error %d setting SO_SNDBUF to %u\n", error, reserve);
732 }
733
734 reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : MAX(nfs_tcp_sockbuf, nmp->nm_rsize * 2);
735 error = sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVBUF, &reserve, sizeof(reserve));
736 if (error) {
737 log(LOG_INFO, "nfs_socket_options: error %d setting SO_RCVBUF to %u\n", error, reserve);
738 }
739
740 /* set SO_NOADDRERR to detect network changes ASAP */
741 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
742 /* just playin' it safe with upcalls */
743 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
744 /* socket should be interruptible if the mount is */
745 if (!NMFLAG(nmp, INTR)) {
746 sock_nointerrupt(nso->nso_so, 1);
747 }
748 }
749
750 /*
751 * Release resources held in an nfs_socket_search.
752 */
753 void
nfs_socket_search_cleanup(struct nfs_socket_search * nss)754 nfs_socket_search_cleanup(struct nfs_socket_search *nss)
755 {
756 struct nfs_socket *nso, *nsonext;
757
758 TAILQ_FOREACH_SAFE(nso, &nss->nss_socklist, nso_link, nsonext) {
759 TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
760 nss->nss_sockcnt--;
761 nfs_socket_destroy(nso);
762 }
763 if (nss->nss_sock) {
764 nfs_socket_destroy(nss->nss_sock);
765 nss->nss_sock = NULL;
766 }
767 }
768
769 /*
770 * Prefer returning certain errors over others.
771 * This function returns a ranking of the given error.
772 */
773 int
nfs_connect_error_class(int error)774 nfs_connect_error_class(int error)
775 {
776 switch (error) {
777 case 0:
778 return 0;
779 case ETIMEDOUT:
780 case EAGAIN:
781 return 1;
782 case EPIPE:
783 case EADDRNOTAVAIL:
784 case ENETDOWN:
785 case ENETUNREACH:
786 case ENETRESET:
787 case ECONNABORTED:
788 case ECONNRESET:
789 case EISCONN:
790 case ENOTCONN:
791 case ESHUTDOWN:
792 case ECONNREFUSED:
793 case EHOSTDOWN:
794 case EHOSTUNREACH:
795 return 2;
796 case ERPCMISMATCH:
797 case EPROCUNAVAIL:
798 case EPROGMISMATCH:
799 case EPROGUNAVAIL:
800 return 3;
801 case EBADRPC:
802 return 4;
803 default:
804 return 5;
805 }
806 }
807
808 /*
809 * Make sure a socket search returns the best error.
810 */
811 void
nfs_socket_search_update_error(struct nfs_socket_search * nss,int error)812 nfs_socket_search_update_error(struct nfs_socket_search *nss, int error)
813 {
814 if (nfs_connect_error_class(error) >= nfs_connect_error_class(nss->nss_error)) {
815 nss->nss_error = error;
816 }
817 }
818
819 /* nfs_connect_search_new_socket:
820 * Given a socket search structure for an nfs mount try to find a new socket from the set of addresses specified
821 * by nss.
822 *
823 * nss_last is set to -1 at initialization to indicate the first time. Its set to -2 if address was found but
824 * could not be used or if a socket timed out.
825 */
int
nfs_connect_search_new_socket(struct nfsmount *nmp, struct nfs_socket_search *nss, struct timeval *now)
{
	struct nfs_fs_location *fsl;
	struct nfs_fs_server *fss;
	struct sockaddr_storage ss;
	struct nfs_socket *nso;
	char *addrstr;
	int error = 0;


	NFS_SOCK_DBG("nfs connect %s nss_addrcnt = %d\n",
	    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss->nss_addrcnt);

	/*
	 * while there are addresses and:
	 *       we have no sockets or
	 *       the last address failed and did not produce a socket (nss_last < 0) or
	 *       Its been a while (2 seconds) and we have less than the max number of concurrent sockets to search (4)
	 *       then attempt to create a socket with the current address.
	 */
	while (nss->nss_addrcnt > 0 && ((nss->nss_last < 0) || (nss->nss_sockcnt == 0) ||
	    ((nss->nss_sockcnt < 4) && (now->tv_sec >= (nss->nss_last + 2))))) {
		/* bail out immediately if the mount is being unmounted */
		if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
			return EINTR;
		}
		/* Can we convert the address to a sockaddr? */
		fsl = nmp->nm_locations.nl_locations[nss->nss_nextloc.nli_loc];
		fss = fsl->nl_servers[nss->nss_nextloc.nli_serv];
		addrstr = fss->ns_addresses[nss->nss_nextloc.nli_addr];
		NFS_SOCK_DBG("Trying address %s for program %d on port %d\n", addrstr, nss->nss_protocol, nss->nss_port);
		if (*addrstr == '\0') {
			/*
			 * We have an unspecified local domain address. We use the program to translate to
			 * a well known local transport address. We only support PMAPROG and NFS for this.
			 */
			if (nss->nss_protocol == PMAPPROG) {
				addrstr = (nss->nss_sotype == SOCK_DGRAM) ? RPCB_TICLTS_PATH : RPCB_TICOTSORD_PATH;
			} else if (nss->nss_protocol == NFS_PROG) {
				/* prefer the mount's configured local port, falling back to defaults */
				addrstr = nmp->nm_nfs_localport;
				if (!addrstr || *addrstr == '\0') {
					addrstr = (nss->nss_sotype == SOCK_DGRAM) ? NFS_TICLTS_PATH : NFS_TICOTSORD_PATH;
				}
			}
			NFS_SOCK_DBG("Calling prog %d with <%s>\n", nss->nss_protocol, addrstr);
		}
		if (!nfs_uaddr2sockaddr(addrstr, (struct sockaddr*)&ss)) {
			/* unusable address: skip it and mark nss_last = -2 (found but unusable) */
			NFS_SOCK_DBG("Could not convert address %s to socket\n", addrstr);
			nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
			nss->nss_addrcnt -= 1;
			nss->nss_last = -2;
			continue;
		}
		/* Check that socket family is acceptable. */
		if (nmp->nm_sofamily && (ss.ss_family != nmp->nm_sofamily)) {
			NFS_SOCK_DBG("Skipping socket family %d, want mount family %d\n", ss.ss_family, nmp->nm_sofamily);
			nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
			nss->nss_addrcnt -= 1;
			nss->nss_last = -2;
			continue;
		}

		/* Create the socket. */
		error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nss->nss_sotype,
		    nss->nss_port, nss->nss_protocol, nss->nss_version,
		    ((nss->nss_protocol == NFS_PROG) && NMFLAG(nmp, RESVPORT)), &nso);
		if (error) {
			return error;
		}

		/* remember where this socket came from and wire up the connect upcall */
		nso->nso_location = nss->nss_nextloc;
		nso->nso_wake = nss;
		error = sock_setupcall(nso->nso_so, nfs_connect_upcall, nso);
		if (error) {
			/* mark dead rather than destroy; the search reaper will clean it up */
			NFS_SOCK_DBG("sock_setupcall failed for socket %p setting nfs_connect_upcall error = %d\n", nso, error);
			lck_mtx_lock(&nso->nso_lock);
			nso->nso_error = error;
			nso->nso_flags |= NSO_DEAD;
			lck_mtx_unlock(&nso->nso_lock);
		}

		/* queue the candidate socket and advance to the next address */
		TAILQ_INSERT_TAIL(&nss->nss_socklist, nso, nso_link);
		nss->nss_sockcnt++;
		nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
		nss->nss_addrcnt -= 1;

		nss->nss_last = now->tv_sec;
	}

	/* out of addresses with nothing pending: record when we finished */
	if (nss->nss_addrcnt == 0 && nss->nss_last < 0) {
		nss->nss_last = now->tv_sec;
	}

	return error;
}
921
922 /*
 * nfs_connect_search_socket_connect: Connect an NFS socket nso for nfsmount nmp.
 * If successful, set the socket options for the socket as required by the mount.
925 *
926 * Assumes: nso->nso_lock is held on entry and return.
927 */
int
nfs_connect_search_socket_connect(struct nfsmount *nmp, struct nfs_socket *nso, int verbose)
{
	int error;

	/*
	 * Returns 1 when the socket is connected and its mount options have
	 * been applied, 0 otherwise (still waiting, or the socket was marked
	 * NSO_DEAD with the failure recorded in nso_error for the reaper).
	 */
	if ((nso->nso_sotype != SOCK_STREAM) && NMFLAG(nmp, NOCONNECT)) {
		/* no connection needed, just say it's already connected */
		NFS_SOCK_DBG("nfs connect %s UDP socket %p noconnect\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
		nso->nso_flags |= NSO_CONNECTED;
		nfs_socket_options(nmp, nso);
		return 1; /* Socket is connected and setup */
	} else if (!(nso->nso_flags & NSO_CONNECTING)) {
		/* initiate the connection */
		nso->nso_flags |= NSO_CONNECTING;
		/* drop nso_lock across the connect call; NSO_CONNECTING guards re-entry */
		lck_mtx_unlock(&nso->nso_lock);
		NFS_SOCK_DBG("nfs connect %s connecting socket %p %s\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso,
		    nso->nso_saddr->sa_family == AF_LOCAL ? ((struct sockaddr_un*)nso->nso_saddr)->sun_path : "");
		/* non-blocking connect: EINPROGRESS is the expected in-flight result */
		error = sock_connect(nso->nso_so, nso->nso_saddr, MSG_DONTWAIT);
		if (error) {
			NFS_SOCK_DBG("nfs connect %s connecting socket %p returned %d\n",
			    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
		}
		lck_mtx_lock(&nso->nso_lock);
		if (error && (error != EINPROGRESS)) {
			/* hard connect failure: mark dead so the reaper destroys it */
			nso->nso_error = error;
			nso->nso_flags |= NSO_DEAD;
			return 0;
		}
	}
	if (nso->nso_flags & NSO_CONNECTING) {
		/* check the connection */
		if (sock_isconnected(nso->nso_so)) {
			NFS_SOCK_DBG("nfs connect %s socket %p is connected\n",
			    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
			nso->nso_flags &= ~NSO_CONNECTING;
			nso->nso_flags |= NSO_CONNECTED;
			nfs_socket_options(nmp, nso);
			return 1; /* Socket is connected and setup */
		} else {
			/* not connected yet: query SO_ERROR to see if the attempt failed */
			int optlen = sizeof(error);
			error = 0;
			sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &error, &optlen);
			if (error) { /* we got an error on the socket */
				NFS_SOCK_DBG("nfs connect %s socket %p connection error %d\n",
				    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
				if (verbose) {
					printf("nfs connect socket error %d for %s\n",
					    error, vfs_statfs(nmp->nm_mountp)->f_mntfromname);
				}
				nso->nso_error = error;
				nso->nso_flags |= NSO_DEAD;
				return 0;
			}
		}
	}

	return 0; /* Waiting to be connected */
}
988
989 /*
990 * nfs_connect_search_ping: Send a null proc on the nso socket.
991 */
int
nfs_connect_search_ping(struct nfsmount *nmp, struct nfs_socket *nso, struct timeval *now)
{
	/* initiate a NULL RPC request */
	/*
	 * Send a NULL procedure call on nso to verify the server is answering.
	 * Returns 1 if the ping was sent, 0 if the socket was marked NSO_DEAD
	 * (send failure, short send, or header-build failure).
	 * Assumes nso->nso_lock is held on entry and return; it is dropped
	 * around the header build and the actual send.
	 */
	uint64_t xid = nso->nso_pingxid;
	mbuf_t m, mreq = NULL;
	struct msghdr msg;
	size_t reqlen, sentlen;
	uint32_t vers = nso->nso_version;
	int error;

	if (!vers) {
		/* version still being negotiated: pick the protocol's default */
		if (nso->nso_protocol == PMAPPROG) {
			vers = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4;
		} else if (nso->nso_protocol == NFS_PROG) {
			vers = PVER2MAJOR(nmp->nm_max_vers);
		}
	}
	/* drop the lock while building the RPC header (may allocate/block) */
	lck_mtx_unlock(&nso->nso_lock);
	NFS_SOCK_DBG("Pinging socket %p %d %d %d\n", nso, nso->nso_sotype, nso->nso_protocol, vers);
	error = nfsm_rpchead2(nmp, nso->nso_sotype, nso->nso_protocol, vers, 0, RPCAUTH_SYS,
	    vfs_context_ucred(vfs_context_kernel()), NULL, NULL, &xid, &mreq);
	lck_mtx_lock(&nso->nso_lock);
	if (!error) {
		nso->nso_flags |= NSO_PINGING;
		nso->nso_pingxid = R_XID32(xid);
		nso->nso_reqtimestamp = now->tv_sec;
		bzero(&msg, sizeof(msg));
		/* unconnected datagram socket: must supply the destination address */
		if ((nso->nso_sotype != SOCK_STREAM) && !sock_isconnected(nso->nso_so)) {
			msg.msg_name = nso->nso_saddr;
			msg.msg_namelen = nso->nso_saddr->sa_len;
		}
		/* total request length, summed over the mbuf chain */
		for (reqlen = 0, m = mreq; m; m = mbuf_next(m)) {
			reqlen += mbuf_len(m);
		}
		lck_mtx_unlock(&nso->nso_lock);
		NFS_SOCK_DUMP_MBUF("Sending ping packet", mreq);
		error = sock_sendmbuf(nso->nso_so, &msg, mreq, 0, &sentlen);
		NFS_SOCK_DBG("nfs connect %s verifying socket %p send rv %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
		lck_mtx_lock(&nso->nso_lock);
		/* a short send is treated the same as a timeout */
		if (!error && (sentlen != reqlen)) {
			error = ETIMEDOUT;
		}
	}
	if (error) {
		/* ping failed: mark the socket dead for the reaper */
		nso->nso_error = error;
		nso->nso_flags |= NSO_DEAD;
		return 0;
	}

	return 1;
}
1045
1046 /*
1047 * nfs_connect_search_socket_found: Take the found socket of the socket search list and assign it to the searched socket.
1048 * Set the nfs socket protocol and version if needed.
1049 */
1050 void
nfs_connect_search_socket_found(struct nfsmount * nmp,struct nfs_socket_search * nss,struct nfs_socket * nso)1051 nfs_connect_search_socket_found(struct nfsmount *nmp, struct nfs_socket_search *nss, struct nfs_socket *nso)
1052 {
1053 NFS_SOCK_DBG("nfs connect %s socket %p verified\n",
1054 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1055 if (!nso->nso_version) {
1056 /* If the version isn't set, the default must have worked. */
1057 if (nso->nso_protocol == PMAPPROG) {
1058 nso->nso_version = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4;
1059 }
1060 if (nso->nso_protocol == NFS_PROG) {
1061 nso->nso_version = PVER2MAJOR(nmp->nm_max_vers);
1062 }
1063 }
1064 TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
1065 nss->nss_sockcnt--;
1066 nss->nss_sock = nso;
1067 }
1068
1069 /*
 * nfs_connect_search_socket_reap: For each socket in the search list, mark any timed-out socket as dead and remove it from
 * the list. Dead sockets are then destroyed.
1072 */
void
nfs_connect_search_socket_reap(struct nfsmount *nmp __unused, struct nfs_socket_search *nss, struct timeval *now)
{
	struct nfs_socket *nso, *nsonext;

	/*
	 * Walk the candidate list with the _SAFE iterator since dead entries
	 * are removed mid-walk.  nmp is __unused because its only reference
	 * is inside NFS_SOCK_DBG, which may compile to nothing.
	 */
	TAILQ_FOREACH_SAFE(nso, &nss->nss_socklist, nso_link, nsonext) {
		lck_mtx_lock(&nso->nso_lock);
		if (now->tv_sec >= (nso->nso_timestamp + nss->nss_timeo)) {
			/* took too long */
			NFS_SOCK_DBG("nfs connect %s socket %p timed out\n",
			    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
			nso->nso_error = ETIMEDOUT;
			nso->nso_flags |= NSO_DEAD;
		}
		/* live sockets are left on the list untouched */
		if (!(nso->nso_flags & NSO_DEAD)) {
			lck_mtx_unlock(&nso->nso_lock);
			continue;
		}
		lck_mtx_unlock(&nso->nso_lock);
		NFS_SOCK_DBG("nfs connect %s reaping socket %p error = %d flags = %8.8x\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, nso->nso_error, nso->nso_flags);
		/* remember the best error seen, then unlink and destroy the socket */
		nfs_socket_search_update_error(nss, nso->nso_error);
		TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
		nss->nss_sockcnt--;
		nfs_socket_destroy(nso);
		/* If there are more sockets to try, force the starting of another socket */
		if (nss->nss_addrcnt > 0) {
			nss->nss_last = -2;
		}
	}
}
1104
1105 /*
1106 * nfs_connect_search_check: Check on the status of search and wait for replies if needed.
1107 */
1108 int
nfs_connect_search_check(struct nfsmount * nmp,struct nfs_socket_search * nss,struct timeval * now)1109 nfs_connect_search_check(struct nfsmount *nmp, struct nfs_socket_search *nss, struct timeval *now)
1110 {
1111 int error;
1112
1113 /* log a warning if connect is taking a while */
1114 if (((now->tv_sec - nss->nss_timestamp) >= 8) && ((nss->nss_flags & (NSS_VERBOSE | NSS_WARNED)) == NSS_VERBOSE)) {
1115 printf("nfs_connect: socket connect taking a while for %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1116 nss->nss_flags |= NSS_WARNED;
1117 }
1118 if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
1119 return EINTR;
1120 }
1121 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 0))) {
1122 return error;
1123 }
1124
1125 /* If we were succesfull at sending a ping, wait up to a second for a reply */
1126 if (nss->nss_last >= 0) {
1127 tsleep(nss, PSOCK, "nfs_connect_search_wait", hz);
1128 }
1129
1130 return 0;
1131 }
1132
1133
1134 /*
1135 * Continue the socket search until we have something to report.
1136 */
int
nfs_connect_search_loop(struct nfsmount *nmp, struct nfs_socket_search *nss)
{
	struct nfs_socket *nso;
	struct timeval now;
	int error;
	int verbose = (nss->nss_flags & NSS_VERBOSE);

	/*
	 * Drive the socket search state machine: start new candidate sockets,
	 * advance each candidate (connect, then ping), reap the dead, and
	 * repeat until a socket is verified or all candidates are exhausted.
	 * Returns 0 when nss->nss_sock is set or nothing is left to try;
	 * otherwise an error (e.g. EINTR from an unmount or signal).
	 */
loop:
	microuptime(&now);
	NFS_SOCK_DBG("nfs connect %s search %ld\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, now.tv_sec);

	/* add a new socket to the socket list if needed and available */
	error = nfs_connect_search_new_socket(nmp, nss, &now);
	if (error) {
		NFS_SOCK_DBG("nfs connect returned %d\n", error);
		return error;
	}

	/* check each active socket on the list and try to push it along */
	TAILQ_FOREACH(nso, &nss->nss_socklist, nso_link) {
		/* each candidate is advanced under its own nso_lock */
		lck_mtx_lock(&nso->nso_lock);

		/* If not connected connect it */
		if (!(nso->nso_flags & NSO_CONNECTED)) {
			if (!nfs_connect_search_socket_connect(nmp, nso, verbose)) {
				/* still connecting (or now dead) — move on */
				lck_mtx_unlock(&nso->nso_lock);
				continue;
			}
		}

		/* If the socket hasn't been verified or in a ping, ping it. We also handle UDP retransmits */
		if (!(nso->nso_flags & (NSO_PINGING | NSO_VERIFIED)) ||
		    ((nso->nso_sotype == SOCK_DGRAM) && (now.tv_sec >= nso->nso_reqtimestamp + 2))) {
			if (!nfs_connect_search_ping(nmp, nso, &now)) {
				/* ping failed — socket was marked dead */
				lck_mtx_unlock(&nso->nso_lock);
				continue;
			}
		}

		/* Has the socket been verified by the up call routine? */
		if (nso->nso_flags & NSO_VERIFIED) {
			/* WOOHOO!! This socket looks good! */
			nfs_connect_search_socket_found(nmp, nss, nso);
			lck_mtx_unlock(&nso->nso_lock);
			break;
		}
		lck_mtx_unlock(&nso->nso_lock);
	}

	/* Check for timed out sockets and mark as dead and then remove all dead sockets. */
	nfs_connect_search_socket_reap(nmp, nss, &now);

	/*
	 * Keep looping if we haven't found a socket yet and we have more
	 * sockets to (continue to) try.
	 */
	error = 0;
	if (!nss->nss_sock && (!TAILQ_EMPTY(&nss->nss_socklist) || nss->nss_addrcnt)) {
		/* wait (up to a second) for progress, then go around again */
		error = nfs_connect_search_check(nmp, nss, &now);
		if (!error) {
			goto loop;
		}
	}

	NFS_SOCK_DBG("nfs connect %s returning %d\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
	return error;
}
1205
1206 /*
1207 * Initialize a new NFS connection.
1208 *
1209 * Search for a location to connect a socket to and initialize the connection.
1210 *
1211 * An NFS mount may have multiple locations/servers/addresses available.
1212 * We attempt to connect to each one asynchronously and will start
1213 * several sockets in parallel if other locations are slow to answer.
1214 * We'll use the first NFS socket we can successfully set up.
1215 *
1216 * The search may involve contacting the portmapper service first.
1217 *
1218 * A mount's initial connection may require negotiating some parameters such
1219 * as socket type and NFS version.
1220 */
1221
1222 int
nfs_connect(struct nfsmount * nmp,int verbose,int timeo)1223 nfs_connect(struct nfsmount *nmp, int verbose, int timeo)
1224 {
1225 struct nfs_socket_search nss;
1226 struct nfs_socket *nso, *nsonfs;
1227 struct sockaddr_storage ss;
1228 struct sockaddr *saddr, *oldsaddr;
1229 sock_upcall upcall;
1230 #if CONFIG_NFS4
1231 struct timeval now;
1232 #endif
1233 struct timeval start;
1234 int error, savederror, nfsvers;
1235 int tryv4 = 1;
1236 uint8_t sotype = nmp->nm_sotype ? nmp->nm_sotype : SOCK_STREAM;
1237 fhandle_t *fh = NULL;
1238 char *path = NULL;
1239 in_port_t port = 0;
1240 int addrtotal = 0;
1241
1242 /* paranoia... check that we have at least one address in the locations */
1243 uint32_t loc, serv;
1244 for (loc = 0; loc < nmp->nm_locations.nl_numlocs; loc++) {
1245 for (serv = 0; serv < nmp->nm_locations.nl_locations[loc]->nl_servcount; serv++) {
1246 addrtotal += nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount;
1247 if (nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount == 0) {
1248 NFS_SOCK_DBG("nfs connect %s search, server %s has no addresses\n",
1249 vfs_statfs(nmp->nm_mountp)->f_mntfromname,
1250 nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name);
1251 }
1252 }
1253 }
1254
1255 if (addrtotal == 0) {
1256 NFS_SOCK_DBG("nfs connect %s search failed, no addresses\n",
1257 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1258 return EINVAL;
1259 } else {
1260 NFS_SOCK_DBG("nfs connect %s has %d addresses\n",
1261 vfs_statfs(nmp->nm_mountp)->f_mntfromname, addrtotal);
1262 }
1263
1264 lck_mtx_lock(&nmp->nm_lock);
1265 nmp->nm_sockflags |= NMSOCK_CONNECTING;
1266 nmp->nm_nss = &nss;
1267 lck_mtx_unlock(&nmp->nm_lock);
1268 microuptime(&start);
1269 savederror = error = 0;
1270
1271 tryagain:
1272 /* initialize socket search state */
1273 bzero(&nss, sizeof(nss));
1274 nss.nss_addrcnt = addrtotal;
1275 nss.nss_error = savederror;
1276 TAILQ_INIT(&nss.nss_socklist);
1277 nss.nss_sotype = sotype;
1278 nss.nss_startloc = nmp->nm_locations.nl_current;
1279 nss.nss_timestamp = start.tv_sec;
1280 nss.nss_timeo = timeo;
1281 if (verbose) {
1282 nss.nss_flags |= NSS_VERBOSE;
1283 }
1284
1285 /* First time connecting, we may need to negotiate some things */
1286 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1287 NFS_SOCK_DBG("so_family = %d\n", nmp->nm_sofamily);
1288 NFS_SOCK_DBG("nfs port = %d local: <%s>\n", nmp->nm_nfsport, nmp->nm_nfs_localport ? nmp->nm_nfs_localport : "");
1289 NFS_SOCK_DBG("mount port = %d local: <%s>\n", nmp->nm_mountport, nmp->nm_mount_localport ? nmp->nm_mount_localport : "");
1290 if (!nmp->nm_vers) {
1291 /* No NFS version specified... */
1292 if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) {
1293 #if CONFIG_NFS4
1294 if (PVER2MAJOR(nmp->nm_max_vers) >= NFS_VER4 && tryv4) {
1295 nss.nss_port = NFS_PORT;
1296 nss.nss_protocol = NFS_PROG;
1297 nss.nss_version = 4;
1298 nss.nss_flags |= NSS_FALLBACK2PMAP;
1299 } else {
1300 #endif
1301 /* ...connect to portmapper first if we (may) need any ports. */
1302 nss.nss_port = PMAPPORT;
1303 nss.nss_protocol = PMAPPROG;
1304 nss.nss_version = 0;
1305 #if CONFIG_NFS4
1306 }
1307 #endif
1308 } else {
1309 /* ...connect to NFS port first. */
1310 nss.nss_port = nmp->nm_nfsport;
1311 nss.nss_protocol = NFS_PROG;
1312 nss.nss_version = 0;
1313 }
1314 #if CONFIG_NFS4
1315 } else if (nmp->nm_vers >= NFS_VER4) {
1316 if (tryv4) {
1317 /* For NFSv4, we use the given (or default) port. */
1318 nss.nss_port = nmp->nm_nfsport ? nmp->nm_nfsport : NFS_PORT;
1319 nss.nss_protocol = NFS_PROG;
1320 nss.nss_version = 4;
1321 /*
1322 * set NSS_FALLBACK2PMAP here to pick up any non standard port
1323 * if no port is specified on the mount;
1324 * Note nm_vers is set so we will only try NFS_VER4.
1325 */
1326 if (!nmp->nm_nfsport) {
1327 nss.nss_flags |= NSS_FALLBACK2PMAP;
1328 }
1329 } else {
1330 nss.nss_port = PMAPPORT;
1331 nss.nss_protocol = PMAPPROG;
1332 nss.nss_version = 0;
1333 }
1334 #endif
1335 } else {
1336 /* For NFSv3/v2... */
1337 if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) {
1338 /* ...connect to portmapper first if we need any ports. */
1339 nss.nss_port = PMAPPORT;
1340 nss.nss_protocol = PMAPPROG;
1341 nss.nss_version = 0;
1342 } else {
1343 /* ...connect to NFS port first. */
1344 nss.nss_port = nmp->nm_nfsport;
1345 nss.nss_protocol = NFS_PROG;
1346 nss.nss_version = nmp->nm_vers;
1347 }
1348 }
1349 NFS_SOCK_DBG("nfs connect first %s, so type %d port %d prot %d %d\n",
1350 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port,
1351 nss.nss_protocol, nss.nss_version);
1352 } else {
1353 /* we've connected before, just connect to NFS port */
1354 if (!nmp->nm_nfsport) {
1355 /* need to ask portmapper which port that would be */
1356 nss.nss_port = PMAPPORT;
1357 nss.nss_protocol = PMAPPROG;
1358 nss.nss_version = 0;
1359 } else {
1360 nss.nss_port = nmp->nm_nfsport;
1361 nss.nss_protocol = NFS_PROG;
1362 nss.nss_version = nmp->nm_vers;
1363 }
1364 NFS_SOCK_DBG("nfs connect %s, so type %d port %d prot %d %d\n",
1365 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port,
1366 nss.nss_protocol, nss.nss_version);
1367 }
1368
1369 /* Set next location to first valid location. */
1370 /* If start location is invalid, find next location. */
1371 nss.nss_nextloc = nss.nss_startloc;
1372 if ((nss.nss_nextloc.nli_serv >= nmp->nm_locations.nl_locations[nss.nss_nextloc.nli_loc]->nl_servcount) ||
1373 (nss.nss_nextloc.nli_addr >= nmp->nm_locations.nl_locations[nss.nss_nextloc.nli_loc]->nl_servers[nss.nss_nextloc.nli_serv]->ns_addrcount)) {
1374 nfs_location_next(&nmp->nm_locations, &nss.nss_nextloc);
1375 if (!nfs_location_index_cmp(&nss.nss_nextloc, &nss.nss_startloc)) {
1376 NFS_SOCK_DBG("nfs connect %s search failed, couldn't find a valid location index\n",
1377 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1378 return ENOENT;
1379 }
1380 }
1381 nss.nss_last = -1;
1382
1383 keepsearching:
1384
1385 error = nfs_connect_search_loop(nmp, &nss);
1386 if (error || !nss.nss_sock) {
1387 /* search failed */
1388 nfs_socket_search_cleanup(&nss);
1389 if (nss.nss_flags & NSS_FALLBACK2PMAP) {
1390 tryv4 = 0;
1391 NFS_SOCK_DBG("nfs connect %s TCP failed for V4 %d %d, trying PORTMAP\n",
1392 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error);
1393 goto tryagain;
1394 }
1395
1396 if (!error && (nss.nss_sotype == SOCK_STREAM) && !nmp->nm_sotype && (nmp->nm_vers < NFS_VER4)) {
1397 /* Try using UDP */
1398 sotype = SOCK_DGRAM;
1399 savederror = nss.nss_error;
1400 NFS_SOCK_DBG("nfs connect %s TCP failed %d %d, trying UDP\n",
1401 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error);
1402 goto tryagain;
1403 }
1404 if (!error) {
1405 error = nss.nss_error ? nss.nss_error : ETIMEDOUT;
1406 }
1407 lck_mtx_lock(&nmp->nm_lock);
1408 nmp->nm_sockflags &= ~NMSOCK_CONNECTING;
1409 nmp->nm_nss = NULL;
1410 lck_mtx_unlock(&nmp->nm_lock);
1411 if (nss.nss_flags & NSS_WARNED) {
1412 log(LOG_INFO, "nfs_connect: socket connect aborted for %s\n",
1413 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1414 }
1415 if (fh) {
1416 NFS_ZFREE(nfs_fhandle_zone, fh);
1417 }
1418 if (path) {
1419 NFS_ZFREE(ZV_NAMEI, path);
1420 }
1421 NFS_SOCK_DBG("nfs connect %s search failed, returning %d\n",
1422 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
1423 return error;
1424 }
1425
1426 /* try to use nss_sock */
1427 nso = nss.nss_sock;
1428 nss.nss_sock = NULL;
1429
1430 /* We may be speaking to portmap first... to determine port(s). */
1431 if (nso->nso_saddr->sa_family == AF_INET) {
1432 port = ntohs(((struct sockaddr_in*)nso->nso_saddr)->sin_port);
1433 } else if (nso->nso_saddr->sa_family == AF_INET6) {
1434 port = ntohs(((struct sockaddr_in6*)nso->nso_saddr)->sin6_port);
1435 } else if (nso->nso_saddr->sa_family == AF_LOCAL) {
1436 if (nso->nso_protocol == PMAPPROG) {
1437 port = PMAPPORT;
1438 }
1439 }
1440
1441 if (port == PMAPPORT) {
1442 /* Use this portmapper port to get the port #s we need. */
1443 NFS_SOCK_DBG("nfs connect %s got portmapper socket %p\n",
1444 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1445
1446 /* remove the connect upcall so nfs_portmap_lookup() can use this socket */
1447 sock_setupcall(nso->nso_so, NULL, NULL);
1448
1449 /* Set up socket address and port for NFS socket. */
1450 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1451
1452 /* If NFS version not set, try nm_max_vers down to nm_min_vers */
1453 nfsvers = nmp->nm_vers ? nmp->nm_vers : PVER2MAJOR(nmp->nm_max_vers);
1454 if (!(port = nmp->nm_nfsport)) {
1455 if (ss.ss_family == AF_INET) {
1456 ((struct sockaddr_in*)&ss)->sin_port = htons(0);
1457 } else if (ss.ss_family == AF_INET6) {
1458 ((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
1459 } else if (ss.ss_family == AF_LOCAL) {
1460 if (((struct sockaddr_un*)&ss)->sun_path[0] == '/') {
1461 NFS_SOCK_DBG("Looking up NFS socket over %s\n", ((struct sockaddr_un*)&ss)->sun_path);
1462 }
1463 }
1464 for (; nfsvers >= (int)PVER2MAJOR(nmp->nm_min_vers); nfsvers--) {
1465 if (nmp->nm_vers && nmp->nm_vers != nfsvers) {
1466 continue; /* Wrong version */
1467 }
1468 #if CONFIG_NFS4
1469 if (nfsvers == NFS_VER4 && nso->nso_sotype == SOCK_DGRAM) {
1470 continue; /* NFSv4 does not do UDP */
1471 }
1472 #endif
1473 if (ss.ss_family == AF_LOCAL && nmp->nm_nfs_localport) {
1474 struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
1475 NFS_SOCK_DBG("Using supplied local address %s for NFS_PROG\n", nmp->nm_nfs_localport);
1476 strlcpy(sun->sun_path, nmp->nm_nfs_localport, sizeof(sun->sun_path));
1477 error = 0;
1478 } else {
1479 NFS_SOCK_DBG("Calling Portmap/Rpcbind for NFS_PROG");
1480 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
1481 nso->nso_so, NFS_PROG, nfsvers, nso->nso_sotype, timeo);
1482 }
1483 if (!error) {
1484 if (ss.ss_family == AF_INET) {
1485 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
1486 } else if (ss.ss_family == AF_INET6) {
1487 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
1488 } else if (ss.ss_family == AF_LOCAL) {
1489 port = ((struct sockaddr_un *)&ss)->sun_path[0] ? NFS_PORT : 0;
1490 }
1491 if (!port) {
1492 error = EPROGUNAVAIL;
1493 }
1494 #if CONFIG_NFS4
1495 if (port == NFS_PORT && nfsvers == NFS_VER4 && tryv4 == 0) {
1496 continue; /* We already tried this */
1497 }
1498 #endif
1499 }
1500 if (!error) {
1501 break;
1502 }
1503 }
1504 if (nfsvers < (int)PVER2MAJOR(nmp->nm_min_vers) && error == 0) {
1505 error = EPROGUNAVAIL;
1506 }
1507 if (error) {
1508 nfs_socket_search_update_error(&nss, error);
1509 nfs_socket_destroy(nso);
1510 NFS_SOCK_DBG("Could not lookup NFS socket address for version %d error = %d\n", nfsvers, error);
1511 goto keepsearching;
1512 }
1513 } else if (nmp->nm_nfs_localport) {
1514 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_nfs_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
1515 NFS_SOCK_DBG("Using supplied nfs_local_port %s for NFS_PROG\n", nmp->nm_nfs_localport);
1516 }
1517
1518 /* Create NFS protocol socket and add it to the list of sockets. */
1519 /* N.B. If nfsvers is NFS_VER4 at this point then we're on a non standard port */
1520 if (ss.ss_family == AF_LOCAL) {
1521 NFS_SOCK_DBG("Creating NFS socket for %s port = %d\n", ((struct sockaddr_un*)&ss)->sun_path, port);
1522 }
1523 error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nso->nso_sotype, port,
1524 NFS_PROG, nfsvers, NMFLAG(nmp, RESVPORT), &nsonfs);
1525 if (error) {
1526 nfs_socket_search_update_error(&nss, error);
1527 nfs_socket_destroy(nso);
1528 NFS_SOCK_DBG("Could not create NFS socket: %d\n", error);
1529 goto keepsearching;
1530 }
1531 nsonfs->nso_location = nso->nso_location;
1532 nsonfs->nso_wake = &nss;
1533 error = sock_setupcall(nsonfs->nso_so, nfs_connect_upcall, nsonfs);
1534 if (error) {
1535 nfs_socket_search_update_error(&nss, error);
1536 nfs_socket_destroy(nsonfs);
1537 nfs_socket_destroy(nso);
1538 NFS_SOCK_DBG("Could not nfs_connect_upcall: %d", error);
1539 goto keepsearching;
1540 }
1541 TAILQ_INSERT_TAIL(&nss.nss_socklist, nsonfs, nso_link);
1542 nss.nss_sockcnt++;
1543 if ((nfsvers < NFS_VER4) && !(nmp->nm_sockflags & NMSOCK_HASCONNECTED) && !NM_OMATTR_GIVEN(nmp, FH)) {
1544 /* Set up socket address and port for MOUNT socket. */
1545 error = 0;
1546 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1547 port = nmp->nm_mountport;
1548 NFS_SOCK_DBG("mount port = %d\n", port);
1549 if (ss.ss_family == AF_INET) {
1550 ((struct sockaddr_in*)&ss)->sin_port = htons(port);
1551 } else if (ss.ss_family == AF_INET6) {
1552 ((struct sockaddr_in6*)&ss)->sin6_port = htons(port);
1553 } else if (ss.ss_family == AF_LOCAL && nmp->nm_mount_localport) {
1554 NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp->nm_mount_localport, nmp->nm_mountport);
1555 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_mount_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
1556 }
1557 if (!port) {
1558 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1559 /* If NFS version is unknown, optimistically choose for NFSv3. */
1560 int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
1561 int mntproto = (NM_OMFLAG(nmp, MNTUDP) || (nso->nso_sotype == SOCK_DGRAM)) ? IPPROTO_UDP : IPPROTO_TCP;
1562 NFS_SOCK_DBG("Looking up mount port with socket %p\n", nso->nso_so);
1563 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
1564 nso->nso_so, RPCPROG_MNT, mntvers, mntproto == IPPROTO_UDP ? SOCK_DGRAM : SOCK_STREAM, timeo);
1565 }
1566 if (!error) {
1567 if (ss.ss_family == AF_INET) {
1568 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
1569 } else if (ss.ss_family == AF_INET6) {
1570 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
1571 } else if (ss.ss_family == AF_LOCAL) {
1572 port = (((struct sockaddr_un*)&ss)->sun_path[0] != '\0');
1573 }
1574 if (!port) {
1575 error = EPROGUNAVAIL;
1576 }
1577 }
1578 /* create sockaddr for MOUNT */
1579 if (!error) {
1580 nsonfs->nso_saddr2 = (struct sockaddr *)alloc_sockaddr(ss.ss_len, Z_WAITOK | Z_NOFAIL);
1581 }
1582 if (!error) {
1583 bcopy(&ss, nsonfs->nso_saddr2, ss.ss_len);
1584 }
1585 if (error) {
1586 NFS_SOCK_DBG("Could not create mount sockaet address %d", error);
1587 lck_mtx_lock(&nsonfs->nso_lock);
1588 nsonfs->nso_error = error;
1589 nsonfs->nso_flags |= NSO_DEAD;
1590 lck_mtx_unlock(&nsonfs->nso_lock);
1591 }
1592 }
1593 NFS_SOCK_DBG("Destroying socket %p so %p\n", nso, nso->nso_so);
1594 nfs_socket_destroy(nso);
1595 goto keepsearching;
1596 }
1597
1598 /* nso is an NFS socket */
1599 NFS_SOCK_DBG("nfs connect %s got NFS socket %p\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1600
1601 /* If NFS version wasn't specified, it was determined during the connect. */
1602 nfsvers = nmp->nm_vers ? nmp->nm_vers : (int)nso->nso_version;
1603
1604 /* Perform MOUNT call for initial NFSv2/v3 connection/mount. */
1605 if ((nfsvers < NFS_VER4) && !(nmp->nm_sockflags & NMSOCK_HASCONNECTED) && !NM_OMATTR_GIVEN(nmp, FH)) {
1606 error = 0;
1607 saddr = nso->nso_saddr2;
1608 if (!saddr) {
1609 /* Need sockaddr for MOUNT port */
1610 NFS_SOCK_DBG("Getting mount address mountport = %d, mount_localport = %s\n", nmp->nm_mountport, nmp->nm_mount_localport);
1611 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1612 port = nmp->nm_mountport;
1613 if (ss.ss_family == AF_INET) {
1614 ((struct sockaddr_in*)&ss)->sin_port = htons(port);
1615 } else if (ss.ss_family == AF_INET6) {
1616 ((struct sockaddr_in6*)&ss)->sin6_port = htons(port);
1617 } else if (ss.ss_family == AF_LOCAL && nmp->nm_mount_localport) {
1618 NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp->nm_mount_localport, nmp->nm_mountport);
1619 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_mount_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
1620 }
1621 if (!port) {
1622 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1623 int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
1624 int so_type = NM_OMFLAG(nmp, MNTUDP) ? SOCK_DGRAM : nso->nso_sotype;
1625 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
1626 NULL, RPCPROG_MNT, mntvers, so_type, timeo);
1627 if (ss.ss_family == AF_INET) {
1628 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
1629 } else if (ss.ss_family == AF_INET6) {
1630 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
1631 }
1632 }
1633 if (!error) {
1634 if (port) {
1635 saddr = (struct sockaddr*)&ss;
1636 } else {
1637 error = EPROGUNAVAIL;
1638 }
1639 }
1640 }
1641 if (!error) {
1642 error = nfs3_check_lockmode(nmp, saddr, nso->nso_sotype, timeo);
1643 if (error) {
1644 nfs_socket_search_update_error(&nss, error);
1645 nfs_socket_destroy(nso);
1646 return error;
1647 }
1648 }
1649 if (saddr) {
1650 fh = zalloc(nfs_fhandle_zone);
1651 }
1652 if (saddr && fh) {
1653 path = zalloc(ZV_NAMEI);
1654 }
1655 if (!saddr || !fh || !path) {
1656 if (!error) {
1657 error = ENOMEM;
1658 }
1659 if (fh) {
1660 NFS_ZFREE(nfs_fhandle_zone, fh);
1661 }
1662 if (path) {
1663 NFS_ZFREE(ZV_NAMEI, path);
1664 }
1665 nfs_socket_search_update_error(&nss, error);
1666 nfs_socket_destroy(nso);
1667 goto keepsearching;
1668 }
1669 nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location, path, MAXPATHLEN, 1);
1670 error = nfs3_mount_rpc(nmp, saddr, nso->nso_sotype, nfsvers,
1671 path, vfs_context_current(), timeo, fh, &nmp->nm_servsec);
1672 NFS_SOCK_DBG("nfs connect %s socket %p mount %d\n",
1673 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
1674 if (!error) {
1675 /* Make sure we can agree on a security flavor. */
1676 int o, s; /* indices into mount option and server security flavor lists */
1677 int found = 0;
1678
1679 if ((nfsvers == NFS_VER3) && !nmp->nm_servsec.count) {
1680 /* Some servers return an empty list to indicate RPCAUTH_SYS? */
1681 nmp->nm_servsec.count = 1;
1682 nmp->nm_servsec.flavors[0] = RPCAUTH_SYS;
1683 }
1684 if (nmp->nm_sec.count) {
1685 /* Choose the first flavor in our list that the server supports. */
1686 if (!nmp->nm_servsec.count) {
1687 /* we don't know what the server supports, just use our first choice */
1688 nmp->nm_auth = nmp->nm_sec.flavors[0];
1689 found = 1;
1690 }
1691 for (o = 0; !found && (o < nmp->nm_sec.count); o++) {
1692 for (s = 0; !found && (s < nmp->nm_servsec.count); s++) {
1693 if (nmp->nm_sec.flavors[o] == nmp->nm_servsec.flavors[s]) {
1694 nmp->nm_auth = nmp->nm_sec.flavors[o];
1695 found = 1;
1696 }
1697 }
1698 }
1699 } else {
1700 /* Choose the first one we support from the server's list. */
1701 if (!nmp->nm_servsec.count) {
1702 nmp->nm_auth = RPCAUTH_SYS;
1703 found = 1;
1704 }
1705 for (s = 0; s < nmp->nm_servsec.count; s++) {
1706 switch (nmp->nm_servsec.flavors[s]) {
1707 case RPCAUTH_SYS:
1708 /* prefer RPCAUTH_SYS to RPCAUTH_NONE */
1709 if (found && (nmp->nm_auth == RPCAUTH_NONE)) {
1710 found = 0;
1711 }
1712 OS_FALLTHROUGH;
1713 case RPCAUTH_NONE:
1714 case RPCAUTH_KRB5:
1715 case RPCAUTH_KRB5I:
1716 case RPCAUTH_KRB5P:
1717 if (!found) {
1718 nmp->nm_auth = nmp->nm_servsec.flavors[s];
1719 found = 1;
1720 }
1721 break;
1722 }
1723 }
1724 }
1725 error = !found ? EAUTH : 0;
1726 }
1727 NFS_ZFREE(ZV_NAMEI, path);
1728 if (error) {
1729 nfs_socket_search_update_error(&nss, error);
1730 NFS_ZFREE(nfs_fhandle_zone, fh);
1731 nfs_socket_destroy(nso);
1732 goto keepsearching;
1733 }
1734 if (nmp->nm_fh) {
1735 NFS_ZFREE(nfs_fhandle_zone, nmp->nm_fh);
1736 }
1737 nmp->nm_fh = fh;
1738 fh = NULL;
1739 NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_CALLUMNT);
1740 }
1741
1742 /* put the real upcall in place */
1743 upcall = (nso->nso_sotype == SOCK_STREAM) ? nfs_tcp_rcv : nfs_udp_rcv;
1744 error = sock_setupcall(nso->nso_so, upcall, nmp);
1745 if (error) {
1746 nfs_socket_search_update_error(&nss, error);
1747 nfs_socket_destroy(nso);
1748 goto keepsearching;
1749 }
1750
1751 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1752 /* set mntfromname to this location */
1753 if (!NM_OMATTR_GIVEN(nmp, MNTFROM)) {
1754 nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location,
1755 vfs_statfs(nmp->nm_mountp)->f_mntfromname,
1756 sizeof(vfs_statfs(nmp->nm_mountp)->f_mntfromname), 0);
1757 }
1758 /* some negotiated values need to remain unchanged for the life of the mount */
1759 if (!nmp->nm_sotype) {
1760 nmp->nm_sotype = nso->nso_sotype;
1761 }
1762 if (!nmp->nm_vers) {
1763 nmp->nm_vers = nfsvers;
1764 #if CONFIG_NFS4
1765 /* If we negotiated NFSv4, set nm_nfsport if we ended up on the standard NFS port */
1766 if ((nfsvers >= NFS_VER4) && !NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) {
1767 if (nso->nso_saddr->sa_family == AF_INET) {
1768 port = ((struct sockaddr_in*)nso->nso_saddr)->sin_port = htons(port);
1769 } else if (nso->nso_saddr->sa_family == AF_INET6) {
1770 port = ((struct sockaddr_in6*)nso->nso_saddr)->sin6_port = htons(port);
1771 } else {
1772 port = 0;
1773 }
1774 if (port == NFS_PORT) {
1775 nmp->nm_nfsport = NFS_PORT;
1776 }
1777 }
1778 #endif
1779 }
1780 #if CONFIG_NFS4
1781 /* do some version-specific pre-mount set up */
1782 if (nmp->nm_vers >= NFS_VER4) {
1783 microtime(&now);
1784 nmp->nm_mounttime = ((uint64_t)now.tv_sec << 32) | now.tv_usec;
1785 if (!NMFLAG(nmp, NOCALLBACK)) {
1786 nfs4_mount_callback_setup(nmp);
1787 }
1788 }
1789 #endif
1790 }
1791
1792 /* Initialize NFS socket state variables */
1793 lck_mtx_lock(&nmp->nm_lock);
1794 nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
1795 nmp->nm_srtt[3] = (NFS_TIMEO << 3);
1796 nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
1797 nmp->nm_sdrtt[3] = 0;
1798 if (nso->nso_sotype == SOCK_DGRAM) {
1799 nmp->nm_cwnd = NFS_MAXCWND / 2; /* Initial send window */
1800 nmp->nm_sent = 0;
1801 } else if (nso->nso_sotype == SOCK_STREAM) {
1802 nmp->nm_timeouts = 0;
1803 }
1804 nmp->nm_sockflags &= ~NMSOCK_CONNECTING;
1805 nmp->nm_sockflags |= NMSOCK_SETUP;
1806 /* move the socket to the mount structure */
1807 nmp->nm_nso = nso;
1808 oldsaddr = nmp->nm_saddr;
1809 nmp->nm_saddr = nso->nso_saddr;
1810 lck_mtx_unlock(&nmp->nm_lock);
1811 error = nfs_connect_setup(nmp);
1812 lck_mtx_lock(&nmp->nm_lock);
1813 nmp->nm_sockflags &= ~NMSOCK_SETUP;
1814 if (!error) {
1815 nmp->nm_sockflags |= NMSOCK_READY;
1816 wakeup(&nmp->nm_sockflags);
1817 }
1818 if (error) {
1819 NFS_SOCK_DBG("nfs connect %s socket %p setup failed %d\n",
1820 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
1821 nfs_socket_search_update_error(&nss, error);
1822 nmp->nm_saddr = oldsaddr;
1823 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1824 /* undo settings made prior to setup */
1825 if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_SOCKET_TYPE)) {
1826 nmp->nm_sotype = 0;
1827 }
1828 #if CONFIG_NFS4
1829 if (nmp->nm_vers >= NFS_VER4) {
1830 if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) {
1831 nmp->nm_nfsport = 0;
1832 }
1833 if (nmp->nm_cbid) {
1834 nfs4_mount_callback_shutdown(nmp);
1835 }
1836 if (IS_VALID_CRED(nmp->nm_mcred)) {
1837 kauth_cred_unref(&nmp->nm_mcred);
1838 }
1839 bzero(&nmp->nm_un, sizeof(nmp->nm_un));
1840 }
1841 #endif
1842 nmp->nm_vers = 0;
1843 }
1844 lck_mtx_unlock(&nmp->nm_lock);
1845 nmp->nm_nso = NULL;
1846 nfs_socket_destroy(nso);
1847 goto keepsearching;
1848 }
1849
1850 /* update current location */
1851 if ((nmp->nm_locations.nl_current.nli_flags & NLI_VALID) &&
1852 (nmp->nm_locations.nl_current.nli_serv != nso->nso_location.nli_serv)) {
1853 /* server has changed, we should initiate failover/recovery */
1854 // XXX
1855 }
1856 nmp->nm_locations.nl_current = nso->nso_location;
1857 nmp->nm_locations.nl_current.nli_flags |= NLI_VALID;
1858
1859 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1860 /* We have now successfully connected... make a note of it. */
1861 nmp->nm_sockflags |= NMSOCK_HASCONNECTED;
1862 }
1863
1864 lck_mtx_unlock(&nmp->nm_lock);
1865
1866 free_sockaddr(oldsaddr);
1867
1868 if (nss.nss_flags & NSS_WARNED) {
1869 log(LOG_INFO, "nfs_connect: socket connect completed for %s\n",
1870 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1871 }
1872
1873 nmp->nm_nss = NULL;
1874 nfs_socket_search_cleanup(&nss);
1875 if (fh) {
1876 NFS_ZFREE(nfs_fhandle_zone, fh);
1877 }
1878 if (path) {
1879 NFS_ZFREE(ZV_NAMEI, path);
1880 }
1881 NFS_SOCK_DBG("nfs connect %s success\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1882 return 0;
1883 }
1884
1885
1886 /* setup & confirm socket connection is functional */
1887 int
nfs_connect_setup(__unused struct nfsmount * nmp)1888 nfs_connect_setup(
1889 #if !CONFIG_NFS4
1890 __unused
1891 #endif
1892 struct nfsmount *nmp)
1893 {
1894 int error = 0;
1895 #if CONFIG_NFS4
1896 if (nmp->nm_vers >= NFS_VER4) {
1897 if (nmp->nm_state & NFSSTA_CLIENTID) {
1898 /* first, try to renew our current state */
1899 error = nfs4_renew(nmp, R_SETUP);
1900 if ((error == NFSERR_ADMIN_REVOKED) ||
1901 (error == NFSERR_CB_PATH_DOWN) ||
1902 (error == NFSERR_EXPIRED) ||
1903 (error == NFSERR_LEASE_MOVED) ||
1904 (error == NFSERR_STALE_CLIENTID)) {
1905 lck_mtx_lock(&nmp->nm_lock);
1906 nfs_need_recover(nmp, error);
1907 lck_mtx_unlock(&nmp->nm_lock);
1908 }
1909 }
1910 error = nfs4_setclientid(nmp);
1911 }
1912 #endif
1913 return error;
1914 }
1915
1916 /*
1917 * NFS socket reconnect routine:
1918 * Called when a connection is broken.
1919 * - disconnect the old socket
1920 * - nfs_connect() again
1921 * - set R_MUSTRESEND for all outstanding requests on mount point
1922 * If this fails the mount point is DEAD!
1923 */
1924 int
nfs_reconnect(struct nfsmount * nmp)1925 nfs_reconnect(struct nfsmount *nmp)
1926 {
1927 struct nfsreq *rq;
1928 struct timeval now;
1929 thread_t thd = current_thread();
1930 int error, wentdown = 0, verbose = 1;
1931 time_t lastmsg;
1932 int timeo;
1933
1934 microuptime(&now);
1935 lastmsg = now.tv_sec - (nmp->nm_tprintf_delay - nmp->nm_tprintf_initial_delay);
1936
1937 nfs_disconnect(nmp);
1938
1939
1940 lck_mtx_lock(&nmp->nm_lock);
1941 timeo = nfs_is_squishy(nmp) ? 8 : 30;
1942 lck_mtx_unlock(&nmp->nm_lock);
1943
1944 while ((error = nfs_connect(nmp, verbose, timeo))) {
1945 verbose = 0;
1946 nfs_disconnect(nmp);
1947 if ((error == EINTR) || (error == ERESTART)) {
1948 return EINTR;
1949 }
1950 if (error == EIO) {
1951 return EIO;
1952 }
1953 microuptime(&now);
1954 if ((lastmsg + nmp->nm_tprintf_delay) < now.tv_sec) {
1955 lastmsg = now.tv_sec;
1956 nfs_down(nmp, thd, error, NFSSTA_TIMEO, "can not connect", 0);
1957 wentdown = 1;
1958 }
1959 lck_mtx_lock(&nmp->nm_lock);
1960 if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
1961 /* we're not yet completely mounted and */
1962 /* we can't reconnect, so we fail */
1963 lck_mtx_unlock(&nmp->nm_lock);
1964 NFS_SOCK_DBG("Not mounted returning %d\n", error);
1965 return error;
1966 }
1967
1968 if (nfs_mount_check_dead_timeout(nmp)) {
1969 nfs_mount_make_zombie(nmp);
1970 lck_mtx_unlock(&nmp->nm_lock);
1971 return ENXIO;
1972 }
1973
1974 if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
1975 lck_mtx_unlock(&nmp->nm_lock);
1976 return error;
1977 }
1978 lck_mtx_unlock(&nmp->nm_lock);
1979 tsleep(nfs_reconnect, PSOCK, "nfs_reconnect_delay", 2 * hz);
1980 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
1981 return error;
1982 }
1983 }
1984
1985 if (wentdown) {
1986 nfs_up(nmp, thd, NFSSTA_TIMEO, "connected");
1987 }
1988
1989 /*
1990 * Loop through outstanding request list and mark all requests
1991 * as needing a resend. (Though nfs_need_reconnect() probably
1992 * marked them all already.)
1993 */
1994 lck_mtx_lock(&nfs_request_mutex);
1995 TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
1996 if (rq->r_nmp == nmp) {
1997 lck_mtx_lock(&rq->r_mtx);
1998 if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
1999 rq->r_flags |= R_MUSTRESEND;
2000 rq->r_rtt = -1;
2001 wakeup(rq);
2002 if ((rq->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
2003 nfs_asyncio_resend(rq);
2004 }
2005 }
2006 lck_mtx_unlock(&rq->r_mtx);
2007 }
2008 }
2009 lck_mtx_unlock(&nfs_request_mutex);
2010 return 0;
2011 }
2012
2013 /*
2014 * NFS disconnect. Clean up and unlink.
2015 */
2016 void
nfs_disconnect(struct nfsmount * nmp)2017 nfs_disconnect(struct nfsmount *nmp)
2018 {
2019 struct nfs_socket *nso;
2020
2021 lck_mtx_lock(&nmp->nm_lock);
2022 tryagain:
2023 if (nmp->nm_nso) {
2024 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
2025 if (nmp->nm_state & NFSSTA_SENDING) { /* wait for sending to complete */
2026 nmp->nm_state |= NFSSTA_WANTSND;
2027 msleep(&nmp->nm_state, &nmp->nm_lock, PZERO - 1, "nfswaitsending", &ts);
2028 goto tryagain;
2029 }
2030 if (nmp->nm_sockflags & NMSOCK_POKE) { /* wait for poking to complete */
2031 msleep(&nmp->nm_sockflags, &nmp->nm_lock, PZERO - 1, "nfswaitpoke", &ts);
2032 goto tryagain;
2033 }
2034 nmp->nm_sockflags |= NMSOCK_DISCONNECTING;
2035 nmp->nm_sockflags &= ~NMSOCK_READY;
2036 nso = nmp->nm_nso;
2037 nmp->nm_nso = NULL;
2038 if (nso->nso_saddr == nmp->nm_saddr) {
2039 nso->nso_saddr = NULL;
2040 }
2041 lck_mtx_unlock(&nmp->nm_lock);
2042 nfs_socket_destroy(nso);
2043 lck_mtx_lock(&nmp->nm_lock);
2044 nmp->nm_sockflags &= ~NMSOCK_DISCONNECTING;
2045 lck_mtx_unlock(&nmp->nm_lock);
2046 } else {
2047 lck_mtx_unlock(&nmp->nm_lock);
2048 }
2049 }
2050
2051 /*
2052 * mark an NFS mount as needing a reconnect/resends.
2053 */
2054 void
nfs_need_reconnect(struct nfsmount * nmp)2055 nfs_need_reconnect(struct nfsmount *nmp)
2056 {
2057 struct nfsreq *rq;
2058
2059 lck_mtx_lock(&nmp->nm_lock);
2060 nmp->nm_sockflags &= ~(NMSOCK_READY | NMSOCK_SETUP);
2061 lck_mtx_unlock(&nmp->nm_lock);
2062
2063 /*
2064 * Loop through outstanding request list and
2065 * mark all requests as needing a resend.
2066 */
2067 lck_mtx_lock(&nfs_request_mutex);
2068 TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
2069 if (rq->r_nmp == nmp) {
2070 lck_mtx_lock(&rq->r_mtx);
2071 if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
2072 rq->r_flags |= R_MUSTRESEND;
2073 rq->r_rtt = -1;
2074 wakeup(rq);
2075 if ((rq->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
2076 nfs_asyncio_resend(rq);
2077 }
2078 }
2079 lck_mtx_unlock(&rq->r_mtx);
2080 }
2081 }
2082 lck_mtx_unlock(&nfs_request_mutex);
2083 }
2084
2085
2086 /*
2087 * thread to handle miscellaneous async NFS socket work (reconnects/resends)
2088 */
2089 void
nfs_mount_sock_thread(void * arg,__unused wait_result_t wr)2090 nfs_mount_sock_thread(void *arg, __unused wait_result_t wr)
2091 {
2092 struct nfsmount *nmp = arg;
2093 struct timespec ts = { .tv_sec = 30, .tv_nsec = 0 };
2094 thread_t thd = current_thread();
2095 struct nfsreq *req;
2096 struct timeval now;
2097 int error, dofinish;
2098 nfsnode_t np;
2099 int do_reconnect_sleep = 0;
2100
2101 lck_mtx_lock(&nmp->nm_lock);
2102 while (!(nmp->nm_sockflags & NMSOCK_READY) ||
2103 !TAILQ_EMPTY(&nmp->nm_resendq) ||
2104 !LIST_EMPTY(&nmp->nm_monlist) ||
2105 nmp->nm_deadto_start ||
2106 (nmp->nm_state & NFSSTA_RECOVER) ||
2107 ((nmp->nm_vers >= NFS_VER4) && !TAILQ_EMPTY(&nmp->nm_dreturnq))) {
2108 if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
2109 break;
2110 }
2111 /* do reconnect, if necessary */
2112 if (!(nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
2113 if (nmp->nm_reconnect_start <= 0) {
2114 microuptime(&now);
2115 nmp->nm_reconnect_start = now.tv_sec;
2116 }
2117 lck_mtx_unlock(&nmp->nm_lock);
2118 NFS_SOCK_DBG("nfs reconnect %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
2119 /*
2120 * XXX We don't want to call reconnect again right away if returned errors
2121 * before that may not have blocked. This has caused spamming null procs
2122 * from machines in the pass.
2123 */
2124 if (do_reconnect_sleep) {
2125 tsleep(nfs_mount_sock_thread, PSOCK, "nfs_reconnect_sock_thread_delay", hz);
2126 }
2127 error = nfs_reconnect(nmp);
2128 if (error) {
2129 int lvl = 7;
2130 if (error == EIO || error == EINTR) {
2131 lvl = (do_reconnect_sleep++ % 600) ? 7 : 0;
2132 }
2133 NFSCLNT_DBG(NFSCLNT_FAC_SOCK, lvl, "nfs reconnect %s: returned %d\n",
2134 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
2135 } else {
2136 nmp->nm_reconnect_start = 0;
2137 do_reconnect_sleep = 0;
2138 }
2139 lck_mtx_lock(&nmp->nm_lock);
2140 }
2141 if ((nmp->nm_sockflags & NMSOCK_READY) &&
2142 (nmp->nm_state & NFSSTA_RECOVER) &&
2143 !(nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
2144 !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
2145 /* perform state recovery */
2146 lck_mtx_unlock(&nmp->nm_lock);
2147 nfs_recover(nmp);
2148 lck_mtx_lock(&nmp->nm_lock);
2149 }
2150 #if CONFIG_NFS4
2151 /* handle NFSv4 delegation returns */
2152 while ((nmp->nm_vers >= NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) &&
2153 (nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER) &&
2154 ((np = TAILQ_FIRST(&nmp->nm_dreturnq)))) {
2155 lck_mtx_unlock(&nmp->nm_lock);
2156 nfs4_delegation_return(np, R_RECOVER, thd, nmp->nm_mcred);
2157 lck_mtx_lock(&nmp->nm_lock);
2158 }
2159 #endif
2160 /* do resends, if necessary/possible */
2161 while ((((nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER)) ||
2162 (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) &&
2163 ((req = TAILQ_FIRST(&nmp->nm_resendq)))) {
2164 if (req->r_resendtime) {
2165 microuptime(&now);
2166 }
2167 while (req && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) && req->r_resendtime && (now.tv_sec < req->r_resendtime)) {
2168 req = TAILQ_NEXT(req, r_rchain);
2169 }
2170 if (!req) {
2171 break;
2172 }
2173 /* acquire both locks in the right order: first req->r_mtx and then nmp->nm_lock */
2174 lck_mtx_unlock(&nmp->nm_lock);
2175 lck_mtx_lock(&req->r_mtx);
2176 lck_mtx_lock(&nmp->nm_lock);
2177 if ((req->r_flags & R_RESENDQ) == 0 || (req->r_rchain.tqe_next == NFSREQNOLIST)) {
2178 lck_mtx_unlock(&req->r_mtx);
2179 continue;
2180 }
2181 TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
2182 req->r_flags &= ~R_RESENDQ;
2183 req->r_rchain.tqe_next = NFSREQNOLIST;
2184 lck_mtx_unlock(&nmp->nm_lock);
2185 /* Note that we have a reference on the request that was taken nfs_asyncio_resend */
2186 if (req->r_error || req->r_nmrep.nmc_mhead) {
2187 dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
2188 wakeup(req);
2189 lck_mtx_unlock(&req->r_mtx);
2190 if (dofinish) {
2191 nfs_asyncio_finish(req);
2192 }
2193 nfs_request_rele(req);
2194 lck_mtx_lock(&nmp->nm_lock);
2195 continue;
2196 }
2197 if ((req->r_flags & R_RESTART) || nfs_request_using_gss(req)) {
2198 req->r_flags &= ~R_RESTART;
2199 req->r_resendtime = 0;
2200 lck_mtx_unlock(&req->r_mtx);
2201 /* async RPCs on GSS mounts need to be rebuilt and resent. */
2202 nfs_reqdequeue(req);
2203 #if CONFIG_NFS_GSS
2204 if (nfs_request_using_gss(req)) {
2205 nfs_gss_clnt_rpcdone(req);
2206 error = nfs_gss_clnt_args_restore(req);
2207 if (error == ENEEDAUTH) {
2208 req->r_xid = 0;
2209 }
2210 }
2211 #endif /* CONFIG_NFS_GSS */
2212 NFS_SOCK_DBG("nfs async%s restart: p %d x 0x%llx f 0x%x rtt %d\n",
2213 nfs_request_using_gss(req) ? " gss" : "", req->r_procnum, req->r_xid,
2214 req->r_flags, req->r_rtt);
2215 error = nfs_sigintr(nmp, req, req->r_thread, 0);
2216 if (!error) {
2217 error = nfs_request_add_header(req);
2218 }
2219 if (!error) {
2220 error = nfs_request_send(req, 0);
2221 }
2222 lck_mtx_lock(&req->r_mtx);
2223 if (error) {
2224 req->r_error = error;
2225 }
2226 wakeup(req);
2227 dofinish = error && req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
2228 lck_mtx_unlock(&req->r_mtx);
2229 if (dofinish) {
2230 nfs_asyncio_finish(req);
2231 }
2232 nfs_request_rele(req);
2233 lck_mtx_lock(&nmp->nm_lock);
2234 error = 0;
2235 continue;
2236 }
2237 NFS_SOCK_DBG("nfs async resend: p %d x 0x%llx f 0x%x rtt %d\n",
2238 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
2239 error = nfs_sigintr(nmp, req, req->r_thread, 0);
2240 if (!error) {
2241 req->r_flags |= R_SENDING;
2242 lck_mtx_unlock(&req->r_mtx);
2243 error = nfs_send(req, 0);
2244 lck_mtx_lock(&req->r_mtx);
2245 if (!error) {
2246 wakeup(req);
2247 lck_mtx_unlock(&req->r_mtx);
2248 nfs_request_rele(req);
2249 lck_mtx_lock(&nmp->nm_lock);
2250 continue;
2251 }
2252 }
2253 req->r_error = error;
2254 wakeup(req);
2255 dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
2256 lck_mtx_unlock(&req->r_mtx);
2257 if (dofinish) {
2258 nfs_asyncio_finish(req);
2259 }
2260 nfs_request_rele(req);
2261 lck_mtx_lock(&nmp->nm_lock);
2262 }
2263 if (nfs_mount_check_dead_timeout(nmp)) {
2264 nfs_mount_make_zombie(nmp);
2265 break;
2266 }
2267
2268 if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
2269 break;
2270 }
2271 /* check monitored nodes, if necessary/possible */
2272 if (!LIST_EMPTY(&nmp->nm_monlist)) {
2273 nmp->nm_state |= NFSSTA_MONITOR_SCAN;
2274 LIST_FOREACH(np, &nmp->nm_monlist, n_monlink) {
2275 if (!(nmp->nm_sockflags & NMSOCK_READY) ||
2276 (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) {
2277 break;
2278 }
2279 np->n_mflag |= NMMONSCANINPROG;
2280 lck_mtx_unlock(&nmp->nm_lock);
2281 error = nfs_getattr(np, NULL, vfs_context_kernel(), (NGA_UNCACHED | NGA_MONITOR));
2282 if (!error && ISSET(np->n_flag, NUPDATESIZE)) { /* update quickly to avoid multiple events */
2283 nfs_data_update_size(np, 0);
2284 }
2285 lck_mtx_lock(&nmp->nm_lock);
2286 np->n_mflag &= ~NMMONSCANINPROG;
2287 if (np->n_mflag & NMMONSCANWANT) {
2288 np->n_mflag &= ~NMMONSCANWANT;
2289 wakeup(&np->n_mflag);
2290 }
2291 if (error || !(nmp->nm_sockflags & NMSOCK_READY) ||
2292 (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) {
2293 break;
2294 }
2295 }
2296 nmp->nm_state &= ~NFSSTA_MONITOR_SCAN;
2297 if (nmp->nm_state & NFSSTA_UNMOUNTING) {
2298 wakeup(&nmp->nm_state); /* let unmounting thread know scan is done */
2299 }
2300 }
2301 if ((nmp->nm_sockflags & NMSOCK_READY) || (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING))) {
2302 if (nmp->nm_deadto_start || !TAILQ_EMPTY(&nmp->nm_resendq) ||
2303 (nmp->nm_state & NFSSTA_RECOVER)) {
2304 ts.tv_sec = 1;
2305 } else {
2306 ts.tv_sec = 5;
2307 }
2308 msleep(&nmp->nm_sockthd, &nmp->nm_lock, PSOCK, "nfssockthread", &ts);
2309 }
2310 }
2311
2312 /* If we're unmounting, send the unmount RPC, if requested/appropriate. */
2313 if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
2314 (nmp->nm_state & NFSSTA_MOUNTED) && NMFLAG(nmp, CALLUMNT) &&
2315 (nmp->nm_vers < NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
2316 lck_mtx_unlock(&nmp->nm_lock);
2317 nfs3_umount_rpc(nmp, vfs_context_kernel(),
2318 (nmp->nm_sockflags & NMSOCK_READY) ? 6 : 2);
2319 lck_mtx_lock(&nmp->nm_lock);
2320 }
2321
2322 if (nmp->nm_sockthd == thd) {
2323 nmp->nm_sockthd = NULL;
2324 }
2325 lck_mtx_unlock(&nmp->nm_lock);
2326 wakeup(&nmp->nm_sockthd);
2327 thread_terminate(thd);
2328 }
2329
2330 /* start or wake a mount's socket thread */
2331 void
nfs_mount_sock_thread_wake(struct nfsmount * nmp)2332 nfs_mount_sock_thread_wake(struct nfsmount *nmp)
2333 {
2334 if (nmp->nm_sockthd) {
2335 wakeup(&nmp->nm_sockthd);
2336 } else if (kernel_thread_start(nfs_mount_sock_thread, nmp, &nmp->nm_sockthd) == KERN_SUCCESS) {
2337 thread_deallocate(nmp->nm_sockthd);
2338 }
2339 }
2340
2341 /*
2342 * Check if we should mark the mount dead because the
2343 * unresponsive mount has reached the dead timeout.
2344 * (must be called with nmp locked)
2345 */
2346 int
nfs_mount_check_dead_timeout(struct nfsmount * nmp)2347 nfs_mount_check_dead_timeout(struct nfsmount *nmp)
2348 {
2349 struct timeval now;
2350
2351 if (nmp->nm_state & NFSSTA_DEAD) {
2352 return 1;
2353 }
2354 if (nmp->nm_deadto_start == 0) {
2355 return 0;
2356 }
2357 nfs_is_squishy(nmp);
2358 if (nmp->nm_curdeadtimeout <= 0) {
2359 return 0;
2360 }
2361 microuptime(&now);
2362 if ((now.tv_sec - nmp->nm_deadto_start) < nmp->nm_curdeadtimeout) {
2363 return 0;
2364 }
2365 return 1;
2366 }
2367
2368 /*
2369 * Call nfs_mount_zombie to remove most of the
2370 * nfs state for the mount, and then ask to be forcibly unmounted.
2371 *
2372 * Assumes the nfs mount structure lock nm_lock is held.
2373 */
2374
2375 void
nfs_mount_make_zombie(struct nfsmount * nmp)2376 nfs_mount_make_zombie(struct nfsmount *nmp)
2377 {
2378 fsid_t fsid;
2379
2380 if (!nmp) {
2381 return;
2382 }
2383
2384 if (nmp->nm_state & NFSSTA_DEAD) {
2385 return;
2386 }
2387
2388 printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
2389 (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
2390 fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
2391 lck_mtx_unlock(&nmp->nm_lock);
2392 nfs_mount_zombie(nmp, NFSSTA_DEAD);
2393 vfs_event_signal(&fsid, VQ_DEAD, 0);
2394 lck_mtx_lock(&nmp->nm_lock);
2395 }
2396
2397
2398 /*
2399 * NFS callback channel socket state
2400 */
2401 struct nfs_callback_socket {
2402 TAILQ_ENTRY(nfs_callback_socket) ncbs_link;
2403 socket_t ncbs_so; /* the socket */
2404 struct sockaddr_storage ncbs_saddr; /* socket address */
2405 struct nfs_rpc_record_state ncbs_rrs; /* RPC record parsing state */
2406 time_t ncbs_stamp; /* last accessed at */
2407 uint32_t ncbs_flags; /* see below */
2408 };
2409 #define NCBSOCK_UPCALL 0x0001
2410 #define NCBSOCK_UPCALLWANT 0x0002
2411 #define NCBSOCK_DEAD 0x0004
2412
#if CONFIG_NFS4
/*
 * NFS callback channel state
 *
 * One listening socket for accepting socket connections from servers and
 * a list of connected sockets to handle callback requests on.
 * Mounts registered with the callback channel are assigned IDs and
 * put on a list so that the callback request handling code can match
 * the requests up with mounts.
 */
socket_t nfs4_cb_so = NULL; /* IPv4 listening socket */
socket_t nfs4_cb_so6 = NULL; /* IPv6 listening socket */
in_port_t nfs4_cb_port = 0; /* port the IPv4 listener is bound to */
in_port_t nfs4_cb_port6 = 0; /* port the IPv6 listener is bound to */
uint32_t nfs4_cb_id = 0; /* callback ID generator (0 is never handed out) */
uint32_t nfs4_cb_so_usecount = 0; /* number of mounts using the callback channel */
TAILQ_HEAD(nfs4_cb_sock_list, nfs_callback_socket) nfs4_cb_socks; /* accepted callback connections */
TAILQ_HEAD(nfs4_cb_mount_list, nfsmount) nfs4_cb_mounts; /* mounts registered for callbacks */

int nfs4_cb_handler(struct nfs_callback_socket *, mbuf_t);
2433
2434 /*
2435 * Set up the callback channel for the NFS mount.
2436 *
2437 * Initializes the callback channel socket state and
2438 * assigns a callback ID to the mount.
2439 */
2440 void
nfs4_mount_callback_setup(struct nfsmount * nmp)2441 nfs4_mount_callback_setup(struct nfsmount *nmp)
2442 {
2443 struct sockaddr_in sin;
2444 struct sockaddr_in6 sin6;
2445 socket_t so = NULL;
2446 socket_t so6 = NULL;
2447 struct timeval timeo;
2448 int error, on = 1;
2449 in_port_t port;
2450
2451 lck_mtx_lock(&nfs_global_mutex);
2452 if (nfs4_cb_id == 0) {
2453 TAILQ_INIT(&nfs4_cb_mounts);
2454 TAILQ_INIT(&nfs4_cb_socks);
2455 nfs4_cb_id++;
2456 }
2457 nmp->nm_cbid = nfs4_cb_id++;
2458 if (nmp->nm_cbid == 0) {
2459 nmp->nm_cbid = nfs4_cb_id++;
2460 }
2461 nfs4_cb_so_usecount++;
2462 TAILQ_INSERT_HEAD(&nfs4_cb_mounts, nmp, nm_cblink);
2463
2464 if (nfs4_cb_so) {
2465 lck_mtx_unlock(&nfs_global_mutex);
2466 return;
2467 }
2468
2469 /* IPv4 */
2470 error = sock_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so);
2471 if (error) {
2472 log(LOG_INFO, "nfs callback setup: error %d creating listening IPv4 socket\n", error);
2473 goto fail;
2474 }
2475 so = nfs4_cb_so;
2476
2477 if (NFS_PORT_INVALID(nfs_callback_port)) {
2478 error = EINVAL;
2479 log(LOG_INFO, "nfs callback setup: error %d nfs_callback_port %d is not valid\n", error, nfs_callback_port);
2480 goto fail;
2481 }
2482
2483 sock_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
2484 sin.sin_len = sizeof(struct sockaddr_in);
2485 sin.sin_family = AF_INET;
2486 sin.sin_addr.s_addr = htonl(INADDR_ANY);
2487 sin.sin_port = htons((in_port_t)nfs_callback_port); /* try to use specified port */
2488 error = sock_bind(so, (struct sockaddr *)&sin);
2489 if (error) {
2490 log(LOG_INFO, "nfs callback setup: error %d binding listening IPv4 socket\n", error);
2491 goto fail;
2492 }
2493 error = sock_getsockname(so, (struct sockaddr *)&sin, sin.sin_len);
2494 if (error) {
2495 log(LOG_INFO, "nfs callback setup: error %d getting listening IPv4 socket port\n", error);
2496 goto fail;
2497 }
2498 nfs4_cb_port = ntohs(sin.sin_port);
2499
2500 error = sock_listen(so, 32);
2501 if (error) {
2502 log(LOG_INFO, "nfs callback setup: error %d on IPv4 listen\n", error);
2503 goto fail;
2504 }
2505
2506 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2507 timeo.tv_usec = 0;
2508 timeo.tv_sec = 60;
2509 error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
2510 if (error) {
2511 log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket rx timeout\n", error);
2512 }
2513 error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
2514 if (error) {
2515 log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket tx timeout\n", error);
2516 }
2517 sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
2518 sock_setsockopt(so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
2519 sock_setsockopt(so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
2520 error = 0;
2521
2522 /* IPv6 */
2523 error = sock_socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so6);
2524 if (error) {
2525 log(LOG_INFO, "nfs callback setup: error %d creating listening IPv6 socket\n", error);
2526 goto fail;
2527 }
2528 so6 = nfs4_cb_so6;
2529
2530 sock_setsockopt(so6, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
2531 sock_setsockopt(so6, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
2532 /* try to use specified port or same port as IPv4 */
2533 port = nfs_callback_port ? (in_port_t)nfs_callback_port : nfs4_cb_port;
2534 ipv6_bind_again:
2535 sin6.sin6_len = sizeof(struct sockaddr_in6);
2536 sin6.sin6_family = AF_INET6;
2537 sin6.sin6_addr = in6addr_any;
2538 sin6.sin6_port = htons(port);
2539 error = sock_bind(so6, (struct sockaddr *)&sin6);
2540 if (error) {
2541 if (port != nfs_callback_port) {
2542 /* if we simply tried to match the IPv4 port, then try any port */
2543 port = 0;
2544 goto ipv6_bind_again;
2545 }
2546 log(LOG_INFO, "nfs callback setup: error %d binding listening IPv6 socket\n", error);
2547 goto fail;
2548 }
2549 error = sock_getsockname(so6, (struct sockaddr *)&sin6, sin6.sin6_len);
2550 if (error) {
2551 log(LOG_INFO, "nfs callback setup: error %d getting listening IPv6 socket port\n", error);
2552 goto fail;
2553 }
2554 nfs4_cb_port6 = ntohs(sin6.sin6_port);
2555
2556 error = sock_listen(so6, 32);
2557 if (error) {
2558 log(LOG_INFO, "nfs callback setup: error %d on IPv6 listen\n", error);
2559 goto fail;
2560 }
2561
2562 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2563 timeo.tv_usec = 0;
2564 timeo.tv_sec = 60;
2565 error = sock_setsockopt(so6, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
2566 if (error) {
2567 log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket rx timeout\n", error);
2568 }
2569 error = sock_setsockopt(so6, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
2570 if (error) {
2571 log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket tx timeout\n", error);
2572 }
2573 sock_setsockopt(so6, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
2574 sock_setsockopt(so6, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
2575 sock_setsockopt(so6, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
2576 error = 0;
2577
2578 fail:
2579 if (error) {
2580 nfs4_cb_so = nfs4_cb_so6 = NULL;
2581 lck_mtx_unlock(&nfs_global_mutex);
2582 if (so) {
2583 sock_shutdown(so, SHUT_RDWR);
2584 sock_close(so);
2585 }
2586 if (so6) {
2587 sock_shutdown(so6, SHUT_RDWR);
2588 sock_close(so6);
2589 }
2590 } else {
2591 lck_mtx_unlock(&nfs_global_mutex);
2592 }
2593 }
2594
2595 /*
2596 * Shut down the callback channel for the NFS mount.
2597 *
2598 * Clears the mount's callback ID and releases the mounts
2599 * reference on the callback socket. Last reference dropped
2600 * will also shut down the callback socket(s).
2601 */
2602 void
nfs4_mount_callback_shutdown(struct nfsmount * nmp)2603 nfs4_mount_callback_shutdown(struct nfsmount *nmp)
2604 {
2605 struct nfs_callback_socket *ncbsp;
2606 socket_t so, so6;
2607 struct nfs4_cb_sock_list cb_socks;
2608 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
2609
2610 lck_mtx_lock(&nfs_global_mutex);
2611 if (nmp->nm_cbid == 0) {
2612 lck_mtx_unlock(&nfs_global_mutex);
2613 return;
2614 }
2615 TAILQ_REMOVE(&nfs4_cb_mounts, nmp, nm_cblink);
2616 /* wait for any callbacks in progress to complete */
2617 while (nmp->nm_cbrefs) {
2618 msleep(&nmp->nm_cbrefs, &nfs_global_mutex, PSOCK, "cbshutwait", &ts);
2619 }
2620 nmp->nm_cbid = 0;
2621 if (--nfs4_cb_so_usecount) {
2622 lck_mtx_unlock(&nfs_global_mutex);
2623 return;
2624 }
2625 so = nfs4_cb_so;
2626 so6 = nfs4_cb_so6;
2627 nfs4_cb_so = nfs4_cb_so6 = NULL;
2628 TAILQ_INIT(&cb_socks);
2629 TAILQ_CONCAT(&cb_socks, &nfs4_cb_socks, ncbs_link);
2630 lck_mtx_unlock(&nfs_global_mutex);
2631 if (so) {
2632 sock_shutdown(so, SHUT_RDWR);
2633 sock_close(so);
2634 }
2635 if (so6) {
2636 sock_shutdown(so6, SHUT_RDWR);
2637 sock_close(so6);
2638 }
2639 while ((ncbsp = TAILQ_FIRST(&cb_socks))) {
2640 TAILQ_REMOVE(&cb_socks, ncbsp, ncbs_link);
2641 sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
2642 sock_close(ncbsp->ncbs_so);
2643 nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
2644 kfree_type(struct nfs_callback_socket, ncbsp);
2645 }
2646 }
2647
2648 /*
2649 * Check periodically for stale/unused nfs callback sockets
2650 */
2651 #define NFS4_CB_TIMER_PERIOD 30
2652 #define NFS4_CB_IDLE_MAX 300
2653 void
nfs4_callback_timer(__unused void * param0,__unused void * param1)2654 nfs4_callback_timer(__unused void *param0, __unused void *param1)
2655 {
2656 struct nfs_callback_socket *ncbsp, *nextncbsp;
2657 struct timeval now;
2658
2659 loop:
2660 lck_mtx_lock(&nfs_global_mutex);
2661 if (TAILQ_EMPTY(&nfs4_cb_socks)) {
2662 nfs4_callback_timer_on = 0;
2663 lck_mtx_unlock(&nfs_global_mutex);
2664 return;
2665 }
2666 microuptime(&now);
2667 TAILQ_FOREACH_SAFE(ncbsp, &nfs4_cb_socks, ncbs_link, nextncbsp) {
2668 if (!(ncbsp->ncbs_flags & NCBSOCK_DEAD) &&
2669 (now.tv_sec < (ncbsp->ncbs_stamp + NFS4_CB_IDLE_MAX))) {
2670 continue;
2671 }
2672 TAILQ_REMOVE(&nfs4_cb_socks, ncbsp, ncbs_link);
2673 lck_mtx_unlock(&nfs_global_mutex);
2674 sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
2675 sock_close(ncbsp->ncbs_so);
2676 nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
2677 kfree_type(struct nfs_callback_socket, ncbsp);
2678 goto loop;
2679 }
2680 nfs4_callback_timer_on = 1;
2681 nfs_interval_timer_start(nfs4_callback_timer_call,
2682 NFS4_CB_TIMER_PERIOD * 1000);
2683 lck_mtx_unlock(&nfs_global_mutex);
2684 }
2685
2686 /*
2687 * Accept a new callback socket.
2688 */
2689 void
nfs4_cb_accept(socket_t so,__unused void * arg,__unused int waitflag)2690 nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag)
2691 {
2692 socket_t newso = NULL;
2693 struct nfs_callback_socket *ncbsp;
2694 struct nfsmount *nmp;
2695 struct timeval timeo, now;
2696 int error, on = 1, ip;
2697
2698 if (so == nfs4_cb_so) {
2699 ip = 4;
2700 } else if (so == nfs4_cb_so6) {
2701 ip = 6;
2702 } else {
2703 return;
2704 }
2705
2706 /* allocate/initialize a new nfs_callback_socket */
2707 ncbsp = kalloc_type(struct nfs_callback_socket,
2708 Z_WAITOK | Z_ZERO | Z_NOFAIL);
2709 ncbsp->ncbs_saddr.ss_len = (ip == 4) ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
2710 nfs_rpc_record_state_init(&ncbsp->ncbs_rrs);
2711
2712 /* accept a new socket */
2713 error = sock_accept(so, (struct sockaddr*)&ncbsp->ncbs_saddr,
2714 ncbsp->ncbs_saddr.ss_len, MSG_DONTWAIT,
2715 nfs4_cb_rcv, ncbsp, &newso);
2716 if (error) {
2717 log(LOG_INFO, "nfs callback accept: error %d accepting IPv%d socket\n", error, ip);
2718 kfree_type(struct nfs_callback_socket, ncbsp);
2719 return;
2720 }
2721
2722 /* set up the new socket */
2723 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2724 timeo.tv_usec = 0;
2725 timeo.tv_sec = 60;
2726 error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
2727 if (error) {
2728 log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket rx timeout\n", error, ip);
2729 }
2730 error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
2731 if (error) {
2732 log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket tx timeout\n", error, ip);
2733 }
2734 sock_setsockopt(newso, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
2735 sock_setsockopt(newso, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
2736 sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
2737 sock_setsockopt(newso, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
2738
2739 ncbsp->ncbs_so = newso;
2740 microuptime(&now);
2741 ncbsp->ncbs_stamp = now.tv_sec;
2742
2743 lck_mtx_lock(&nfs_global_mutex);
2744
2745 /* add it to the list */
2746 TAILQ_INSERT_HEAD(&nfs4_cb_socks, ncbsp, ncbs_link);
2747
2748 /* verify it's from a host we have mounted */
2749 TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
2750 /* check if socket's source address matches this mount's server address */
2751 if (!nmp->nm_saddr) {
2752 continue;
2753 }
2754 if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) {
2755 break;
2756 }
2757 }
2758 if (!nmp) { /* we don't want this socket, mark it dead */
2759 ncbsp->ncbs_flags |= NCBSOCK_DEAD;
2760 }
2761
2762 /* make sure the callback socket cleanup timer is running */
2763 /* (shorten the timer if we've got a socket we don't want) */
2764 if (!nfs4_callback_timer_on) {
2765 nfs4_callback_timer_on = 1;
2766 nfs_interval_timer_start(nfs4_callback_timer_call,
2767 !nmp ? 500 : (NFS4_CB_TIMER_PERIOD * 1000));
2768 } else if (!nmp && (nfs4_callback_timer_on < 2)) {
2769 nfs4_callback_timer_on = 2;
2770 thread_call_cancel(nfs4_callback_timer_call);
2771 nfs_interval_timer_start(nfs4_callback_timer_call, 500);
2772 }
2773
2774 lck_mtx_unlock(&nfs_global_mutex);
2775 }
2776
2777 /*
2778 * Receive mbufs from callback sockets into RPC records and process each record.
2779 * Detect connection has been closed and shut down.
2780 */
void
nfs4_cb_rcv(socket_t so, void *arg, __unused int waitflag)
{
	struct nfs_callback_socket *ncbsp = arg;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct timeval now;
	mbuf_t m;
	int error = 0, recv = 1;

	/*
	 * Serialize upcalls on this callback socket: only one thread may be
	 * inside the receive/handle loop at a time (NCBSOCK_UPCALL guards it).
	 */
	lck_mtx_lock(&nfs_global_mutex);
	while (ncbsp->ncbs_flags & NCBSOCK_UPCALL) {
		/* wait if upcall is already in progress */
		ncbsp->ncbs_flags |= NCBSOCK_UPCALLWANT;
		msleep(ncbsp, &nfs_global_mutex, PSOCK, "cbupcall", &ts);
	}
	ncbsp->ncbs_flags |= NCBSOCK_UPCALL;
	lck_mtx_unlock(&nfs_global_mutex);

	/* loop while we make error-free progress */
	while (!error && recv) {
		error = nfs_rpc_record_read(so, &ncbsp->ncbs_rrs, MSG_DONTWAIT, &recv, &m);
		if (m) { /* handle the request (nfs4_cb_handler consumes/frees m) */
			error = nfs4_cb_handler(ncbsp, m);
		}
	}

	/* note: no error and no data indicates server closed its end */
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/*
		 * Socket is either being closed or should be.
		 * We can't close the socket in the context of the upcall.
		 * So we mark it as dead and leave it for the cleanup timer to reap.
		 */
		ncbsp->ncbs_stamp = 0;
		ncbsp->ncbs_flags |= NCBSOCK_DEAD;
	} else {
		/* still healthy: refresh the idle timestamp */
		microuptime(&now);
		ncbsp->ncbs_stamp = now.tv_sec;
	}

	/* drop the upcall guard and wake anyone waiting to run theirs */
	lck_mtx_lock(&nfs_global_mutex);
	ncbsp->ncbs_flags &= ~NCBSOCK_UPCALL;
	lck_mtx_unlock(&nfs_global_mutex);
	wakeup(ncbsp);
}
2826
2827 /*
2828 * Handle an NFS callback channel request.
2829 */
2830 int
nfs4_cb_handler(struct nfs_callback_socket * ncbsp,mbuf_t mreq)2831 nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
2832 {
2833 socket_t so = ncbsp->ncbs_so;
2834 struct nfsm_chain nmreq, nmrep;
2835 mbuf_t mhead = NULL, mrest = NULL, m;
2836 struct msghdr msg;
2837 struct nfsmount *nmp;
2838 fhandle_t *fh;
2839 nfsnode_t np;
2840 nfs_stateid stateid;
2841 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], rbitmap[NFS_ATTR_BITMAP_LEN], bmlen, truncate, attrbytes;
2842 uint32_t val, xid, procnum, taglen, cbid, numops, op, status;
2843 uint32_t auth_type, auth_len;
2844 uint32_t numres, *pnumres;
2845 int error = 0, replen, len;
2846 size_t sentlen = 0;
2847
2848 xid = numops = op = status = procnum = taglen = cbid = 0;
2849 fh = zalloc(nfs_fhandle_zone);
2850
2851 nfsm_chain_dissect_init(error, &nmreq, mreq);
2852 nfsm_chain_get_32(error, &nmreq, xid); // RPC XID
2853 nfsm_chain_get_32(error, &nmreq, val); // RPC Call
2854 nfsm_assert(error, (val == RPC_CALL), EBADRPC);
2855 nfsm_chain_get_32(error, &nmreq, val); // RPC Version
2856 nfsm_assert(error, (val == RPC_VER2), ERPCMISMATCH);
2857 nfsm_chain_get_32(error, &nmreq, val); // RPC Program Number
2858 nfsm_assert(error, (val == NFS4_CALLBACK_PROG), EPROGUNAVAIL);
2859 nfsm_chain_get_32(error, &nmreq, val); // NFS Callback Program Version Number
2860 nfsm_assert(error, (val == NFS4_CALLBACK_PROG_VERSION), EPROGMISMATCH);
2861 nfsm_chain_get_32(error, &nmreq, procnum); // NFS Callback Procedure Number
2862 nfsm_assert(error, (procnum <= NFSPROC4_CB_COMPOUND), EPROCUNAVAIL);
2863
2864 /* Handle authentication */
2865 /* XXX just ignore auth for now - handling kerberos may be tricky */
2866 nfsm_chain_get_32(error, &nmreq, auth_type); // RPC Auth Flavor
2867 nfsm_chain_get_32(error, &nmreq, auth_len); // RPC Auth Length
2868 nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
2869 if (!error && (auth_len > 0)) {
2870 nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
2871 }
2872 nfsm_chain_adv(error, &nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE)
2873 nfsm_chain_get_32(error, &nmreq, auth_len); // verifier length
2874 nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
2875 if (!error && (auth_len > 0)) {
2876 nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
2877 }
2878 if (error) {
2879 status = error;
2880 error = 0;
2881 goto nfsmout;
2882 }
2883
2884 switch (procnum) {
2885 case NFSPROC4_CB_NULL:
2886 status = NFSERR_RETVOID;
2887 break;
2888 case NFSPROC4_CB_COMPOUND:
2889 /* tag, minorversion, cb ident, numops, op array */
2890 nfsm_chain_get_32(error, &nmreq, taglen); /* tag length */
2891 nfsm_assert(error, (val <= NFS4_OPAQUE_LIMIT), EBADRPC);
2892
2893 /* start building the body of the response */
2894 nfsm_mbuf_get(error, &mrest, nfsm_rndup(taglen) + 5 * NFSX_UNSIGNED);
2895 nfsm_chain_init(&nmrep, mrest);
2896
2897 /* copy tag from request to response */
2898 nfsm_chain_add_32(error, &nmrep, taglen); /* tag length */
2899 for (len = (int)taglen; !error && (len > 0); len -= NFSX_UNSIGNED) {
2900 nfsm_chain_get_32(error, &nmreq, val);
2901 nfsm_chain_add_32(error, &nmrep, val);
2902 }
2903
2904 /* insert number of results placeholder */
2905 numres = 0;
2906 nfsm_chain_add_32(error, &nmrep, numres);
2907 pnumres = (uint32_t*)(nmrep.nmc_ptr - NFSX_UNSIGNED);
2908
2909 nfsm_chain_get_32(error, &nmreq, val); /* minorversion */
2910 nfsm_assert(error, (val == 0), NFSERR_MINOR_VERS_MISMATCH);
2911 nfsm_chain_get_32(error, &nmreq, cbid); /* callback ID */
2912 nfsm_chain_get_32(error, &nmreq, numops); /* number of operations */
2913 if (error) {
2914 if ((error == EBADRPC) || (error == NFSERR_MINOR_VERS_MISMATCH)) {
2915 status = error;
2916 } else if ((error == ENOBUFS) || (error == ENOMEM)) {
2917 status = NFSERR_RESOURCE;
2918 } else {
2919 status = NFSERR_SERVERFAULT;
2920 }
2921 error = 0;
2922 nfsm_chain_null(&nmrep);
2923 goto nfsmout;
2924 }
2925 /* match the callback ID to a registered mount */
2926 lck_mtx_lock(&nfs_global_mutex);
2927 TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
2928 if (nmp->nm_cbid != cbid) {
2929 continue;
2930 }
2931 /* verify socket's source address matches this mount's server address */
2932 if (!nmp->nm_saddr) {
2933 continue;
2934 }
2935 if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) {
2936 break;
2937 }
2938 }
2939 /* mark the NFS mount as busy */
2940 if (nmp) {
2941 nmp->nm_cbrefs++;
2942 }
2943 lck_mtx_unlock(&nfs_global_mutex);
2944 if (!nmp) {
2945 /* if no mount match, just drop socket. */
2946 error = EPERM;
2947 nfsm_chain_null(&nmrep);
2948 goto out;
2949 }
2950
2951 /* process ops, adding results to mrest */
2952 while (numops > 0) {
2953 numops--;
2954 nfsm_chain_get_32(error, &nmreq, op);
2955 if (error) {
2956 break;
2957 }
2958 switch (op) {
2959 case NFS_OP_CB_GETATTR:
2960 // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS)
2961 np = NULL;
2962 nfsm_chain_get_fh(error, &nmreq, NFS_VER4, fh);
2963 bmlen = NFS_ATTR_BITMAP_LEN;
2964 nfsm_chain_get_bitmap(error, &nmreq, bitmap, bmlen);
2965 if (error) {
2966 status = error;
2967 error = 0;
2968 numops = 0; /* don't process any more ops */
2969 } else {
2970 /* find the node for the file handle */
2971 error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh->fh_data, fh->fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
2972 if (error || !np) {
2973 status = NFSERR_BADHANDLE;
2974 error = 0;
2975 np = NULL;
2976 numops = 0; /* don't process any more ops */
2977 }
2978 }
2979 nfsm_chain_add_32(error, &nmrep, op);
2980 nfsm_chain_add_32(error, &nmrep, status);
2981 if (!error && (status == EBADRPC)) {
2982 error = status;
2983 }
2984 if (np) {
2985 /* only allow returning size, change, and mtime attrs */
2986 NFS_CLEAR_ATTRIBUTES(&rbitmap);
2987 attrbytes = 0;
2988 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
2989 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_CHANGE);
2990 attrbytes += 2 * NFSX_UNSIGNED;
2991 }
2992 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
2993 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_SIZE);
2994 attrbytes += 2 * NFSX_UNSIGNED;
2995 }
2996 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
2997 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_TIME_MODIFY);
2998 attrbytes += 3 * NFSX_UNSIGNED;
2999 }
3000 nfsm_chain_add_bitmap(error, &nmrep, rbitmap, NFS_ATTR_BITMAP_LEN);
3001 nfsm_chain_add_32(error, &nmrep, attrbytes);
3002 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
3003 nfsm_chain_add_64(error, &nmrep,
3004 np->n_vattr.nva_change + ((np->n_flag & NMODIFIED) ? 1 : 0));
3005 }
3006 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
3007 nfsm_chain_add_64(error, &nmrep, np->n_size);
3008 }
3009 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
3010 nfsm_chain_add_64(error, &nmrep, np->n_vattr.nva_timesec[NFSTIME_MODIFY]);
3011 nfsm_chain_add_32(error, &nmrep, np->n_vattr.nva_timensec[NFSTIME_MODIFY]);
3012 }
3013 nfs_node_unlock(np);
3014 vnode_put(NFSTOV(np));
3015 np = NULL;
3016 }
3017 /*
3018 * If we hit an error building the reply, we can't easily back up.
3019 * So we'll just update the status and hope the server ignores the
3020 * extra garbage.
3021 */
3022 break;
3023 case NFS_OP_CB_RECALL:
3024 // (STATEID, TRUNCATE, FH) -> (STATUS)
3025 np = NULL;
3026 nfsm_chain_get_stateid(error, &nmreq, &stateid);
3027 nfsm_chain_get_32(error, &nmreq, truncate);
3028 nfsm_chain_get_fh(error, &nmreq, NFS_VER4, fh);
3029 if (error) {
3030 status = error;
3031 error = 0;
3032 numops = 0; /* don't process any more ops */
3033 } else {
3034 /* find the node for the file handle */
3035 error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh->fh_data, fh->fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
3036 if (error || !np) {
3037 status = NFSERR_BADHANDLE;
3038 error = 0;
3039 np = NULL;
3040 numops = 0; /* don't process any more ops */
3041 } else if (!(np->n_openflags & N_DELEG_MASK) ||
3042 bcmp(&np->n_dstateid, &stateid, sizeof(stateid))) {
3043 /* delegation stateid state doesn't match */
3044 status = NFSERR_BAD_STATEID;
3045 numops = 0; /* don't process any more ops */
3046 }
3047 if (!status) { /* add node to recall queue, and wake socket thread */
3048 nfs4_delegation_return_enqueue(np);
3049 }
3050 if (np) {
3051 nfs_node_unlock(np);
3052 vnode_put(NFSTOV(np));
3053 }
3054 }
3055 nfsm_chain_add_32(error, &nmrep, op);
3056 nfsm_chain_add_32(error, &nmrep, status);
3057 if (!error && (status == EBADRPC)) {
3058 error = status;
3059 }
3060 break;
3061 case NFS_OP_CB_ILLEGAL:
3062 default:
3063 nfsm_chain_add_32(error, &nmrep, NFS_OP_CB_ILLEGAL);
3064 status = NFSERR_OP_ILLEGAL;
3065 nfsm_chain_add_32(error, &nmrep, status);
3066 numops = 0; /* don't process any more ops */
3067 break;
3068 }
3069 numres++;
3070 }
3071
3072 if (!status && error) {
3073 if (error == EBADRPC) {
3074 status = error;
3075 } else if ((error == ENOBUFS) || (error == ENOMEM)) {
3076 status = NFSERR_RESOURCE;
3077 } else {
3078 status = NFSERR_SERVERFAULT;
3079 }
3080 error = 0;
3081 }
3082
3083 /* Now, set the numres field */
3084 *pnumres = txdr_unsigned(numres);
3085 nfsm_chain_build_done(error, &nmrep);
3086 nfsm_chain_null(&nmrep);
3087
3088 /* drop the callback reference on the mount */
3089 lck_mtx_lock(&nfs_global_mutex);
3090 nmp->nm_cbrefs--;
3091 if (!nmp->nm_cbid) {
3092 wakeup(&nmp->nm_cbrefs);
3093 }
3094 lck_mtx_unlock(&nfs_global_mutex);
3095 break;
3096 }
3097
3098 nfsmout:
3099 if (status == EBADRPC) {
3100 OSAddAtomic64(1, &nfsclntstats.rpcinvalid);
3101 }
3102
3103 /* build reply header */
3104 error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mhead);
3105 nfsm_chain_init(&nmrep, mhead);
3106 nfsm_chain_add_32(error, &nmrep, 0); /* insert space for an RPC record mark */
3107 nfsm_chain_add_32(error, &nmrep, xid);
3108 nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
3109 if ((status == ERPCMISMATCH) || (status & NFSERR_AUTHERR)) {
3110 nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
3111 if (status & NFSERR_AUTHERR) {
3112 nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
3113 nfsm_chain_add_32(error, &nmrep, (status & ~NFSERR_AUTHERR));
3114 } else {
3115 nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
3116 nfsm_chain_add_32(error, &nmrep, RPC_VER2);
3117 nfsm_chain_add_32(error, &nmrep, RPC_VER2);
3118 }
3119 } else {
3120 /* reply status */
3121 nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
3122 /* XXX RPCAUTH_NULL verifier */
3123 nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
3124 nfsm_chain_add_32(error, &nmrep, 0);
3125 /* accepted status */
3126 switch (status) {
3127 case EPROGUNAVAIL:
3128 nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
3129 break;
3130 case EPROGMISMATCH:
3131 nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
3132 nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
3133 nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
3134 break;
3135 case EPROCUNAVAIL:
3136 nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
3137 break;
3138 case EBADRPC:
3139 nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
3140 break;
3141 default:
3142 nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
3143 if (status != NFSERR_RETVOID) {
3144 nfsm_chain_add_32(error, &nmrep, status);
3145 }
3146 break;
3147 }
3148 }
3149 nfsm_chain_build_done(error, &nmrep);
3150 if (error) {
3151 nfsm_chain_null(&nmrep);
3152 goto out;
3153 }
3154 error = mbuf_setnext(nmrep.nmc_mcur, mrest);
3155 if (error) {
3156 printf("nfs cb: mbuf_setnext failed %d\n", error);
3157 goto out;
3158 }
3159 mrest = NULL;
3160 /* Calculate the size of the reply */
3161 replen = 0;
3162 for (m = nmrep.nmc_mhead; m; m = mbuf_next(m)) {
3163 replen += mbuf_len(m);
3164 }
3165 mbuf_pkthdr_setlen(mhead, replen);
3166 error = mbuf_pkthdr_setrcvif(mhead, NULL);
3167 nfsm_chain_set_recmark(error, &nmrep, (replen - NFSX_UNSIGNED) | 0x80000000);
3168 nfsm_chain_null(&nmrep);
3169
3170 /* send the reply */
3171 bzero(&msg, sizeof(msg));
3172 error = sock_sendmbuf(so, &msg, mhead, 0, &sentlen);
3173 mhead = NULL;
3174 if (!error && ((int)sentlen != replen)) {
3175 error = EWOULDBLOCK;
3176 }
3177 if (error == EWOULDBLOCK) { /* inability to send response is considered fatal */
3178 error = ETIMEDOUT;
3179 }
3180 out:
3181 if (error) {
3182 nfsm_chain_cleanup(&nmrep);
3183 }
3184 if (mhead) {
3185 mbuf_freem(mhead);
3186 }
3187 if (mrest) {
3188 mbuf_freem(mrest);
3189 }
3190 if (mreq) {
3191 mbuf_freem(mreq);
3192 }
3193 NFS_ZFREE(nfs_fhandle_zone, fh);
3194 return error;
3195 }
3196 #endif /* CONFIG_NFS4 */
3197
3198 /*
3199 * Initialize an nfs_rpc_record_state structure.
3200 */
3201 void
nfs_rpc_record_state_init(struct nfs_rpc_record_state * nrrsp)3202 nfs_rpc_record_state_init(struct nfs_rpc_record_state *nrrsp)
3203 {
3204 bzero(nrrsp, sizeof(*nrrsp));
3205 nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
3206 }
3207
3208 /*
3209 * Clean up an nfs_rpc_record_state structure.
3210 */
3211 void
nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state * nrrsp)3212 nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state *nrrsp)
3213 {
3214 if (nrrsp->nrrs_m) {
3215 mbuf_freem(nrrsp->nrrs_m);
3216 nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
3217 }
3218 }
3219
3220 /*
3221 * Read the next (marked) RPC record from the socket.
3222 *
3223 * *recvp returns if any data was received.
3224 * *mp returns the next complete RPC record
3225 */
int
nfs_rpc_record_read(socket_t so, struct nfs_rpc_record_state *nrrsp, int flags, int *recvp, mbuf_t *mp)
{
	struct iovec aio;
	struct msghdr msg;
	size_t rcvlen;
	int error = 0;
	mbuf_t m;

	*recvp = 0;
	*mp = NULL;

	/*
	 * Phase 1: read the TCP RPC record marker.
	 * nrrs_markerleft counts down the marker bytes still needed; the
	 * bytes land directly into nrrs_fragleft at the right offset.
	 */
	while (!error && nrrsp->nrrs_markerleft) {
		aio.iov_base = ((char*)&nrrsp->nrrs_fragleft +
		    sizeof(nrrsp->nrrs_fragleft) - nrrsp->nrrs_markerleft);
		aio.iov_len = nrrsp->nrrs_markerleft;
		bzero(&msg, sizeof(msg));
		msg.msg_iov = &aio;
		msg.msg_iovlen = 1;
		error = sock_receive(so, &msg, flags, &rcvlen);
		if (error || !rcvlen) {
			break;
		}
		*recvp = 1;
		nrrsp->nrrs_markerleft -= rcvlen;
		if (nrrsp->nrrs_markerleft) {
			/* still waiting for more marker bytes */
			continue;
		}
		/* record marker complete */
		nrrsp->nrrs_fragleft = ntohl(nrrsp->nrrs_fragleft);
		/* high bit flags the final fragment of the record */
		if (nrrsp->nrrs_fragleft & 0x80000000) {
			nrrsp->nrrs_lastfrag = 1;
			nrrsp->nrrs_fragleft &= ~0x80000000;
		}
		nrrsp->nrrs_reclen += nrrsp->nrrs_fragleft;
		if (nrrsp->nrrs_reclen > NFS_MAXPACKET) {
			/* This is SERIOUS! We are out of sync with the sender. */
			log(LOG_ERR, "impossible RPC record length (%d) on callback", nrrsp->nrrs_reclen);
			error = EFBIG;
		}
	}

	/*
	 * Phase 2: read the TCP RPC record fragment payload,
	 * appending received mbufs onto the record under assembly.
	 */
	while (!error && !nrrsp->nrrs_markerleft && nrrsp->nrrs_fragleft) {
		m = NULL;
		rcvlen = nrrsp->nrrs_fragleft;
		error = sock_receivembuf(so, NULL, &m, flags, &rcvlen);
		if (error || !rcvlen || !m) {
			break;
		}
		*recvp = 1;
		/* append mbufs to list */
		nrrsp->nrrs_fragleft -= rcvlen;
		if (!nrrsp->nrrs_m) {
			nrrsp->nrrs_m = m;
		} else {
			error = mbuf_setnext(nrrsp->nrrs_mlast, m);
			if (error) {
				printf("nfs tcp rcv: mbuf_setnext failed %d\n", error);
				mbuf_freem(m);
				break;
			}
		}
		/* advance to the tail so nrrs_mlast stays the last mbuf */
		while (mbuf_next(m)) {
			m = mbuf_next(m);
		}
		nrrsp->nrrs_mlast = m;
	}

	/* done reading fragment? */
	if (!error && !nrrsp->nrrs_markerleft && !nrrsp->nrrs_fragleft) {
		/* reset socket fragment parsing state */
		nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
		if (nrrsp->nrrs_lastfrag) {
			/* RPC record complete: hand the mbuf chain to the caller */
			*mp = nrrsp->nrrs_m;
			/* reset socket record parsing state */
			nrrsp->nrrs_reclen = 0;
			nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
			nrrsp->nrrs_lastfrag = 0;
		}
	}

	return error;
}
3312
3313
3314
3315 /*
3316 * The NFS client send routine.
3317 *
3318 * Send the given NFS request out the mount's socket.
3319 * Holds nfs_sndlock() for the duration of this call.
3320 *
3321 * - check for request termination (sigintr)
3322 * - wait for reconnect, if necessary
3323 * - UDP: check the congestion window
3324 * - make a copy of the request to send
3325 * - UDP: update the congestion window
3326 * - send the request
3327 *
3328 * If sent successfully, R_MUSTRESEND and R_RESENDERR are cleared.
3329 * rexmit count is also updated if this isn't the first send.
3330 *
3331 * If the send is not successful, make sure R_MUSTRESEND is set.
3332 * If this wasn't the first transmit, set R_RESENDERR.
3333 * Also, undo any UDP congestion window changes made.
3334 *
3335 * If the error appears to indicate that the socket should
3336 * be reconnected, mark the socket for reconnection.
3337 *
3338 * Only return errors when the request should be aborted.
3339 */
int
nfs_send(struct nfsreq *req, int wait)
{
	struct nfsmount *nmp;
	struct nfs_socket *nso;
	int error, error2, sotype, rexmit, slpflag = 0, needrecon;
	struct msghdr msg;
	struct sockaddr *sendnam;
	mbuf_t mreqcopy;
	size_t sentlen = 0;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

again:
	/* take the mount's send lock; held until nfs_sndunlock() */
	error = nfs_sndlock(req);
	if (error) {
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return error;
	}

	/* bail out early if the request has been interrupted/terminated */
	error = nfs_sigintr(req->r_nmp, req, NULL, 0);
	if (error) {
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return error;
	}
	nmp = req->r_nmp;
	sotype = nmp->nm_sotype;

	/*
	 * If it's a setup RPC but we're not in SETUP... must need reconnect.
	 * If it's a recovery RPC but the socket's not ready... must need reconnect.
	 */
	if (((req->r_flags & R_SETUP) && !(nmp->nm_sockflags & NMSOCK_SETUP)) ||
	    ((req->r_flags & R_RECOVER) && !(nmp->nm_sockflags & NMSOCK_READY))) {
		error = ETIMEDOUT;
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return error;
	}

	/* If the socket needs reconnection, do that now. */
	/* wait until socket is ready - unless this request is part of setup */
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_sockflags & NMSOCK_READY) &&
	    !((nmp->nm_sockflags & NMSOCK_SETUP) && (req->r_flags & R_SETUP))) {
		if (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) {
			slpflag |= PCATCH;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		nfs_sndunlock(req);
		if (!wait) {
			/* caller doesn't want to block: flag for resend and return */
			lck_mtx_lock(&req->r_mtx);
			req->r_flags &= ~R_SENDING;
			req->r_flags |= R_MUSTRESEND;
			req->r_rtt = 0;
			lck_mtx_unlock(&req->r_mtx);
			return 0;
		}
		NFS_SOCK_DBG("nfs_send: 0x%llx wait reconnect\n", req->r_xid);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		lck_mtx_lock(&nmp->nm_lock);
		while (!(nmp->nm_sockflags & NMSOCK_READY)) {
			/* don't bother waiting if the socket thread won't be reconnecting it */
			if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
				error = EIO;
				break;
			}
			if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (nmp->nm_reconnect_start > 0)) {
				struct timeval now;
				microuptime(&now);
				if ((now.tv_sec - nmp->nm_reconnect_start) >= 8) {
					/* soft mount in reconnect for a while... terminate ASAP */
					OSAddAtomic64(1, &nfsclntstats.rpctimeouts);
					req->r_flags |= R_SOFTTERM;
					req->r_error = error = ETIMEDOUT;
					break;
				}
			}
			/* make sure socket thread is running, then wait */
			nfs_mount_sock_thread_wake(nmp);
			if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) {
				break;
			}
			msleep(req, &nmp->nm_lock, slpflag | PSOCK, "nfsconnectwait", &ts);
			slpflag = 0;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		if (error) {
			lck_mtx_lock(&req->r_mtx);
			req->r_error = error;
			req->r_flags &= ~R_SENDING;
			lck_mtx_unlock(&req->r_mtx);
			return error;
		}
		/* socket may be ready now; restart from the top */
		goto again;
	}
	nso = nmp->nm_nso;
	/* note that we're using the mount's socket to do the send */
	nmp->nm_state |= NFSSTA_SENDING;  /* will be cleared by nfs_sndunlock() */
	lck_mtx_unlock(&nmp->nm_lock);
	if (!nso) {
		/* no socket at the moment: flag for resend and return */
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_SENDING;
		req->r_flags |= R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		return 0;
	}

	lck_mtx_lock(&req->r_mtx);
	rexmit = (req->r_flags & R_SENT);

	if (sotype == SOCK_DGRAM) {
		/* UDP: apply the congestion window before sending */
		lck_mtx_lock(&nmp->nm_lock);
		if (!(req->r_flags & R_CWND) && (nmp->nm_sent >= nmp->nm_cwnd)) {
			/* if we can't send this out yet, wait on the cwnd queue */
			slpflag = (NMFLAG(nmp, INTR) && req->r_thread) ? PCATCH : 0;
			lck_mtx_unlock(&nmp->nm_lock);
			nfs_sndunlock(req);
			req->r_flags &= ~R_SENDING;
			req->r_flags |= R_MUSTRESEND;
			lck_mtx_unlock(&req->r_mtx);
			if (!wait) {
				req->r_rtt = 0;
				return 0;
			}
			lck_mtx_lock(&nmp->nm_lock);
			while (nmp->nm_sent >= nmp->nm_cwnd) {
				if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) {
					break;
				}
				TAILQ_INSERT_TAIL(&nmp->nm_cwndq, req, r_cchain);
				msleep(req, &nmp->nm_lock, slpflag | (PZERO - 1), "nfswaitcwnd", &ts);
				slpflag = 0;
				if ((req->r_cchain.tqe_next != NFSREQNOLIST)) {
					TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
					req->r_cchain.tqe_next = NFSREQNOLIST;
				}
			}
			lck_mtx_unlock(&nmp->nm_lock);
			goto again;
		}
		/*
		 * We update these *before* the send to avoid racing
		 * against others who may be looking to send requests.
		 */
		if (!rexmit) {
			/* first transmit */
			req->r_flags |= R_CWND;
			nmp->nm_sent += NFS_CWNDSCALE;
		} else {
			/*
			 * When retransmitting, turn timing off
			 * and divide congestion window by 2.
			 */
			req->r_flags &= ~R_TIMING;
			nmp->nm_cwnd >>= 1;
			if (nmp->nm_cwnd < NFS_CWNDSCALE) {
				nmp->nm_cwnd = NFS_CWNDSCALE;
			}
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}

	req->r_flags &= ~R_MUSTRESEND;
	lck_mtx_unlock(&req->r_mtx);

	/* send a copy so the original request mbufs survive for retransmit */
	error = mbuf_copym(req->r_mhead, 0, MBUF_COPYALL,
	    wait ? MBUF_WAITOK : MBUF_DONTWAIT, &mreqcopy);
	if (error) {
		if (wait) {
			log(LOG_INFO, "nfs_send: mbuf copy failed %d\n", error);
		}
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_SENDING;
		req->r_flags |= R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		return 0;
	}

	bzero(&msg, sizeof(msg));
	/* unconnected datagram sockets need an explicit destination address */
	if ((sotype != SOCK_STREAM) && !sock_isconnected(nso->nso_so) && ((sendnam = nmp->nm_saddr))) {
		msg.msg_name = (caddr_t)sendnam;
		msg.msg_namelen = sendnam->sa_len;
	}
	NFS_SOCK_DUMP_MBUF("Sending mbuf\n", mreqcopy);
	error = sock_sendmbuf(nso->nso_so, &msg, mreqcopy, 0, &sentlen);
	if (error || (sentlen != req->r_mreqlen)) {
		NFS_SOCK_DBG("nfs_send: 0x%llx sent %d/%d error %d\n",
		    req->r_xid, (int)sentlen, (int)req->r_mreqlen, error);
	}

	/* a short send with no error is treated as a send timeout */
	if (!error && (sentlen != req->r_mreqlen)) {
		error = EWOULDBLOCK;
	}
	/* partial send on a stream socket leaves the stream out of sync: must reconnect */
	needrecon = ((sotype == SOCK_STREAM) && sentlen && (sentlen != req->r_mreqlen));

	lck_mtx_lock(&req->r_mtx);
	req->r_flags &= ~R_SENDING;
	req->r_rtt = 0;
	if (rexmit && (++req->r_rexmit > NFS_MAXREXMIT)) {
		req->r_rexmit = NFS_MAXREXMIT;
	}

	if (!error) {
		/* SUCCESS */
		req->r_flags &= ~R_RESENDERR;
		if (rexmit) {
			OSAddAtomic64(1, &nfsclntstats.rpcretries);
		}
		req->r_flags |= R_SENT;
		if (req->r_flags & R_WAITSENT) {
			req->r_flags &= ~R_WAITSENT;
			wakeup(req);
		}
		nfs_sndunlock(req);
		lck_mtx_unlock(&req->r_mtx);
		return 0;
	}

	/* send failed */
	req->r_flags |= R_MUSTRESEND;
	if (rexmit) {
		req->r_flags |= R_RESENDERR;
	}
	if ((error == EINTR) || (error == ERESTART)) {
		req->r_error = error;
	}
	lck_mtx_unlock(&req->r_mtx);

	if (sotype == SOCK_DGRAM) {
		/*
		 * Note: even though a first send may fail, we consider
		 * the request sent for congestion window purposes.
		 * So we don't need to undo any of the changes made above.
		 */
		/*
		 * Socket errors ignored for connectionless sockets??
		 * For now, ignore them all
		 */
		if ((error != EINTR) && (error != ERESTART) &&
		    (error != EWOULDBLOCK) && (error != EIO) && (nso == nmp->nm_nso)) {
			/* clear the pending socket error (SO_ERROR is read-and-clear) */
			int clearerror = 0, optlen = sizeof(clearerror);
			sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &clearerror, &optlen);
#ifdef NFS_SOCKET_DEBUGGING
			if (clearerror) {
				NFS_SOCK_DBG("nfs_send: ignoring UDP socket error %d so %d\n",
				    error, clearerror);
			}
#endif
		}
	}

	/* check if it appears we should reconnect the socket */
	switch (error) {
	case EWOULDBLOCK:
		/* if send timed out, reconnect if on TCP */
		if (sotype != SOCK_STREAM) {
			break;
		}
		OS_FALLTHROUGH;
	case EPIPE:
	case EADDRNOTAVAIL:
	case ENETDOWN:
	case ENETUNREACH:
	case ENETRESET:
	case ECONNABORTED:
	case ECONNRESET:
	case ENOTCONN:
	case ESHUTDOWN:
	case ECONNREFUSED:
	case EHOSTDOWN:
	case EHOSTUNREACH:
		/* case ECANCELED??? */
		needrecon = 1;
		break;
	}
	if (needrecon && (nso == nmp->nm_nso)) { /* mark socket as needing reconnect */
		NFS_SOCK_DBG("nfs_send: 0x%llx need reconnect %d\n", req->r_xid, error);
		nfs_need_reconnect(nmp);
	}

	nfs_sndunlock(req);

	if (nfs_is_dead(error, nmp)) {
		error = EIO;
	}

	/*
	 * Don't log some errors:
	 * EPIPE errors may be common with servers that drop idle connections.
	 * EADDRNOTAVAIL may occur on network transitions.
	 * ENOTCONN may occur under some network conditions.
	 */
	if ((error == EPIPE) || (error == EADDRNOTAVAIL) || (error == ENOTCONN)) {
		error = 0;
	}
	if (error && (error != EINTR) && (error != ERESTART)) {
		log(LOG_INFO, "nfs send error %d for server %s\n", error,
		    !req->r_nmp ? "<unmounted>" :
		    vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname);
	}

	/* prefer request termination error over other errors */
	error2 = nfs_sigintr(req->r_nmp, req, req->r_thread, 0);
	if (error2) {
		error = error2;
	}

	/* only allow the following errors to be returned */
	if ((error != EINTR) && (error != ERESTART) && (error != EIO) &&
	    (error != ENXIO) && (error != ETIMEDOUT)) {
		/*
		 * We got some error we don't know what do do with,
		 * i.e., we're not reconnecting, we map it to
		 * EIO. Presumably our send failed and we better tell
		 * the caller so they don't wait for a reply that is
		 * never going to come.  If we are reconnecting we
		 * return 0 and the request will be resent.
		 */
		error = needrecon ? 0 : EIO;
	}
	return error;
}
3679
3680 /*
3681 * NFS client socket upcalls
3682 *
3683 * Pull RPC replies out of an NFS mount's socket and match them
3684 * up with the pending request.
3685 *
3686 * The datagram code is simple because we always get whole
3687 * messages out of the socket.
3688 *
3689 * The stream code is more involved because we have to parse
3690 * the RPC records out of the stream.
3691 */
3692
3693 /* NFS client UDP socket upcall */
3694 void
nfs_udp_rcv(socket_t so,void * arg,__unused int waitflag)3695 nfs_udp_rcv(socket_t so, void *arg, __unused int waitflag)
3696 {
3697 struct nfsmount *nmp = arg;
3698 struct nfs_socket *nso = nmp->nm_nso;
3699 size_t rcvlen;
3700 mbuf_t m;
3701 int error = 0;
3702
3703 if (nmp->nm_sockflags & NMSOCK_CONNECTING) {
3704 return;
3705 }
3706
3707 do {
3708 /* make sure we're on the current socket */
3709 if (!nso || (nso->nso_so != so)) {
3710 return;
3711 }
3712
3713 m = NULL;
3714 rcvlen = 1000000;
3715 error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
3716 if (m) {
3717 nfs_request_match_reply(nmp, m);
3718 }
3719 } while (m && !error);
3720
3721 if (error && (error != EWOULDBLOCK)) {
3722 /* problems with the socket... mark for reconnection */
3723 NFS_SOCK_DBG("nfs_udp_rcv: need reconnect %d\n", error);
3724 nfs_need_reconnect(nmp);
3725 }
3726 }
3727
/* NFS client TCP socket upcall */
/*
 * Drain complete RPC records from the mount's TCP socket and hand each
 * one to nfs_request_match_reply().  Record-marking parse state lives in
 * the nfs_socket (nso_rrs); the NSO_UPCALL flag ensures only one upcall
 * works the socket at a time, so the state is copied out, used, and
 * copied back without holding the lock across the receive loop.
 */
void
nfs_tcp_rcv(socket_t so, void *arg, __unused int waitflag)
{
	struct nfsmount *nmp = arg;
	struct nfs_socket *nso = nmp->nm_nso;
	struct nfs_rpc_record_state nrrs;
	mbuf_t m;
	int error = 0;
	int recv = 1;
	int wup = 0;		/* need to wake a waiting disconnect? */

	if (nmp->nm_sockflags & NMSOCK_CONNECTING) {
		return;
	}

	/* make sure we're on the current socket */
	lck_mtx_lock(&nmp->nm_lock);
	nso = nmp->nm_nso;
	if (!nso || (nso->nso_so != so) || (nmp->nm_sockflags & (NMSOCK_DISCONNECTING))) {
		lck_mtx_unlock(&nmp->nm_lock);
		return;
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/* make sure this upcall should be trying to do work */
	lck_mtx_lock(&nso->nso_lock);
	if (nso->nso_flags & (NSO_UPCALL | NSO_DISCONNECTING | NSO_DEAD)) {
		lck_mtx_unlock(&nso->nso_lock);
		return;
	}
	nso->nso_flags |= NSO_UPCALL;	/* claim the socket for this upcall */
	nrrs = nso->nso_rrs;		/* work on a private copy of the parse state */
	lck_mtx_unlock(&nso->nso_lock);

	/* loop while we make error-free progress */
	while (!error && recv) {
		error = nfs_rpc_record_read(so, &nrrs, MSG_DONTWAIT, &recv, &m);
		if (m) { /* match completed response with request */
			nfs_request_match_reply(nmp, m);
		}
	}

	/* Update the socket's rpc parsing state */
	lck_mtx_lock(&nso->nso_lock);
	nso->nso_rrs = nrrs;
	if (nso->nso_flags & NSO_DISCONNECTING) {
		/* a disconnect is waiting for NSO_UPCALL to clear */
		wup = 1;
	}
	nso->nso_flags &= ~NSO_UPCALL;
	lck_mtx_unlock(&nso->nso_lock);
	if (wup) {
		wakeup(&nso->nso_flags);
	}

#ifdef NFS_SOCKET_DEBUGGING
	if (!recv && (error != EWOULDBLOCK)) {
		NFS_SOCK_DBG("nfs_tcp_rcv: got nothing, error %d, got FIN?\n", error);
	}
#endif
	/* note: no error and no data indicates server closed its end */
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/* problems with the socket... mark for reconnection */
		NFS_SOCK_DBG("nfs_tcp_rcv: need reconnect %d\n", error);
		nfs_need_reconnect(nmp);
	}
}
3795
3796 /*
3797 * "poke" a socket to try to provoke any pending errors
3798 */
3799 void
nfs_sock_poke(struct nfsmount * nmp)3800 nfs_sock_poke(struct nfsmount *nmp)
3801 {
3802 struct iovec aio;
3803 struct msghdr msg;
3804 size_t len;
3805 int error = 0;
3806 int dummy;
3807
3808 lck_mtx_lock(&nmp->nm_lock);
3809 if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) ||
3810 !(nmp->nm_sockflags & NMSOCK_READY) || !nmp->nm_nso || !nmp->nm_nso->nso_so) {
3811 /* Nothing to poke */
3812 nmp->nm_sockflags &= ~NMSOCK_POKE;
3813 wakeup(&nmp->nm_sockflags);
3814 lck_mtx_unlock(&nmp->nm_lock);
3815 return;
3816 }
3817 lck_mtx_unlock(&nmp->nm_lock);
3818 aio.iov_base = &dummy;
3819 aio.iov_len = 0;
3820 len = 0;
3821 bzero(&msg, sizeof(msg));
3822 msg.msg_iov = &aio;
3823 msg.msg_iovlen = 1;
3824 error = sock_send(nmp->nm_nso->nso_so, &msg, MSG_DONTWAIT, &len);
3825 NFS_SOCK_DBG("nfs_sock_poke: error %d\n", error);
3826 lck_mtx_lock(&nmp->nm_lock);
3827 nmp->nm_sockflags &= ~NMSOCK_POKE;
3828 wakeup(&nmp->nm_sockflags);
3829 lck_mtx_unlock(&nmp->nm_lock);
3830 nfs_is_dead(error, nmp);
3831 }
3832
3833 /*
3834 * Match an RPC reply with the corresponding request
3835 */
3836 void
nfs_request_match_reply(struct nfsmount * nmp,mbuf_t mrep)3837 nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep)
3838 {
3839 struct nfsreq *req;
3840 struct nfsm_chain nmrep;
3841 u_int32_t reply = 0, rxid = 0;
3842 int error = 0, asyncioq, t1;
3843
3844 bzero(&nmrep, sizeof(nmrep));
3845 /* Get the xid and check that it is an rpc reply */
3846 nfsm_chain_dissect_init(error, &nmrep, mrep);
3847 nfsm_chain_get_32(error, &nmrep, rxid);
3848 nfsm_chain_get_32(error, &nmrep, reply);
3849 if (error || (reply != RPC_REPLY)) {
3850 OSAddAtomic64(1, &nfsclntstats.rpcinvalid);
3851 mbuf_freem(mrep);
3852 return;
3853 }
3854
3855 /*
3856 * Loop through the request list to match up the reply
3857 * Iff no match, just drop it.
3858 */
3859 lck_mtx_lock(&nfs_request_mutex);
3860 TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
3861 if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
3862 continue;
3863 }
3864 /* looks like we have it, grab lock and double check */
3865 lck_mtx_lock(&req->r_mtx);
3866 if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
3867 lck_mtx_unlock(&req->r_mtx);
3868 continue;
3869 }
3870 /* Found it.. */
3871 req->r_nmrep = nmrep;
3872 lck_mtx_lock(&nmp->nm_lock);
3873 if (nmp->nm_sotype == SOCK_DGRAM) {
3874 /*
3875 * Update congestion window.
3876 * Do the additive increase of one rpc/rtt.
3877 */
3878 FSDBG(530, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
3879 if (nmp->nm_cwnd <= nmp->nm_sent) {
3880 nmp->nm_cwnd +=
3881 ((NFS_CWNDSCALE * NFS_CWNDSCALE) +
3882 (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
3883 if (nmp->nm_cwnd > NFS_MAXCWND) {
3884 nmp->nm_cwnd = NFS_MAXCWND;
3885 }
3886 }
3887 if (req->r_flags & R_CWND) {
3888 nmp->nm_sent -= NFS_CWNDSCALE;
3889 req->r_flags &= ~R_CWND;
3890 }
3891 if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
3892 /* congestion window is open, poke the cwnd queue */
3893 struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
3894 TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
3895 req2->r_cchain.tqe_next = NFSREQNOLIST;
3896 wakeup(req2);
3897 }
3898 }
3899 /*
3900 * Update rtt using a gain of 0.125 on the mean
3901 * and a gain of 0.25 on the deviation.
3902 */
3903 if (req->r_flags & R_TIMING) {
3904 /*
3905 * Since the timer resolution of
3906 * NFS_HZ is so course, it can often
3907 * result in r_rtt == 0. Since
3908 * r_rtt == N means that the actual
3909 * rtt is between N+dt and N+2-dt ticks,
3910 * add 1.
3911 */
3912 if (proct[req->r_procnum] == 0) {
3913 panic("nfs_request_match_reply: proct[%d] is zero", req->r_procnum);
3914 }
3915 t1 = req->r_rtt + 1;
3916 t1 -= (NFS_SRTT(req) >> 3);
3917 NFS_SRTT(req) += t1;
3918 if (t1 < 0) {
3919 t1 = -t1;
3920 }
3921 t1 -= (NFS_SDRTT(req) >> 2);
3922 NFS_SDRTT(req) += t1;
3923 }
3924 nmp->nm_timeouts = 0;
3925 lck_mtx_unlock(&nmp->nm_lock);
3926 /* signal anyone waiting on this request */
3927 wakeup(req);
3928 asyncioq = (req->r_callback.rcb_func != NULL);
3929 #if CONFIG_NFS_GSS
3930 if (nfs_request_using_gss(req)) {
3931 nfs_gss_clnt_rpcdone(req);
3932 }
3933 #endif /* CONFIG_NFS_GSS */
3934 lck_mtx_unlock(&req->r_mtx);
3935 lck_mtx_unlock(&nfs_request_mutex);
3936 /* if it's an async RPC with a callback, queue it up */
3937 if (asyncioq) {
3938 nfs_asyncio_finish(req);
3939 }
3940 break;
3941 }
3942
3943 if (!req) {
3944 /* not matched to a request, so drop it. */
3945 lck_mtx_unlock(&nfs_request_mutex);
3946 OSAddAtomic64(1, &nfsclntstats.rpcunexpected);
3947 mbuf_freem(mrep);
3948 }
3949 }
3950
3951 /*
3952 * Wait for the reply for a given request...
3953 * ...potentially resending the request if necessary.
3954 */
3955 int
nfs_wait_reply(struct nfsreq * req)3956 nfs_wait_reply(struct nfsreq *req)
3957 {
3958 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
3959 int error = 0, slpflag, first = 1;
3960
3961 if (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) {
3962 slpflag = PCATCH;
3963 } else {
3964 slpflag = 0;
3965 }
3966
3967 lck_mtx_lock(&req->r_mtx);
3968 while (!req->r_nmrep.nmc_mhead) {
3969 if ((error = nfs_sigintr(req->r_nmp, req, first ? NULL : req->r_thread, 0))) {
3970 break;
3971 }
3972 if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) {
3973 break;
3974 }
3975 /* check if we need to resend */
3976 if (req->r_flags & R_MUSTRESEND) {
3977 NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d\n",
3978 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
3979 req->r_flags |= R_SENDING;
3980 lck_mtx_unlock(&req->r_mtx);
3981 if (nfs_request_using_gss(req)) {
3982 /*
3983 * It's an RPCSEC_GSS request.
3984 * Can't just resend the original request
3985 * without bumping the cred sequence number.
3986 * Go back and re-build the request.
3987 */
3988 lck_mtx_lock(&req->r_mtx);
3989 req->r_flags &= ~R_SENDING;
3990 lck_mtx_unlock(&req->r_mtx);
3991 return EAGAIN;
3992 }
3993 error = nfs_send(req, 1);
3994 lck_mtx_lock(&req->r_mtx);
3995 NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d err %d\n",
3996 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt, error);
3997 if (error) {
3998 break;
3999 }
4000 if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) {
4001 break;
4002 }
4003 }
4004 /* need to poll if we're P_NOREMOTEHANG */
4005 if (nfs_noremotehang(req->r_thread)) {
4006 ts.tv_sec = 1;
4007 }
4008 msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitreply", &ts);
4009 first = slpflag = 0;
4010 }
4011 lck_mtx_unlock(&req->r_mtx);
4012
4013 return error;
4014 }
4015
4016 /*
4017 * An NFS request goes something like this:
4018 * (nb: always frees up mreq mbuf list)
4019 * nfs_request_create()
4020 * - allocates a request struct if one is not provided
4021 * - initial fill-in of the request struct
4022 * nfs_request_add_header()
4023 * - add the RPC header
4024 * nfs_request_send()
4025 * - link it into list
4026 * - call nfs_send() for first transmit
4027 * nfs_request_wait()
4028 * - call nfs_wait_reply() to wait for the reply
4029 * nfs_request_finish()
4030 * - break down rpc header and return with error or nfs reply
4031 * pointed to by nmrep.
4032 * nfs_request_rele()
4033 * nfs_request_destroy()
4034 * - clean up the request struct
4035 * - free the request struct if it was allocated by nfs_request_create()
4036 */
4037
4038 /*
4039 * Set up an NFS request struct (allocating if no request passed in).
4040 */
4041 int
nfs_request_create(nfsnode_t np,mount_t mp,struct nfsm_chain * nmrest,int procnum,thread_t thd,kauth_cred_t cred,struct nfsreq ** reqp)4042 nfs_request_create(
4043 nfsnode_t np,
4044 mount_t mp, /* used only if !np */
4045 struct nfsm_chain *nmrest,
4046 int procnum,
4047 thread_t thd,
4048 kauth_cred_t cred,
4049 struct nfsreq **reqp)
4050 {
4051 struct nfsreq *req, *newreq = NULL;
4052 struct nfsmount *nmp;
4053
4054 req = *reqp;
4055 if (!req) {
4056 /* allocate a new NFS request structure */
4057 req = newreq = zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO);
4058 } else {
4059 bzero(req, sizeof(*req));
4060 }
4061 if (req == newreq) {
4062 req->r_flags = R_ALLOCATED;
4063 }
4064
4065 nmp = VFSTONFS(np ? NFSTOMP(np) : mp);
4066 if (nfs_mount_gone(nmp)) {
4067 if (newreq) {
4068 NFS_ZFREE(nfs_req_zone, newreq);
4069 }
4070 return ENXIO;
4071 }
4072 lck_mtx_lock(&nmp->nm_lock);
4073 if ((nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) &&
4074 (nmp->nm_state & NFSSTA_TIMEO)) {
4075 lck_mtx_unlock(&nmp->nm_lock);
4076 mbuf_freem(nmrest->nmc_mhead);
4077 nmrest->nmc_mhead = NULL;
4078 if (newreq) {
4079 NFS_ZFREE(nfs_req_zone, newreq);
4080 }
4081 return ENXIO;
4082 }
4083
4084 if ((nmp->nm_vers != NFS_VER4) && (procnum >= 0) && (procnum < NFS_NPROCS)) {
4085 OSAddAtomic64(1, &nfsclntstats.rpccntv3[procnum]);
4086 }
4087 if (nmp->nm_vers == NFS_VER4) {
4088 if (procnum == NFSPROC4_COMPOUND || procnum == NFSPROC4_NULL) {
4089 OSAddAtomic64(1, &nfsclntstats.opcntv4[procnum]);
4090 } else {
4091 panic("nfs_request: invalid NFSv4 RPC request %d", procnum);
4092 }
4093 }
4094
4095 lck_mtx_init(&req->r_mtx, &nfs_request_grp, LCK_ATTR_NULL);
4096 req->r_nmp = nmp;
4097 nmp->nm_ref++;
4098 req->r_np = np;
4099 req->r_thread = thd;
4100 if (!thd) {
4101 req->r_flags |= R_NOINTR;
4102 }
4103 if (IS_VALID_CRED(cred)) {
4104 kauth_cred_ref(cred);
4105 req->r_cred = cred;
4106 }
4107 req->r_procnum = procnum;
4108 if (proct[procnum] > 0) {
4109 req->r_flags |= R_TIMING;
4110 }
4111 req->r_nmrep.nmc_mhead = NULL;
4112 SLIST_INIT(&req->r_gss_seqlist);
4113 req->r_achain.tqe_next = NFSREQNOLIST;
4114 req->r_rchain.tqe_next = NFSREQNOLIST;
4115 req->r_cchain.tqe_next = NFSREQNOLIST;
4116
4117 /* set auth flavor to use for request */
4118 if (!req->r_cred) {
4119 req->r_auth = RPCAUTH_NONE;
4120 } else if (req->r_np && (req->r_np->n_auth != RPCAUTH_INVALID)) {
4121 req->r_auth = req->r_np->n_auth;
4122 } else {
4123 req->r_auth = nmp->nm_auth;
4124 }
4125
4126 lck_mtx_unlock(&nmp->nm_lock);
4127
4128 /* move the request mbuf chain to the nfsreq */
4129 req->r_mrest = nmrest->nmc_mhead;
4130 nmrest->nmc_mhead = NULL;
4131
4132 req->r_flags |= R_INITTED;
4133 req->r_refs = 1;
4134 if (newreq) {
4135 *reqp = req;
4136 }
4137 return 0;
4138 }
4139
4140 /*
4141 * Clean up and free an NFS request structure.
4142 */
4143 void
nfs_request_destroy(struct nfsreq * req)4144 nfs_request_destroy(struct nfsreq *req)
4145 {
4146 struct nfsmount *nmp;
4147 int clearjbtimeo = 0;
4148
4149 #if CONFIG_NFS_GSS
4150 struct gss_seq *gsp, *ngsp;
4151 #endif
4152
4153 if (!req || !(req->r_flags & R_INITTED)) {
4154 return;
4155 }
4156 nmp = req->r_nmp;
4157 req->r_flags &= ~R_INITTED;
4158 if (req->r_lflags & RL_QUEUED) {
4159 nfs_reqdequeue(req);
4160 }
4161
4162 if (req->r_achain.tqe_next != NFSREQNOLIST) {
4163 /*
4164 * Still on an async I/O queue?
4165 * %%% But which one, we may be on a local iod.
4166 */
4167 lck_mtx_lock(&nfsiod_mutex);
4168 if (nmp && req->r_achain.tqe_next != NFSREQNOLIST) {
4169 TAILQ_REMOVE(&nmp->nm_iodq, req, r_achain);
4170 req->r_achain.tqe_next = NFSREQNOLIST;
4171 }
4172 lck_mtx_unlock(&nfsiod_mutex);
4173 }
4174
4175 lck_mtx_lock(&req->r_mtx);
4176 if (nmp) {
4177 lck_mtx_lock(&nmp->nm_lock);
4178 if (req->r_flags & R_CWND) {
4179 /* Decrement the outstanding request count. */
4180 req->r_flags &= ~R_CWND;
4181 nmp->nm_sent -= NFS_CWNDSCALE;
4182 if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
4183 /* congestion window is open, poke the cwnd queue */
4184 struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
4185 TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
4186 req2->r_cchain.tqe_next = NFSREQNOLIST;
4187 wakeup(req2);
4188 }
4189 }
4190 /* XXX should we just remove this conditional, we should have a reference if we're resending */
4191 if ((req->r_flags & R_RESENDQ) && req->r_rchain.tqe_next != NFSREQNOLIST) {
4192 TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
4193 req->r_flags &= ~R_RESENDQ;
4194 req->r_rchain.tqe_next = NFSREQNOLIST;
4195 }
4196 if (req->r_cchain.tqe_next != NFSREQNOLIST) {
4197 TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
4198 req->r_cchain.tqe_next = NFSREQNOLIST;
4199 }
4200 if (req->r_flags & R_JBTPRINTFMSG) {
4201 req->r_flags &= ~R_JBTPRINTFMSG;
4202 nmp->nm_jbreqs--;
4203 clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
4204 }
4205 lck_mtx_unlock(&nmp->nm_lock);
4206 }
4207 lck_mtx_unlock(&req->r_mtx);
4208
4209 if (clearjbtimeo) {
4210 nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
4211 }
4212 if (req->r_mhead) {
4213 mbuf_freem(req->r_mhead);
4214 } else if (req->r_mrest) {
4215 mbuf_freem(req->r_mrest);
4216 }
4217 if (req->r_nmrep.nmc_mhead) {
4218 mbuf_freem(req->r_nmrep.nmc_mhead);
4219 }
4220 if (IS_VALID_CRED(req->r_cred)) {
4221 kauth_cred_unref(&req->r_cred);
4222 }
4223 #if CONFIG_NFS_GSS
4224 if (nfs_request_using_gss(req)) {
4225 nfs_gss_clnt_rpcdone(req);
4226 }
4227 SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp)
4228 kfree_type(struct gss_seq, gsp);
4229 if (req->r_gss_ctx) {
4230 nfs_gss_clnt_ctx_unref(req);
4231 }
4232 #endif /* CONFIG_NFS_GSS */
4233 if (req->r_wrongsec) {
4234 kfree_data(req->r_wrongsec, NX_MAX_SEC_FLAVORS * sizeof(uint32_t));
4235 }
4236 if (nmp) {
4237 nfs_mount_rele(nmp);
4238 }
4239 lck_mtx_destroy(&req->r_mtx, &nfs_request_grp);
4240 if (req->r_flags & R_ALLOCATED) {
4241 NFS_ZFREE(nfs_req_zone, req);
4242 }
4243 }
4244
4245 void
nfs_request_ref(struct nfsreq * req,int locked)4246 nfs_request_ref(struct nfsreq *req, int locked)
4247 {
4248 if (!locked) {
4249 lck_mtx_lock(&req->r_mtx);
4250 }
4251 if (req->r_refs <= 0) {
4252 panic("nfsreq reference error");
4253 }
4254 req->r_refs++;
4255 if (!locked) {
4256 lck_mtx_unlock(&req->r_mtx);
4257 }
4258 }
4259
4260 void
nfs_request_rele(struct nfsreq * req)4261 nfs_request_rele(struct nfsreq *req)
4262 {
4263 int destroy;
4264
4265 lck_mtx_lock(&req->r_mtx);
4266 if (req->r_refs <= 0) {
4267 panic("nfsreq reference underflow");
4268 }
4269 req->r_refs--;
4270 destroy = (req->r_refs == 0);
4271 lck_mtx_unlock(&req->r_mtx);
4272 if (destroy) {
4273 nfs_request_destroy(req);
4274 }
4275 }
4276
4277
4278 /*
4279 * Add an (updated) RPC header with authorization to an NFS request.
4280 */
4281 int
nfs_request_add_header(struct nfsreq * req)4282 nfs_request_add_header(struct nfsreq *req)
4283 {
4284 struct nfsmount *nmp;
4285 int error = 0;
4286 mbuf_t m;
4287
4288 /* free up any previous header */
4289 if ((m = req->r_mhead)) {
4290 while (m && (m != req->r_mrest)) {
4291 m = mbuf_free(m);
4292 }
4293 req->r_mhead = NULL;
4294 }
4295
4296 nmp = req->r_nmp;
4297 if (nfs_mount_gone(nmp)) {
4298 return ENXIO;
4299 }
4300
4301 error = nfsm_rpchead(req, req->r_mrest, &req->r_xid, &req->r_mhead);
4302 if (error) {
4303 return error;
4304 }
4305
4306 req->r_mreqlen = mbuf_pkthdr_len(req->r_mhead);
4307 nmp = req->r_nmp;
4308 if (nfs_mount_gone(nmp)) {
4309 return ENXIO;
4310 }
4311 lck_mtx_lock(&nmp->nm_lock);
4312 if (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) {
4313 req->r_retry = nmp->nm_retry;
4314 } else {
4315 req->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */
4316 }
4317 lck_mtx_unlock(&nmp->nm_lock);
4318
4319 return error;
4320 }
4321
4322
4323 /*
4324 * Queue an NFS request up and send it out.
4325 */
4326 int
nfs_request_send(struct nfsreq * req,int wait)4327 nfs_request_send(struct nfsreq *req, int wait)
4328 {
4329 struct nfsmount *nmp;
4330 struct timeval now;
4331
4332 lck_mtx_lock(&req->r_mtx);
4333 req->r_flags |= R_SENDING;
4334 lck_mtx_unlock(&req->r_mtx);
4335
4336 lck_mtx_lock(&nfs_request_mutex);
4337
4338 nmp = req->r_nmp;
4339 if (nfs_mount_gone(nmp)) {
4340 lck_mtx_unlock(&nfs_request_mutex);
4341 return ENXIO;
4342 }
4343
4344 microuptime(&now);
4345 if (!req->r_start) {
4346 req->r_start = now.tv_sec;
4347 req->r_lastmsg = now.tv_sec -
4348 ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
4349 }
4350
4351 OSAddAtomic64(1, &nfsclntstats.rpcrequests);
4352
4353 /*
4354 * Make sure the request is not in the queue.
4355 */
4356 if (req->r_lflags & RL_QUEUED) {
4357 #if DEVELOPMENT
4358 panic("nfs_request_send: req %p is already in global requests queue", req);
4359 #else
4360 TAILQ_REMOVE(&nfs_reqq, req, r_chain);
4361 req->r_lflags &= ~RL_QUEUED;
4362 #endif /* DEVELOPMENT */
4363 }
4364
4365 /*
4366 * Chain request into list of outstanding requests. Be sure
4367 * to put it LAST so timer finds oldest requests first.
4368 * Make sure that the request queue timer is running
4369 * to check for possible request timeout.
4370 */
4371 TAILQ_INSERT_TAIL(&nfs_reqq, req, r_chain);
4372 req->r_lflags |= RL_QUEUED;
4373 if (!nfs_request_timer_on) {
4374 nfs_request_timer_on = 1;
4375 nfs_interval_timer_start(nfs_request_timer_call,
4376 NFS_REQUESTDELAY);
4377 }
4378 lck_mtx_unlock(&nfs_request_mutex);
4379
4380 /* Send the request... */
4381 return nfs_send(req, wait);
4382 }
4383
4384 /*
4385 * Call nfs_wait_reply() to wait for the reply.
4386 */
4387 void
nfs_request_wait(struct nfsreq * req)4388 nfs_request_wait(struct nfsreq *req)
4389 {
4390 req->r_error = nfs_wait_reply(req);
4391 }
4392
4393 /*
4394 * Finish up an NFS request by dequeueing it and
4395 * doing the initial NFS request reply processing.
4396 */
4397 int
nfs_request_finish(struct nfsreq * req,struct nfsm_chain * nmrepp,int * status)4398 nfs_request_finish(
4399 struct nfsreq *req,
4400 struct nfsm_chain *nmrepp,
4401 int *status)
4402 {
4403 struct nfsmount *nmp;
4404 mbuf_t mrep;
4405 int verf_type = 0;
4406 uint32_t verf_len = 0;
4407 uint32_t reply_status = 0;
4408 uint32_t rejected_status = 0;
4409 uint32_t auth_status = 0;
4410 uint32_t accepted_status = 0;
4411 struct nfsm_chain nmrep;
4412 int error, clearjbtimeo;
4413
4414 error = req->r_error;
4415
4416 if (nmrepp) {
4417 nmrepp->nmc_mhead = NULL;
4418 }
4419
4420 /* RPC done, unlink the request. */
4421 nfs_reqdequeue(req);
4422
4423 mrep = req->r_nmrep.nmc_mhead;
4424
4425 nmp = req->r_nmp;
4426
4427 if ((req->r_flags & R_CWND) && nmp) {
4428 /*
4429 * Decrement the outstanding request count.
4430 */
4431 req->r_flags &= ~R_CWND;
4432 lck_mtx_lock(&nmp->nm_lock);
4433 FSDBG(273, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
4434 nmp->nm_sent -= NFS_CWNDSCALE;
4435 if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
4436 /* congestion window is open, poke the cwnd queue */
4437 struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
4438 TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
4439 req2->r_cchain.tqe_next = NFSREQNOLIST;
4440 wakeup(req2);
4441 }
4442 lck_mtx_unlock(&nmp->nm_lock);
4443 }
4444
4445 #if CONFIG_NFS_GSS
4446 if (nfs_request_using_gss(req)) {
4447 /*
4448 * If the request used an RPCSEC_GSS credential
4449 * then reset its sequence number bit in the
4450 * request window.
4451 */
4452 nfs_gss_clnt_rpcdone(req);
4453
4454 /*
4455 * If we need to re-send, go back and re-build the
4456 * request based on a new sequence number.
4457 * Note that we're using the original XID.
4458 */
4459 if (error == EAGAIN) {
4460 req->r_error = 0;
4461 if (mrep) {
4462 mbuf_freem(mrep);
4463 }
4464 error = nfs_gss_clnt_args_restore(req); // remove any trailer mbufs
4465 req->r_nmrep.nmc_mhead = NULL;
4466 req->r_flags |= R_RESTART;
4467 if (error == ENEEDAUTH) {
4468 req->r_xid = 0; // get a new XID
4469 error = 0;
4470 }
4471 goto nfsmout;
4472 }
4473 }
4474 #endif /* CONFIG_NFS_GSS */
4475
4476 /*
4477 * If there was a successful reply, make sure to mark the mount as up.
4478 * If a tprintf message was given (or if this is a timed-out soft mount)
4479 * then post a tprintf message indicating the server is alive again.
4480 */
4481 if (!error) {
4482 if ((req->r_flags & R_TPRINTFMSG) ||
4483 (nmp && (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) &&
4484 ((nmp->nm_state & (NFSSTA_TIMEO | NFSSTA_FORCE | NFSSTA_DEAD)) == NFSSTA_TIMEO))) {
4485 nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, "is alive again");
4486 } else {
4487 nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, NULL);
4488 }
4489 }
4490 if (!error && !nmp) {
4491 error = ENXIO;
4492 }
4493 nfsmout_if(error);
4494
4495 /*
4496 * break down the RPC header and check if ok
4497 */
4498 nmrep = req->r_nmrep;
4499 nfsm_chain_get_32(error, &nmrep, reply_status);
4500 nfsmout_if(error);
4501 if (reply_status == RPC_MSGDENIED) {
4502 nfsm_chain_get_32(error, &nmrep, rejected_status);
4503 nfsmout_if(error);
4504 if (rejected_status == RPC_MISMATCH) {
4505 error = ENOTSUP;
4506 goto nfsmout;
4507 }
4508 nfsm_chain_get_32(error, &nmrep, auth_status);
4509 nfsmout_if(error);
4510 switch (auth_status) {
4511 #if CONFIG_NFS_GSS
4512 case RPCSEC_GSS_CREDPROBLEM:
4513 case RPCSEC_GSS_CTXPROBLEM:
4514 /*
4515 * An RPCSEC_GSS cred or context problem.
4516 * We can't use it anymore.
4517 * Restore the args, renew the context
4518 * and set up for a resend.
4519 */
4520 error = nfs_gss_clnt_args_restore(req);
4521 if (error && error != ENEEDAUTH) {
4522 break;
4523 }
4524
4525 if (!error) {
4526 error = nfs_gss_clnt_ctx_renew(req);
4527 if (error) {
4528 break;
4529 }
4530 }
4531 mbuf_freem(mrep);
4532 req->r_nmrep.nmc_mhead = NULL;
4533 req->r_xid = 0; // get a new XID
4534 req->r_flags |= R_RESTART;
4535 goto nfsmout;
4536 #endif /* CONFIG_NFS_GSS */
4537 default:
4538 error = EACCES;
4539 break;
4540 }
4541 goto nfsmout;
4542 }
4543
4544 /* Now check the verifier */
4545 nfsm_chain_get_32(error, &nmrep, verf_type); // verifier flavor
4546 nfsm_chain_get_32(error, &nmrep, verf_len); // verifier length
4547 nfsmout_if(error);
4548
4549 switch (req->r_auth) {
4550 case RPCAUTH_NONE:
4551 case RPCAUTH_SYS:
4552 /* Any AUTH_SYS verifier is ignored */
4553 if (verf_len > 0) {
4554 nfsm_chain_adv(error, &nmrep, nfsm_rndup(verf_len));
4555 }
4556 nfsm_chain_get_32(error, &nmrep, accepted_status);
4557 break;
4558 #if CONFIG_NFS_GSS
4559 case RPCAUTH_KRB5:
4560 case RPCAUTH_KRB5I:
4561 case RPCAUTH_KRB5P:
4562 error = nfs_gss_clnt_verf_get(req, &nmrep,
4563 verf_type, verf_len, &accepted_status);
4564 break;
4565 #endif /* CONFIG_NFS_GSS */
4566 }
4567 nfsmout_if(error);
4568
4569 switch (accepted_status) {
4570 case RPC_SUCCESS:
4571 if (req->r_procnum == NFSPROC_NULL) {
4572 /*
4573 * The NFS null procedure is unique,
4574 * in not returning an NFS status.
4575 */
4576 *status = NFS_OK;
4577 } else {
4578 nfsm_chain_get_32(error, &nmrep, *status);
4579 nfsmout_if(error);
4580 }
4581
4582 if ((nmp->nm_vers != NFS_VER2) && (*status == NFSERR_TRYLATER)) {
4583 /*
4584 * It's a JUKEBOX error - delay and try again
4585 */
4586 int delay, slpflag = (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
4587
4588 mbuf_freem(mrep);
4589 req->r_nmrep.nmc_mhead = NULL;
4590 if ((req->r_delay >= 30) && !(nmp->nm_state & NFSSTA_MOUNTED)) {
4591 /* we're not yet completely mounted and */
4592 /* we can't complete an RPC, so we fail */
4593 OSAddAtomic64(1, &nfsclntstats.rpctimeouts);
4594 nfs_softterm(req);
4595 error = req->r_error;
4596 goto nfsmout;
4597 }
4598 req->r_delay = !req->r_delay ? NFS_TRYLATERDEL : (req->r_delay * 2);
4599 if (req->r_delay > 30) {
4600 req->r_delay = 30;
4601 }
4602 if (nmp->nm_tprintf_initial_delay && (req->r_delay >= nmp->nm_tprintf_initial_delay)) {
4603 if (!(req->r_flags & R_JBTPRINTFMSG)) {
4604 req->r_flags |= R_JBTPRINTFMSG;
4605 lck_mtx_lock(&nmp->nm_lock);
4606 nmp->nm_jbreqs++;
4607 lck_mtx_unlock(&nmp->nm_lock);
4608 }
4609 nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_JUKEBOXTIMEO,
4610 "resource temporarily unavailable (jukebox)", 0);
4611 }
4612 if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (req->r_delay == 30) &&
4613 !(req->r_flags & R_NOINTR)) {
4614 /* for soft mounts, just give up after a short while */
4615 OSAddAtomic64(1, &nfsclntstats.rpctimeouts);
4616 nfs_softterm(req);
4617 error = req->r_error;
4618 goto nfsmout;
4619 }
4620 delay = req->r_delay;
4621 if (req->r_callback.rcb_func) {
4622 struct timeval now;
4623 microuptime(&now);
4624 req->r_resendtime = now.tv_sec + delay;
4625 } else {
4626 do {
4627 if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
4628 goto nfsmout;
4629 }
4630 tsleep(nfs_request_finish, PSOCK | slpflag, "nfs_jukebox_trylater", hz);
4631 slpflag = 0;
4632 } while (--delay > 0);
4633 }
4634 req->r_xid = 0; // get a new XID
4635 req->r_flags |= R_RESTART;
4636 req->r_start = 0;
4637 FSDBG(273, R_XID32(req->r_xid), nmp, req, NFSERR_TRYLATER);
4638 return 0;
4639 }
4640
4641 if (req->r_flags & R_JBTPRINTFMSG) {
4642 req->r_flags &= ~R_JBTPRINTFMSG;
4643 lck_mtx_lock(&nmp->nm_lock);
4644 nmp->nm_jbreqs--;
4645 clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
4646 lck_mtx_unlock(&nmp->nm_lock);
4647 nfs_up(nmp, req->r_thread, clearjbtimeo, "resource available again");
4648 }
4649
4650 #if CONFIG_NFS4
4651 if ((nmp->nm_vers >= NFS_VER4) && (*status == NFSERR_WRONGSEC)) {
4652 /*
4653 * Hmmm... we need to try a different security flavor.
4654 * The first time a request hits this, we will allocate an array
4655 * to track flavors to try. We fill the array with the mount's
4656 * preferred flavors or the server's preferred flavors or just the
4657 * flavors we support.
4658 */
4659 uint32_t srvflavors[NX_MAX_SEC_FLAVORS];
4660 int srvcount, i, j;
4661
4662 /* Call SECINFO to try to get list of flavors from server. */
4663 srvcount = NX_MAX_SEC_FLAVORS;
4664 nfs4_secinfo_rpc(nmp, &req->r_secinfo, req->r_cred, srvflavors, &srvcount);
4665
4666 if (!req->r_wrongsec) {
4667 /* first time... set up flavor array */
4668 req->r_wrongsec = kalloc_data(NX_MAX_SEC_FLAVORS * sizeof(uint32_t), Z_WAITOK);
4669 if (!req->r_wrongsec) {
4670 error = EACCES;
4671 goto nfsmout;
4672 }
4673 i = 0;
4674 if (nmp->nm_sec.count) { /* use the mount's preferred list of flavors */
4675 for (; i < nmp->nm_sec.count; i++) {
4676 req->r_wrongsec[i] = nmp->nm_sec.flavors[i];
4677 }
4678 } else if (srvcount) { /* otherwise use the server's list of flavors */
4679 for (; i < srvcount; i++) {
4680 req->r_wrongsec[i] = srvflavors[i];
4681 }
4682 } else { /* otherwise, just try the flavors we support. */
4683 req->r_wrongsec[i++] = RPCAUTH_KRB5P;
4684 req->r_wrongsec[i++] = RPCAUTH_KRB5I;
4685 req->r_wrongsec[i++] = RPCAUTH_KRB5;
4686 req->r_wrongsec[i++] = RPCAUTH_SYS;
4687 req->r_wrongsec[i++] = RPCAUTH_NONE;
4688 }
4689 for (; i < NX_MAX_SEC_FLAVORS; i++) { /* invalidate any remaining slots */
4690 req->r_wrongsec[i] = RPCAUTH_INVALID;
4691 }
4692 }
4693
4694 /* clear the current flavor from the list */
4695 for (i = 0; i < NX_MAX_SEC_FLAVORS; i++) {
4696 if (req->r_wrongsec[i] == req->r_auth) {
4697 req->r_wrongsec[i] = RPCAUTH_INVALID;
4698 }
4699 }
4700
4701 /* find the next flavor to try */
4702 for (i = 0; i < NX_MAX_SEC_FLAVORS; i++) {
4703 if (req->r_wrongsec[i] != RPCAUTH_INVALID) {
4704 if (!srvcount) { /* no server list, just try it */
4705 break;
4706 }
4707 /* check that it's in the server's list */
4708 for (j = 0; j < srvcount; j++) {
4709 if (req->r_wrongsec[i] == srvflavors[j]) {
4710 break;
4711 }
4712 }
4713 if (j < srvcount) { /* found */
4714 break;
4715 }
4716 /* not found in server list */
4717 req->r_wrongsec[i] = RPCAUTH_INVALID;
4718 }
4719 }
4720 if (i == NX_MAX_SEC_FLAVORS) {
4721 /* nothing left to try! */
4722 error = EACCES;
4723 goto nfsmout;
4724 }
4725
4726 /* retry with the next auth flavor */
4727 req->r_auth = req->r_wrongsec[i];
4728 req->r_xid = 0; // get a new XID
4729 req->r_flags |= R_RESTART;
4730 req->r_start = 0;
4731 FSDBG(273, R_XID32(req->r_xid), nmp, req, NFSERR_WRONGSEC);
4732 return 0;
4733 }
4734 if ((nmp->nm_vers >= NFS_VER4) && req->r_wrongsec) {
4735 /*
4736 * We renegotiated security for this request; so update the
4737 * default security flavor for the associated node.
4738 */
4739 if (req->r_np) {
4740 req->r_np->n_auth = req->r_auth;
4741 }
4742 }
4743 #endif /* CONFIG_NFS4 */
4744 if (*status == NFS_OK) {
4745 /*
4746 * Successful NFS request
4747 */
4748 *nmrepp = nmrep;
4749 req->r_nmrep.nmc_mhead = NULL;
4750 break;
4751 }
4752 /* Got an NFS error of some kind */
4753
4754 /*
4755 * If the File Handle was stale, invalidate the
4756 * lookup cache, just in case.
4757 */
4758 if ((*status == ESTALE) && req->r_np) {
4759 cache_purge(NFSTOV(req->r_np));
4760 /* if monitored, also send delete event */
4761 if (vnode_ismonitored(NFSTOV(req->r_np))) {
4762 nfs_vnode_notify(req->r_np, (VNODE_EVENT_ATTRIB | VNODE_EVENT_DELETE));
4763 }
4764 }
4765 if (nmp->nm_vers == NFS_VER2) {
4766 mbuf_freem(mrep);
4767 } else {
4768 *nmrepp = nmrep;
4769 }
4770 req->r_nmrep.nmc_mhead = NULL;
4771 error = 0;
4772 break;
4773 case RPC_PROGUNAVAIL:
4774 error = EPROGUNAVAIL;
4775 break;
4776 case RPC_PROGMISMATCH:
4777 error = ERPCMISMATCH;
4778 break;
4779 case RPC_PROCUNAVAIL:
4780 error = EPROCUNAVAIL;
4781 break;
4782 case RPC_GARBAGE:
4783 error = EBADRPC;
4784 break;
4785 case RPC_SYSTEM_ERR:
4786 default:
4787 error = EIO;
4788 break;
4789 }
4790 nfsmout:
4791 if (req->r_flags & R_JBTPRINTFMSG) {
4792 req->r_flags &= ~R_JBTPRINTFMSG;
4793 lck_mtx_lock(&nmp->nm_lock);
4794 nmp->nm_jbreqs--;
4795 clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
4796 lck_mtx_unlock(&nmp->nm_lock);
4797 if (clearjbtimeo) {
4798 nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
4799 }
4800 }
4801 FSDBG(273, R_XID32(req->r_xid), nmp, req,
4802 (!error && (*status == NFS_OK)) ? 0xf0f0f0f0 : error);
4803 return error;
4804 }
4805
4806 /*
4807 * NFS request using a GSS/Kerberos security flavor?
4808 */
4809 int
nfs_request_using_gss(struct nfsreq * req)4810 nfs_request_using_gss(struct nfsreq *req)
4811 {
4812 if (!req->r_gss_ctx) {
4813 return 0;
4814 }
4815 switch (req->r_auth) {
4816 case RPCAUTH_KRB5:
4817 case RPCAUTH_KRB5I:
4818 case RPCAUTH_KRB5P:
4819 return 1;
4820 }
4821 return 0;
4822 }
4823
4824 /*
4825 * Perform an NFS request synchronously.
4826 */
4827
4828 int
nfs_request(nfsnode_t np,mount_t mp,struct nfsm_chain * nmrest,int procnum,vfs_context_t ctx,struct nfsreq_secinfo_args * si,struct nfsm_chain * nmrepp,u_int64_t * xidp,int * status)4829 nfs_request(
4830 nfsnode_t np,
4831 mount_t mp, /* used only if !np */
4832 struct nfsm_chain *nmrest,
4833 int procnum,
4834 vfs_context_t ctx,
4835 struct nfsreq_secinfo_args *si,
4836 struct nfsm_chain *nmrepp,
4837 u_int64_t *xidp,
4838 int *status)
4839 {
4840 return nfs_request2(np, mp, nmrest, procnum,
4841 vfs_context_thread(ctx), vfs_context_ucred(ctx),
4842 si, 0, nmrepp, xidp, status);
4843 }
4844
/*
 * Perform an NFS request synchronously, with explicit thread/credential
 * and request option flags.
 *
 * Allocates an nfsreq from nfs_req_zone, then repeatedly builds the RPC
 * header, sends the request, waits for the reply, and finishes it, for as
 * long as nfs_request_finish() leaves R_RESTART set on the request.
 *
 * np      - nfsnode the request is for (may be NULL)
 * mp      - mount point, used only if np is NULL
 * nmrest  - mbuf chain holding the request arguments
 * procnum - NFS procedure number
 * si      - optional security info copied into the request
 * flags   - option flags (masked by R_OPTMASK | R_SOFT)
 * nmrepp  - returns the reply mbuf chain
 * xidp    - if non-NULL, returns the request XID
 * status  - returns the NFS status from the reply
 */
int
nfs_request2(
	nfsnode_t np,
	mount_t mp,     /* used only if !np */
	struct nfsm_chain *nmrest,
	int procnum,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_secinfo_args *si,
	int flags,
	struct nfsm_chain *nmrepp,
	u_int64_t *xidp,
	int *status)
{
	struct nfsreq *req;
	int error;

	req = zalloc_flags(nfs_req_zone, Z_WAITOK);
	if ((error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, &req))) {
		goto out_free;
	}
	req->r_flags |= (flags & (R_OPTMASK | R_SOFT));
	if (si) {
		req->r_secinfo = *si;
	}

	FSDBG_TOP(273, R_XID32(req->r_xid), np, procnum, 0);
	do {
		/* reset per-attempt state before (re)sending */
		req->r_error = 0;
		req->r_flags &= ~R_RESTART;
		if ((error = nfs_request_add_header(req))) {
			break;
		}
		/* XID may change on each retransmission attempt */
		if (xidp) {
			*xidp = req->r_xid;
		}
		if ((error = nfs_request_send(req, 1))) {
			break;
		}
		nfs_request_wait(req);
		if ((error = nfs_request_finish(req, nmrepp, status))) {
			break;
		}
	} while (req->r_flags & R_RESTART);

	FSDBG_BOT(273, R_XID32(req->r_xid), np, procnum, error);
	nfs_request_rele(req);
out_free:
	NFS_ZFREE(nfs_req_zone, req);
	return error;
}
4896
4897
4898 #if CONFIG_NFS_GSS
4899 /*
4900 * Set up a new null proc request to exchange GSS context tokens with the
4901 * server. Associate the context that we are setting up with the request that we
4902 * are sending.
4903 */
4904
4905 int
nfs_request_gss(mount_t mp,struct nfsm_chain * nmrest,thread_t thd,kauth_cred_t cred,int flags,struct nfs_gss_clnt_ctx * cp,struct nfsm_chain * nmrepp,int * status)4906 nfs_request_gss(
4907 mount_t mp,
4908 struct nfsm_chain *nmrest,
4909 thread_t thd,
4910 kauth_cred_t cred,
4911 int flags,
4912 struct nfs_gss_clnt_ctx *cp, /* Set to gss context to renew or setup */
4913 struct nfsm_chain *nmrepp,
4914 int *status)
4915 {
4916 struct nfsreq *req;
4917 int error, wait = 1;
4918
4919 req = zalloc_flags(nfs_req_zone, Z_WAITOK);
4920 if ((error = nfs_request_create(NULL, mp, nmrest, NFSPROC_NULL, thd, cred, &req))) {
4921 goto out_free;
4922 }
4923 req->r_flags |= (flags & R_OPTMASK);
4924
4925 if (cp == NULL) {
4926 printf("nfs_request_gss request has no context\n");
4927 nfs_request_rele(req);
4928 error = NFSERR_EAUTH;
4929 goto out_free;
4930 }
4931 nfs_gss_clnt_ctx_ref(req, cp);
4932
4933 /*
4934 * Don't wait for a reply to a context destroy advisory
4935 * to avoid hanging on a dead server.
4936 */
4937 if (cp->gss_clnt_proc == RPCSEC_GSS_DESTROY) {
4938 wait = 0;
4939 }
4940
4941 FSDBG_TOP(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, 0);
4942 do {
4943 req->r_error = 0;
4944 req->r_flags &= ~R_RESTART;
4945 if ((error = nfs_request_add_header(req))) {
4946 break;
4947 }
4948
4949 if ((error = nfs_request_send(req, wait))) {
4950 break;
4951 }
4952 if (!wait) {
4953 break;
4954 }
4955
4956 nfs_request_wait(req);
4957 if ((error = nfs_request_finish(req, nmrepp, status))) {
4958 break;
4959 }
4960 } while (req->r_flags & R_RESTART);
4961
4962 FSDBG_BOT(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, error);
4963
4964 nfs_gss_clnt_ctx_unref(req);
4965 nfs_request_rele(req);
4966 out_free:
4967 NFS_ZFREE(nfs_req_zone, req);
4968 return error;
4969 }
4970 #endif /* CONFIG_NFS_GSS */
4971
4972 /*
4973 * Create and start an asynchronous NFS request.
4974 */
4975 int
nfs_request_async(nfsnode_t np,mount_t mp,struct nfsm_chain * nmrest,int procnum,thread_t thd,kauth_cred_t cred,struct nfsreq_secinfo_args * si,int flags,struct nfsreq_cbinfo * cb,struct nfsreq ** reqp)4976 nfs_request_async(
4977 nfsnode_t np,
4978 mount_t mp, /* used only if !np */
4979 struct nfsm_chain *nmrest,
4980 int procnum,
4981 thread_t thd,
4982 kauth_cred_t cred,
4983 struct nfsreq_secinfo_args *si,
4984 int flags,
4985 struct nfsreq_cbinfo *cb,
4986 struct nfsreq **reqp)
4987 {
4988 struct nfsreq *req;
4989 struct nfsmount *nmp;
4990 int error, sent;
4991
4992 error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, reqp);
4993 req = *reqp;
4994 FSDBG(274, (req ? R_XID32(req->r_xid) : 0), np, procnum, error);
4995 if (error) {
4996 return error;
4997 }
4998 req->r_flags |= (flags & R_OPTMASK);
4999 req->r_flags |= R_ASYNC;
5000 if (si) {
5001 req->r_secinfo = *si;
5002 }
5003 if (cb) {
5004 req->r_callback = *cb;
5005 }
5006 error = nfs_request_add_header(req);
5007 if (!error) {
5008 req->r_flags |= R_WAITSENT;
5009 if (req->r_callback.rcb_func) {
5010 nfs_request_ref(req, 0);
5011 }
5012 error = nfs_request_send(req, 1);
5013 lck_mtx_lock(&req->r_mtx);
5014 if (!error && !(req->r_flags & R_SENT) && req->r_callback.rcb_func) {
5015 /* make sure to wait until this async I/O request gets sent */
5016 int slpflag = (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
5017 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
5018 while (!(req->r_flags & R_SENT)) {
5019 nmp = req->r_nmp;
5020 if ((req->r_flags & R_RESENDQ) && !nfs_mount_gone(nmp)) {
5021 lck_mtx_lock(&nmp->nm_lock);
5022 if ((req->r_flags & R_RESENDQ) && (nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
5023 /*
5024 * It's not going to get off the resend queue if we're in recovery.
5025 * So, just take it off ourselves. We could be holding mount state
5026 * busy and thus holding up the start of recovery.
5027 */
5028 TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
5029 req->r_flags &= ~R_RESENDQ;
5030 req->r_rchain.tqe_next = NFSREQNOLIST;
5031 lck_mtx_unlock(&nmp->nm_lock);
5032 req->r_flags |= R_SENDING;
5033 lck_mtx_unlock(&req->r_mtx);
5034 error = nfs_send(req, 1);
5035 /* Remove the R_RESENDQ reference */
5036 nfs_request_rele(req);
5037 lck_mtx_lock(&req->r_mtx);
5038 if (error) {
5039 break;
5040 }
5041 continue;
5042 }
5043 lck_mtx_unlock(&nmp->nm_lock);
5044 }
5045 if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
5046 break;
5047 }
5048 msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitsent", &ts);
5049 slpflag = 0;
5050 }
5051 }
5052 sent = req->r_flags & R_SENT;
5053 lck_mtx_unlock(&req->r_mtx);
5054 if (error && req->r_callback.rcb_func && !sent) {
5055 nfs_request_rele(req);
5056 }
5057 }
5058 FSDBG(274, R_XID32(req->r_xid), np, procnum, error);
5059 if (error || req->r_callback.rcb_func) {
5060 nfs_request_rele(req);
5061 }
5062
5063 return error;
5064 }
5065
5066 /*
5067 * Wait for and finish an asynchronous NFS request.
5068 */
5069 int
nfs_request_async_finish(struct nfsreq * req,struct nfsm_chain * nmrepp,u_int64_t * xidp,int * status)5070 nfs_request_async_finish(
5071 struct nfsreq *req,
5072 struct nfsm_chain *nmrepp,
5073 u_int64_t *xidp,
5074 int *status)
5075 {
5076 int error = 0, asyncio = req->r_callback.rcb_func ? 1 : 0;
5077 struct nfsmount *nmp;
5078
5079 lck_mtx_lock(&req->r_mtx);
5080 if (!asyncio) {
5081 req->r_flags |= R_ASYNCWAIT;
5082 }
5083 while (req->r_flags & R_RESENDQ) { /* wait until the request is off the resend queue */
5084 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
5085
5086 if ((nmp = req->r_nmp)) {
5087 lck_mtx_lock(&nmp->nm_lock);
5088 if ((req->r_flags & R_RESENDQ) && (nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
5089 /*
5090 * It's not going to get off the resend queue if we're in recovery.
5091 * So, just take it off ourselves. We could be holding mount state
5092 * busy and thus holding up the start of recovery.
5093 */
5094 TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
5095 req->r_flags &= ~R_RESENDQ;
5096 req->r_rchain.tqe_next = NFSREQNOLIST;
5097 /* Remove the R_RESENDQ reference */
5098 assert(req->r_refs > 0);
5099 req->r_refs--;
5100 lck_mtx_unlock(&nmp->nm_lock);
5101 break;
5102 }
5103 lck_mtx_unlock(&nmp->nm_lock);
5104 }
5105 if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
5106 break;
5107 }
5108 msleep(req, &req->r_mtx, PZERO - 1, "nfsresendqwait", &ts);
5109 }
5110 lck_mtx_unlock(&req->r_mtx);
5111
5112 if (!error) {
5113 nfs_request_wait(req);
5114 error = nfs_request_finish(req, nmrepp, status);
5115 }
5116
5117 while (!error && (req->r_flags & R_RESTART)) {
5118 if (asyncio) {
5119 assert(req->r_achain.tqe_next == NFSREQNOLIST);
5120 lck_mtx_lock(&req->r_mtx);
5121 req->r_flags &= ~R_IOD;
5122 if (req->r_resendtime) { /* send later */
5123 nfs_asyncio_resend(req);
5124 lck_mtx_unlock(&req->r_mtx);
5125 return EINPROGRESS;
5126 }
5127 lck_mtx_unlock(&req->r_mtx);
5128 }
5129 req->r_error = 0;
5130 req->r_flags &= ~R_RESTART;
5131 if ((error = nfs_request_add_header(req))) {
5132 break;
5133 }
5134 if ((error = nfs_request_send(req, !asyncio))) {
5135 break;
5136 }
5137 if (asyncio) {
5138 return EINPROGRESS;
5139 }
5140 nfs_request_wait(req);
5141 if ((error = nfs_request_finish(req, nmrepp, status))) {
5142 break;
5143 }
5144 }
5145 if (xidp) {
5146 *xidp = req->r_xid;
5147 }
5148
5149 FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, error);
5150 nfs_request_rele(req);
5151 return error;
5152 }
5153
5154 /*
5155 * Cancel a pending asynchronous NFS request.
5156 */
5157 void
nfs_request_async_cancel(struct nfsreq * req)5158 nfs_request_async_cancel(struct nfsreq *req)
5159 {
5160 FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, 0xD1ED1E);
5161 nfs_request_rele(req);
5162 }
5163
5164 /*
5165 * Flag a request as being terminated.
5166 */
5167 void
nfs_softterm(struct nfsreq * req)5168 nfs_softterm(struct nfsreq *req)
5169 {
5170 struct nfsmount *nmp = req->r_nmp;
5171 req->r_flags |= R_SOFTTERM;
5172 req->r_error = ETIMEDOUT;
5173 if (!(req->r_flags & R_CWND) || nfs_mount_gone(nmp)) {
5174 return;
5175 }
5176 /* update congestion window */
5177 req->r_flags &= ~R_CWND;
5178 lck_mtx_lock(&nmp->nm_lock);
5179 FSDBG(532, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
5180 nmp->nm_sent -= NFS_CWNDSCALE;
5181 if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
5182 /* congestion window is open, poke the cwnd queue */
5183 struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
5184 TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
5185 req2->r_cchain.tqe_next = NFSREQNOLIST;
5186 wakeup(req2);
5187 }
5188 lck_mtx_unlock(&nmp->nm_lock);
5189 }
5190
5191 /*
5192 * Ensure req isn't in use by the timer, then dequeue it.
5193 */
5194 void
nfs_reqdequeue(struct nfsreq * req)5195 nfs_reqdequeue(struct nfsreq *req)
5196 {
5197 lck_mtx_lock(&nfs_request_mutex);
5198 while (req->r_lflags & RL_BUSY) {
5199 req->r_lflags |= RL_WAITING;
5200 msleep(&req->r_lflags, &nfs_request_mutex, PSOCK, "reqdeq", NULL);
5201 }
5202 if (req->r_lflags & RL_QUEUED) {
5203 TAILQ_REMOVE(&nfs_reqq, req, r_chain);
5204 req->r_lflags &= ~RL_QUEUED;
5205 }
5206 lck_mtx_unlock(&nfs_request_mutex);
5207 }
5208
5209 /*
5210 * Busy (lock) a nfsreq, used by the nfs timer to make sure it's not
5211 * free()'d out from under it.
5212 */
5213 void
nfs_reqbusy(struct nfsreq * req)5214 nfs_reqbusy(struct nfsreq *req)
5215 {
5216 if (req->r_lflags & RL_BUSY) {
5217 panic("req locked");
5218 }
5219 req->r_lflags |= RL_BUSY;
5220 }
5221
5222 /*
5223 * Unbusy the nfsreq passed in, return the next nfsreq in the chain busied.
5224 */
5225 struct nfsreq *
nfs_reqnext(struct nfsreq * req)5226 nfs_reqnext(struct nfsreq *req)
5227 {
5228 struct nfsreq * nextreq;
5229
5230 if (req == NULL) {
5231 return NULL;
5232 }
5233 /*
5234 * We need to get and busy the next req before signalling the
5235 * current one, otherwise wakeup() may block us and we'll race to
5236 * grab the next req.
5237 */
5238 nextreq = TAILQ_NEXT(req, r_chain);
5239 if (nextreq != NULL) {
5240 nfs_reqbusy(nextreq);
5241 }
5242 /* unbusy and signal. */
5243 req->r_lflags &= ~RL_BUSY;
5244 if (req->r_lflags & RL_WAITING) {
5245 req->r_lflags &= ~RL_WAITING;
5246 wakeup(&req->r_lflags);
5247 }
5248 return nextreq;
5249 }
5250
5251 /*
5252 * NFS request queue timer routine
5253 *
5254 * Scan the NFS request queue for any requests that have timed out.
5255 *
5256 * Alert the system of unresponsive servers.
5257 * Mark expired requests on soft mounts as terminated.
5258 * For UDP, mark/signal requests for retransmission.
5259 */
5260 void
nfs_request_timer(__unused void * param0,__unused void * param1)5261 nfs_request_timer(__unused void *param0, __unused void *param1)
5262 {
5263 struct nfsreq *req;
5264 struct nfsmount *nmp;
5265 int timeo, maxtime, finish_asyncio, error;
5266 struct timeval now;
5267 TAILQ_HEAD(nfs_mount_pokeq, nfsmount) nfs_mount_poke_queue;
5268 TAILQ_INIT(&nfs_mount_poke_queue);
5269
5270 restart:
5271 lck_mtx_lock(&nfs_request_mutex);
5272 req = TAILQ_FIRST(&nfs_reqq);
5273 if (req == NULL) { /* no requests - turn timer off */
5274 nfs_request_timer_on = 0;
5275 lck_mtx_unlock(&nfs_request_mutex);
5276 return;
5277 }
5278
5279 nfs_reqbusy(req);
5280
5281 microuptime(&now);
5282 for (; req != NULL; req = nfs_reqnext(req)) {
5283 nmp = req->r_nmp;
5284 if (nmp == NULL) {
5285 NFS_SOCK_DBG("Found a request with out a mount!\n");
5286 continue;
5287 }
5288 if (req->r_error || req->r_nmrep.nmc_mhead) {
5289 continue;
5290 }
5291 if ((error = nfs_sigintr(nmp, req, req->r_thread, 0))) {
5292 if (req->r_callback.rcb_func != NULL) {
5293 /* async I/O RPC needs to be finished */
5294 lck_mtx_lock(&req->r_mtx);
5295 req->r_error = error;
5296 finish_asyncio = !(req->r_flags & R_WAITSENT);
5297 wakeup(req);
5298 lck_mtx_unlock(&req->r_mtx);
5299 if (finish_asyncio) {
5300 nfs_asyncio_finish(req);
5301 }
5302 }
5303 continue;
5304 }
5305
5306 lck_mtx_lock(&req->r_mtx);
5307
5308 if (nmp->nm_tprintf_initial_delay &&
5309 ((req->r_rexmit > 2) || (req->r_flags & R_RESENDERR)) &&
5310 ((req->r_lastmsg + nmp->nm_tprintf_delay) < now.tv_sec)) {
5311 req->r_lastmsg = now.tv_sec;
5312 nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
5313 "not responding", 1);
5314 req->r_flags |= R_TPRINTFMSG;
5315 lck_mtx_lock(&nmp->nm_lock);
5316 if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
5317 lck_mtx_unlock(&nmp->nm_lock);
5318 /* we're not yet completely mounted and */
5319 /* we can't complete an RPC, so we fail */
5320 OSAddAtomic64(1, &nfsclntstats.rpctimeouts);
5321 nfs_softterm(req);
5322 finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT));
5323 wakeup(req);
5324 lck_mtx_unlock(&req->r_mtx);
5325 if (finish_asyncio) {
5326 nfs_asyncio_finish(req);
5327 }
5328 continue;
5329 }
5330 lck_mtx_unlock(&nmp->nm_lock);
5331 }
5332
5333 /*
5334 * Put a reasonable limit on the maximum timeout,
5335 * and reduce that limit when soft mounts get timeouts or are in reconnect.
5336 */
5337 if (!(NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && !nfs_can_squish(nmp)) {
5338 maxtime = NFS_MAXTIMEO;
5339 } else if ((req->r_flags & (R_SETUP | R_RECOVER)) ||
5340 ((nmp->nm_reconnect_start <= 0) || ((now.tv_sec - nmp->nm_reconnect_start) < 8))) {
5341 maxtime = (NFS_MAXTIMEO / (nmp->nm_timeouts + 1)) / 2;
5342 } else {
5343 maxtime = NFS_MINTIMEO / 4;
5344 }
5345
5346 /*
5347 * Check for request timeout.
5348 */
5349 if (req->r_rtt >= 0) {
5350 req->r_rtt++;
5351 lck_mtx_lock(&nmp->nm_lock);
5352 if (req->r_flags & R_RESENDERR) {
5353 /* with resend errors, retry every few seconds */
5354 timeo = 4 * hz;
5355 } else {
5356 if (req->r_procnum == NFSPROC_NULL && req->r_gss_ctx != NULL) {
5357 timeo = NFS_MINIDEMTIMEO; // gss context setup
5358 } else if (NMFLAG(nmp, DUMBTIMER)) {
5359 timeo = nmp->nm_timeo;
5360 } else {
5361 timeo = NFS_RTO(nmp, proct[req->r_procnum]);
5362 }
5363
5364 /* ensure 62.5 ms floor */
5365 while (16 * timeo < hz) {
5366 timeo *= 2;
5367 }
5368 if (nmp->nm_timeouts > 0) {
5369 timeo *= nfs_backoff[nmp->nm_timeouts - 1];
5370 }
5371 }
5372 /* limit timeout to max */
5373 if (timeo > maxtime) {
5374 timeo = maxtime;
5375 }
5376 if (req->r_rtt <= timeo) {
5377 NFS_SOCK_DBG("nfs timeout: req time %d and timeo is %d continue\n", req->r_rtt, timeo);
5378 lck_mtx_unlock(&nmp->nm_lock);
5379 lck_mtx_unlock(&req->r_mtx);
5380 continue;
5381 }
5382 /* The request has timed out */
5383 NFS_SOCK_DBG("nfs timeout: proc %d %d xid %llx rtt %d to %d # %d, t %ld/%d\n",
5384 req->r_procnum, proct[req->r_procnum],
5385 req->r_xid, req->r_rtt, timeo, nmp->nm_timeouts,
5386 (now.tv_sec - req->r_start) * NFS_HZ, maxtime);
5387 if (nmp->nm_timeouts < 8) {
5388 nmp->nm_timeouts++;
5389 }
5390 if (nfs_mount_check_dead_timeout(nmp)) {
5391 /* Unbusy this request */
5392 req->r_lflags &= ~RL_BUSY;
5393 if (req->r_lflags & RL_WAITING) {
5394 req->r_lflags &= ~RL_WAITING;
5395 wakeup(&req->r_lflags);
5396 }
5397 lck_mtx_unlock(&req->r_mtx);
5398
5399 /* No need to poke this mount */
5400 if (nmp->nm_sockflags & NMSOCK_POKE) {
5401 nmp->nm_sockflags &= ~NMSOCK_POKE;
5402 TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
5403 }
5404 /* Release our lock state, so we can become a zombie */
5405 lck_mtx_unlock(&nfs_request_mutex);
5406
5407 /*
5408 * Note nfs_mount_make zombie(nmp) must be
5409 * called with nm_lock held. After doing some
5410 * work we release nm_lock in
5411 * nfs_make_mount_zombie with out acquiring any
5412 * other locks. (Later, in nfs_mount_zombie we
5413 * will acquire &nfs_request_mutex, r_mtx,
5414 * nm_lock in that order). So we should not be
5415 * introducing deadlock here. We take a reference
5416 * on the mount so that its still there when we
5417 * release the lock.
5418 */
5419 nmp->nm_ref++;
5420 nfs_mount_make_zombie(nmp);
5421 lck_mtx_unlock(&nmp->nm_lock);
5422 nfs_mount_rele(nmp);
5423
5424 /*
5425 * All the request for this mount have now been
5426 * removed from the request queue. Restart to
5427 * process the remaining mounts
5428 */
5429 goto restart;
5430 }
5431
5432 /* if it's been a few seconds, try poking the socket */
5433 if ((nmp->nm_sotype == SOCK_STREAM) &&
5434 ((now.tv_sec - req->r_start) >= 3) &&
5435 !(nmp->nm_sockflags & (NMSOCK_POKE | NMSOCK_UNMOUNT)) &&
5436 (nmp->nm_sockflags & NMSOCK_READY)) {
5437 nmp->nm_sockflags |= NMSOCK_POKE;
5438 /*
5439 * We take a ref on the mount so that we know the mount will still be there
5440 * when we process the nfs_mount_poke_queue. An unmount request will block
5441 * in nfs_mount_drain_and_cleanup until after the poke is finished. We release
5442 * the reference after calling nfs_sock_poke below;
5443 */
5444 nmp->nm_ref++;
5445 TAILQ_INSERT_TAIL(&nfs_mount_poke_queue, nmp, nm_pokeq);
5446 }
5447 lck_mtx_unlock(&nmp->nm_lock);
5448 }
5449
5450 /* For soft mounts (& SETUPs/RECOVERs), check for too many retransmits/timeout. */
5451 if ((NMFLAG(nmp, SOFT) || (req->r_flags & (R_SETUP | R_RECOVER | R_SOFT))) &&
5452 ((req->r_rexmit >= req->r_retry) || /* too many */
5453 ((now.tv_sec - req->r_start) * NFS_HZ > maxtime))) { /* too long */
5454 OSAddAtomic64(1, &nfsclntstats.rpctimeouts);
5455 lck_mtx_lock(&nmp->nm_lock);
5456 if (!(nmp->nm_state & NFSSTA_TIMEO)) {
5457 lck_mtx_unlock(&nmp->nm_lock);
5458 /* make sure we note the unresponsive server */
5459 /* (maxtime may be less than tprintf delay) */
5460 nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
5461 "not responding", 1);
5462 req->r_lastmsg = now.tv_sec;
5463 req->r_flags |= R_TPRINTFMSG;
5464 } else {
5465 lck_mtx_unlock(&nmp->nm_lock);
5466 }
5467 if (req->r_flags & R_NOINTR) {
5468 /* don't terminate nointr requests on timeout */
5469 lck_mtx_unlock(&req->r_mtx);
5470 continue;
5471 }
5472 NFS_SOCK_DBG("nfs timer TERMINATE: p %d x 0x%llx f 0x%x rtt %d t %ld\n",
5473 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt,
5474 now.tv_sec - req->r_start);
5475 nfs_softterm(req);
5476 finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT));
5477 wakeup(req);
5478 lck_mtx_unlock(&req->r_mtx);
5479 if (finish_asyncio) {
5480 nfs_asyncio_finish(req);
5481 }
5482 continue;
5483 }
5484
5485 /* for TCP, only resend if explicitly requested */
5486 if ((nmp->nm_sotype == SOCK_STREAM) && !(req->r_flags & R_MUSTRESEND)) {
5487 if (++req->r_rexmit > NFS_MAXREXMIT) {
5488 req->r_rexmit = NFS_MAXREXMIT;
5489 }
5490 req->r_rtt = 0;
5491 lck_mtx_unlock(&req->r_mtx);
5492 continue;
5493 }
5494
5495 /*
5496 * The request needs to be (re)sent. Kick the requester to resend it.
5497 * (unless it's already marked as needing a resend)
5498 */
5499 if ((req->r_flags & R_MUSTRESEND) && (req->r_rtt == -1)) {
5500 lck_mtx_unlock(&req->r_mtx);
5501 continue;
5502 }
5503 NFS_SOCK_DBG("nfs timer mark resend: p %d x 0x%llx f 0x%x rtt %d\n",
5504 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
5505 req->r_flags |= R_MUSTRESEND;
5506 req->r_rtt = -1;
5507 wakeup(req);
5508 if ((req->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
5509 nfs_asyncio_resend(req);
5510 }
5511 lck_mtx_unlock(&req->r_mtx);
5512 }
5513
5514 lck_mtx_unlock(&nfs_request_mutex);
5515
5516 /* poke any sockets */
5517 while ((nmp = TAILQ_FIRST(&nfs_mount_poke_queue))) {
5518 TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
5519 nfs_sock_poke(nmp);
5520 nfs_mount_rele(nmp);
5521 }
5522
5523 nfs_interval_timer_start(nfs_request_timer_call, NFS_REQUESTDELAY);
5524 }
5525
5526 /*
5527 * check a thread's proc for the "noremotehang" flag.
5528 */
5529 int
nfs_noremotehang(thread_t thd)5530 nfs_noremotehang(thread_t thd)
5531 {
5532 proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL;
5533 return p && proc_noremotehang(p);
5534 }
5535
5536 /*
5537 * Test for a termination condition pending on the process.
5538 * This is used to determine if we need to bail on a mount.
5539 * ETIMEDOUT is returned if there has been a soft timeout.
5540 * EINTR is returned if there is a signal pending that is not being ignored
5541 * ESHUTDOWN is return if the system is in shutdown.
5542 * and the mount is interruptable, or if we are a thread that is in the process
5543 * of cancellation (also SIGKILL posted).
5544 */
5545 int
nfs_sigintr(struct nfsmount * nmp,struct nfsreq * req,thread_t thd,int nmplocked)5546 nfs_sigintr(struct nfsmount *nmp, struct nfsreq *req, thread_t thd, int nmplocked)
5547 {
5548 proc_t p;
5549 int error = 0;
5550
5551 if (!nmp) {
5552 return ENXIO;
5553 }
5554
5555 /*
5556 * If the mount is hung and we've requested shutdown, then bail.
5557 * If reboot_kernel was called, no need to wait for mount to become unresponsive
5558 * because network state may be unknown.
5559 */
5560 if ((IOPMRootDomainGetWillShutdown() && (nmp->nm_state & NFSSTA_TIMEO)) ||
5561 get_system_inshutdown()) {
5562 NFS_SOCK_DBG("Shutdown in progress\n");
5563 return ESHUTDOWN;
5564 }
5565
5566 if (req && (req->r_flags & R_SOFTTERM)) {
5567 return ETIMEDOUT; /* request has been terminated. */
5568 }
5569 if (req && (req->r_flags & R_NOINTR)) {
5570 thd = NULL; /* don't check for signal on R_NOINTR */
5571 }
5572 if (!nmplocked) {
5573 lck_mtx_lock(&nmp->nm_lock);
5574 }
5575 if (nmp->nm_state & NFSSTA_FORCE) {
5576 /* If a force unmount is in progress then fail. */
5577 error = EIO;
5578 } else if (vfs_isforce(nmp->nm_mountp)) {
5579 /* Someone is unmounting us, go soft and mark it. */
5580 NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_SOFT);
5581 nmp->nm_state |= NFSSTA_FORCE;
5582 }
5583
5584 /* Check if the mount is marked dead. */
5585 if (!error && (nmp->nm_state & NFSSTA_DEAD)) {
5586 error = ENXIO;
5587 }
5588
5589 /*
5590 * If the mount is hung and we've requested not to hang
5591 * on remote filesystems, then bail now.
5592 */
5593 if (current_proc() != kernproc &&
5594 !error && (nmp->nm_state & NFSSTA_TIMEO) && nfs_noremotehang(thd)) {
5595 error = EIO;
5596 }
5597
5598 if (!nmplocked) {
5599 lck_mtx_unlock(&nmp->nm_lock);
5600 }
5601 if (error) {
5602 return error;
5603 }
5604
5605 /* may not have a thread for async I/O */
5606 if (thd == NULL || current_proc() == kernproc) {
5607 return 0;
5608 }
5609
5610 /*
5611 * Check if the process is aborted, but don't interrupt if we
5612 * were killed by a signal and this is the exiting thread which
5613 * is attempting to dump core.
5614 */
5615 if (proc_isabortedsignal(current_proc())) {
5616 return EINTR;
5617 }
5618
5619 /* mask off thread and process blocked signals. */
5620 if (NMFLAG(nmp, INTR) && ((p = get_bsdthreadtask_info(thd))) &&
5621 proc_pendingsignals(p, NFSINT_SIGMASK)) {
5622 return EINTR;
5623 }
5624 return 0;
5625 }
5626
5627 /*
5628 * Lock a socket against others.
5629 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
5630 * and also to avoid race conditions between the processes with nfs requests
5631 * in progress when a reconnect is necessary.
5632 */
5633 int
nfs_sndlock(struct nfsreq * req)5634 nfs_sndlock(struct nfsreq *req)
5635 {
5636 struct nfsmount *nmp = req->r_nmp;
5637 int *statep;
5638 int error = 0, slpflag = 0;
5639 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };
5640
5641 if (nfs_mount_gone(nmp)) {
5642 return ENXIO;
5643 }
5644
5645 lck_mtx_lock(&nmp->nm_lock);
5646 statep = &nmp->nm_state;
5647
5648 if (NMFLAG(nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) {
5649 slpflag = PCATCH;
5650 }
5651 while (*statep & NFSSTA_SNDLOCK) {
5652 if ((error = nfs_sigintr(nmp, req, req->r_thread, 1))) {
5653 break;
5654 }
5655 *statep |= NFSSTA_WANTSND;
5656 if (nfs_noremotehang(req->r_thread)) {
5657 ts.tv_sec = 1;
5658 }
5659 msleep(statep, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsndlck", &ts);
5660 if (slpflag == PCATCH) {
5661 slpflag = 0;
5662 ts.tv_sec = 2;
5663 }
5664 }
5665 if (!error) {
5666 *statep |= NFSSTA_SNDLOCK;
5667 }
5668 lck_mtx_unlock(&nmp->nm_lock);
5669 return error;
5670 }
5671
5672 /*
5673 * Unlock the stream socket for others.
5674 */
5675 void
nfs_sndunlock(struct nfsreq * req)5676 nfs_sndunlock(struct nfsreq *req)
5677 {
5678 struct nfsmount *nmp = req->r_nmp;
5679 int *statep, wake = 0;
5680
5681 if (!nmp) {
5682 return;
5683 }
5684 lck_mtx_lock(&nmp->nm_lock);
5685 statep = &nmp->nm_state;
5686 if ((*statep & NFSSTA_SNDLOCK) == 0) {
5687 panic("nfs sndunlock");
5688 }
5689 *statep &= ~(NFSSTA_SNDLOCK | NFSSTA_SENDING);
5690 if (*statep & NFSSTA_WANTSND) {
5691 *statep &= ~NFSSTA_WANTSND;
5692 wake = 1;
5693 }
5694 lck_mtx_unlock(&nmp->nm_lock);
5695 if (wake) {
5696 wakeup(statep);
5697 }
5698 }
5699
/*
 * Send an auxiliary RPC request (e.g. a portmap/rpcbind query) outside
 * the normal NFS request path and parse the RPC reply header.
 *
 * If so is NULL, a temporary socket is created for saddr/sotype
 * (optionally bound to a reserved port when bindresv is set) and torn
 * down before returning.  If a socket is passed in, its send/receive
 * timeouts are temporarily set to one second and restored on exit.
 *
 * timeo is the number of ~1-second receive attempts; for UDP the request
 * is retransmitted with backoff, for TCP it is sent once.  mreq is always
 * consumed.  On success, nmrep is set up to dissect the reply body that
 * follows the accepted_status word.
 */
int
nfs_aux_request(
	struct nfsmount *nmp,
	thread_t thd,
	struct sockaddr *saddr,
	socket_t so,
	int sotype,
	mbuf_t mreq,
	uint32_t xid,
	int bindresv,
	int timeo,
	struct nfsm_chain *nmrep)
{
	int error = 0, on = 1, try, sendat = 2, soproto, recv, optlen, restoreto = 0;
	socket_t newso = NULL;
	struct sockaddr_storage ss;
	struct timeval orig_rcvto, orig_sndto, tv = { .tv_sec = 1, .tv_usec = 0 };
	mbuf_t m, mrep = NULL;
	struct msghdr msg;
	uint32_t rxid = 0, reply = 0, reply_status, rejected_status;
	uint32_t verf_type, verf_len, accepted_status;
	size_t readlen, sentlen;
	struct nfs_rpc_record_state nrrs;

	if (!so) {
		/* create socket and set options */
		if (saddr->sa_family == AF_LOCAL) {
			soproto = 0;
		} else {
			soproto = (sotype == SOCK_DGRAM) ? IPPROTO_UDP : IPPROTO_TCP;
		}
		if ((error = sock_socket(saddr->sa_family, sotype, soproto, NULL, NULL, &newso))) {
			goto nfsmout;
		}

		if (bindresv && saddr->sa_family != AF_LOCAL) {
			/* bind to a low (reserved) port with a wildcard address */
			int level = (saddr->sa_family == AF_INET) ? IPPROTO_IP : IPPROTO_IPV6;
			int optname = (saddr->sa_family == AF_INET) ? IP_PORTRANGE : IPV6_PORTRANGE;
			int portrange = IP_PORTRANGE_LOW;
			error = sock_setsockopt(newso, level, optname, &portrange, sizeof(portrange));
			nfsmout_if(error);
			ss.ss_len = saddr->sa_len;
			ss.ss_family = saddr->sa_family;
			if (ss.ss_family == AF_INET) {
				((struct sockaddr_in*)&ss)->sin_addr.s_addr = INADDR_ANY;
				((struct sockaddr_in*)&ss)->sin_port = htons(0);
			} else if (ss.ss_family == AF_INET6) {
				((struct sockaddr_in6*)&ss)->sin6_addr = in6addr_any;
				((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
			} else {
				error = EINVAL;
			}
			if (!error) {
				error = sock_bind(newso, (struct sockaddr *)&ss);
			}
			nfsmout_if(error);
		}

		if (sotype == SOCK_STREAM) {
#			define NFS_AUX_CONNECTION_TIMEOUT 4  /* 4 second timeout for connections */
			int count = 0;

			error = sock_connect(newso, saddr, MSG_DONTWAIT);
			if (error == EINPROGRESS) {
				error = 0;
			}
			nfsmout_if(error);

			while ((error = sock_connectwait(newso, &tv)) == EINPROGRESS) {
				/* After NFS_AUX_CONNECTION_TIMEOUT bail */
				if (++count >= NFS_AUX_CONNECTION_TIMEOUT) {
					error = ETIMEDOUT;
					break;
				}
			}
			nfsmout_if(error);
		}
		if (((error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))) ||
		    ((error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)))) ||
		    ((error = sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on))))) {
			goto nfsmout;
		}
		so = newso;
	} else {
		/* make sure socket is using a one second timeout in this function */
		optlen = sizeof(orig_rcvto);
		error = sock_getsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &orig_rcvto, &optlen);
		if (!error) {
			optlen = sizeof(orig_sndto);
			error = sock_getsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &orig_sndto, &optlen);
		}
		if (!error) {
			sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
			sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
			restoreto = 1;
		}
	}

	if (sotype == SOCK_STREAM) {
		sendat = 0; /* we only resend the request for UDP */
		nfs_rpc_record_state_init(&nrrs);
	}

	for (try = 0; try < timeo; try++) {
		if ((error = nfs_sigintr(nmp, NULL, !try ? NULL : thd, 0))) {
			break;
		}
		if (!try || (try == sendat)) {
			/* send the request (resending periodically for UDP) */
			if ((error = mbuf_copym(mreq, 0, MBUF_COPYALL, MBUF_WAITOK, &m))) {
				goto nfsmout;
			}
			bzero(&msg, sizeof(msg));
			if ((sotype == SOCK_DGRAM) && !sock_isconnected(so)) {
				msg.msg_name = saddr;
				msg.msg_namelen = saddr->sa_len;
			}
			if ((error = sock_sendmbuf(so, &msg, m, 0, &sentlen))) {
				goto nfsmout;
			}
			/* double the resend interval, capped at 30 tries */
			sendat *= 2;
			if (sendat > 30) {
				sendat = 30;
			}
		}
		/* wait for the response */
		if (sotype == SOCK_STREAM) {
			/* try to read (more of) record */
			error = nfs_rpc_record_read(so, &nrrs, 0, &recv, &mrep);
			/* if we don't have the whole record yet, we'll keep trying */
		} else {
			readlen = 1 << 18;
			bzero(&msg, sizeof(msg));
			error = sock_receivembuf(so, &msg, &mrep, 0, &readlen);
		}
		if (error == EWOULDBLOCK) {
			/* receive timed out; loop to retry/resend */
			continue;
		}
		nfsmout_if(error);
		/* parse the RPC reply header */
		nfsm_chain_dissect_init(error, nmrep, mrep);
		nfsm_chain_get_32(error, nmrep, rxid);
		nfsm_chain_get_32(error, nmrep, reply);
		nfsmout_if(error);
		if ((rxid != xid) || (reply != RPC_REPLY)) {
			error = EBADRPC;
		}
		nfsm_chain_get_32(error, nmrep, reply_status);
		nfsmout_if(error);
		if (reply_status == RPC_MSGDENIED) {
			nfsm_chain_get_32(error, nmrep, rejected_status);
			nfsmout_if(error);
			error = (rejected_status == RPC_MISMATCH) ? ERPCMISMATCH : EACCES;
			goto nfsmout;
		}
		nfsm_chain_get_32(error, nmrep, verf_type); /* verifier flavor */
		nfsm_chain_get_32(error, nmrep, verf_len); /* verifier length */
		nfsmout_if(error);
		if (verf_len) {
			nfsm_chain_adv(error, nmrep, nfsm_rndup(verf_len));
		}
		nfsm_chain_get_32(error, nmrep, accepted_status);
		nfsmout_if(error);
		/* map RPC accepted status to errno */
		switch (accepted_status) {
		case RPC_SUCCESS:
			error = 0;
			break;
		case RPC_PROGUNAVAIL:
			error = EPROGUNAVAIL;
			break;
		case RPC_PROGMISMATCH:
			error = EPROGMISMATCH;
			break;
		case RPC_PROCUNAVAIL:
			error = EPROCUNAVAIL;
			break;
		case RPC_GARBAGE:
			error = EBADRPC;
			break;
		case RPC_SYSTEM_ERR:
		default:
			error = EIO;
			break;
		}
		break;
	}
nfsmout:
	if (restoreto) {
		/* put back the caller's original socket timeouts */
		sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &orig_rcvto, sizeof(tv));
		sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &orig_sndto, sizeof(tv));
	}
	if (newso) {
		sock_shutdown(newso, SHUT_RDWR);
		sock_close(newso);
	}
	mbuf_freem(mreq);
	return error;
}
5898
5899 int
nfs_portmap_lookup(struct nfsmount * nmp,vfs_context_t ctx,struct sockaddr * sa,socket_t so,uint32_t protocol,uint32_t vers,uint32_t stype,int timeo)5900 nfs_portmap_lookup(
5901 struct nfsmount *nmp,
5902 vfs_context_t ctx,
5903 struct sockaddr *sa,
5904 socket_t so,
5905 uint32_t protocol,
5906 uint32_t vers,
5907 uint32_t stype,
5908 int timeo)
5909 {
5910 thread_t thd = vfs_context_thread(ctx);
5911 kauth_cred_t cred = vfs_context_ucred(ctx);
5912 struct sockaddr_storage ss;
5913 struct sockaddr *saddr = (struct sockaddr*)&ss;
5914 static struct sockaddr_un rpcbind_cots = {
5915 sizeof(struct sockaddr_un),
5916 AF_LOCAL,
5917 RPCB_TICOTSORD_PATH
5918 };
5919 static struct sockaddr_un rpcbind_clts = {
5920 sizeof(struct sockaddr_un),
5921 AF_LOCAL,
5922 RPCB_TICLTS_PATH
5923 };
5924 struct nfsm_chain nmreq, nmrep;
5925 mbuf_t mreq;
5926 int error = 0, ip, pmprog, pmvers, pmproc;
5927 uint32_t ualen = 0, scopeid = 0, port32;
5928 uint64_t xid = 0;
5929 char uaddr[MAX_IPv6_STR_LEN + 16];
5930
5931 bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
5932 if (saddr->sa_family == AF_INET) {
5933 ip = 4;
5934 pmprog = PMAPPROG;
5935 pmvers = PMAPVERS;
5936 pmproc = PMAPPROC_GETPORT;
5937 } else if (saddr->sa_family == AF_INET6) {
5938 ip = 6;
5939 pmprog = RPCBPROG;
5940 pmvers = RPCBVERS4;
5941 pmproc = RPCBPROC_GETVERSADDR;
5942 } else if (saddr->sa_family == AF_LOCAL) {
5943 ip = 0;
5944 pmprog = RPCBPROG;
5945 pmvers = RPCBVERS4;
5946 pmproc = RPCBPROC_GETVERSADDR;
5947 NFS_SOCK_DBG("%s\n", ((struct sockaddr_un*)sa)->sun_path);
5948 saddr = (struct sockaddr*)((stype == SOCK_STREAM) ? &rpcbind_cots : &rpcbind_clts);
5949 } else {
5950 return EINVAL;
5951 }
5952 nfsm_chain_null(&nmreq);
5953 nfsm_chain_null(&nmrep);
5954
5955 tryagain:
5956 /* send portmapper request to get port/uaddr */
5957 if (ip == 4) {
5958 ((struct sockaddr_in*)saddr)->sin_port = htons(PMAPPORT);
5959 } else if (ip == 6) {
5960 ((struct sockaddr_in6*)saddr)->sin6_port = htons(PMAPPORT);
5961 }
5962 nfsm_chain_build_alloc_init(error, &nmreq, 8 * NFSX_UNSIGNED);
5963 nfsm_chain_add_32(error, &nmreq, protocol);
5964 nfsm_chain_add_32(error, &nmreq, vers);
5965 if (ip == 4) {
5966 nfsm_chain_add_32(error, &nmreq, stype == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP);
5967 nfsm_chain_add_32(error, &nmreq, 0);
5968 } else {
5969 if (stype == SOCK_STREAM) {
5970 if (ip == 6) {
5971 nfsm_chain_add_string(error, &nmreq, "tcp6", 4);
5972 } else {
5973 nfsm_chain_add_string(error, &nmreq, "ticotsord", 9);
5974 }
5975 } else {
5976 if (ip == 6) {
5977 nfsm_chain_add_string(error, &nmreq, "udp6", 4);
5978 } else {
5979 nfsm_chain_add_string(error, &nmreq, "ticlts", 6);
5980 }
5981 }
5982 nfsm_chain_add_string(error, &nmreq, "", 0); /* uaddr */
5983 nfsm_chain_add_string(error, &nmreq, "", 0); /* owner */
5984 }
5985 nfsm_chain_build_done(error, &nmreq);
5986 nfsmout_if(error);
5987 error = nfsm_rpchead2(nmp, stype, pmprog, pmvers, pmproc,
5988 RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq);
5989 nfsmout_if(error);
5990 nmreq.nmc_mhead = NULL;
5991
5992 NFS_SOCK_DUMP_MBUF("nfs_portmap_loockup request", mreq);
5993 error = nfs_aux_request(nmp, thd, saddr, so,
5994 stype, mreq, R_XID32(xid), 0, timeo, &nmrep);
5995 NFS_SOCK_DUMP_MBUF("nfs_portmap_lookup reply", nmrep.nmc_mhead);
5996 NFS_SOCK_DBG("rpcbind request returned %d for program %u vers %u: %s\n", error, protocol, vers,
5997 (saddr->sa_family == AF_LOCAL) ? ((struct sockaddr_un *)saddr)->sun_path :
5998 (saddr->sa_family == AF_INET6) ? "INET6 socket" : "INET socket");
5999
6000 /* grab port from portmap response */
6001 if (ip == 4) {
6002 nfsm_chain_get_32(error, &nmrep, port32);
6003 if (!error) {
6004 if (NFS_PORT_INVALID(port32)) {
6005 error = EBADRPC;
6006 } else {
6007 ((struct sockaddr_in*)sa)->sin_port = htons((in_port_t)port32);
6008 }
6009 }
6010 } else {
6011 /* get uaddr string and convert to sockaddr */
6012 nfsm_chain_get_32(error, &nmrep, ualen);
6013 if (!error) {
6014 if (ualen > (sizeof(uaddr) - 1)) {
6015 error = EIO;
6016 }
6017 if (ualen < 1) {
6018 /* program is not available, just return a zero port */
6019 bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
6020 if (ip == 6) {
6021 ((struct sockaddr_in6*)saddr)->sin6_port = htons(0);
6022 } else {
6023 ((struct sockaddr_un*)saddr)->sun_path[0] = '\0';
6024 }
6025 NFS_SOCK_DBG("Program %u version %u unavailable", protocol, vers);
6026 } else {
6027 nfsm_chain_get_opaque(error, &nmrep, ualen, uaddr);
6028 NFS_SOCK_DBG("Got uaddr %s\n", uaddr);
6029 if (!error) {
6030 uaddr[ualen] = '\0';
6031 if (ip == 6) {
6032 scopeid = ((struct sockaddr_in6*)saddr)->sin6_scope_id;
6033 }
6034 if (!nfs_uaddr2sockaddr(uaddr, saddr)) {
6035 error = EIO;
6036 }
6037 if (ip == 6 && scopeid != ((struct sockaddr_in6*)saddr)->sin6_scope_id) {
6038 NFS_SOCK_DBG("Setting scope_id from %u to %u\n", ((struct sockaddr_in6*)saddr)->sin6_scope_id, scopeid);
6039 ((struct sockaddr_in6*)saddr)->sin6_scope_id = scopeid;
6040 }
6041 }
6042 }
6043 }
6044 if ((error == EPROGMISMATCH) || (error == EPROCUNAVAIL) || (error == EIO) || (error == EBADRPC)) {
6045 /* remote doesn't support rpcbind version or proc (or we couldn't parse uaddr) */
6046 if (pmvers == RPCBVERS4) {
6047 /* fall back to v3 and GETADDR */
6048 pmvers = RPCBVERS3;
6049 pmproc = RPCBPROC_GETADDR;
6050 nfsm_chain_cleanup(&nmreq);
6051 nfsm_chain_cleanup(&nmrep);
6052 bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
6053 xid = 0;
6054 error = 0;
6055 goto tryagain;
6056 }
6057 }
6058 if (!error) {
6059 bcopy(saddr, sa, min(saddr->sa_len, sa->sa_len));
6060 }
6061 }
6062 nfsmout:
6063 nfsm_chain_cleanup(&nmreq);
6064 nfsm_chain_cleanup(&nmrep);
6065 NFS_SOCK_DBG("Returned %d\n", error);
6066
6067 return error;
6068 }
6069
6070 int
nfs_msg(thread_t thd,const char * server,const char * msg,int error)6071 nfs_msg(thread_t thd,
6072 const char *server,
6073 const char *msg,
6074 int error)
6075 {
6076 proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL;
6077 tpr_t tpr;
6078
6079 if (p) {
6080 tpr = tprintf_open(p);
6081 } else {
6082 tpr = NULL;
6083 }
6084 if (error) {
6085 tprintf(tpr, "nfs server %s: %s, error %d\n", server, msg, error);
6086 } else {
6087 tprintf(tpr, "nfs server %s: %s\n", server, msg);
6088 }
6089 tprintf_close(tpr);
6090 return 0;
6091 }
6092
/* Policy bits controlling when "squishy" (force-unmountable) mounts are allowed. */
#define NFS_SQUISH_MOBILE_ONLY          0x0001  /* Squish mounts only on mobile machines */
#define NFS_SQUISH_AUTOMOUNTED_ONLY     0x0002  /* Squish mounts only if they are automounted */
#define NFS_SQUISH_SOFT                 0x0004  /* Treat all soft mounts as though they were on a mobile machine */
#define NFS_SQUISH_QUICK                0x0008  /* Try to squish mounts more quickly. */
#define NFS_SQUISH_SHUTDOWN             0x1000  /* Squish all mounts on shutdown. Currently not implemented */

uint32_t nfs_squishy_flags = NFS_SQUISH_MOBILE_ONLY | NFS_SQUISH_AUTOMOUNTED_ONLY | NFS_SQUISH_QUICK;
uint32_t nfs_tcp_sockbuf = 128 * 1024; /* Default value of tcp_sendspace and tcp_recvspace */
int32_t nfs_is_mobile;                 /* nonzero marks this machine as mobile (checked by nfs_can_squish) */

#define NFS_SQUISHY_DEADTIMEOUT         8       /* Dead time out for squishy mounts */
#define NFS_SQUISHY_QUICKTIMEOUT        4       /* Quicker dead time out when nfs_squishy_flags NFS_SQUISH_QUICK bit is set */
6105
6106 /*
6107 * Could this mount be squished?
6108 */
6109 int
nfs_can_squish(struct nfsmount * nmp)6110 nfs_can_squish(struct nfsmount *nmp)
6111 {
6112 uint64_t flags = vfs_flags(nmp->nm_mountp);
6113 int softsquish = ((nfs_squishy_flags & NFS_SQUISH_SOFT) & NMFLAG(nmp, SOFT));
6114
6115 if (!softsquish && (nfs_squishy_flags & NFS_SQUISH_MOBILE_ONLY) && nfs_is_mobile == 0) {
6116 return 0;
6117 }
6118
6119 if ((nfs_squishy_flags & NFS_SQUISH_AUTOMOUNTED_ONLY) && (flags & MNT_AUTOMOUNTED) == 0) {
6120 return 0;
6121 }
6122
6123 return 1;
6124 }
6125
6126 /*
6127 * NFS mounts default to "rw,hard" - but frequently on mobile clients
6128 * the mount may become "not responding". It's desirable to be able
6129 * to unmount these dead mounts, but only if there is no risk of
6130 * losing data or crashing applications. A "squishy" NFS mount is one
6131 * that can be force unmounted with little risk of harm.
6132 *
6133 * nfs_is_squishy checks if a mount is in a squishy state. A mount is
6134 * in a squishy state iff it is allowed to be squishy and there are no
6135 * dirty pages and there are no mmapped files and there are no files
 * open for write. Whether mounts are allowed to be squishy is
 * controlled by the nfs_squishy_flags settings and the machine's
 * mobility state. These flags can be set by sysctls.
6139 *
6140 * If nfs_is_squishy determines that we are in a squishy state we will
6141 * update the current dead timeout to at least NFS_SQUISHY_DEADTIMEOUT
6142 * (or NFS_SQUISHY_QUICKTIMEOUT if NFS_SQUISH_QUICK is set) (see
6143 * above) or 1/8th of the mount's nm_deadtimeout value, otherwise we just
6144 * update the current dead timeout with the mount's nm_deadtimeout
6145 * value set at mount time.
6146 *
6147 * Assumes that nm_lock is held.
6148 *
 * Note this routine is racy, but its effects on setting the
 * dead timeout only matter when we're in trouble and are likely
 * to stay that way. Since by default it applies only to automounted
 * volumes on mobile machines, this is a reasonable trade off between
 * data integrity and user experience. It can be disabled or tuned via
 * the nfs.conf file.
6155 */
6156
6157 int
nfs_is_squishy(struct nfsmount * nmp)6158 nfs_is_squishy(struct nfsmount *nmp)
6159 {
6160 mount_t mp = nmp->nm_mountp;
6161 int squishy = 0;
6162 int timeo = (nfs_squishy_flags & NFS_SQUISH_QUICK) ? NFS_SQUISHY_QUICKTIMEOUT : NFS_SQUISHY_DEADTIMEOUT;
6163
6164 NFS_SOCK_DBG("%s: nm_curdeadtimeout = %d, nfs_is_mobile = %d\n",
6165 vfs_statfs(mp)->f_mntfromname, nmp->nm_curdeadtimeout, nfs_is_mobile);
6166
6167 if (!nfs_can_squish(nmp)) {
6168 goto out;
6169 }
6170
6171 timeo = (nmp->nm_deadtimeout > timeo) ? max(nmp->nm_deadtimeout / 8, timeo) : timeo;
6172 NFS_SOCK_DBG("nm_writers = %d nm_mappers = %d timeo = %d\n", nmp->nm_writers, nmp->nm_mappers, timeo);
6173
6174 if (nmp->nm_writers == 0 && nmp->nm_mappers == 0) {
6175 uint64_t flags = mp ? vfs_flags(mp) : 0;
6176 squishy = 1;
6177
6178 /*
6179 * Walk the nfs nodes and check for dirty buffers it we're not
6180 * RDONLY and we've not already been declared as squishy since
6181 * this can be a bit expensive.
6182 */
6183 if (!(flags & MNT_RDONLY) && !(nmp->nm_state & NFSSTA_SQUISHY)) {
6184 squishy = !nfs_mount_is_dirty(mp);
6185 }
6186 }
6187
6188 out:
6189 if (squishy) {
6190 nmp->nm_state |= NFSSTA_SQUISHY;
6191 } else {
6192 nmp->nm_state &= ~NFSSTA_SQUISHY;
6193 }
6194
6195 nmp->nm_curdeadtimeout = squishy ? timeo : nmp->nm_deadtimeout;
6196
6197 NFS_SOCK_DBG("nm_curdeadtimeout = %d\n", nmp->nm_curdeadtimeout);
6198
6199 return squishy;
6200 }
6201
6202 /*
6203 * On a send operation, if we can't reach the server and we've got only one server to talk to
6204 * and NFS_SQUISH_QUICK flag is set and we are in a squishy state then mark the mount as dead
6205 * and ask to be forcibly unmounted. Return 1 if we're dead and 0 otherwise.
6206 */
6207 int
nfs_is_dead(int error,struct nfsmount * nmp)6208 nfs_is_dead(int error, struct nfsmount *nmp)
6209 {
6210 fsid_t fsid;
6211
6212 lck_mtx_lock(&nmp->nm_lock);
6213 if (nmp->nm_state & NFSSTA_DEAD) {
6214 lck_mtx_unlock(&nmp->nm_lock);
6215 return 1;
6216 }
6217
6218 if ((error != ENETUNREACH && error != EHOSTUNREACH && error != EADDRNOTAVAIL) ||
6219 !(nmp->nm_locations.nl_numlocs == 1 && nmp->nm_locations.nl_locations[0]->nl_servcount == 1)) {
6220 lck_mtx_unlock(&nmp->nm_lock);
6221 return 0;
6222 }
6223
6224 if ((nfs_squishy_flags & NFS_SQUISH_QUICK) && nfs_is_squishy(nmp)) {
6225 printf("nfs_is_dead: nfs server %s: unreachable. Squished dead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
6226 fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
6227 lck_mtx_unlock(&nmp->nm_lock);
6228 nfs_mount_zombie(nmp, NFSSTA_DEAD);
6229 vfs_event_signal(&fsid, VQ_DEAD, 0);
6230 return 1;
6231 }
6232 lck_mtx_unlock(&nmp->nm_lock);
6233 return 0;
6234 }
6235
6236 /*
6237 * If we've experienced timeouts and we're not really a
6238 * classic hard mount, then just return cached data to
6239 * the caller instead of likely hanging on an RPC.
6240 */
6241 int
nfs_use_cache(struct nfsmount * nmp)6242 nfs_use_cache(struct nfsmount *nmp)
6243 {
6244 /*
6245 *%%% We always let mobile users goto the cache,
6246 * perhaps we should not even require them to have
6247 * a timeout?
6248 */
6249 int cache_ok = (nfs_is_mobile || NMFLAG(nmp, SOFT) ||
6250 nfs_can_squish(nmp) || nmp->nm_deadtimeout);
6251
6252 int timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
6253
6254 /*
6255 * So if we have a timeout and we're not really a hard hard-mount,
6256 * return 1 to not get things out of the cache.
6257 */
6258
6259 return (nmp->nm_state & timeoutmask) && cache_ok;
6260 }
6261
6262 /*
6263 * Log a message that nfs or lockd server is unresponsive. Check if we
6264 * can be squished and if we can, or that our dead timeout has
6265 * expired, and we're not holding state, set our mount as dead, remove
6266 * our mount state and ask to be unmounted. If we are holding state
6267 * we're being called from the nfs_request_timer and will soon detect
6268 * that we need to unmount.
6269 */
/*
 * Mark the mount unresponsive: set the timeout state bits given in
 * "flags", start (or check) the dead-mount timer, and post VQ_NOTRESP
 * or VQ_DEAD as appropriate.  "holding_state" suppresses declaring the
 * mount dead here (the request timer will do it once state is dropped).
 * Finally logs "msg"/"error" to the user via nfs_msg().
 */
void
nfs_down(struct nfsmount *nmp, thread_t thd, int error, int flags, const char *msg, int holding_state)
{
	int timeoutmask, wasunresponsive, unresponsive, softnobrowse;
	uint32_t do_vfs_signal = 0;
	struct timeval now;

	if (nfs_mount_gone(nmp)) {
		return;
	}

	lck_mtx_lock(&nmp->nm_lock);

	timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
	if (NMFLAG(nmp, MUTEJUKEBOX)) { /* jukebox timeouts don't count as unresponsive if muted */
		timeoutmask &= ~NFSSTA_JUKEBOXTIMEO;
	}
	wasunresponsive = (nmp->nm_state & timeoutmask);

	/* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
	softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));

	/* set whichever of the reported timeout bits aren't already set */
	if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
		nmp->nm_state |= NFSSTA_TIMEO;
	}
	if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		nmp->nm_state |= NFSSTA_LOCKTIMEO;
	}
	if ((flags & NFSSTA_JUKEBOXTIMEO) && !(nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) {
		nmp->nm_state |= NFSSTA_JUKEBOXTIMEO;
	}

	unresponsive = (nmp->nm_state & timeoutmask);

	/* refresh squishiness and nm_curdeadtimeout */
	nfs_is_squishy(nmp);

	if (unresponsive && (nmp->nm_curdeadtimeout > 0)) {
		microuptime(&now);
		if (!wasunresponsive) {
			/* just became unresponsive: start the dead timer */
			nmp->nm_deadto_start = now.tv_sec;
			nfs_mount_sock_thread_wake(nmp);
		} else if ((now.tv_sec - nmp->nm_deadto_start) > nmp->nm_curdeadtimeout && !holding_state) {
			/* dead timer expired and we aren't holding state: declare death */
			if (!(nmp->nm_state & NFSSTA_DEAD)) {
				printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
				    (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
			}
			do_vfs_signal = VQ_DEAD;
		}
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/* NOTE(review): nm_state is re-read here after dropping nm_lock — confirm the unlocked check is intentional */
	if (do_vfs_signal == VQ_DEAD && !(nmp->nm_state & NFSSTA_DEAD)) {
		nfs_mount_zombie(nmp, NFSSTA_DEAD);
	} else if (softnobrowse || wasunresponsive || !unresponsive) {
		/* no user-visible responsiveness transition to report */
		do_vfs_signal = 0;
	} else {
		do_vfs_signal = VQ_NOTRESP;
	}
	if (do_vfs_signal) {
		vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, do_vfs_signal, 0);
	}

	nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, error);
}
6334
6335 void
nfs_up(struct nfsmount * nmp,thread_t thd,int flags,const char * msg)6336 nfs_up(struct nfsmount *nmp, thread_t thd, int flags, const char *msg)
6337 {
6338 int timeoutmask, wasunresponsive, unresponsive, softnobrowse;
6339 int do_vfs_signal;
6340
6341 if (nfs_mount_gone(nmp)) {
6342 return;
6343 }
6344
6345 if (msg) {
6346 nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, 0);
6347 }
6348
6349 lck_mtx_lock(&nmp->nm_lock);
6350
6351 timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
6352 if (NMFLAG(nmp, MUTEJUKEBOX)) { /* jukebox timeouts don't count as unresponsive if muted */
6353 timeoutmask &= ~NFSSTA_JUKEBOXTIMEO;
6354 }
6355 wasunresponsive = (nmp->nm_state & timeoutmask);
6356
6357 /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
6358 softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));
6359
6360 if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
6361 nmp->nm_state &= ~NFSSTA_TIMEO;
6362 }
6363 if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
6364 nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
6365 }
6366 if ((flags & NFSSTA_JUKEBOXTIMEO) && (nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) {
6367 nmp->nm_state &= ~NFSSTA_JUKEBOXTIMEO;
6368 }
6369
6370 unresponsive = (nmp->nm_state & timeoutmask);
6371
6372 nmp->nm_deadto_start = 0;
6373 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
6374 nmp->nm_state &= ~NFSSTA_SQUISHY;
6375 lck_mtx_unlock(&nmp->nm_lock);
6376
6377 if (softnobrowse) {
6378 do_vfs_signal = 0;
6379 } else {
6380 do_vfs_signal = (wasunresponsive && !unresponsive);
6381 }
6382 if (do_vfs_signal) {
6383 vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_NOTRESP, 1);
6384 }
6385 }
6386
6387
6388 #endif /* CONFIG_NFS_CLIENT */
6389
6390 #if CONFIG_NFS_SERVER
6391
6392 /*
6393 * Generate the rpc reply header
6394 * siz arg. is used to decide if adding a cluster is worthwhile
6395 */
int
nfsrv_rephead(
	struct nfsrv_descript *nd,
	__unused struct nfsrv_sock *slp,
	struct nfsm_chain *nmrepp,
	size_t siz)
{
	mbuf_t mrep;
	u_int32_t *tl;
	struct nfsm_chain nmrep;
	int err, error;

	err = nd->nd_repstat;
	if (err && (nd->nd_vers == NFS_VER2)) {
		/* an NFSv2 error reply carries no body beyond the status */
		siz = 0;
	}

	/*
	 * If this is a big reply, use a cluster else
	 * try and leave leading space for the lower level headers.
	 */
	siz += RPC_REPLYSIZ;
	if (siz >= nfs_mbuf_minclsize) {
		error = mbuf_getpacket(MBUF_WAITOK, &mrep);
	} else {
		error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mrep);
	}
	if (error) {
		/* unable to allocate packet */
		/* XXX should we keep statistics for these errors? */
		return error;
	}
	if (siz < nfs_mbuf_minclsize) {
		/* leave space for lower level headers */
		tl = mbuf_data(mrep);
		tl += 80 / sizeof(*tl); /* XXX max_hdr? XXX */
		mbuf_setdata(mrep, tl, 6 * NFSX_UNSIGNED);
	}
	nfsm_chain_init(&nmrep, mrep);
	/* transaction id and message type */
	nfsm_chain_add_32(error, &nmrep, nd->nd_retxid);
	nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		/* rejected reply: authentication failure or RPC version mismatch */
		nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
		if (err & NFSERR_AUTHERR) {
			nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
			nfsm_chain_add_32(error, &nmrep, (err & ~NFSERR_AUTHERR));
		} else {
			nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
			/* low and high supported RPC versions (both 2) */
			nfsm_chain_add_32(error, &nmrep, RPC_VER2);
			nfsm_chain_add_32(error, &nmrep, RPC_VER2);
		}
	} else {
		/* reply status */
		nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
		if (nd->nd_gss_context != NULL) {
			/* RPCSEC_GSS verifier */
			error = nfs_gss_svc_verf_put(nd, &nmrep);
			if (error) {
				nfsm_chain_add_32(error, &nmrep, RPC_SYSTEM_ERR);
				goto done;
			}
		} else {
			/* RPCAUTH_NULL verifier */
			nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
			nfsm_chain_add_32(error, &nmrep, 0);
		}
		/* accepted status */
		switch (err) {
		case EPROGUNAVAIL:
			nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
			/* XXX hard coded versions? */
			nfsm_chain_add_32(error, &nmrep, NFS_VER2);
			nfsm_chain_add_32(error, &nmrep, NFS_VER3);
			break;
		case EPROCUNAVAIL:
			nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
			break;
		default:
			/* accepted: the NFS-level status word (if any) follows */
			nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
			if (nd->nd_gss_context != NULL) {
				error = nfs_gss_svc_prepare_reply(nd, &nmrep);
			}
			if (err != NFSERR_RETVOID) {
				nfsm_chain_add_32(error, &nmrep,
				    (err ? nfsrv_errmap(nd, err) : 0));
			}
			break;
		}
	}

done:
	nfsm_chain_build_done(error, &nmrep);
	if (error) {
		/* error composing reply header */
		/* XXX should we keep statistics for these errors? */
		mbuf_freem(mrep);
		return error;
	}

	*nmrepp = nmrep;
	if ((err != 0) && (err != NFSERR_RETVOID)) {
		OSAddAtomic64(1, &nfsrvstats.srvrpc_errs);
	}
	return 0;
}
6507
6508 /*
6509 * The nfs server send routine.
6510 *
6511 * - return EINTR or ERESTART if interrupted by a signal
6512 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
6513 * - do any cleanup required by recoverable socket errors (???)
6514 */
6515 int
nfsrv_send(struct nfsrv_sock * slp,mbuf_t nam,mbuf_t top)6516 nfsrv_send(struct nfsrv_sock *slp, mbuf_t nam, mbuf_t top)
6517 {
6518 int error;
6519 socket_t so = slp->ns_so;
6520 struct sockaddr *sendnam;
6521 struct msghdr msg;
6522
6523 bzero(&msg, sizeof(msg));
6524 if (nam && !sock_isconnected(so) && (slp->ns_sotype != SOCK_STREAM)) {
6525 if ((sendnam = mbuf_data(nam))) {
6526 msg.msg_name = (caddr_t)sendnam;
6527 msg.msg_namelen = sendnam->sa_len;
6528 }
6529 }
6530 if (NFSRV_IS_DBG(NFSRV_FAC_SRV, 15)) {
6531 nfs_dump_mbuf(__func__, __LINE__, "nfsrv_send\n", top);
6532 }
6533 error = sock_sendmbuf(so, &msg, top, 0, NULL);
6534 if (!error) {
6535 return 0;
6536 }
6537 log(LOG_INFO, "nfsd send error %d\n", error);
6538
6539 if ((error == EWOULDBLOCK) && (slp->ns_sotype == SOCK_STREAM)) {
6540 error = EPIPE; /* zap TCP sockets if they time out on send */
6541 }
6542 /* Handle any recoverable (soft) socket errors here. (???) */
6543 if (error != EINTR && error != ERESTART && error != EIO &&
6544 error != EWOULDBLOCK && error != EPIPE) {
6545 error = 0;
6546 }
6547
6548 return error;
6549 }
6550
6551 /*
6552 * Socket upcall routine for the nfsd sockets.
6553 * The caddr_t arg is a pointer to the "struct nfsrv_sock".
6554 * Essentially do as much as possible non-blocking, else punt and it will
6555 * be called with MBUF_WAITOK from an nfsd.
6556 */
6557 void
nfsrv_rcv(socket_t so,void * arg,int waitflag)6558 nfsrv_rcv(socket_t so, void *arg, int waitflag)
6559 {
6560 struct nfsrv_sock *slp = arg;
6561
6562 if (!nfsd_thread_count || !(slp->ns_flag & SLP_VALID)) {
6563 return;
6564 }
6565
6566 lck_rw_lock_exclusive(&slp->ns_rwlock);
6567 nfsrv_rcv_locked(so, slp, waitflag);
6568 /* Note: ns_rwlock gets dropped when called with MBUF_DONTWAIT */
6569 }
/*
 * Pull incoming data off an nfsd socket.  Called with ns_rwlock held
 * exclusively; when waitflag == MBUF_DONTWAIT the lock is dropped before
 * returning (and an nfsd is woken if there is work queued).
 * Stream sockets accumulate raw data and carve RPC records out of it via
 * nfsrv_getstream(); datagram sockets queue each packet (prefixed with
 * its sender address) as one record.
 */
void
nfsrv_rcv_locked(socket_t so, struct nfsrv_sock *slp, int waitflag)
{
	mbuf_t m, mp, mhck, m2;
	int ns_flag = 0, error;
	struct msghdr msg;
	size_t bytes_read;

	if ((slp->ns_flag & SLP_VALID) == 0) {
		if (waitflag == MBUF_DONTWAIT) {
			lck_rw_done(&slp->ns_rwlock);
		}
		return;
	}

#ifdef notdef
	/*
	 * Define this to test for nfsds handling this under heavy load.
	 */
	if (waitflag == MBUF_DONTWAIT) {
		ns_flag = SLP_NEEDQ;
		goto dorecs;
	}
#endif
	if (slp->ns_sotype == SOCK_STREAM) {
		/*
		 * If there are already records on the queue, defer soreceive()
		 * to an(other) nfsd so that there is feedback to the TCP layer that
		 * the nfs servers are heavily loaded.
		 */
		if (slp->ns_rec) {
			ns_flag = SLP_NEEDQ;
			goto dorecs;
		}

		/*
		 * Do soreceive().
		 */
		bytes_read = 1000000000;
		error = sock_receivembuf(so, NULL, &mp, MSG_DONTWAIT, &bytes_read);
		if (error || mp == NULL) {
			if (error == EWOULDBLOCK) {
				ns_flag = (waitflag == MBUF_DONTWAIT) ? SLP_NEEDQ : 0;
			} else {
				ns_flag = SLP_DISCONN;
			}
			goto dorecs;
		}
		/* append the new data to the raw stream and update the byte count */
		m = mp;
		if (slp->ns_rawend) {
			if ((error = mbuf_setnext(slp->ns_rawend, m))) {
				panic("nfsrv_rcv: mbuf_setnext failed %d", error);
			}
			slp->ns_cc += bytes_read;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = bytes_read;
		}
		/* advance ns_rawend to the last mbuf of the appended chain */
		while ((m2 = mbuf_next(m))) {
			m = m2;
		}
		slp->ns_rawend = m;

		/*
		 * Now try and parse record(s) out of the raw stream data.
		 */
		error = nfsrv_getstream(slp, waitflag);
		if (error) {
			if (error == EPERM) {
				ns_flag = SLP_DISCONN;
			} else {
				ns_flag = SLP_NEEDQ;
			}
		}
	} else {
		struct sockaddr_storage nam;

		if (slp->ns_reccnt >= nfsrv_sock_max_rec_queue_length) {
			/* already have max # RPC records queued on this socket */
			ns_flag = SLP_NEEDQ;
			goto dorecs;
		}

		bzero(&msg, sizeof(msg));
		msg.msg_name = (caddr_t)&nam;
		msg.msg_namelen = sizeof(nam);

		/* drain all pending datagrams, queueing each one as a record */
		do {
			bytes_read = 1000000000;
			error = sock_receivembuf(so, &msg, &mp, MSG_DONTWAIT | MSG_NEEDSA, &bytes_read);
			if (mp) {
				/* prepend the sender's address as a SONAME mbuf */
				if (msg.msg_name && (mbuf_get(MBUF_WAITOK, MBUF_TYPE_SONAME, &mhck) == 0)) {
					mbuf_setlen(mhck, nam.ss_len);
					bcopy(&nam, mbuf_data(mhck), nam.ss_len);
					m = mhck;
					if (mbuf_setnext(m, mp)) {
						/* trouble... just drop it */
						printf("nfsrv_rcv: mbuf_setnext failed\n");
						mbuf_free(mhck);
						m = mp;
					}
				} else {
					m = mp;
				}
				/* append the record to the socket's record queue */
				if (slp->ns_recend) {
					mbuf_setnextpkt(slp->ns_recend, m);
				} else {
					slp->ns_rec = m;
					slp->ns_flag |= SLP_DOREC;
				}
				slp->ns_recend = m;
				mbuf_setnextpkt(m, NULL);
				slp->ns_reccnt++;
			}
		} while (mp);
	}

	/*
	 * Now try and process the request records, non-blocking.
	 */
dorecs:
	if (ns_flag) {
		slp->ns_flag |= ns_flag;
	}
	if (waitflag == MBUF_DONTWAIT) {
		int wake = (slp->ns_flag & SLP_WORKTODO);
		lck_rw_done(&slp->ns_rwlock);
		if (wake && nfsd_thread_count) {
			lck_mtx_lock(&nfsd_mutex);
			nfsrv_wakenfsd(slp);
			lck_mtx_unlock(&nfsd_mutex);
		}
	}
}
6704
6705 /*
6706 * Try and extract an RPC request from the mbuf data list received on a
6707 * stream socket. The "waitflag" argument indicates whether or not it
6708 * can sleep.
6709 */
int
nfsrv_getstream(struct nfsrv_sock *slp, int waitflag)
{
	mbuf_t m;
	char *cp1, *cp2, *mdata;
	int error;
	size_t len, mlen;
	mbuf_t om, m2, recm;
	u_int32_t recmark;

	if (slp->ns_flag & SLP_GETSTREAM) {
		/* only one thread may parse this socket's stream at a time */
		panic("nfs getstream");
	}
	slp->ns_flag |= SLP_GETSTREAM;
	for (;;) {
		if (slp->ns_reclen == 0) {
			/* next we expect a 4-byte RPC record mark */
			if (slp->ns_cc < NFSX_UNSIGNED) {
				/* not enough buffered data yet */
				slp->ns_flag &= ~SLP_GETSTREAM;
				return 0;
			}
			m = slp->ns_raw;
			mdata = mbuf_data(m);
			mlen = mbuf_len(m);
			if (mlen >= NFSX_UNSIGNED) {
				/* record mark is contained within the first mbuf */
				bcopy(mdata, (caddr_t)&recmark, NFSX_UNSIGNED);
				mdata += NFSX_UNSIGNED;
				mlen -= NFSX_UNSIGNED;
				mbuf_setdata(m, mdata, mlen);
			} else {
				/* record mark straddles mbufs: gather it byte by byte */
				cp1 = (caddr_t)&recmark;
				cp2 = mdata;
				while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
					while (mlen == 0) {
						m = mbuf_next(m);
						cp2 = mbuf_data(m);
						mlen = mbuf_len(m);
					}
					*cp1++ = *cp2++;
					mlen--;
					mbuf_setdata(m, cp2, mlen);
				}
			}
			slp->ns_cc -= NFSX_UNSIGNED;
			recmark = ntohl(recmark);
			/* low 31 bits are the fragment length; high bit marks the last fragment */
			slp->ns_reclen = recmark & ~0x80000000;
			if (recmark & 0x80000000) {
				slp->ns_flag |= SLP_LASTFRAG;
			} else {
				slp->ns_flag &= ~SLP_LASTFRAG;
			}
			if (slp->ns_reclen <= 0 || slp->ns_reclen > NFS_MAXPACKET) {
				/* bogus record length: EPERM tells the caller to drop the connection */
				slp->ns_flag &= ~SLP_GETSTREAM;
				return EPERM;
			}
		}

		/*
		 * Now get the record part.
		 *
		 * Note that slp->ns_reclen may be 0. Linux sometimes
		 * generates 0-length RPCs
		 */
		recm = NULL;
		if (slp->ns_cc == slp->ns_reclen) {
			/* buffered data is exactly one fragment: take the whole raw chain */
			recm = slp->ns_raw;
			slp->ns_raw = slp->ns_rawend = NULL;
			slp->ns_cc = slp->ns_reclen = 0;
		} else if (slp->ns_cc > slp->ns_reclen) {
			/* split the fragment off the front of the raw chain */
			len = 0;
			m = slp->ns_raw;
			mlen = mbuf_len(m);
			mdata = mbuf_data(m);
			om = NULL;
			while (len < slp->ns_reclen) {
				if ((len + mlen) > slp->ns_reclen) {
					/* fragment ends mid-mbuf: copy its head and trim the mbuf */
					if (mbuf_copym(m, 0, slp->ns_reclen - len, waitflag, &m2)) {
						slp->ns_flag &= ~SLP_GETSTREAM;
						return EWOULDBLOCK;
					}
					if (om) {
						if (mbuf_setnext(om, m2)) {
							/* trouble... just drop it */
							printf("nfsrv_getstream: mbuf_setnext failed\n");
							mbuf_freem(m2);
							slp->ns_flag &= ~SLP_GETSTREAM;
							return EWOULDBLOCK;
						}
						recm = slp->ns_raw;
					} else {
						recm = m2;
					}
					mdata += slp->ns_reclen - len;
					mlen -= slp->ns_reclen - len;
					mbuf_setdata(m, mdata, mlen);
					len = slp->ns_reclen;
				} else if ((len + mlen) == slp->ns_reclen) {
					/* fragment ends exactly on an mbuf boundary */
					om = m;
					len += mlen;
					m = mbuf_next(m);
					recm = slp->ns_raw;
					if (mbuf_setnext(om, NULL)) {
						printf("nfsrv_getstream: mbuf_setnext failed 2\n");
						slp->ns_flag &= ~SLP_GETSTREAM;
						return EWOULDBLOCK;
					}
					mlen = mbuf_len(m);
					mdata = mbuf_data(m);
				} else {
					/* this whole mbuf belongs to the fragment: keep walking */
					om = m;
					len += mlen;
					m = mbuf_next(m);
					mlen = mbuf_len(m);
					mdata = mbuf_data(m);
				}
			}
			slp->ns_raw = m;
			slp->ns_cc -= len;
			slp->ns_reclen = 0;
		} else {
			/* don't have the full fragment yet: wait for more data */
			slp->ns_flag &= ~SLP_GETSTREAM;
			return 0;
		}

		/*
		 * Accumulate the fragments into a record.
		 */
		if (slp->ns_frag == NULL) {
			slp->ns_frag = recm;
		} else {
			m = slp->ns_frag;
			while ((m2 = mbuf_next(m))) {
				m = m2;
			}
			if ((error = mbuf_setnext(m, recm))) {
				panic("nfsrv_getstream: mbuf_setnext failed 3, %d", error);
			}
		}
		if (slp->ns_flag & SLP_LASTFRAG) {
			/* record complete: move it onto the request queue */
			if (slp->ns_recend) {
				mbuf_setnextpkt(slp->ns_recend, slp->ns_frag);
			} else {
				slp->ns_rec = slp->ns_frag;
				slp->ns_flag |= SLP_DOREC;
			}
			slp->ns_recend = slp->ns_frag;
			slp->ns_frag = NULL;
		}
	}
}
6859
6860 /*
6861 * Parse an RPC header.
6862 */
6863 int
nfsrv_dorec(struct nfsrv_sock * slp,struct nfsd * nfsd,struct nfsrv_descript ** ndp)6864 nfsrv_dorec(
6865 struct nfsrv_sock *slp,
6866 struct nfsd *nfsd,
6867 struct nfsrv_descript **ndp)
6868 {
6869 mbuf_t m;
6870 mbuf_t nam;
6871 struct nfsrv_descript *nd;
6872 int error = 0;
6873
6874 *ndp = NULL;
6875 if (!(slp->ns_flag & (SLP_VALID | SLP_DOREC)) || (slp->ns_rec == NULL)) {
6876 return ENOBUFS;
6877 }
6878 nd = zalloc(nfsrv_descript_zone);
6879 m = slp->ns_rec;
6880 slp->ns_rec = mbuf_nextpkt(m);
6881 if (slp->ns_rec) {
6882 mbuf_setnextpkt(m, NULL);
6883 } else {
6884 slp->ns_flag &= ~SLP_DOREC;
6885 slp->ns_recend = NULL;
6886 }
6887 slp->ns_reccnt--;
6888 if (mbuf_type(m) == MBUF_TYPE_SONAME) {
6889 nam = m;
6890 m = mbuf_next(m);
6891 if ((error = mbuf_setnext(nam, NULL))) {
6892 panic("nfsrv_dorec: mbuf_setnext failed %d", error);
6893 }
6894 } else {
6895 nam = NULL;
6896 }
6897 nd->nd_nam2 = nam;
6898 nfsm_chain_dissect_init(error, &nd->nd_nmreq, m);
6899 if (!error) {
6900 error = nfsrv_getreq(nd);
6901 }
6902 if (error) {
6903 if (nam) {
6904 mbuf_freem(nam);
6905 }
6906 if (nd->nd_gss_context) {
6907 nfs_gss_svc_ctx_deref(nd->nd_gss_context);
6908 }
6909 NFS_ZFREE(nfsrv_descript_zone, nd);
6910 return error;
6911 }
6912 nd->nd_mrep = NULL;
6913 *ndp = nd;
6914 nfsd->nfsd_nd = nd;
6915 return 0;
6916 }
6917
6918 /*
6919 * Parse an RPC request
6920 * - verify it
6921 * - fill in the cred struct.
6922 */
6923 int
nfsrv_getreq(struct nfsrv_descript * nd)6924 nfsrv_getreq(struct nfsrv_descript *nd)
6925 {
6926 struct nfsm_chain *nmreq;
6927 int len, i;
6928 u_int32_t nfsvers, auth_type;
6929 int error = 0;
6930 uid_t user_id;
6931 gid_t group_id;
6932 short ngroups;
6933 uint32_t val;
6934
6935 nd->nd_cr = NULL;
6936 nd->nd_gss_context = NULL;
6937 nd->nd_gss_seqnum = 0;
6938 nd->nd_gss_mb = NULL;
6939
6940 user_id = group_id = -2;
6941 val = auth_type = len = 0;
6942
6943 nmreq = &nd->nd_nmreq;
6944 nfsm_chain_get_32(error, nmreq, nd->nd_retxid); // XID
6945 nfsm_chain_get_32(error, nmreq, val); // RPC Call
6946 if (!error && (val != RPC_CALL)) {
6947 error = EBADRPC;
6948 }
6949 nfsmout_if(error);
6950 nd->nd_repstat = 0;
6951 nfsm_chain_get_32(error, nmreq, val); // RPC Version
6952 nfsmout_if(error);
6953 if (val != RPC_VER2) {
6954 nd->nd_repstat = ERPCMISMATCH;
6955 nd->nd_procnum = NFSPROC_NOOP;
6956 return 0;
6957 }
6958 nfsm_chain_get_32(error, nmreq, val); // RPC Program Number
6959 nfsmout_if(error);
6960 if (val != NFS_PROG) {
6961 nd->nd_repstat = EPROGUNAVAIL;
6962 nd->nd_procnum = NFSPROC_NOOP;
6963 return 0;
6964 }
6965 nfsm_chain_get_32(error, nmreq, nfsvers);// NFS Version Number
6966 nfsmout_if(error);
6967 if ((nfsvers < NFS_VER2) || (nfsvers > NFS_VER3)) {
6968 nd->nd_repstat = EPROGMISMATCH;
6969 nd->nd_procnum = NFSPROC_NOOP;
6970 return 0;
6971 }
6972 nd->nd_vers = nfsvers;
6973 nfsm_chain_get_32(error, nmreq, nd->nd_procnum);// NFS Procedure Number
6974 nfsmout_if(error);
6975 if ((nd->nd_procnum >= NFS_NPROCS) ||
6976 ((nd->nd_vers == NFS_VER2) && (nd->nd_procnum > NFSV2PROC_STATFS))) {
6977 nd->nd_repstat = EPROCUNAVAIL;
6978 nd->nd_procnum = NFSPROC_NOOP;
6979 return 0;
6980 }
6981 if (nfsvers != NFS_VER3) {
6982 nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
6983 }
6984 nfsm_chain_get_32(error, nmreq, auth_type); // Auth Flavor
6985 nfsm_chain_get_32(error, nmreq, len); // Auth Length
6986 if (!error && (len < 0 || len > RPCAUTH_MAXSIZ)) {
6987 error = EBADRPC;
6988 }
6989 nfsmout_if(error);
6990
6991 /* Handle authentication */
6992 if (auth_type == RPCAUTH_SYS) {
6993 struct posix_cred temp_pcred;
6994 if (nd->nd_procnum == NFSPROC_NULL) {
6995 return 0;
6996 }
6997 nd->nd_sec = RPCAUTH_SYS;
6998 nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // skip stamp
6999 nfsm_chain_get_32(error, nmreq, len); // hostname length
7000 if (len < 0 || len > NFS_MAXNAMLEN) {
7001 error = EBADRPC;
7002 }
7003 nfsm_chain_adv(error, nmreq, nfsm_rndup(len)); // skip hostname
7004 nfsmout_if(error);
7005
7006 /* create a temporary credential using the bits from the wire */
7007 bzero(&temp_pcred, sizeof(temp_pcred));
7008 nfsm_chain_get_32(error, nmreq, user_id);
7009 nfsm_chain_get_32(error, nmreq, group_id);
7010 temp_pcred.cr_groups[0] = group_id;
7011 nfsm_chain_get_32(error, nmreq, len); // extra GID count
7012 if ((len < 0) || (len > RPCAUTH_UNIXGIDS)) {
7013 error = EBADRPC;
7014 }
7015 nfsmout_if(error);
7016 for (i = 1; i <= len; i++) {
7017 if (i < NGROUPS) {
7018 nfsm_chain_get_32(error, nmreq, temp_pcred.cr_groups[i]);
7019 } else {
7020 nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED);
7021 }
7022 }
7023 nfsmout_if(error);
7024 ngroups = (len >= NGROUPS) ? NGROUPS : (short)(len + 1);
7025 if (ngroups > 1) {
7026 nfsrv_group_sort(&temp_pcred.cr_groups[0], ngroups);
7027 }
7028 nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE)
7029 nfsm_chain_get_32(error, nmreq, len); // verifier length
7030 if (len < 0 || len > RPCAUTH_MAXSIZ) {
7031 error = EBADRPC;
7032 }
7033 if (len > 0) {
7034 nfsm_chain_adv(error, nmreq, nfsm_rndup(len));
7035 }
7036
7037 /* request creation of a real credential */
7038 temp_pcred.cr_uid = user_id;
7039 temp_pcred.cr_ngroups = ngroups;
7040 nd->nd_cr = posix_cred_create(&temp_pcred);
7041 if (nd->nd_cr == NULL) {
7042 nd->nd_repstat = ENOMEM;
7043 nd->nd_procnum = NFSPROC_NOOP;
7044 return 0;
7045 }
7046 } else if (auth_type == RPCSEC_GSS) {
7047 error = nfs_gss_svc_cred_get(nd, nmreq);
7048 if (error) {
7049 if (error == EINVAL) {
7050 goto nfsmout; // drop the request
7051 }
7052 nd->nd_repstat = error;
7053 nd->nd_procnum = NFSPROC_NOOP;
7054 return 0;
7055 }
7056 } else {
7057 if (nd->nd_procnum == NFSPROC_NULL) { // assume it's AUTH_NONE
7058 return 0;
7059 }
7060 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
7061 nd->nd_procnum = NFSPROC_NOOP;
7062 return 0;
7063 }
7064 return 0;
7065 nfsmout:
7066 if (IS_VALID_CRED(nd->nd_cr)) {
7067 kauth_cred_unref(&nd->nd_cr);
7068 }
7069 nfsm_chain_cleanup(nmreq);
7070 return error;
7071 }
7072
7073 /*
7074 * Search for a sleeping nfsd and wake it up.
7075 * SIDE EFFECT: If none found, make sure the socket is queued up so that one
7076 * of the running nfsds will go look for the work in the nfsrv_sockwait list.
7077 * Note: Must be called with nfsd_mutex held.
7078 */
7079 void
nfsrv_wakenfsd(struct nfsrv_sock * slp)7080 nfsrv_wakenfsd(struct nfsrv_sock *slp)
7081 {
7082 struct nfsd *nd;
7083
7084 if ((slp->ns_flag & SLP_VALID) == 0) {
7085 return;
7086 }
7087
7088 lck_rw_lock_exclusive(&slp->ns_rwlock);
7089 /* if there's work to do on this socket, make sure it's queued up */
7090 if ((slp->ns_flag & SLP_WORKTODO) && !(slp->ns_flag & SLP_QUEUED)) {
7091 TAILQ_INSERT_TAIL(&nfsrv_sockwait, slp, ns_svcq);
7092 slp->ns_flag |= SLP_WAITQ;
7093 }
7094 lck_rw_done(&slp->ns_rwlock);
7095
7096 /* wake up a waiting nfsd, if possible */
7097 nd = TAILQ_FIRST(&nfsd_queue);
7098 if (!nd) {
7099 return;
7100 }
7101
7102 TAILQ_REMOVE(&nfsd_queue, nd, nfsd_queue);
7103 nd->nfsd_flag &= ~NFSD_WAITING;
7104 wakeup(nd);
7105 }
7106
7107 #endif /* CONFIG_NFS_SERVER */
7108
7109 #endif /* CONFIG_NFS */
7110