1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1991, 1993, 1995
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Rick Macklem at The University of Guelph.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
65 * FreeBSD-Id: nfs_socket.c,v 1.30 1997/10/28 15:59:07 bde Exp $
66 */
67
68 #include <nfs/nfs_conf.h>
69 #if CONFIG_NFS
70
71 /*
72 * Socket operations for use by nfs
73 */
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/proc.h>
78 #include <sys/signalvar.h>
79 #include <sys/kauth.h>
80 #include <sys/mount_internal.h>
81 #include <sys/kernel.h>
82 #include <sys/kpi_mbuf.h>
83 #include <sys/malloc.h>
84 #include <sys/vnode.h>
85 #include <sys/domain.h>
86 #include <sys/protosw.h>
87 #include <sys/socket.h>
88 #include <sys/un.h>
89 #include <sys/syslog.h>
90 #include <sys/tprintf.h>
91 #include <libkern/OSAtomic.h>
92 #include <IOKit/IOPlatformExpert.h>
93
94 #include <sys/reboot.h>
95 #include <sys/time.h>
96 #include <kern/clock.h>
97 #include <kern/task.h>
98 #include <kern/thread.h>
99 #include <kern/thread_call.h>
100 #include <sys/user.h>
101 #include <sys/acct.h>
102
103 #include <netinet/in.h>
104 #include <netinet/tcp.h>
105
106 #include <nfs/rpcv2.h>
107 #include <nfs/krpc.h>
108 #include <nfs/nfsproto.h>
109 #include <nfs/nfs.h>
110 #include <nfs/xdr_subs.h>
111 #include <nfs/nfsm_subs.h>
112 #include <nfs/nfs_gss.h>
113 #include <nfs/nfsmount.h>
114 #include <nfs/nfsnode.h>
115
116 #define NFS_SOCK_DBG(...) NFSCLNT_DBG(NFSCLNT_FAC_SOCK, 7, ## __VA_ARGS__)
117 #define NFS_SOCK_DUMP_MBUF(msg, mb) if (NFSCLNT_IS_DBG(NFSCLNT_FAC_SOCK, 15)) nfs_dump_mbuf(__func__, __LINE__, (msg), (mb))
118
119 #ifndef SUN_LEN
120 #define SUN_LEN(su) \
121 (sizeof(*(su)) - sizeof((su)->sun_path) + strnlen((su)->sun_path, sizeof((su)->sun_path)))
122 #endif /* SUN_LEN */
123
124 /* XXX */
125 kern_return_t thread_terminate(thread_t);
126
127 ZONE_DEFINE_TYPE(nfs_fhandle_zone, "fhandle", struct fhandle, ZC_NONE);
128 ZONE_DEFINE_TYPE(nfs_req_zone, "NFS req", struct nfsreq, ZC_NONE);
129 ZONE_DEFINE(nfsrv_descript_zone, "NFSV3 srvdesc",
130 sizeof(struct nfsrv_descript), ZC_NONE);
131
132
133 #if CONFIG_NFS_SERVER
134 int nfsrv_sock_max_rec_queue_length = 128; /* max # RPC records queued on (UDP) socket */
135
136 int nfsrv_getstream(struct nfsrv_sock *, int);
137 int nfsrv_getreq(struct nfsrv_descript *);
138 extern int nfsv3_procid[NFS_NPROCS];
139 #endif /* CONFIG_NFS_SERVER */
140
141 /*
142 * compare two sockaddr structures
143 */
144 int
nfs_sockaddr_cmp(struct sockaddr * sa1,struct sockaddr * sa2)145 nfs_sockaddr_cmp(struct sockaddr *sa1, struct sockaddr *sa2)
146 {
147 if (!sa1) {
148 return -1;
149 }
150 if (!sa2) {
151 return 1;
152 }
153 if (sa1->sa_family != sa2->sa_family) {
154 return (sa1->sa_family < sa2->sa_family) ? -1 : 1;
155 }
156 if (sa1->sa_len != sa2->sa_len) {
157 return (sa1->sa_len < sa2->sa_len) ? -1 : 1;
158 }
159 if (sa1->sa_family == AF_INET) {
160 return bcmp(&((struct sockaddr_in*)sa1)->sin_addr,
161 &((struct sockaddr_in*)sa2)->sin_addr, sizeof(((struct sockaddr_in*)sa1)->sin_addr));
162 }
163 if (sa1->sa_family == AF_INET6) {
164 return bcmp(&((struct sockaddr_in6*)sa1)->sin6_addr,
165 &((struct sockaddr_in6*)sa2)->sin6_addr, sizeof(((struct sockaddr_in6*)sa1)->sin6_addr));
166 }
167 return -1;
168 }
169
170 #if CONFIG_NFS_CLIENT
171
172 int nfs_connect_search_new_socket(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
173 int nfs_connect_search_socket_connect(struct nfsmount *, struct nfs_socket *, int);
174 int nfs_connect_search_ping(struct nfsmount *, struct nfs_socket *, struct timeval *);
175 void nfs_connect_search_socket_found(struct nfsmount *, struct nfs_socket_search *, struct nfs_socket *);
176 void nfs_connect_search_socket_reap(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
177 int nfs_connect_search_check(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
178 int nfs_reconnect(struct nfsmount *);
179 int nfs_connect_setup(struct nfsmount *);
180 void nfs_mount_sock_thread(void *, wait_result_t);
181 void nfs_udp_rcv(socket_t, void*, int);
182 void nfs_tcp_rcv(socket_t, void*, int);
183 void nfs_sock_poke(struct nfsmount *);
184 void nfs_request_match_reply(struct nfsmount *, mbuf_t);
185 void nfs_reqdequeue(struct nfsreq *);
186 void nfs_reqbusy(struct nfsreq *);
187 struct nfsreq *nfs_reqnext(struct nfsreq *);
188 int nfs_wait_reply(struct nfsreq *);
189 void nfs_softterm(struct nfsreq *);
190 int nfs_can_squish(struct nfsmount *);
191 int nfs_is_squishy(struct nfsmount *);
192 int nfs_is_dead(int, struct nfsmount *);
193
194 /*
195 * Estimate rto for an nfs rpc sent via. an unreliable datagram.
196 * Use the mean and mean deviation of rtt for the appropriate type of rpc
197 * for the frequent rpcs and a default for the others.
198 * The justification for doing "other" this way is that these rpcs
199 * happen so infrequently that timer est. would probably be stale.
200 * Also, since many of these rpcs are
201 * non-idempotent, a conservative timeout is desired.
202 * getattr, lookup - A+2D
203 * read, write - A+4D
204 * other - nm_timeo
205 */
206 #define NFS_RTO(n, t) \
207 ((t) == 0 ? (n)->nm_timeo : \
208 ((t) < 3 ? \
209 (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
210 ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
211 #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
212 #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
213
214 /*
215 * Defines which timer to use for the procnum.
216 * 0 - default
217 * 1 - getattr
218 * 2 - lookup
219 * 3 - read
220 * 4 - write
221 */
/*
 * Procedure-number -> RTT timer class table (classes documented above).
 * Designated initializers; any procedure not listed defaults to 0 ("other"),
 * which makes NFS_RTO fall back to the mount's nm_timeo.
 */
static const int proct[] = {
	[NFSPROC_NULL] = 0,
	[NFSPROC_GETATTR] = 1,
	[NFSPROC_SETATTR] = 0,
	[NFSPROC_LOOKUP] = 2,
	[NFSPROC_ACCESS] = 1,
	[NFSPROC_READLINK] = 3,
	[NFSPROC_READ] = 3,
	[NFSPROC_WRITE] = 4,
	[NFSPROC_CREATE] = 0,
	[NFSPROC_MKDIR] = 0,
	[NFSPROC_SYMLINK] = 0,
	[NFSPROC_MKNOD] = 0,
	[NFSPROC_REMOVE] = 0,
	[NFSPROC_RMDIR] = 0,
	[NFSPROC_RENAME] = 0,
	[NFSPROC_LINK] = 0,
	[NFSPROC_READDIR] = 3,
	[NFSPROC_READDIRPLUS] = 3,
	[NFSPROC_FSSTAT] = 0,
	[NFSPROC_FSINFO] = 0,
	[NFSPROC_PATHCONF] = 0,
	[NFSPROC_COMMIT] = 0,
	[NFSPROC_NOOP] = 0,
};
247
248 /*
249 * There is a congestion window for outstanding rpcs maintained per mount
250 * point. The cwnd size is adjusted in roughly the way that:
251 * Van Jacobson, Congestion avoidance and Control, In "Proceedings of
252 * SIGCOMM '88". ACM, August 1988.
253 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
254 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
255 * of rpcs is in progress.
256 * (The sent count and cwnd are scaled for integer arith.)
257 * Variants of "slow start" were tried and were found to be too much of a
258 * performance hit (ave. rtt 3 times larger),
259 * I suspect due to the large rtt that nfs rpcs have.
260 */
#define NFS_CWNDSCALE 256 /* fixed-point scale factor for cwnd/sent arithmetic */
#define NFS_MAXCWND (NFS_CWNDSCALE * 32) /* cap: 32 outstanding RPCs worth of window */
/* Exponential backoff multipliers, indexed by (capped) retransmit count. */
static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
264
265 /*
266 * Increment location index to next address/server/location.
267 */
/*
 * Advance *nlip to the next address, rolling over to the next server and
 * then the next location (and finally wrapping to the first location) as
 * each level is exhausted.  If every server turns out to have no
 * addresses, the index is left unchanged rather than spinning forever.
 */
void
nfs_location_next(struct nfs_fs_locations *nlp, struct nfs_location_index *nlip)
{
	uint8_t loc = nlip->nli_loc;
	uint8_t serv = nlip->nli_serv;
	uint8_t addr = nlip->nli_addr;

	/* move to next address */
	addr++;
	if (addr >= nlp->nl_locations[loc]->nl_servers[serv]->ns_addrcount) {
		/* no more addresses on current server, go to first address of next server */
next_server:
		addr = 0;
		serv++;
		if (serv >= nlp->nl_locations[loc]->nl_servcount) {
			/* no more servers on current location, go to first server of next location */
			serv = 0;
			loc++;
			if (loc >= nlp->nl_numlocs) {
				loc = 0; /* after last location, wrap back around to first location */
			}
		}
	}
	/*
	 * It's possible for this next server to not have any addresses.
	 * Check for that here and go to the next server.
	 * But bail out if we've managed to come back around to the original
	 * location that was passed in. (That would mean no servers had any
	 * addresses. And we don't want to spin here forever.)
	 */
	if ((loc == nlip->nli_loc) && (serv == nlip->nli_serv) && (addr == nlip->nli_addr)) {
		return;
	}
	if (addr >= nlp->nl_locations[loc]->nl_servers[serv]->ns_addrcount) {
		/* empty server: jump back up and try the one after it */
		goto next_server;
	}

	/* commit the new position */
	nlip->nli_loc = loc;
	nlip->nli_serv = serv;
	nlip->nli_addr = addr;
}
309
310 /*
311 * Compare two location indices.
312 */
313 int
nfs_location_index_cmp(struct nfs_location_index * nlip1,struct nfs_location_index * nlip2)314 nfs_location_index_cmp(struct nfs_location_index *nlip1, struct nfs_location_index *nlip2)
315 {
316 if (nlip1->nli_loc != nlip2->nli_loc) {
317 return nlip1->nli_loc - nlip2->nli_loc;
318 }
319 if (nlip1->nli_serv != nlip2->nli_serv) {
320 return nlip1->nli_serv - nlip2->nli_serv;
321 }
322 return nlip1->nli_addr - nlip2->nli_addr;
323 }
324
325 /*
326 * Get the mntfromname (or path portion only) for a given location.
327 */
328 void
nfs_location_mntfromname(struct nfs_fs_locations * locs,struct nfs_location_index idx,char * s,size_t size,int pathonly)329 nfs_location_mntfromname(struct nfs_fs_locations *locs, struct nfs_location_index idx, char *s, size_t size, int pathonly)
330 {
331 struct nfs_fs_location *fsl = locs->nl_locations[idx.nli_loc];
332 char *p;
333 int cnt, i;
334
335 p = s;
336 if (!pathonly) {
337 char *name = fsl->nl_servers[idx.nli_serv]->ns_name;
338 if (name == NULL) {
339 name = "";
340 }
341 if (*name == '\0') {
342 if (*fsl->nl_servers[idx.nli_serv]->ns_addresses[idx.nli_addr]) {
343 name = fsl->nl_servers[idx.nli_serv]->ns_addresses[idx.nli_addr];
344 }
345 cnt = scnprintf(p, size, "<%s>:", name);
346 } else {
347 cnt = scnprintf(p, size, "%s:", name);
348 }
349 p += cnt;
350 size -= cnt;
351 }
352 if (fsl->nl_path.np_compcount == 0) {
353 /* mounting root export on server */
354 if (size > 0) {
355 *p++ = '/';
356 *p++ = '\0';
357 }
358 return;
359 }
360 /* append each server path component */
361 for (i = 0; (size > 0) && (i < (int)fsl->nl_path.np_compcount); i++) {
362 cnt = scnprintf(p, size, "/%s", fsl->nl_path.np_components[i]);
363 p += cnt;
364 size -= cnt;
365 }
366 }
367
368 /*
369 * NFS client connect socket upcall.
370 * (Used only during socket connect/search.)
371 */
/*
 * NFS client connect socket upcall.
 * (Used only during socket connect/search.)
 *
 * Runs when data arrives on a socket being probed by the connect search.
 * Drains the socket, parses the RPC NULL-ping reply, and marks the socket
 * NSO_VERIFIED on success or NSO_DEAD (with nso_error set) on failure,
 * waking the search thread either way.  Holds nso_lock except around the
 * actual socket reads.
 */
void
nfs_connect_upcall(socket_t so, void *arg, __unused int waitflag)
{
	struct nfs_socket *nso = arg;
	size_t rcvlen;
	mbuf_t m;
	int error = 0, recv = 1;

	/*
	 * While the connect is still in progress we only need to nudge the
	 * search thread; note this check is done without taking nso_lock.
	 */
	if (nso->nso_flags & NSO_CONNECTING) {
		NFS_SOCK_DBG("nfs connect - socket %p upcall - connecting flags = %8.8x\n", nso, nso->nso_flags);
		wakeup(nso->nso_wake);
		return;
	}

	lck_mtx_lock(&nso->nso_lock);
	/* bail if another upcall is active, we're tearing down, or no ping is outstanding */
	if ((nso->nso_flags & (NSO_UPCALL | NSO_DISCONNECTING | NSO_DEAD)) || !(nso->nso_flags & NSO_PINGING)) {
		NFS_SOCK_DBG("nfs connect - socket %p upcall - nevermind\n", nso);
		lck_mtx_unlock(&nso->nso_lock);
		return;
	}
	NFS_SOCK_DBG("nfs connect - socket %p upcall %8.8x\n", nso, nso->nso_flags);
	nso->nso_flags |= NSO_UPCALL;

	/* loop while we make error-free progress */
	while (!error && recv) {
		/* make sure we're still interested in this socket */
		if (nso->nso_flags & (NSO_DISCONNECTING | NSO_DEAD)) {
			break;
		}
		/* drop the lock around the (non-blocking) socket read */
		lck_mtx_unlock(&nso->nso_lock);
		m = NULL;
		if (nso->nso_sotype == SOCK_STREAM) {
			error = nfs_rpc_record_read(so, &nso->nso_rrs, MSG_DONTWAIT, &recv, &m);
			NFS_SOCK_DBG("nfs_rpc_record_read returned %d recv = %d\n", error, recv);
		} else {
			/* datagram socket: grab one whole message */
			rcvlen = 1000000;
			error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
			recv = m ? 1 : 0;
		}
		lck_mtx_lock(&nso->nso_lock);
		if (m) {
			/* match response with request */
			struct nfsm_chain nmrep;
			uint32_t reply = 0, rxid = 0, verf_type, verf_len;
			uint32_t reply_status, rejected_status, accepted_status;

			NFS_SOCK_DUMP_MBUF("Got mbuf from ping", m);
			/* dissect the RPC reply header; errors accumulate in 'error' */
			nfsm_chain_dissect_init(error, &nmrep, m);
			nfsm_chain_get_32(error, &nmrep, rxid);
			nfsm_chain_get_32(error, &nmrep, reply);
			if (!error && ((reply != RPC_REPLY) || (rxid != nso->nso_pingxid))) {
				error = EBADRPC;
			}
			nfsm_chain_get_32(error, &nmrep, reply_status);
			if (!error && (reply_status == RPC_MSGDENIED)) {
				nfsm_chain_get_32(error, &nmrep, rejected_status);
				if (!error) {
					error = (rejected_status == RPC_MISMATCH) ? ERPCMISMATCH : EACCES;
				}
			}
			nfsm_chain_get_32(error, &nmrep, verf_type); /* verifier flavor */
			nfsm_chain_get_32(error, &nmrep, verf_len); /* verifier length */
			nfsmout_if(error);
			if (verf_len) {
				/* skip over the verifier body (XDR-rounded) */
				nfsm_chain_adv(error, &nmrep, nfsm_rndup(verf_len));
			}
			nfsm_chain_get_32(error, &nmrep, accepted_status);
			nfsmout_if(error);
			NFS_SOCK_DBG("Recevied accepted_status of %d nso_version = %d\n", accepted_status, nso->nso_version);
			/*
			 * On a program-version mismatch with no version pinned yet,
			 * negotiate one from the server's advertised min/max range.
			 */
			if ((accepted_status == RPC_PROGMISMATCH) && !nso->nso_version) {
				uint32_t minvers, maxvers;
				nfsm_chain_get_32(error, &nmrep, minvers);
				nfsm_chain_get_32(error, &nmrep, maxvers);
				nfsmout_if(error);
				if (nso->nso_protocol == PMAPPROG) {
					if ((minvers > RPCBVERS4) || (maxvers < PMAPVERS)) {
						error = EPROGMISMATCH;
					} else if ((nso->nso_saddr->sa_family == AF_INET) &&
					    (PMAPVERS >= minvers) && (PMAPVERS <= maxvers)) {
						nso->nso_version = PMAPVERS;
					} else if (nso->nso_saddr->sa_family == AF_INET6) {
						if ((RPCBVERS4 >= minvers) && (RPCBVERS4 <= maxvers)) {
							nso->nso_version = RPCBVERS4;
						} else if ((RPCBVERS3 >= minvers) && (RPCBVERS3 <= maxvers)) {
							nso->nso_version = RPCBVERS3;
						}
					}
				} else if (nso->nso_protocol == NFS_PROG) {
					int vers;

					/*
					 * N.B. Both portmapper and rpcbind V3 are happy to return
					 * addresses for other versions than the one you ask (getport or
					 * getaddr) and thus we may have fallen to this code path. So if
					 * we get a version that we support, use highest supported
					 * version. This assumes that the server supports all versions
					 * between minvers and maxvers. Note for IPv6 we will try and
					 * use rpcbind V4 which has getversaddr and we should not get
					 * here if that was successful.
					 */
					for (vers = nso->nso_nfs_max_vers; vers >= (int)nso->nso_nfs_min_vers; vers--) {
						if (vers >= (int)minvers && vers <= (int)maxvers) {
							break;
						}
					}
					nso->nso_version = (vers < (int)nso->nso_nfs_min_vers) ? 0 : vers;
				}
				if (!error && nso->nso_version) {
					/* negotiation succeeded; treat the ping as accepted */
					accepted_status = RPC_SUCCESS;
				}
			}
			if (!error) {
				/* map the RPC accept status to an errno */
				switch (accepted_status) {
				case RPC_SUCCESS:
					error = 0;
					break;
				case RPC_PROGUNAVAIL:
					error = EPROGUNAVAIL;
					break;
				case RPC_PROGMISMATCH:
					error = EPROGMISMATCH;
					break;
				case RPC_PROCUNAVAIL:
					error = EPROCUNAVAIL;
					break;
				case RPC_GARBAGE:
					error = EBADRPC;
					break;
				case RPC_SYSTEM_ERR:
				default:
					error = EIO;
					break;
				}
			}
nfsmout:
			/* ping answered (or failed to parse); record the verdict */
			nso->nso_flags &= ~NSO_PINGING;
			if (error) {
				NFS_SOCK_DBG("nfs upcalled failed for %d program %d vers error = %d\n",
				    nso->nso_protocol, nso->nso_version, error);
				nso->nso_error = error;
				nso->nso_flags |= NSO_DEAD;
			} else {
				nso->nso_flags |= NSO_VERIFIED;
			}
			mbuf_freem(m);
			/* wake up search thread */
			wakeup(nso->nso_wake);
			break;
		}
	}

	nso->nso_flags &= ~NSO_UPCALL;
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/* problems with the socket... */
		NFS_SOCK_DBG("connect upcall failed %d\n", error);
		nso->nso_error = error ? error : EPIPE;
		nso->nso_flags |= NSO_DEAD;
		wakeup(nso->nso_wake);
	}
	if (nso->nso_flags & NSO_DISCONNECTING) {
		/* let nfs_socket_destroy() know the upcall is done */
		wakeup(&nso->nso_flags);
	}
	lck_mtx_unlock(&nso->nso_lock);
}
536
537 /*
538 * Create/initialize an nfs_socket structure.
539 */
/*
 * Allocate and initialize an nfs_socket for the given address/port/
 * protocol/version, optionally binding to a reserved port.  On success
 * *nsop is set and 0 returned; on failure the partially-built socket is
 * destroyed and an errno returned.
 */
int
nfs_socket_create(
	struct nfsmount *nmp,
	struct sockaddr *sa,
	uint8_t sotype,
	in_port_t port,
	uint32_t protocol,
	uint32_t vers,
	int resvport,
	struct nfs_socket **nsop)
{
	struct nfs_socket *nso;
	struct timeval now;
	int error;
#define NFS_SOCKET_DEBUGGING
#ifdef NFS_SOCKET_DEBUGGING
	/* naddr is sized to hold a sun_path, the longest string we format */
	char naddr[sizeof((struct sockaddr_un *)0)->sun_path];
	void *sinaddr;

	/* build a printable form of the address (debug builds only) */
	switch (sa->sa_family) {
	case AF_INET:
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			return EINVAL;
		}
		sinaddr = &((struct sockaddr_in*)sa)->sin_addr;
		if (inet_ntop(sa->sa_family, sinaddr, naddr, sizeof(naddr)) != naddr) {
			strlcpy(naddr, "<unknown>", sizeof(naddr));
		}
		break;
	case AF_INET6:
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			return EINVAL;
		}
		sinaddr = &((struct sockaddr_in6*)sa)->sin6_addr;
		if (inet_ntop(sa->sa_family, sinaddr, naddr, sizeof(naddr)) != naddr) {
			strlcpy(naddr, "<unknown>", sizeof(naddr));
		}
		break;
	case AF_LOCAL:
		if (sa->sa_len != sizeof(struct sockaddr_un) && sa->sa_len != SUN_LEN((struct sockaddr_un *)sa)) {
			return EINVAL;
		}
		strlcpy(naddr, ((struct sockaddr_un *)sa)->sun_path, sizeof(naddr));
		break;
	default:
		strlcpy(naddr, "<unsupported address family>", sizeof(naddr));
		break;
	}
#else
	char naddr[1] = { 0 };
#endif

	*nsop = NULL;

	/* Create the socket. */
	nso = kalloc_type(struct nfs_socket, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	nso->nso_saddr = (struct sockaddr *)alloc_sockaddr(sa->sa_len, Z_WAITOK | Z_NOFAIL);

	lck_mtx_init(&nso->nso_lock, &nfs_request_grp, LCK_ATTR_NULL);
	nso->nso_sotype = sotype;
	if (nso->nso_sotype == SOCK_STREAM) {
		/* stream sockets need RPC record-marking state */
		nfs_rpc_record_state_init(&nso->nso_rrs);
	}
	microuptime(&now);
	nso->nso_timestamp = now.tv_sec;
	bcopy(sa, nso->nso_saddr, sa->sa_len);
	/* stamp the requested port into our private copy of the address */
	switch (sa->sa_family) {
	case AF_INET:
	case AF_INET6:
		if (sa->sa_family == AF_INET) {
			((struct sockaddr_in*)nso->nso_saddr)->sin_port = htons(port);
		} else if (sa->sa_family == AF_INET6) {
			((struct sockaddr_in6*)nso->nso_saddr)->sin6_port = htons(port);
		}
		break;
	case AF_LOCAL:
		break;
	}
	nso->nso_protocol = protocol;
	nso->nso_version = vers;
	nso->nso_nfs_min_vers = PVER2MAJOR(nmp->nm_min_vers);
	nso->nso_nfs_max_vers = PVER2MAJOR(nmp->nm_max_vers);

	error = sock_socket(sa->sa_family, nso->nso_sotype, 0, NULL, NULL, &nso->nso_so);

	/* Some servers require that the client port be a reserved port number. */
	if (!error && resvport && ((sa->sa_family == AF_INET) || (sa->sa_family == AF_INET6))) {
		struct sockaddr_storage ss;
		int level = (sa->sa_family == AF_INET) ? IPPROTO_IP : IPPROTO_IPV6;
		int optname = (sa->sa_family == AF_INET) ? IP_PORTRANGE : IPV6_PORTRANGE;
		int portrange = IP_PORTRANGE_LOW;

		error = sock_setsockopt(nso->nso_so, level, optname, &portrange, sizeof(portrange));
		if (!error) { /* bind now to check for failure */
			ss.ss_len = sa->sa_len;
			ss.ss_family = sa->sa_family;
			if (ss.ss_family == AF_INET) {
				((struct sockaddr_in*)&ss)->sin_addr.s_addr = INADDR_ANY;
				((struct sockaddr_in*)&ss)->sin_port = htons(0);
			} else if (ss.ss_family == AF_INET6) {
				((struct sockaddr_in6*)&ss)->sin6_addr = in6addr_any;
				((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
			} else {
				error = EINVAL;
			}
			if (!error) {
				error = sock_bind(nso->nso_so, (struct sockaddr*)&ss);
			}
		}
	}

	if (error) {
		NFS_SOCK_DBG("nfs connect %s error %d creating socket %p %s type %d%s port %d prot %d %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nso, naddr, sotype,
		    resvport ? "r" : "", port, protocol, vers);
		/* tear down everything allocated above */
		nfs_socket_destroy(nso);
	} else {
		NFS_SOCK_DBG("nfs connect %s created socket %p <%s> type %d%s port %d prot %d %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, naddr,
		    sotype, resvport ? "r" : "", port, protocol, vers);
		*nsop = nso;
	}
	return error;
}
664
665 /*
666 * Destroy an nfs_socket structure.
667 */
668 void
nfs_socket_destroy(struct nfs_socket * nso)669 nfs_socket_destroy(struct nfs_socket *nso)
670 {
671 struct timespec ts = { .tv_sec = 4, .tv_nsec = 0 };
672
673 NFS_SOCK_DBG("Destoring socket %p flags = %8.8x error = %d\n", nso, nso->nso_flags, nso->nso_error);
674 lck_mtx_lock(&nso->nso_lock);
675 nso->nso_flags |= NSO_DISCONNECTING;
676 if (nso->nso_flags & NSO_UPCALL) { /* give upcall a chance to complete */
677 msleep(&nso->nso_flags, &nso->nso_lock, PZERO - 1, "nfswaitupcall", &ts);
678 }
679 lck_mtx_unlock(&nso->nso_lock);
680 sock_shutdown(nso->nso_so, SHUT_RDWR);
681 sock_close(nso->nso_so);
682 if (nso->nso_sotype == SOCK_STREAM) {
683 nfs_rpc_record_state_cleanup(&nso->nso_rrs);
684 }
685 lck_mtx_destroy(&nso->nso_lock, &nfs_request_grp);
686
687 free_sockaddr(nso->nso_saddr);
688 free_sockaddr(nso->nso_saddr2);
689
690 NFS_SOCK_DBG("nfs connect - socket %p destroyed\n", nso);
691 kfree_type(struct nfs_socket, nso);
692 }
693
694 /*
695 * Set common socket options on an nfs_socket.
696 */
/*
 * Set common socket options on an nfs_socket: send/receive timeouts,
 * keepalive/nodelay for TCP, buffer sizes scaled to the mount's I/O
 * sizes, address-error notification, and interruptibility.  Most
 * setsockopt return values are deliberately ignored; only the buffer
 * size failures are logged.
 */
void
nfs_socket_options(struct nfsmount *nmp, struct nfs_socket *nso)
{
	/*
	 * Set socket send/receive timeouts
	 * - Receive timeout shouldn't matter because most receives are performed
	 * in the socket upcall non-blocking.
	 * - Send timeout should allow us to react to a blocked socket.
	 * Soft mounts will want to abort sooner.
	 */
	struct timeval timeo;
	int on = 1, proto, reserve, error;

	timeo.tv_usec = 0;
	/* soft/squishy mounts get a short 5s send timeout; hard mounts 60s */
	timeo.tv_sec = (NMFLAG(nmp, SOFT) || nfs_can_squish(nmp)) ? 5 : 60;
	sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
	if (nso->nso_sotype == SOCK_STREAM) {
		/* Assume that SOCK_STREAM always requires a connection */
		sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
		/* set nodelay for TCP */
		sock_gettype(nso->nso_so, NULL, NULL, &proto);
		if (proto == IPPROTO_TCP) {
			sock_setsockopt(nso->nso_so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
		}
	}

	/* set socket buffer sizes for UDP/TCP */
	reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : MAX(nfs_tcp_sockbuf, nmp->nm_wsize * 2);
	error = sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDBUF, &reserve, sizeof(reserve));
	if (error) {
		log(LOG_INFO, "nfs_socket_options: error %d setting SO_SNDBUF to %u\n", error, reserve);
	}

	reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : MAX(nfs_tcp_sockbuf, nmp->nm_rsize * 2);
	error = sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVBUF, &reserve, sizeof(reserve));
	if (error) {
		log(LOG_INFO, "nfs_socket_options: error %d setting SO_RCVBUF to %u\n", error, reserve);
	}

	/* set SO_NOADDRERR to detect network changes ASAP */
	sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
	/* just playin' it safe with upcalls */
	sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
	/* socket should be interruptible if the mount is */
	if (!NMFLAG(nmp, INTR)) {
		sock_nointerrupt(nso->nso_so, 1);
	}
}
746
747 /*
748 * Release resources held in an nfs_socket_search.
749 */
750 void
nfs_socket_search_cleanup(struct nfs_socket_search * nss)751 nfs_socket_search_cleanup(struct nfs_socket_search *nss)
752 {
753 struct nfs_socket *nso, *nsonext;
754
755 TAILQ_FOREACH_SAFE(nso, &nss->nss_socklist, nso_link, nsonext) {
756 TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
757 nss->nss_sockcnt--;
758 nfs_socket_destroy(nso);
759 }
760 if (nss->nss_sock) {
761 nfs_socket_destroy(nss->nss_sock);
762 nss->nss_sock = NULL;
763 }
764 }
765
766 /*
767 * Prefer returning certain errors over others.
768 * This function returns a ranking of the given error.
769 */
770 int
nfs_connect_error_class(int error)771 nfs_connect_error_class(int error)
772 {
773 switch (error) {
774 case 0:
775 return 0;
776 case ETIMEDOUT:
777 case EAGAIN:
778 return 1;
779 case EPIPE:
780 case EADDRNOTAVAIL:
781 case ENETDOWN:
782 case ENETUNREACH:
783 case ENETRESET:
784 case ECONNABORTED:
785 case ECONNRESET:
786 case EISCONN:
787 case ENOTCONN:
788 case ESHUTDOWN:
789 case ECONNREFUSED:
790 case EHOSTDOWN:
791 case EHOSTUNREACH:
792 return 2;
793 case ERPCMISMATCH:
794 case EPROCUNAVAIL:
795 case EPROGMISMATCH:
796 case EPROGUNAVAIL:
797 return 3;
798 case EBADRPC:
799 return 4;
800 default:
801 return 5;
802 }
803 }
804
805 /*
806 * Make sure a socket search returns the best error.
807 */
808 void
nfs_socket_search_update_error(struct nfs_socket_search * nss,int error)809 nfs_socket_search_update_error(struct nfs_socket_search *nss, int error)
810 {
811 if (nfs_connect_error_class(error) >= nfs_connect_error_class(nss->nss_error)) {
812 nss->nss_error = error;
813 }
814 }
815
816 /* nfs_connect_search_new_socket:
817 * Given a socket search structure for an nfs mount try to find a new socket from the set of addresses specified
818 * by nss.
819 *
820 * nss_last is set to -1 at initialization to indicate the first time. Its set to -2 if address was found but
821 * could not be used or if a socket timed out.
822 */
/*
 * nfs_connect_search_new_socket:
 * Given a socket search structure for an nfs mount try to find a new socket
 * from the set of addresses specified by nss.
 *
 * nss_last is set to -1 at initialization to indicate the first time.  It is
 * set to -2 if an address was found but could not be used or if a socket
 * timed out.  Returns 0 (possibly with new sockets queued on nss_socklist),
 * EINTR if the mount is unmounting, or the error from socket creation.
 */
int
nfs_connect_search_new_socket(struct nfsmount *nmp, struct nfs_socket_search *nss, struct timeval *now)
{
	struct nfs_fs_location *fsl;
	struct nfs_fs_server *fss;
	struct sockaddr_storage ss;
	struct nfs_socket *nso;
	char *addrstr;
	int error = 0;


	NFS_SOCK_DBG("nfs connect %s nss_addrcnt = %d\n",
	    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss->nss_addrcnt);

	/*
	 * while there are addresses and:
	 * we have no sockets or
	 * the last address failed and did not produce a socket (nss_last < 0) or
	 * Its been a while (2 seconds) and we have less than the max number of concurrent sockets to search (4)
	 * then attempt to create a socket with the current address.
	 */
	while (nss->nss_addrcnt > 0 && ((nss->nss_last < 0) || (nss->nss_sockcnt == 0) ||
	    ((nss->nss_sockcnt < 4) && (now->tv_sec >= (nss->nss_last + 2))))) {
		if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
			return EINTR;
		}
		/* Can we convert the address to a sockaddr? */
		fsl = nmp->nm_locations.nl_locations[nss->nss_nextloc.nli_loc];
		fss = fsl->nl_servers[nss->nss_nextloc.nli_serv];
		addrstr = fss->ns_addresses[nss->nss_nextloc.nli_addr];
		NFS_SOCK_DBG("Trying address %s for program %d on port %d\n", addrstr, nss->nss_protocol, nss->nss_port);
		if (*addrstr == '\0') {
			/*
			 * We have an unspecified local domain address. We use the program to translate to
			 * a well known local transport address. We only support PMAPROG and NFS for this.
			 */
			if (nss->nss_protocol == PMAPPROG) {
				addrstr = (nss->nss_sotype == SOCK_DGRAM) ? RPCB_TICLTS_PATH : RPCB_TICOTSORD_PATH;
			} else if (nss->nss_protocol == NFS_PROG) {
				addrstr = nmp->nm_nfs_localport;
				if (!addrstr || *addrstr == '\0') {
					addrstr = (nss->nss_sotype == SOCK_DGRAM) ? NFS_TICLTS_PATH : NFS_TICOTSORD_PATH;
				}
			}
			NFS_SOCK_DBG("Calling prog %d with <%s>\n", nss->nss_protocol, addrstr);
		}
		if (!nfs_uaddr2sockaddr(addrstr, (struct sockaddr*)&ss)) {
			NFS_SOCK_DBG("Could not convert address %s to socket\n", addrstr);
			/* unusable address: skip to the next one and note the failure */
			nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
			nss->nss_addrcnt -= 1;
			nss->nss_last = -2;
			continue;
		}
		/* Check that socket family is acceptable. */
		if (nmp->nm_sofamily && (ss.ss_family != nmp->nm_sofamily)) {
			NFS_SOCK_DBG("Skipping socket family %d, want mount family %d\n", ss.ss_family, nmp->nm_sofamily);
			nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
			nss->nss_addrcnt -= 1;
			nss->nss_last = -2;
			continue;
		}

		/* Create the socket. */
		error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nss->nss_sotype,
		    nss->nss_port, nss->nss_protocol, nss->nss_version,
		    ((nss->nss_protocol == NFS_PROG) && NMFLAG(nmp, RESVPORT)), &nso);
		if (error) {
			return error;
		}

		nso->nso_location = nss->nss_nextloc;
		nso->nso_wake = nss;
		error = sock_setupcall(nso->nso_so, nfs_connect_upcall, nso);
		if (error) {
			NFS_SOCK_DBG("sock_setupcall failed for socket %p setting nfs_connect_upcall error = %d\n", nso, error);
			/* mark it dead; it still goes on the list so the reaper cleans it up */
			lck_mtx_lock(&nso->nso_lock);
			nso->nso_error = error;
			nso->nso_flags |= NSO_DEAD;
			lck_mtx_unlock(&nso->nso_lock);
		}

		TAILQ_INSERT_TAIL(&nss->nss_socklist, nso, nso_link);
		nss->nss_sockcnt++;
		nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
		nss->nss_addrcnt -= 1;

		nss->nss_last = now->tv_sec;
	}

	/* all addresses consumed without a usable socket: reset the timestamp */
	if (nss->nss_addrcnt == 0 && nss->nss_last < 0) {
		nss->nss_last = now->tv_sec;
	}

	return error;
}
918
919 /*
920 * nfs_connect_search_socket_connect: Connect an nfs socket nso for nfsmount nmp.
921 * If successful set the socket options for the socket as require from the mount.
922 *
923 * Assumes: nso->nso_lock is held on entry and return.
924 */
int
nfs_connect_search_socket_connect(struct nfsmount *nmp, struct nfs_socket *nso, int verbose)
{
	int error;

	if ((nso->nso_sotype != SOCK_STREAM) && NMFLAG(nmp, NOCONNECT)) {
		/* no connection needed, just say it's already connected */
		NFS_SOCK_DBG("nfs connect %s UDP socket %p noconnect\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
		nso->nso_flags |= NSO_CONNECTED;
		nfs_socket_options(nmp, nso);
		return 1;       /* Socket is connected and setup */
	} else if (!(nso->nso_flags & NSO_CONNECTING)) {
		/* initiate the connection */
		nso->nso_flags |= NSO_CONNECTING;
		/* Drop nso_lock across the connect call, which may block. */
		lck_mtx_unlock(&nso->nso_lock);
		NFS_SOCK_DBG("nfs connect %s connecting socket %p %s\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso,
		    nso->nso_saddr->sa_family == AF_LOCAL ? ((struct sockaddr_un*)nso->nso_saddr)->sun_path : "");
		/* MSG_DONTWAIT: start a non-blocking connect; completion is polled below */
		error = sock_connect(nso->nso_so, nso->nso_saddr, MSG_DONTWAIT);
		if (error) {
			NFS_SOCK_DBG("nfs connect %s connecting socket %p returned %d\n",
			    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
		}
		lck_mtx_lock(&nso->nso_lock);
		/* EINPROGRESS just means the non-blocking connect is still underway */
		if (error && (error != EINPROGRESS)) {
			nso->nso_error = error;
			nso->nso_flags |= NSO_DEAD;
			return 0;
		}
	}
	if (nso->nso_flags & NSO_CONNECTING) {
		/* check the connection */
		if (sock_isconnected(nso->nso_so)) {
			NFS_SOCK_DBG("nfs connect %s socket %p is connected\n",
			    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
			nso->nso_flags &= ~NSO_CONNECTING;
			nso->nso_flags |= NSO_CONNECTED;
			nfs_socket_options(nmp, nso);
			return 1;       /* Socket is connected and setup */
		} else {
			/* Not connected yet -- see if the socket has reported an error. */
			int optlen = sizeof(error);
			error = 0;
			sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &error, &optlen);
			if (error) { /* we got an error on the socket */
				NFS_SOCK_DBG("nfs connect %s socket %p connection error %d\n",
				    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
				if (verbose) {
					printf("nfs connect socket error %d for %s\n",
					    error, vfs_statfs(nmp->nm_mountp)->f_mntfromname);
				}
				/* Mark the socket dead; the reaper will clean it up. */
				nso->nso_error = error;
				nso->nso_flags |= NSO_DEAD;
				return 0;
			}
		}
	}

	return 0; /* Waiting to be connected */
}
985
986 /*
987 * nfs_connect_search_ping: Send a null proc on the nso socket.
988 */
int
nfs_connect_search_ping(struct nfsmount *nmp, struct nfs_socket *nso, struct timeval *now)
{
	/* initiate a NULL RPC request */
	uint64_t xid = nso->nso_pingxid;
	mbuf_t m, mreq = NULL;
	struct msghdr msg;
	size_t reqlen, sentlen;
	uint32_t vers = nso->nso_version;
	int error;

	/*
	 * If no RPC version has been negotiated yet, pick a default to ping with:
	 * for portmap, the version implied by the address family; for NFS, the
	 * highest version the mount allows.
	 */
	if (!vers) {
		if (nso->nso_protocol == PMAPPROG) {
			vers = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4;
		} else if (nso->nso_protocol == NFS_PROG) {
			vers = PVER2MAJOR(nmp->nm_max_vers);
		}
	}
	/* Drop nso_lock while building the request header (may allocate/block). */
	lck_mtx_unlock(&nso->nso_lock);
	NFS_SOCK_DBG("Pinging socket %p %d %d %d\n", nso, nso->nso_sotype, nso->nso_protocol, vers);
	error = nfsm_rpchead2(nmp, nso->nso_sotype, nso->nso_protocol, vers, 0, RPCAUTH_SYS,
	    vfs_context_ucred(vfs_context_kernel()), NULL, NULL, &xid, &mreq);
	lck_mtx_lock(&nso->nso_lock);
	if (!error) {
		/* Record the ping so the upcall can match the reply by XID. */
		nso->nso_flags |= NSO_PINGING;
		nso->nso_pingxid = R_XID32(xid);
		nso->nso_reqtimestamp = now->tv_sec;
		bzero(&msg, sizeof(msg));
		/* Unconnected datagram sockets need an explicit destination address. */
		if ((nso->nso_sotype != SOCK_STREAM) && !sock_isconnected(nso->nso_so)) {
			msg.msg_name = nso->nso_saddr;
			msg.msg_namelen = nso->nso_saddr->sa_len;
		}
		/* Total up the request length across the mbuf chain. */
		for (reqlen = 0, m = mreq; m; m = mbuf_next(m)) {
			reqlen += mbuf_len(m);
		}
		/* Drop nso_lock across the send, which may block. */
		lck_mtx_unlock(&nso->nso_lock);
		NFS_SOCK_DUMP_MBUF("Sending ping packet", mreq);
		error = sock_sendmbuf(nso->nso_so, &msg, mreq, 0, &sentlen);
		NFS_SOCK_DBG("nfs connect %s verifying socket %p send rv %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
		lck_mtx_lock(&nso->nso_lock);
		/* A short send counts as a failure. */
		if (!error && (sentlen != reqlen)) {
			error = ETIMEDOUT;
		}
	}
	if (error) {
		/* Mark the socket dead; the reaper will clean it up. */
		nso->nso_error = error;
		nso->nso_flags |= NSO_DEAD;
		return 0;
	}

	return 1;
}
1042
1043 /*
1044 * nfs_connect_search_socket_found: Take the found socket of the socket search list and assign it to the searched socket.
1045 * Set the nfs socket protocol and version if needed.
1046 */
1047 void
nfs_connect_search_socket_found(struct nfsmount * nmp,struct nfs_socket_search * nss,struct nfs_socket * nso)1048 nfs_connect_search_socket_found(struct nfsmount *nmp, struct nfs_socket_search *nss, struct nfs_socket *nso)
1049 {
1050 NFS_SOCK_DBG("nfs connect %s socket %p verified\n",
1051 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1052 if (!nso->nso_version) {
1053 /* If the version isn't set, the default must have worked. */
1054 if (nso->nso_protocol == PMAPPROG) {
1055 nso->nso_version = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4;
1056 }
1057 if (nso->nso_protocol == NFS_PROG) {
1058 nso->nso_version = PVER2MAJOR(nmp->nm_max_vers);
1059 }
1060 }
1061 TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
1062 nss->nss_sockcnt--;
1063 nss->nss_sock = nso;
1064 }
1065
1066 /*
1067 * nfs_connect_search_socket_reap: For each socket in the search list mark any timed out socket as dead and remove from
1068 * the list. Dead socket are then destroyed.
1069 */
1070 void
nfs_connect_search_socket_reap(struct nfsmount * nmp __unused,struct nfs_socket_search * nss,struct timeval * now)1071 nfs_connect_search_socket_reap(struct nfsmount *nmp __unused, struct nfs_socket_search *nss, struct timeval *now)
1072 {
1073 struct nfs_socket *nso, *nsonext;
1074
1075 TAILQ_FOREACH_SAFE(nso, &nss->nss_socklist, nso_link, nsonext) {
1076 lck_mtx_lock(&nso->nso_lock);
1077 if (now->tv_sec >= (nso->nso_timestamp + nss->nss_timeo)) {
1078 /* took too long */
1079 NFS_SOCK_DBG("nfs connect %s socket %p timed out\n",
1080 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1081 nso->nso_error = ETIMEDOUT;
1082 nso->nso_flags |= NSO_DEAD;
1083 }
1084 if (!(nso->nso_flags & NSO_DEAD)) {
1085 lck_mtx_unlock(&nso->nso_lock);
1086 continue;
1087 }
1088 lck_mtx_unlock(&nso->nso_lock);
1089 NFS_SOCK_DBG("nfs connect %s reaping socket %p error = %d flags = %8.8x\n",
1090 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, nso->nso_error, nso->nso_flags);
1091 nfs_socket_search_update_error(nss, nso->nso_error);
1092 TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
1093 nss->nss_sockcnt--;
1094 nfs_socket_destroy(nso);
1095 /* If there are more sockets to try, force the starting of another socket */
1096 if (nss->nss_addrcnt > 0) {
1097 nss->nss_last = -2;
1098 }
1099 }
1100 }
1101
1102 /*
1103 * nfs_connect_search_check: Check on the status of search and wait for replies if needed.
1104 */
1105 int
nfs_connect_search_check(struct nfsmount * nmp,struct nfs_socket_search * nss,struct timeval * now)1106 nfs_connect_search_check(struct nfsmount *nmp, struct nfs_socket_search *nss, struct timeval *now)
1107 {
1108 int error;
1109
1110 /* log a warning if connect is taking a while */
1111 if (((now->tv_sec - nss->nss_timestamp) >= 8) && ((nss->nss_flags & (NSS_VERBOSE | NSS_WARNED)) == NSS_VERBOSE)) {
1112 printf("nfs_connect: socket connect taking a while for %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1113 nss->nss_flags |= NSS_WARNED;
1114 }
1115 if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
1116 return EINTR;
1117 }
1118 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 0))) {
1119 return error;
1120 }
1121
1122 /* If we were succesfull at sending a ping, wait up to a second for a reply */
1123 if (nss->nss_last >= 0) {
1124 tsleep(nss, PSOCK, "nfs_connect_search_wait", hz);
1125 }
1126
1127 return 0;
1128 }
1129
1130
1131 /*
1132 * Continue the socket search until we have something to report.
1133 */
1134 int
nfs_connect_search_loop(struct nfsmount * nmp,struct nfs_socket_search * nss)1135 nfs_connect_search_loop(struct nfsmount *nmp, struct nfs_socket_search *nss)
1136 {
1137 struct nfs_socket *nso;
1138 struct timeval now;
1139 int error;
1140 int verbose = (nss->nss_flags & NSS_VERBOSE);
1141
1142 loop:
1143 microuptime(&now);
1144 NFS_SOCK_DBG("nfs connect %s search %ld\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, now.tv_sec);
1145
1146 /* add a new socket to the socket list if needed and available */
1147 error = nfs_connect_search_new_socket(nmp, nss, &now);
1148 if (error) {
1149 NFS_SOCK_DBG("nfs connect returned %d\n", error);
1150 return error;
1151 }
1152
1153 /* check each active socket on the list and try to push it along */
1154 TAILQ_FOREACH(nso, &nss->nss_socklist, nso_link) {
1155 lck_mtx_lock(&nso->nso_lock);
1156
1157 /* If not connected connect it */
1158 if (!(nso->nso_flags & NSO_CONNECTED)) {
1159 if (!nfs_connect_search_socket_connect(nmp, nso, verbose)) {
1160 lck_mtx_unlock(&nso->nso_lock);
1161 continue;
1162 }
1163 }
1164
1165 /* If the socket hasn't been verified or in a ping, ping it. We also handle UDP retransmits */
1166 if (!(nso->nso_flags & (NSO_PINGING | NSO_VERIFIED)) ||
1167 ((nso->nso_sotype == SOCK_DGRAM) && (now.tv_sec >= nso->nso_reqtimestamp + 2))) {
1168 if (!nfs_connect_search_ping(nmp, nso, &now)) {
1169 lck_mtx_unlock(&nso->nso_lock);
1170 continue;
1171 }
1172 }
1173
1174 /* Has the socket been verified by the up call routine? */
1175 if (nso->nso_flags & NSO_VERIFIED) {
1176 /* WOOHOO!! This socket looks good! */
1177 nfs_connect_search_socket_found(nmp, nss, nso);
1178 lck_mtx_unlock(&nso->nso_lock);
1179 break;
1180 }
1181 lck_mtx_unlock(&nso->nso_lock);
1182 }
1183
1184 /* Check for timed out sockets and mark as dead and then remove all dead sockets. */
1185 nfs_connect_search_socket_reap(nmp, nss, &now);
1186
1187 /*
1188 * Keep looping if we haven't found a socket yet and we have more
1189 * sockets to (continue to) try.
1190 */
1191 error = 0;
1192 if (!nss->nss_sock && (!TAILQ_EMPTY(&nss->nss_socklist) || nss->nss_addrcnt)) {
1193 error = nfs_connect_search_check(nmp, nss, &now);
1194 if (!error) {
1195 goto loop;
1196 }
1197 }
1198
1199 NFS_SOCK_DBG("nfs connect %s returning %d\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
1200 return error;
1201 }
1202
1203 /*
1204 * Initialize a new NFS connection.
1205 *
1206 * Search for a location to connect a socket to and initialize the connection.
1207 *
1208 * An NFS mount may have multiple locations/servers/addresses available.
1209 * We attempt to connect to each one asynchronously and will start
1210 * several sockets in parallel if other locations are slow to answer.
1211 * We'll use the first NFS socket we can successfully set up.
1212 *
1213 * The search may involve contacting the portmapper service first.
1214 *
1215 * A mount's initial connection may require negotiating some parameters such
1216 * as socket type and NFS version.
1217 */
1218
1219 int
nfs_connect(struct nfsmount * nmp,int verbose,int timeo)1220 nfs_connect(struct nfsmount *nmp, int verbose, int timeo)
1221 {
1222 struct nfs_socket_search nss;
1223 struct nfs_socket *nso, *nsonfs;
1224 struct sockaddr_storage ss;
1225 struct sockaddr *saddr, *oldsaddr;
1226 sock_upcall upcall;
1227 #if CONFIG_NFS4
1228 struct timeval now;
1229 #endif
1230 struct timeval start;
1231 int error, savederror, nfsvers;
1232 int tryv4 = 1;
1233 uint8_t sotype = nmp->nm_sotype ? nmp->nm_sotype : SOCK_STREAM;
1234 fhandle_t *fh = NULL;
1235 char *path = NULL;
1236 in_port_t port = 0;
1237 int addrtotal = 0;
1238
1239 /* paranoia... check that we have at least one address in the locations */
1240 uint32_t loc, serv;
1241 for (loc = 0; loc < nmp->nm_locations.nl_numlocs; loc++) {
1242 for (serv = 0; serv < nmp->nm_locations.nl_locations[loc]->nl_servcount; serv++) {
1243 addrtotal += nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount;
1244 if (nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount == 0) {
1245 NFS_SOCK_DBG("nfs connect %s search, server %s has no addresses\n",
1246 vfs_statfs(nmp->nm_mountp)->f_mntfromname,
1247 nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name);
1248 }
1249 }
1250 }
1251
1252 if (addrtotal == 0) {
1253 NFS_SOCK_DBG("nfs connect %s search failed, no addresses\n",
1254 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1255 return EINVAL;
1256 } else {
1257 NFS_SOCK_DBG("nfs connect %s has %d addresses\n",
1258 vfs_statfs(nmp->nm_mountp)->f_mntfromname, addrtotal);
1259 }
1260
1261 lck_mtx_lock(&nmp->nm_lock);
1262 nmp->nm_sockflags |= NMSOCK_CONNECTING;
1263 nmp->nm_nss = &nss;
1264 lck_mtx_unlock(&nmp->nm_lock);
1265 microuptime(&start);
1266 savederror = error = 0;
1267
1268 tryagain:
1269 /* initialize socket search state */
1270 bzero(&nss, sizeof(nss));
1271 nss.nss_addrcnt = addrtotal;
1272 nss.nss_error = savederror;
1273 TAILQ_INIT(&nss.nss_socklist);
1274 nss.nss_sotype = sotype;
1275 nss.nss_startloc = nmp->nm_locations.nl_current;
1276 nss.nss_timestamp = start.tv_sec;
1277 nss.nss_timeo = timeo;
1278 if (verbose) {
1279 nss.nss_flags |= NSS_VERBOSE;
1280 }
1281
1282 /* First time connecting, we may need to negotiate some things */
1283 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1284 NFS_SOCK_DBG("so_family = %d\n", nmp->nm_sofamily);
1285 NFS_SOCK_DBG("nfs port = %d local: <%s>\n", nmp->nm_nfsport, nmp->nm_nfs_localport ? nmp->nm_nfs_localport : "");
1286 NFS_SOCK_DBG("mount port = %d local: <%s>\n", nmp->nm_mountport, nmp->nm_mount_localport ? nmp->nm_mount_localport : "");
1287 if (!nmp->nm_vers) {
1288 /* No NFS version specified... */
1289 if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) {
1290 #if CONFIG_NFS4
1291 if (PVER2MAJOR(nmp->nm_max_vers) >= NFS_VER4 && tryv4) {
1292 nss.nss_port = NFS_PORT;
1293 nss.nss_protocol = NFS_PROG;
1294 nss.nss_version = 4;
1295 nss.nss_flags |= NSS_FALLBACK2PMAP;
1296 } else {
1297 #endif
1298 /* ...connect to portmapper first if we (may) need any ports. */
1299 nss.nss_port = PMAPPORT;
1300 nss.nss_protocol = PMAPPROG;
1301 nss.nss_version = 0;
1302 #if CONFIG_NFS4
1303 }
1304 #endif
1305 } else {
1306 /* ...connect to NFS port first. */
1307 nss.nss_port = nmp->nm_nfsport;
1308 nss.nss_protocol = NFS_PROG;
1309 nss.nss_version = 0;
1310 }
1311 #if CONFIG_NFS4
1312 } else if (nmp->nm_vers >= NFS_VER4) {
1313 if (tryv4) {
1314 /* For NFSv4, we use the given (or default) port. */
1315 nss.nss_port = nmp->nm_nfsport ? nmp->nm_nfsport : NFS_PORT;
1316 nss.nss_protocol = NFS_PROG;
1317 nss.nss_version = 4;
1318 /*
1319 * set NSS_FALLBACK2PMAP here to pick up any non standard port
1320 * if no port is specified on the mount;
1321 * Note nm_vers is set so we will only try NFS_VER4.
1322 */
1323 if (!nmp->nm_nfsport) {
1324 nss.nss_flags |= NSS_FALLBACK2PMAP;
1325 }
1326 } else {
1327 nss.nss_port = PMAPPORT;
1328 nss.nss_protocol = PMAPPROG;
1329 nss.nss_version = 0;
1330 }
1331 #endif
1332 } else {
1333 /* For NFSv3/v2... */
1334 if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) {
1335 /* ...connect to portmapper first if we need any ports. */
1336 nss.nss_port = PMAPPORT;
1337 nss.nss_protocol = PMAPPROG;
1338 nss.nss_version = 0;
1339 } else {
1340 /* ...connect to NFS port first. */
1341 nss.nss_port = nmp->nm_nfsport;
1342 nss.nss_protocol = NFS_PROG;
1343 nss.nss_version = nmp->nm_vers;
1344 }
1345 }
1346 NFS_SOCK_DBG("nfs connect first %s, so type %d port %d prot %d %d\n",
1347 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port,
1348 nss.nss_protocol, nss.nss_version);
1349 } else {
1350 /* we've connected before, just connect to NFS port */
1351 if (!nmp->nm_nfsport) {
1352 /* need to ask portmapper which port that would be */
1353 nss.nss_port = PMAPPORT;
1354 nss.nss_protocol = PMAPPROG;
1355 nss.nss_version = 0;
1356 } else {
1357 nss.nss_port = nmp->nm_nfsport;
1358 nss.nss_protocol = NFS_PROG;
1359 nss.nss_version = nmp->nm_vers;
1360 }
1361 NFS_SOCK_DBG("nfs connect %s, so type %d port %d prot %d %d\n",
1362 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port,
1363 nss.nss_protocol, nss.nss_version);
1364 }
1365
1366 /* Set next location to first valid location. */
1367 /* If start location is invalid, find next location. */
1368 nss.nss_nextloc = nss.nss_startloc;
1369 if ((nss.nss_nextloc.nli_serv >= nmp->nm_locations.nl_locations[nss.nss_nextloc.nli_loc]->nl_servcount) ||
1370 (nss.nss_nextloc.nli_addr >= nmp->nm_locations.nl_locations[nss.nss_nextloc.nli_loc]->nl_servers[nss.nss_nextloc.nli_serv]->ns_addrcount)) {
1371 nfs_location_next(&nmp->nm_locations, &nss.nss_nextloc);
1372 if (!nfs_location_index_cmp(&nss.nss_nextloc, &nss.nss_startloc)) {
1373 NFS_SOCK_DBG("nfs connect %s search failed, couldn't find a valid location index\n",
1374 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1375 return ENOENT;
1376 }
1377 }
1378 nss.nss_last = -1;
1379
1380 keepsearching:
1381
1382 error = nfs_connect_search_loop(nmp, &nss);
1383 if (error || !nss.nss_sock) {
1384 /* search failed */
1385 nfs_socket_search_cleanup(&nss);
1386 if (nss.nss_flags & NSS_FALLBACK2PMAP) {
1387 tryv4 = 0;
1388 NFS_SOCK_DBG("nfs connect %s TCP failed for V4 %d %d, trying PORTMAP\n",
1389 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error);
1390 goto tryagain;
1391 }
1392
1393 if (!error && (nss.nss_sotype == SOCK_STREAM) && !nmp->nm_sotype && (nmp->nm_vers < NFS_VER4)) {
1394 if (nss.nss_error == EPROGUNAVAIL) {
1395 /* Try using UDP only when TCP is not supported */
1396 sotype = SOCK_DGRAM;
1397 savederror = nss.nss_error;
1398 NFS_SOCK_DBG("nfs connect %s TCP failed %d %d, trying UDP\n",
1399 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error);
1400 goto tryagain;
1401 } else {
1402 NFS_SOCK_DBG("nfs connect %s TCP failed %d %d, aborting\n",
1403 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error);
1404 }
1405 }
1406 if (!error) {
1407 error = nss.nss_error ? nss.nss_error : ETIMEDOUT;
1408 }
1409 lck_mtx_lock(&nmp->nm_lock);
1410 nmp->nm_sockflags &= ~NMSOCK_CONNECTING;
1411 nmp->nm_nss = NULL;
1412 lck_mtx_unlock(&nmp->nm_lock);
1413 if (nss.nss_flags & NSS_WARNED) {
1414 log(LOG_INFO, "nfs_connect: socket connect aborted for %s\n",
1415 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1416 }
1417 if (fh) {
1418 NFS_ZFREE(nfs_fhandle_zone, fh);
1419 }
1420 if (path) {
1421 NFS_ZFREE(ZV_NAMEI, path);
1422 }
1423 NFS_SOCK_DBG("nfs connect %s search failed, returning %d\n",
1424 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
1425 return error;
1426 }
1427
1428 /* try to use nss_sock */
1429 nso = nss.nss_sock;
1430 nss.nss_sock = NULL;
1431
1432 /* We may be speaking to portmap first... to determine port(s). */
1433 if (nso->nso_saddr->sa_family == AF_INET) {
1434 port = ntohs(((struct sockaddr_in*)nso->nso_saddr)->sin_port);
1435 } else if (nso->nso_saddr->sa_family == AF_INET6) {
1436 port = ntohs(((struct sockaddr_in6*)nso->nso_saddr)->sin6_port);
1437 } else if (nso->nso_saddr->sa_family == AF_LOCAL) {
1438 if (nso->nso_protocol == PMAPPROG) {
1439 port = PMAPPORT;
1440 }
1441 }
1442
1443 if (port == PMAPPORT) {
1444 /* Use this portmapper port to get the port #s we need. */
1445 NFS_SOCK_DBG("nfs connect %s got portmapper socket %p\n",
1446 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1447
1448 /* remove the connect upcall so nfs_portmap_lookup() can use this socket */
1449 sock_setupcall(nso->nso_so, NULL, NULL);
1450
1451 /* Set up socket address and port for NFS socket. */
1452 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1453
1454 /* If NFS version not set, try nm_max_vers down to nm_min_vers */
1455 nfsvers = nmp->nm_vers ? nmp->nm_vers : PVER2MAJOR(nmp->nm_max_vers);
1456 if (!(port = nmp->nm_nfsport)) {
1457 if (ss.ss_family == AF_INET) {
1458 ((struct sockaddr_in*)&ss)->sin_port = htons(0);
1459 } else if (ss.ss_family == AF_INET6) {
1460 ((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
1461 } else if (ss.ss_family == AF_LOCAL) {
1462 if (((struct sockaddr_un*)&ss)->sun_path[0] == '/') {
1463 NFS_SOCK_DBG("Looking up NFS socket over %s\n", ((struct sockaddr_un*)&ss)->sun_path);
1464 }
1465 }
1466 for (; nfsvers >= (int)PVER2MAJOR(nmp->nm_min_vers); nfsvers--) {
1467 if (nmp->nm_vers && nmp->nm_vers != nfsvers) {
1468 continue; /* Wrong version */
1469 }
1470 #if CONFIG_NFS4
1471 if (nfsvers == NFS_VER4 && nso->nso_sotype == SOCK_DGRAM) {
1472 continue; /* NFSv4 does not do UDP */
1473 }
1474 #endif
1475 if (ss.ss_family == AF_LOCAL && nmp->nm_nfs_localport) {
1476 struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
1477 NFS_SOCK_DBG("Using supplied local address %s for NFS_PROG\n", nmp->nm_nfs_localport);
1478 strlcpy(sun->sun_path, nmp->nm_nfs_localport, sizeof(sun->sun_path));
1479 error = 0;
1480 } else {
1481 NFS_SOCK_DBG("Calling Portmap/Rpcbind for NFS_PROG");
1482 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
1483 nso->nso_so, NFS_PROG, nfsvers, nso->nso_sotype, timeo);
1484 }
1485 if (!error) {
1486 if (ss.ss_family == AF_INET) {
1487 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
1488 } else if (ss.ss_family == AF_INET6) {
1489 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
1490 } else if (ss.ss_family == AF_LOCAL) {
1491 port = ((struct sockaddr_un *)&ss)->sun_path[0] ? NFS_PORT : 0;
1492 }
1493 if (!port) {
1494 error = EPROGUNAVAIL;
1495 }
1496 #if CONFIG_NFS4
1497 if (port == NFS_PORT && nfsvers == NFS_VER4 && tryv4 == 0) {
1498 continue; /* We already tried this */
1499 }
1500 #endif
1501 }
1502 if (!error) {
1503 break;
1504 }
1505 }
1506 if (nfsvers < (int)PVER2MAJOR(nmp->nm_min_vers) && error == 0) {
1507 error = EPROGUNAVAIL;
1508 }
1509 if (error) {
1510 nfs_socket_search_update_error(&nss, error);
1511 nfs_socket_destroy(nso);
1512 NFS_SOCK_DBG("Could not lookup NFS socket address for version %d error = %d\n", nfsvers, error);
1513 goto keepsearching;
1514 }
1515 } else if (nmp->nm_nfs_localport) {
1516 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_nfs_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
1517 NFS_SOCK_DBG("Using supplied nfs_local_port %s for NFS_PROG\n", nmp->nm_nfs_localport);
1518 }
1519
1520 /* Create NFS protocol socket and add it to the list of sockets. */
1521 /* N.B. If nfsvers is NFS_VER4 at this point then we're on a non standard port */
1522 if (ss.ss_family == AF_LOCAL) {
1523 NFS_SOCK_DBG("Creating NFS socket for %s port = %d\n", ((struct sockaddr_un*)&ss)->sun_path, port);
1524 }
1525 error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nso->nso_sotype, port,
1526 NFS_PROG, nfsvers, NMFLAG(nmp, RESVPORT), &nsonfs);
1527 if (error) {
1528 nfs_socket_search_update_error(&nss, error);
1529 nfs_socket_destroy(nso);
1530 NFS_SOCK_DBG("Could not create NFS socket: %d\n", error);
1531 goto keepsearching;
1532 }
1533 nsonfs->nso_location = nso->nso_location;
1534 nsonfs->nso_wake = &nss;
1535 error = sock_setupcall(nsonfs->nso_so, nfs_connect_upcall, nsonfs);
1536 if (error) {
1537 nfs_socket_search_update_error(&nss, error);
1538 nfs_socket_destroy(nsonfs);
1539 nfs_socket_destroy(nso);
1540 NFS_SOCK_DBG("Could not nfs_connect_upcall: %d", error);
1541 goto keepsearching;
1542 }
1543 TAILQ_INSERT_TAIL(&nss.nss_socklist, nsonfs, nso_link);
1544 nss.nss_sockcnt++;
1545 if ((nfsvers < NFS_VER4) && !(nmp->nm_sockflags & NMSOCK_HASCONNECTED) && !NM_OMATTR_GIVEN(nmp, FH)) {
1546 /* Set up socket address and port for MOUNT socket. */
1547 error = 0;
1548 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1549 port = nmp->nm_mountport;
1550 NFS_SOCK_DBG("mount port = %d\n", port);
1551 if (ss.ss_family == AF_INET) {
1552 ((struct sockaddr_in*)&ss)->sin_port = htons(port);
1553 } else if (ss.ss_family == AF_INET6) {
1554 ((struct sockaddr_in6*)&ss)->sin6_port = htons(port);
1555 } else if (ss.ss_family == AF_LOCAL && nmp->nm_mount_localport) {
1556 NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp->nm_mount_localport, nmp->nm_mountport);
1557 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_mount_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
1558 }
1559 if (!port) {
1560 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1561 /* If NFS version is unknown, optimistically choose for NFSv3. */
1562 int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
1563 int mntproto = (NM_OMFLAG(nmp, MNTUDP) || (nso->nso_sotype == SOCK_DGRAM)) ? IPPROTO_UDP : IPPROTO_TCP;
1564 NFS_SOCK_DBG("Looking up mount port with socket %p\n", nso->nso_so);
1565 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
1566 nso->nso_so, RPCPROG_MNT, mntvers, mntproto == IPPROTO_UDP ? SOCK_DGRAM : SOCK_STREAM, timeo);
1567 }
1568 if (!error) {
1569 if (ss.ss_family == AF_INET) {
1570 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
1571 } else if (ss.ss_family == AF_INET6) {
1572 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
1573 } else if (ss.ss_family == AF_LOCAL) {
1574 port = (((struct sockaddr_un*)&ss)->sun_path[0] != '\0');
1575 }
1576 if (!port) {
1577 error = EPROGUNAVAIL;
1578 }
1579 }
1580 /* create sockaddr for MOUNT */
1581 if (!error) {
1582 nsonfs->nso_saddr2 = (struct sockaddr *)alloc_sockaddr(ss.ss_len, Z_WAITOK | Z_NOFAIL);
1583 }
1584 if (!error) {
1585 bcopy(&ss, nsonfs->nso_saddr2, ss.ss_len);
1586 }
1587 if (error) {
1588 NFS_SOCK_DBG("Could not create mount socket address %d", error);
1589 lck_mtx_lock(&nsonfs->nso_lock);
1590 nsonfs->nso_error = error;
1591 nsonfs->nso_flags |= NSO_DEAD;
1592 lck_mtx_unlock(&nsonfs->nso_lock);
1593 }
1594 }
1595 NFS_SOCK_DBG("Destroying socket %p so %p\n", nso, nso->nso_so);
1596 nfs_socket_destroy(nso);
1597 goto keepsearching;
1598 }
1599
1600 /* nso is an NFS socket */
1601 NFS_SOCK_DBG("nfs connect %s got NFS socket %p\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1602
1603 /* If NFS version wasn't specified, it was determined during the connect. */
1604 nfsvers = nmp->nm_vers ? nmp->nm_vers : (int)nso->nso_version;
1605
1606 /* Perform MOUNT call for initial NFSv2/v3 connection/mount. */
1607 if ((nfsvers < NFS_VER4) && !(nmp->nm_sockflags & NMSOCK_HASCONNECTED) && !NM_OMATTR_GIVEN(nmp, FH)) {
1608 error = 0;
1609 saddr = nso->nso_saddr2;
1610 if (!saddr) {
1611 /* Need sockaddr for MOUNT port */
1612 NFS_SOCK_DBG("Getting mount address mountport = %d, mount_localport = %s\n", nmp->nm_mountport, nmp->nm_mount_localport);
1613 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1614 port = nmp->nm_mountport;
1615 if (ss.ss_family == AF_INET) {
1616 ((struct sockaddr_in*)&ss)->sin_port = htons(port);
1617 } else if (ss.ss_family == AF_INET6) {
1618 ((struct sockaddr_in6*)&ss)->sin6_port = htons(port);
1619 } else if (ss.ss_family == AF_LOCAL && nmp->nm_mount_localport) {
1620 NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp->nm_mount_localport, nmp->nm_mountport);
1621 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_mount_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
1622 }
1623 if (!port) {
1624 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1625 int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
1626 int so_type = NM_OMFLAG(nmp, MNTUDP) ? SOCK_DGRAM : nso->nso_sotype;
1627 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
1628 NULL, RPCPROG_MNT, mntvers, so_type, timeo);
1629 if (ss.ss_family == AF_INET) {
1630 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
1631 } else if (ss.ss_family == AF_INET6) {
1632 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
1633 }
1634 }
1635 if (!error) {
1636 if (port) {
1637 saddr = (struct sockaddr*)&ss;
1638 } else {
1639 error = EPROGUNAVAIL;
1640 }
1641 }
1642 }
1643 if (!error) {
1644 error = nfs3_check_lockmode(nmp, saddr, nso->nso_sotype, timeo);
1645 if (error) {
1646 nfs_socket_search_update_error(&nss, error);
1647 nfs_socket_destroy(nso);
1648 return error;
1649 }
1650 }
1651 if (saddr) {
1652 fh = zalloc(nfs_fhandle_zone);
1653 }
1654 if (saddr && fh) {
1655 path = zalloc(ZV_NAMEI);
1656 }
1657 if (!saddr || !fh || !path) {
1658 if (!error) {
1659 error = ENOMEM;
1660 }
1661 if (fh) {
1662 NFS_ZFREE(nfs_fhandle_zone, fh);
1663 }
1664 if (path) {
1665 NFS_ZFREE(ZV_NAMEI, path);
1666 }
1667 nfs_socket_search_update_error(&nss, error);
1668 nfs_socket_destroy(nso);
1669 goto keepsearching;
1670 }
1671 nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location, path, MAXPATHLEN, 1);
1672 error = nfs3_mount_rpc(nmp, saddr, nso->nso_sotype, nfsvers,
1673 path, vfs_context_current(), timeo, fh, &nmp->nm_servsec);
1674 NFS_SOCK_DBG("nfs connect %s socket %p mount %d\n",
1675 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
1676 if (!error) {
1677 /* Make sure we can agree on a security flavor. */
1678 int o, s; /* indices into mount option and server security flavor lists */
1679 int found = 0;
1680
1681 if ((nfsvers == NFS_VER3) && !nmp->nm_servsec.count) {
1682 /* Some servers return an empty list to indicate RPCAUTH_SYS? */
1683 nmp->nm_servsec.count = 1;
1684 nmp->nm_servsec.flavors[0] = RPCAUTH_SYS;
1685 }
1686 if (nmp->nm_sec.count) {
1687 /* Choose the first flavor in our list that the server supports. */
1688 if (!nmp->nm_servsec.count) {
1689 /* we don't know what the server supports, just use our first choice */
1690 nmp->nm_auth = nmp->nm_sec.flavors[0];
1691 found = 1;
1692 }
1693 for (o = 0; !found && (o < nmp->nm_sec.count); o++) {
1694 for (s = 0; !found && (s < nmp->nm_servsec.count); s++) {
1695 if (nmp->nm_sec.flavors[o] == nmp->nm_servsec.flavors[s]) {
1696 nmp->nm_auth = nmp->nm_sec.flavors[o];
1697 found = 1;
1698 }
1699 }
1700 }
1701 } else {
1702 /* Choose the first one we support from the server's list. */
1703 if (!nmp->nm_servsec.count) {
1704 nmp->nm_auth = RPCAUTH_SYS;
1705 found = 1;
1706 }
1707 for (s = 0; s < nmp->nm_servsec.count; s++) {
1708 switch (nmp->nm_servsec.flavors[s]) {
1709 case RPCAUTH_SYS:
1710 /* prefer RPCAUTH_SYS to RPCAUTH_NONE */
1711 if (found && (nmp->nm_auth == RPCAUTH_NONE)) {
1712 found = 0;
1713 }
1714 OS_FALLTHROUGH;
1715 case RPCAUTH_NONE:
1716 case RPCAUTH_KRB5:
1717 case RPCAUTH_KRB5I:
1718 case RPCAUTH_KRB5P:
1719 if (!found) {
1720 nmp->nm_auth = nmp->nm_servsec.flavors[s];
1721 found = 1;
1722 }
1723 break;
1724 }
1725 }
1726 }
1727 error = !found ? EAUTH : 0;
1728 }
1729 NFS_ZFREE(ZV_NAMEI, path);
1730 if (error) {
1731 nfs_socket_search_update_error(&nss, error);
1732 NFS_ZFREE(nfs_fhandle_zone, fh);
1733 nfs_socket_destroy(nso);
1734 goto keepsearching;
1735 }
1736 if (nmp->nm_fh) {
1737 NFS_ZFREE(nfs_fhandle_zone, nmp->nm_fh);
1738 }
1739 nmp->nm_fh = fh;
1740 fh = NULL;
1741 NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_CALLUMNT);
1742 }
1743
1744 /* put the real upcall in place */
1745 upcall = (nso->nso_sotype == SOCK_STREAM) ? nfs_tcp_rcv : nfs_udp_rcv;
1746 error = sock_setupcall(nso->nso_so, upcall, nmp);
1747 if (error) {
1748 nfs_socket_search_update_error(&nss, error);
1749 nfs_socket_destroy(nso);
1750 goto keepsearching;
1751 }
1752
1753 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1754 /* set mntfromname to this location */
1755 if (!NM_OMATTR_GIVEN(nmp, MNTFROM)) {
1756 nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location,
1757 vfs_statfs(nmp->nm_mountp)->f_mntfromname,
1758 sizeof(vfs_statfs(nmp->nm_mountp)->f_mntfromname), 0);
1759 }
1760 /* some negotiated values need to remain unchanged for the life of the mount */
1761 if (!nmp->nm_sotype) {
1762 nmp->nm_sotype = nso->nso_sotype;
1763 }
1764 if (!nmp->nm_vers) {
1765 nmp->nm_vers = nfsvers;
1766 #if CONFIG_NFS4
1767 /* If we negotiated NFSv4, set nm_nfsport if we ended up on the standard NFS port */
1768 if ((nfsvers >= NFS_VER4) && !NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) {
1769 if (nso->nso_saddr->sa_family == AF_INET) {
1770 port = ((struct sockaddr_in*)nso->nso_saddr)->sin_port = htons(port);
1771 } else if (nso->nso_saddr->sa_family == AF_INET6) {
1772 port = ((struct sockaddr_in6*)nso->nso_saddr)->sin6_port = htons(port);
1773 } else {
1774 port = 0;
1775 }
1776 if (port == NFS_PORT) {
1777 nmp->nm_nfsport = NFS_PORT;
1778 }
1779 }
1780 #endif
1781 }
1782 #if CONFIG_NFS4
1783 /* do some version-specific pre-mount set up */
1784 if (nmp->nm_vers >= NFS_VER4) {
1785 microtime(&now);
1786 nmp->nm_mounttime = ((uint64_t)now.tv_sec << 32) | now.tv_usec;
1787 if (!NMFLAG(nmp, NOCALLBACK)) {
1788 nfs4_mount_callback_setup(nmp);
1789 }
1790 }
1791 #endif
1792 }
1793
1794 /* Initialize NFS socket state variables */
1795 lck_mtx_lock(&nmp->nm_lock);
1796 nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
1797 nmp->nm_srtt[3] = (NFS_TIMEO << 3);
1798 nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
1799 nmp->nm_sdrtt[3] = 0;
1800 if (nso->nso_sotype == SOCK_DGRAM) {
1801 nmp->nm_cwnd = NFS_MAXCWND / 2; /* Initial send window */
1802 nmp->nm_sent = 0;
1803 } else if (nso->nso_sotype == SOCK_STREAM) {
1804 nmp->nm_timeouts = 0;
1805 }
1806 nmp->nm_sockflags &= ~NMSOCK_CONNECTING;
1807 nmp->nm_sockflags |= NMSOCK_SETUP;
1808 /* move the socket to the mount structure */
1809 nmp->nm_nso = nso;
1810 oldsaddr = nmp->nm_saddr;
1811 nmp->nm_saddr = nso->nso_saddr;
1812 lck_mtx_unlock(&nmp->nm_lock);
1813 error = nfs_connect_setup(nmp);
1814 lck_mtx_lock(&nmp->nm_lock);
1815 nmp->nm_sockflags &= ~NMSOCK_SETUP;
1816 if (!error) {
1817 nmp->nm_sockflags |= NMSOCK_READY;
1818 wakeup(&nmp->nm_sockflags);
1819 }
1820 if (error) {
1821 NFS_SOCK_DBG("nfs connect %s socket %p setup failed %d\n",
1822 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
1823 nfs_socket_search_update_error(&nss, error);
1824 nmp->nm_saddr = oldsaddr;
1825 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1826 /* undo settings made prior to setup */
1827 if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_SOCKET_TYPE)) {
1828 nmp->nm_sotype = 0;
1829 }
1830 #if CONFIG_NFS4
1831 if (nmp->nm_vers >= NFS_VER4) {
1832 if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) {
1833 nmp->nm_nfsport = 0;
1834 }
1835 if (nmp->nm_cbid) {
1836 nfs4_mount_callback_shutdown(nmp);
1837 }
1838 if (IS_VALID_CRED(nmp->nm_mcred)) {
1839 kauth_cred_unref(&nmp->nm_mcred);
1840 }
1841 bzero(&nmp->nm_un, sizeof(nmp->nm_un));
1842 }
1843 #endif
1844 nmp->nm_vers = 0;
1845 }
1846 lck_mtx_unlock(&nmp->nm_lock);
1847 nmp->nm_nso = NULL;
1848 nfs_socket_destroy(nso);
1849 goto keepsearching;
1850 }
1851
1852 /* update current location */
1853 if ((nmp->nm_locations.nl_current.nli_flags & NLI_VALID) &&
1854 (nmp->nm_locations.nl_current.nli_serv != nso->nso_location.nli_serv)) {
1855 /* server has changed, we should initiate failover/recovery */
1856 // XXX
1857 }
1858 nmp->nm_locations.nl_current = nso->nso_location;
1859 nmp->nm_locations.nl_current.nli_flags |= NLI_VALID;
1860
1861 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1862 /* We have now successfully connected... make a note of it. */
1863 nmp->nm_sockflags |= NMSOCK_HASCONNECTED;
1864 }
1865
1866 lck_mtx_unlock(&nmp->nm_lock);
1867
1868 free_sockaddr(oldsaddr);
1869
1870 if (nss.nss_flags & NSS_WARNED) {
1871 log(LOG_INFO, "nfs_connect: socket connect completed for %s\n",
1872 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1873 }
1874
1875 nmp->nm_nss = NULL;
1876 nfs_socket_search_cleanup(&nss);
1877 if (fh) {
1878 NFS_ZFREE(nfs_fhandle_zone, fh);
1879 }
1880 if (path) {
1881 NFS_ZFREE(ZV_NAMEI, path);
1882 }
1883 NFS_SOCK_DBG("nfs connect %s success\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1884 return 0;
1885 }
1886
1887
/* setup & confirm socket connection is functional */
/*
 * Perform post-connect protocol setup for the mount.
 *
 * For NFSv2/v3 there is nothing to do.  For NFSv4, if we already hold
 * client ID state, first try to renew it; certain renewal errors mean
 * our state on the server is gone and recovery must be scheduled.
 * Regardless of the renewal outcome, (re)establish the client ID.
 *
 * Returns 0 on success or an NFS/errno error.
 */
int
nfs_connect_setup(
#if !CONFIG_NFS4
	__unused
#endif
	struct nfsmount *nmp)
{
	int error = 0;
#if CONFIG_NFS4
	if (nmp->nm_vers >= NFS_VER4) {
		if (nmp->nm_state & NFSSTA_CLIENTID) {
			/* first, try to renew our current state */
			error = nfs4_renew(nmp, R_SETUP);
			/* these errors indicate our server-side state is lost/invalid */
			if ((error == NFSERR_ADMIN_REVOKED) ||
			    (error == NFSERR_CB_PATH_DOWN) ||
			    (error == NFSERR_EXPIRED) ||
			    (error == NFSERR_LEASE_MOVED) ||
			    (error == NFSERR_STALE_CLIENTID)) {
				lck_mtx_lock(&nmp->nm_lock);
				nfs_need_recover(nmp, error);
				lck_mtx_unlock(&nmp->nm_lock);
			}
		}
		/* always (re)establish the client ID; this overwrites any renew error */
		error = nfs4_setclientid(nmp);
	}
#endif
	return error;
}
1917
1918 /*
1919 * NFS socket reconnect routine:
1920 * Called when a connection is broken.
1921 * - disconnect the old socket
1922 * - nfs_connect() again
1923 * - set R_MUSTRESEND for all outstanding requests on mount point
1924 * If this fails the mount point is DEAD!
1925 */
1926 int
nfs_reconnect(struct nfsmount * nmp)1927 nfs_reconnect(struct nfsmount *nmp)
1928 {
1929 struct nfsreq *rq;
1930 struct timeval now;
1931 thread_t thd = current_thread();
1932 int error, wentdown = 0, verbose = 1;
1933 time_t lastmsg;
1934 int timeo;
1935
1936 microuptime(&now);
1937 lastmsg = now.tv_sec - (nmp->nm_tprintf_delay - nmp->nm_tprintf_initial_delay);
1938
1939 nfs_disconnect(nmp);
1940
1941
1942 lck_mtx_lock(&nmp->nm_lock);
1943 timeo = nfs_is_squishy(nmp) ? 8 : 30;
1944 lck_mtx_unlock(&nmp->nm_lock);
1945
1946 while ((error = nfs_connect(nmp, verbose, timeo))) {
1947 verbose = 0;
1948 nfs_disconnect(nmp);
1949 if ((error == EINTR) || (error == ERESTART)) {
1950 return EINTR;
1951 }
1952 if (error == EIO) {
1953 return EIO;
1954 }
1955 microuptime(&now);
1956 if ((lastmsg + nmp->nm_tprintf_delay) < now.tv_sec) {
1957 lastmsg = now.tv_sec;
1958 nfs_down(nmp, thd, error, NFSSTA_TIMEO, "can not connect", 0);
1959 wentdown = 1;
1960 }
1961 lck_mtx_lock(&nmp->nm_lock);
1962 if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
1963 /* we're not yet completely mounted and */
1964 /* we can't reconnect, so we fail */
1965 lck_mtx_unlock(&nmp->nm_lock);
1966 NFS_SOCK_DBG("Not mounted returning %d\n", error);
1967 return error;
1968 }
1969
1970 if (nfs_mount_check_dead_timeout(nmp)) {
1971 nfs_mount_make_zombie(nmp);
1972 lck_mtx_unlock(&nmp->nm_lock);
1973 return ENXIO;
1974 }
1975
1976 if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
1977 lck_mtx_unlock(&nmp->nm_lock);
1978 return error;
1979 }
1980 lck_mtx_unlock(&nmp->nm_lock);
1981 tsleep(nfs_reconnect, PSOCK, "nfs_reconnect_delay", 2 * hz);
1982 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
1983 return error;
1984 }
1985 }
1986
1987 if (wentdown) {
1988 nfs_up(nmp, thd, NFSSTA_TIMEO, "connected");
1989 }
1990
1991 /*
1992 * Loop through outstanding request list and mark all requests
1993 * as needing a resend. (Though nfs_need_reconnect() probably
1994 * marked them all already.)
1995 */
1996 lck_mtx_lock(&nfs_request_mutex);
1997 TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
1998 if (rq->r_nmp == nmp) {
1999 lck_mtx_lock(&rq->r_mtx);
2000 if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
2001 rq->r_flags |= R_MUSTRESEND;
2002 rq->r_rtt = -1;
2003 wakeup(rq);
2004 if ((rq->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
2005 nfs_asyncio_resend(rq);
2006 }
2007 }
2008 lck_mtx_unlock(&rq->r_mtx);
2009 }
2010 }
2011 lck_mtx_unlock(&nfs_request_mutex);
2012 return 0;
2013 }
2014
2015 /*
2016 * NFS disconnect. Clean up and unlink.
2017 */
2018 void
nfs_disconnect(struct nfsmount * nmp)2019 nfs_disconnect(struct nfsmount *nmp)
2020 {
2021 struct nfs_socket *nso;
2022
2023 lck_mtx_lock(&nmp->nm_lock);
2024 tryagain:
2025 if (nmp->nm_nso) {
2026 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
2027 if (nmp->nm_state & NFSSTA_SENDING) { /* wait for sending to complete */
2028 nmp->nm_state |= NFSSTA_WANTSND;
2029 msleep(&nmp->nm_state, &nmp->nm_lock, PZERO - 1, "nfswaitsending", &ts);
2030 goto tryagain;
2031 }
2032 if (nmp->nm_sockflags & NMSOCK_POKE) { /* wait for poking to complete */
2033 msleep(&nmp->nm_sockflags, &nmp->nm_lock, PZERO - 1, "nfswaitpoke", &ts);
2034 goto tryagain;
2035 }
2036 nmp->nm_sockflags |= NMSOCK_DISCONNECTING;
2037 nmp->nm_sockflags &= ~NMSOCK_READY;
2038 nso = nmp->nm_nso;
2039 nmp->nm_nso = NULL;
2040 if (nso->nso_saddr == nmp->nm_saddr) {
2041 nso->nso_saddr = NULL;
2042 }
2043 lck_mtx_unlock(&nmp->nm_lock);
2044 nfs_socket_destroy(nso);
2045 lck_mtx_lock(&nmp->nm_lock);
2046 nmp->nm_sockflags &= ~NMSOCK_DISCONNECTING;
2047 lck_mtx_unlock(&nmp->nm_lock);
2048 } else {
2049 lck_mtx_unlock(&nmp->nm_lock);
2050 }
2051 }
2052
2053 /*
2054 * mark an NFS mount as needing a reconnect/resends.
2055 */
2056 void
nfs_need_reconnect(struct nfsmount * nmp)2057 nfs_need_reconnect(struct nfsmount *nmp)
2058 {
2059 struct nfsreq *rq;
2060
2061 lck_mtx_lock(&nmp->nm_lock);
2062 nmp->nm_sockflags &= ~(NMSOCK_READY | NMSOCK_SETUP);
2063 lck_mtx_unlock(&nmp->nm_lock);
2064
2065 /*
2066 * Loop through outstanding request list and
2067 * mark all requests as needing a resend.
2068 */
2069 lck_mtx_lock(&nfs_request_mutex);
2070 TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
2071 if (rq->r_nmp == nmp) {
2072 lck_mtx_lock(&rq->r_mtx);
2073 if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
2074 rq->r_flags |= R_MUSTRESEND;
2075 rq->r_rtt = -1;
2076 wakeup(rq);
2077 if ((rq->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
2078 nfs_asyncio_resend(rq);
2079 }
2080 }
2081 lck_mtx_unlock(&rq->r_mtx);
2082 }
2083 }
2084 lck_mtx_unlock(&nfs_request_mutex);
2085 }
2086
2087
2088 /*
2089 * thread to handle miscellaneous async NFS socket work (reconnects/resends)
2090 */
2091 void
nfs_mount_sock_thread(void * arg,__unused wait_result_t wr)2092 nfs_mount_sock_thread(void *arg, __unused wait_result_t wr)
2093 {
2094 struct nfsmount *nmp = arg;
2095 struct timespec ts = { .tv_sec = 30, .tv_nsec = 0 };
2096 thread_t thd = current_thread();
2097 struct nfsreq *req;
2098 struct timeval now;
2099 int error, dofinish;
2100 nfsnode_t np;
2101 int do_reconnect_sleep = 0;
2102
2103 lck_mtx_lock(&nmp->nm_lock);
2104 while (!(nmp->nm_sockflags & NMSOCK_READY) ||
2105 !TAILQ_EMPTY(&nmp->nm_resendq) ||
2106 !LIST_EMPTY(&nmp->nm_monlist) ||
2107 nmp->nm_deadto_start ||
2108 (nmp->nm_state & NFSSTA_RECOVER) ||
2109 ((nmp->nm_vers >= NFS_VER4) && !TAILQ_EMPTY(&nmp->nm_dreturnq))) {
2110 if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
2111 break;
2112 }
2113 /* do reconnect, if necessary */
2114 if (!(nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
2115 if (nmp->nm_reconnect_start <= 0) {
2116 microuptime(&now);
2117 nmp->nm_reconnect_start = now.tv_sec;
2118 }
2119 lck_mtx_unlock(&nmp->nm_lock);
2120 NFS_SOCK_DBG("nfs reconnect %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
2121 /*
2122 * XXX We don't want to call reconnect again right away if returned errors
2123 * before that may not have blocked. This has caused spamming null procs
2124 * from machines in the pass.
2125 */
2126 if (do_reconnect_sleep) {
2127 tsleep(nfs_mount_sock_thread, PSOCK, "nfs_reconnect_sock_thread_delay", hz);
2128 }
2129 error = nfs_reconnect(nmp);
2130 if (error) {
2131 int lvl = 7;
2132 if (error == EIO || error == EINTR) {
2133 lvl = (do_reconnect_sleep++ % 600) ? 7 : 0;
2134 }
2135 NFSCLNT_DBG(NFSCLNT_FAC_SOCK, lvl, "nfs reconnect %s: returned %d\n",
2136 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
2137 } else {
2138 nmp->nm_reconnect_start = 0;
2139 do_reconnect_sleep = 0;
2140 }
2141 lck_mtx_lock(&nmp->nm_lock);
2142 }
2143 if ((nmp->nm_sockflags & NMSOCK_READY) &&
2144 (nmp->nm_state & NFSSTA_RECOVER) &&
2145 !(nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
2146 !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
2147 /* perform state recovery */
2148 lck_mtx_unlock(&nmp->nm_lock);
2149 nfs_recover(nmp);
2150 lck_mtx_lock(&nmp->nm_lock);
2151 }
2152 #if CONFIG_NFS4
2153 /* handle NFSv4 delegation returns */
2154 while ((nmp->nm_vers >= NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) &&
2155 (nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER) &&
2156 ((np = TAILQ_FIRST(&nmp->nm_dreturnq)))) {
2157 lck_mtx_unlock(&nmp->nm_lock);
2158 nfs4_delegation_return(np, R_RECOVER, thd, nmp->nm_mcred);
2159 lck_mtx_lock(&nmp->nm_lock);
2160 }
2161 #endif
2162 /* do resends, if necessary/possible */
2163 while ((((nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER)) ||
2164 (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) &&
2165 ((req = TAILQ_FIRST(&nmp->nm_resendq)))) {
2166 if (req->r_resendtime) {
2167 microuptime(&now);
2168 }
2169 while (req && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) && req->r_resendtime && (now.tv_sec < req->r_resendtime)) {
2170 req = TAILQ_NEXT(req, r_rchain);
2171 }
2172 if (!req) {
2173 break;
2174 }
2175 /* acquire both locks in the right order: first req->r_mtx and then nmp->nm_lock */
2176 lck_mtx_unlock(&nmp->nm_lock);
2177 lck_mtx_lock(&req->r_mtx);
2178 lck_mtx_lock(&nmp->nm_lock);
2179 if ((req->r_flags & R_RESENDQ) == 0 || (req->r_rchain.tqe_next == NFSREQNOLIST)) {
2180 lck_mtx_unlock(&req->r_mtx);
2181 continue;
2182 }
2183 TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
2184 req->r_flags &= ~R_RESENDQ;
2185 req->r_rchain.tqe_next = NFSREQNOLIST;
2186 lck_mtx_unlock(&nmp->nm_lock);
2187 /* Note that we have a reference on the request that was taken nfs_asyncio_resend */
2188 if (req->r_error || req->r_nmrep.nmc_mhead) {
2189 dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
2190 wakeup(req);
2191 lck_mtx_unlock(&req->r_mtx);
2192 if (dofinish) {
2193 nfs_asyncio_finish(req);
2194 }
2195 nfs_request_rele(req);
2196 lck_mtx_lock(&nmp->nm_lock);
2197 continue;
2198 }
2199 if ((req->r_flags & R_RESTART) || nfs_request_using_gss(req)) {
2200 req->r_flags &= ~R_RESTART;
2201 req->r_resendtime = 0;
2202 lck_mtx_unlock(&req->r_mtx);
2203 /* async RPCs on GSS mounts need to be rebuilt and resent. */
2204 nfs_reqdequeue(req);
2205 #if CONFIG_NFS_GSS
2206 if (nfs_request_using_gss(req)) {
2207 nfs_gss_clnt_rpcdone(req);
2208 error = nfs_gss_clnt_args_restore(req);
2209 if (error == ENEEDAUTH) {
2210 req->r_xid = 0;
2211 }
2212 }
2213 #endif /* CONFIG_NFS_GSS */
2214 NFS_SOCK_DBG("nfs async%s restart: p %d x 0x%llx f 0x%x rtt %d\n",
2215 nfs_request_using_gss(req) ? " gss" : "", req->r_procnum, req->r_xid,
2216 req->r_flags, req->r_rtt);
2217 error = nfs_sigintr(nmp, req, req->r_thread, 0);
2218 if (!error) {
2219 error = nfs_request_add_header(req);
2220 }
2221 if (!error) {
2222 error = nfs_request_send(req, 0);
2223 }
2224 lck_mtx_lock(&req->r_mtx);
2225 if (error) {
2226 req->r_error = error;
2227 }
2228 wakeup(req);
2229 dofinish = error && req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
2230 lck_mtx_unlock(&req->r_mtx);
2231 if (dofinish) {
2232 nfs_asyncio_finish(req);
2233 }
2234 nfs_request_rele(req);
2235 lck_mtx_lock(&nmp->nm_lock);
2236 error = 0;
2237 continue;
2238 }
2239 NFS_SOCK_DBG("nfs async resend: p %d x 0x%llx f 0x%x rtt %d\n",
2240 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
2241 error = nfs_sigintr(nmp, req, req->r_thread, 0);
2242 if (!error) {
2243 req->r_flags |= R_SENDING;
2244 lck_mtx_unlock(&req->r_mtx);
2245 error = nfs_send(req, 0);
2246 lck_mtx_lock(&req->r_mtx);
2247 if (!error) {
2248 wakeup(req);
2249 lck_mtx_unlock(&req->r_mtx);
2250 nfs_request_rele(req);
2251 lck_mtx_lock(&nmp->nm_lock);
2252 continue;
2253 }
2254 }
2255 req->r_error = error;
2256 wakeup(req);
2257 dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
2258 lck_mtx_unlock(&req->r_mtx);
2259 if (dofinish) {
2260 nfs_asyncio_finish(req);
2261 }
2262 nfs_request_rele(req);
2263 lck_mtx_lock(&nmp->nm_lock);
2264 }
2265 if (nfs_mount_check_dead_timeout(nmp)) {
2266 nfs_mount_make_zombie(nmp);
2267 break;
2268 }
2269
2270 if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
2271 break;
2272 }
2273 /* check monitored nodes, if necessary/possible */
2274 if (!LIST_EMPTY(&nmp->nm_monlist)) {
2275 nmp->nm_state |= NFSSTA_MONITOR_SCAN;
2276 LIST_FOREACH(np, &nmp->nm_monlist, n_monlink) {
2277 if (!(nmp->nm_sockflags & NMSOCK_READY) ||
2278 (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) {
2279 break;
2280 }
2281 np->n_mflag |= NMMONSCANINPROG;
2282 lck_mtx_unlock(&nmp->nm_lock);
2283 error = nfs_getattr(np, NULL, vfs_context_kernel(), (NGA_UNCACHED | NGA_MONITOR));
2284 if (!error && ISSET(np->n_flag, NUPDATESIZE)) { /* update quickly to avoid multiple events */
2285 nfs_data_update_size(np, 0);
2286 }
2287 lck_mtx_lock(&nmp->nm_lock);
2288 np->n_mflag &= ~NMMONSCANINPROG;
2289 if (np->n_mflag & NMMONSCANWANT) {
2290 np->n_mflag &= ~NMMONSCANWANT;
2291 wakeup(&np->n_mflag);
2292 }
2293 if (error || !(nmp->nm_sockflags & NMSOCK_READY) ||
2294 (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) {
2295 break;
2296 }
2297 }
2298 nmp->nm_state &= ~NFSSTA_MONITOR_SCAN;
2299 if (nmp->nm_state & NFSSTA_UNMOUNTING) {
2300 wakeup(&nmp->nm_state); /* let unmounting thread know scan is done */
2301 }
2302 }
2303 if ((nmp->nm_sockflags & NMSOCK_READY) || (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING))) {
2304 if (nmp->nm_deadto_start || !TAILQ_EMPTY(&nmp->nm_resendq) ||
2305 (nmp->nm_state & NFSSTA_RECOVER)) {
2306 ts.tv_sec = 1;
2307 } else {
2308 ts.tv_sec = 5;
2309 }
2310 msleep(&nmp->nm_sockthd, &nmp->nm_lock, PSOCK, "nfssockthread", &ts);
2311 }
2312 }
2313
2314 /* If we're unmounting, send the unmount RPC, if requested/appropriate. */
2315 if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
2316 (nmp->nm_state & NFSSTA_MOUNTED) && NMFLAG(nmp, CALLUMNT) &&
2317 (nmp->nm_vers < NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
2318 lck_mtx_unlock(&nmp->nm_lock);
2319 nfs3_umount_rpc(nmp, vfs_context_kernel(),
2320 (nmp->nm_sockflags & NMSOCK_READY) ? 6 : 2);
2321 lck_mtx_lock(&nmp->nm_lock);
2322 }
2323
2324 if (nmp->nm_sockthd == thd) {
2325 nmp->nm_sockthd = NULL;
2326 }
2327 lck_mtx_unlock(&nmp->nm_lock);
2328 wakeup(&nmp->nm_sockthd);
2329 thread_terminate(thd);
2330 }
2331
2332 /* start or wake a mount's socket thread */
2333 void
nfs_mount_sock_thread_wake(struct nfsmount * nmp)2334 nfs_mount_sock_thread_wake(struct nfsmount *nmp)
2335 {
2336 if (nmp->nm_sockthd) {
2337 wakeup(&nmp->nm_sockthd);
2338 } else if (kernel_thread_start(nfs_mount_sock_thread, nmp, &nmp->nm_sockthd) == KERN_SUCCESS) {
2339 thread_deallocate(nmp->nm_sockthd);
2340 }
2341 }
2342
2343 /*
2344 * Check if we should mark the mount dead because the
2345 * unresponsive mount has reached the dead timeout.
2346 * (must be called with nmp locked)
2347 */
2348 int
nfs_mount_check_dead_timeout(struct nfsmount * nmp)2349 nfs_mount_check_dead_timeout(struct nfsmount *nmp)
2350 {
2351 struct timeval now;
2352
2353 if (nmp->nm_state & NFSSTA_DEAD) {
2354 return 1;
2355 }
2356 if (nmp->nm_deadto_start == 0) {
2357 return 0;
2358 }
2359 nfs_is_squishy(nmp);
2360 if (nmp->nm_curdeadtimeout <= 0) {
2361 return 0;
2362 }
2363 microuptime(&now);
2364 if ((now.tv_sec - nmp->nm_deadto_start) < nmp->nm_curdeadtimeout) {
2365 return 0;
2366 }
2367 return 1;
2368 }
2369
2370 /*
2371 * Call nfs_mount_zombie to remove most of the
2372 * nfs state for the mount, and then ask to be forcibly unmounted.
2373 *
2374 * Assumes the nfs mount structure lock nm_lock is held.
2375 */
2376
2377 void
nfs_mount_make_zombie(struct nfsmount * nmp)2378 nfs_mount_make_zombie(struct nfsmount *nmp)
2379 {
2380 fsid_t fsid;
2381
2382 if (!nmp) {
2383 return;
2384 }
2385
2386 if (nmp->nm_state & NFSSTA_DEAD) {
2387 return;
2388 }
2389
2390 printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
2391 (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
2392 fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
2393 lck_mtx_unlock(&nmp->nm_lock);
2394 nfs_mount_zombie(nmp, NFSSTA_DEAD);
2395 vfs_event_signal(&fsid, VQ_DEAD, 0);
2396 lck_mtx_lock(&nmp->nm_lock);
2397 }
2398
2399
2400 /*
2401 * NFS callback channel socket state
2402 */
2403 struct nfs_callback_socket {
2404 TAILQ_ENTRY(nfs_callback_socket) ncbs_link;
2405 socket_t ncbs_so; /* the socket */
2406 struct sockaddr_storage ncbs_saddr; /* socket address */
2407 struct nfs_rpc_record_state ncbs_rrs; /* RPC record parsing state */
2408 time_t ncbs_stamp; /* last accessed at */
2409 uint32_t ncbs_flags; /* see below */
2410 };
2411 #define NCBSOCK_UPCALL 0x0001
2412 #define NCBSOCK_UPCALLWANT 0x0002
2413 #define NCBSOCK_DEAD 0x0004
2414
#if CONFIG_NFS4
/*
 * NFS callback channel state
 *
 * One listening socket for accepting socket connections from servers and
 * a list of connected sockets to handle callback requests on.
 * Mounts registered with the callback channel are assigned IDs and
 * put on a list so that the callback request handling code can match
 * the requests up with mounts.
 */
socket_t nfs4_cb_so = NULL;             /* IPv4 listening socket */
socket_t nfs4_cb_so6 = NULL;            /* IPv6 listening socket */
in_port_t nfs4_cb_port = 0;             /* bound IPv4 callback port */
in_port_t nfs4_cb_port6 = 0;            /* bound IPv6 callback port */
uint32_t nfs4_cb_id = 0;                /* next callback ID to assign */
uint32_t nfs4_cb_so_usecount = 0;       /* count of mounts using the callback channel */
TAILQ_HEAD(nfs4_cb_sock_list, nfs_callback_socket) nfs4_cb_socks;
TAILQ_HEAD(nfs4_cb_mount_list, nfsmount) nfs4_cb_mounts;

int nfs4_cb_handler(struct nfs_callback_socket *, mbuf_t);
2435
2436 /*
2437 * Set up the callback channel for the NFS mount.
2438 *
2439 * Initializes the callback channel socket state and
2440 * assigns a callback ID to the mount.
2441 */
2442 void
nfs4_mount_callback_setup(struct nfsmount * nmp)2443 nfs4_mount_callback_setup(struct nfsmount *nmp)
2444 {
2445 struct sockaddr_in sin;
2446 struct sockaddr_in6 sin6;
2447 socket_t so = NULL;
2448 socket_t so6 = NULL;
2449 struct timeval timeo;
2450 int error, on = 1;
2451 in_port_t port;
2452
2453 lck_mtx_lock(&nfs_global_mutex);
2454 if (nfs4_cb_id == 0) {
2455 TAILQ_INIT(&nfs4_cb_mounts);
2456 TAILQ_INIT(&nfs4_cb_socks);
2457 nfs4_cb_id++;
2458 }
2459 nmp->nm_cbid = nfs4_cb_id++;
2460 if (nmp->nm_cbid == 0) {
2461 nmp->nm_cbid = nfs4_cb_id++;
2462 }
2463 nfs4_cb_so_usecount++;
2464 TAILQ_INSERT_HEAD(&nfs4_cb_mounts, nmp, nm_cblink);
2465
2466 if (nfs4_cb_so) {
2467 lck_mtx_unlock(&nfs_global_mutex);
2468 return;
2469 }
2470
2471 /* IPv4 */
2472 error = sock_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so);
2473 if (error) {
2474 log(LOG_INFO, "nfs callback setup: error %d creating listening IPv4 socket\n", error);
2475 goto fail;
2476 }
2477 so = nfs4_cb_so;
2478
2479 if (NFS_PORT_INVALID(nfs_callback_port)) {
2480 error = EINVAL;
2481 log(LOG_INFO, "nfs callback setup: error %d nfs_callback_port %d is not valid\n", error, nfs_callback_port);
2482 goto fail;
2483 }
2484
2485 sock_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
2486 sin.sin_len = sizeof(struct sockaddr_in);
2487 sin.sin_family = AF_INET;
2488 sin.sin_addr.s_addr = htonl(INADDR_ANY);
2489 sin.sin_port = htons((in_port_t)nfs_callback_port); /* try to use specified port */
2490 error = sock_bind(so, (struct sockaddr *)&sin);
2491 if (error) {
2492 log(LOG_INFO, "nfs callback setup: error %d binding listening IPv4 socket\n", error);
2493 goto fail;
2494 }
2495 error = sock_getsockname(so, (struct sockaddr *)&sin, sin.sin_len);
2496 if (error) {
2497 log(LOG_INFO, "nfs callback setup: error %d getting listening IPv4 socket port\n", error);
2498 goto fail;
2499 }
2500 nfs4_cb_port = ntohs(sin.sin_port);
2501
2502 error = sock_listen(so, 32);
2503 if (error) {
2504 log(LOG_INFO, "nfs callback setup: error %d on IPv4 listen\n", error);
2505 goto fail;
2506 }
2507
2508 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2509 timeo.tv_usec = 0;
2510 timeo.tv_sec = 60;
2511 error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
2512 if (error) {
2513 log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket rx timeout\n", error);
2514 }
2515 error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
2516 if (error) {
2517 log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket tx timeout\n", error);
2518 }
2519 sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
2520 sock_setsockopt(so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
2521 sock_setsockopt(so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
2522 error = 0;
2523
2524 /* IPv6 */
2525 error = sock_socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so6);
2526 if (error) {
2527 log(LOG_INFO, "nfs callback setup: error %d creating listening IPv6 socket\n", error);
2528 goto fail;
2529 }
2530 so6 = nfs4_cb_so6;
2531
2532 sock_setsockopt(so6, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
2533 sock_setsockopt(so6, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
2534 /* try to use specified port or same port as IPv4 */
2535 port = nfs_callback_port ? (in_port_t)nfs_callback_port : nfs4_cb_port;
2536 ipv6_bind_again:
2537 sin6.sin6_len = sizeof(struct sockaddr_in6);
2538 sin6.sin6_family = AF_INET6;
2539 sin6.sin6_addr = in6addr_any;
2540 sin6.sin6_port = htons(port);
2541 error = sock_bind(so6, (struct sockaddr *)&sin6);
2542 if (error) {
2543 if (port != nfs_callback_port) {
2544 /* if we simply tried to match the IPv4 port, then try any port */
2545 port = 0;
2546 goto ipv6_bind_again;
2547 }
2548 log(LOG_INFO, "nfs callback setup: error %d binding listening IPv6 socket\n", error);
2549 goto fail;
2550 }
2551 error = sock_getsockname(so6, (struct sockaddr *)&sin6, sin6.sin6_len);
2552 if (error) {
2553 log(LOG_INFO, "nfs callback setup: error %d getting listening IPv6 socket port\n", error);
2554 goto fail;
2555 }
2556 nfs4_cb_port6 = ntohs(sin6.sin6_port);
2557
2558 error = sock_listen(so6, 32);
2559 if (error) {
2560 log(LOG_INFO, "nfs callback setup: error %d on IPv6 listen\n", error);
2561 goto fail;
2562 }
2563
2564 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2565 timeo.tv_usec = 0;
2566 timeo.tv_sec = 60;
2567 error = sock_setsockopt(so6, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
2568 if (error) {
2569 log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket rx timeout\n", error);
2570 }
2571 error = sock_setsockopt(so6, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
2572 if (error) {
2573 log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket tx timeout\n", error);
2574 }
2575 sock_setsockopt(so6, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
2576 sock_setsockopt(so6, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
2577 sock_setsockopt(so6, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
2578 error = 0;
2579
2580 fail:
2581 if (error) {
2582 nfs4_cb_so = nfs4_cb_so6 = NULL;
2583 lck_mtx_unlock(&nfs_global_mutex);
2584 if (so) {
2585 sock_shutdown(so, SHUT_RDWR);
2586 sock_close(so);
2587 }
2588 if (so6) {
2589 sock_shutdown(so6, SHUT_RDWR);
2590 sock_close(so6);
2591 }
2592 } else {
2593 lck_mtx_unlock(&nfs_global_mutex);
2594 }
2595 }
2596
2597 /*
2598 * Shut down the callback channel for the NFS mount.
2599 *
2600 * Clears the mount's callback ID and releases the mounts
2601 * reference on the callback socket. Last reference dropped
2602 * will also shut down the callback socket(s).
2603 */
2604 void
nfs4_mount_callback_shutdown(struct nfsmount * nmp)2605 nfs4_mount_callback_shutdown(struct nfsmount *nmp)
2606 {
2607 struct nfs_callback_socket *ncbsp;
2608 socket_t so, so6;
2609 struct nfs4_cb_sock_list cb_socks;
2610 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
2611
2612 lck_mtx_lock(&nfs_global_mutex);
2613 if (nmp->nm_cbid == 0) {
2614 lck_mtx_unlock(&nfs_global_mutex);
2615 return;
2616 }
2617 TAILQ_REMOVE(&nfs4_cb_mounts, nmp, nm_cblink);
2618 /* wait for any callbacks in progress to complete */
2619 while (nmp->nm_cbrefs) {
2620 msleep(&nmp->nm_cbrefs, &nfs_global_mutex, PSOCK, "cbshutwait", &ts);
2621 }
2622 nmp->nm_cbid = 0;
2623 if (--nfs4_cb_so_usecount) {
2624 lck_mtx_unlock(&nfs_global_mutex);
2625 return;
2626 }
2627 so = nfs4_cb_so;
2628 so6 = nfs4_cb_so6;
2629 nfs4_cb_so = nfs4_cb_so6 = NULL;
2630 TAILQ_INIT(&cb_socks);
2631 TAILQ_CONCAT(&cb_socks, &nfs4_cb_socks, ncbs_link);
2632 lck_mtx_unlock(&nfs_global_mutex);
2633 if (so) {
2634 sock_shutdown(so, SHUT_RDWR);
2635 sock_close(so);
2636 }
2637 if (so6) {
2638 sock_shutdown(so6, SHUT_RDWR);
2639 sock_close(so6);
2640 }
2641 while ((ncbsp = TAILQ_FIRST(&cb_socks))) {
2642 TAILQ_REMOVE(&cb_socks, ncbsp, ncbs_link);
2643 sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
2644 sock_close(ncbsp->ncbs_so);
2645 nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
2646 kfree_type(struct nfs_callback_socket, ncbsp);
2647 }
2648 }
2649
2650 /*
2651 * Check periodically for stale/unused nfs callback sockets
2652 */
2653 #define NFS4_CB_TIMER_PERIOD 30
2654 #define NFS4_CB_IDLE_MAX 300
2655 void
nfs4_callback_timer(__unused void * param0,__unused void * param1)2656 nfs4_callback_timer(__unused void *param0, __unused void *param1)
2657 {
2658 struct nfs_callback_socket *ncbsp, *nextncbsp;
2659 struct timeval now;
2660
2661 loop:
2662 lck_mtx_lock(&nfs_global_mutex);
2663 if (TAILQ_EMPTY(&nfs4_cb_socks)) {
2664 nfs4_callback_timer_on = 0;
2665 lck_mtx_unlock(&nfs_global_mutex);
2666 return;
2667 }
2668 microuptime(&now);
2669 TAILQ_FOREACH_SAFE(ncbsp, &nfs4_cb_socks, ncbs_link, nextncbsp) {
2670 if (!(ncbsp->ncbs_flags & NCBSOCK_DEAD) &&
2671 (now.tv_sec < (ncbsp->ncbs_stamp + NFS4_CB_IDLE_MAX))) {
2672 continue;
2673 }
2674 TAILQ_REMOVE(&nfs4_cb_socks, ncbsp, ncbs_link);
2675 lck_mtx_unlock(&nfs_global_mutex);
2676 sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
2677 sock_close(ncbsp->ncbs_so);
2678 nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
2679 kfree_type(struct nfs_callback_socket, ncbsp);
2680 goto loop;
2681 }
2682 nfs4_callback_timer_on = 1;
2683 nfs_interval_timer_start(nfs4_callback_timer_call,
2684 NFS4_CB_TIMER_PERIOD * 1000);
2685 lck_mtx_unlock(&nfs_global_mutex);
2686 }
2687
2688 /*
2689 * Accept a new callback socket.
2690 */
void
nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag)
{
	socket_t newso = NULL;
	struct nfs_callback_socket *ncbsp;
	struct nfsmount *nmp;
	struct timeval timeo, now;
	int error, on = 1, ip;

	/* Only service upcalls for one of our two callback listener sockets. */
	if (so == nfs4_cb_so) {
		ip = 4;
	} else if (so == nfs4_cb_so6) {
		ip = 6;
	} else {
		return;
	}

	/* allocate/initialize a new nfs_callback_socket */
	ncbsp = kalloc_type(struct nfs_callback_socket,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
	/* size the sockaddr storage for the listener's address family */
	ncbsp->ncbs_saddr.ss_len = (ip == 4) ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
	nfs_rpc_record_state_init(&ncbsp->ncbs_rrs);

	/* accept a new socket; nfs4_cb_rcv becomes its receive upcall */
	error = sock_accept(so, (struct sockaddr*)&ncbsp->ncbs_saddr,
	    ncbsp->ncbs_saddr.ss_len, MSG_DONTWAIT,
	    nfs4_cb_rcv, ncbsp, &newso);
	if (error) {
		log(LOG_INFO, "nfs callback accept: error %d accepting IPv%d socket\n", error, ip);
		kfree_type(struct nfs_callback_socket, ncbsp);
		return;
	}

	/* set up the new socket */
	/* receive timeout shouldn't matter.  If timeout on send, we'll want to drop the socket */
	timeo.tv_usec = 0;
	timeo.tv_sec = 60;
	error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	if (error) {
		log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket rx timeout\n", error, ip);
	}
	error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
	if (error) {
		log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket tx timeout\n", error, ip);
	}
	/* remaining options are best-effort; failures are not fatal */
	sock_setsockopt(newso, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));

	ncbsp->ncbs_so = newso;
	/* timestamp used by the idle-socket cleanup timer */
	microuptime(&now);
	ncbsp->ncbs_stamp = now.tv_sec;

	lck_mtx_lock(&nfs_global_mutex);

	/* add it to the list */
	TAILQ_INSERT_HEAD(&nfs4_cb_socks, ncbsp, ncbs_link);

	/* verify it's from a host we have mounted */
	TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
		/* check if socket's source address matches this mount's server address */
		if (!nmp->nm_saddr) {
			continue;
		}
		if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) {
			break;
		}
	}
	if (!nmp) { /* we don't want this socket, mark it dead */
		ncbsp->ncbs_flags |= NCBSOCK_DEAD;
	}

	/* make sure the callback socket cleanup timer is running */
	/* (shorten the timer if we've got a socket we don't want) */
	if (!nfs4_callback_timer_on) {
		nfs4_callback_timer_on = 1;
		nfs_interval_timer_start(nfs4_callback_timer_call,
		    !nmp ? 500 : (NFS4_CB_TIMER_PERIOD * 1000));
	} else if (!nmp && (nfs4_callback_timer_on < 2)) {
		/*
		 * Timer is armed for the normal period; re-arm it short
		 * (500ms) so the unwanted socket is reaped promptly.
		 * State 2 records that the short re-arm already happened.
		 */
		nfs4_callback_timer_on = 2;
		thread_call_cancel(nfs4_callback_timer_call);
		nfs_interval_timer_start(nfs4_callback_timer_call, 500);
	}

	lck_mtx_unlock(&nfs_global_mutex);
}
2778
2779 /*
2780 * Receive mbufs from callback sockets into RPC records and process each record.
2781 * Detect connection has been closed and shut down.
2782 */
void
nfs4_cb_rcv(socket_t so, void *arg, __unused int waitflag)
{
	struct nfs_callback_socket *ncbsp = arg;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct timeval now;
	mbuf_t m;
	int error = 0, recv = 1;

	/*
	 * Serialize upcalls on this socket: only one thread may run the
	 * record-parsing state machine (ncbs_rrs) at a time.
	 */
	lck_mtx_lock(&nfs_global_mutex);
	while (ncbsp->ncbs_flags & NCBSOCK_UPCALL) {
		/* wait if upcall is already in progress */
		ncbsp->ncbs_flags |= NCBSOCK_UPCALLWANT;
		msleep(ncbsp, &nfs_global_mutex, PSOCK, "cbupcall", &ts);
	}
	ncbsp->ncbs_flags |= NCBSOCK_UPCALL;
	lck_mtx_unlock(&nfs_global_mutex);

	/* loop while we make error-free progress */
	while (!error && recv) {
		error = nfs_rpc_record_read(so, &ncbsp->ncbs_rrs, MSG_DONTWAIT, &recv, &m);
		if (m) { /* handle the request */
			error = nfs4_cb_handler(ncbsp, m);
		}
	}

	/* note: no error and no data indicates server closed its end */
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/*
		 * Socket is either being closed or should be.
		 * We can't close the socket in the context of the upcall.
		 * So we mark it as dead and leave it for the cleanup timer to reap.
		 */
		ncbsp->ncbs_stamp = 0;
		ncbsp->ncbs_flags |= NCBSOCK_DEAD;
	} else {
		/* socket still healthy: refresh the idle timestamp */
		microuptime(&now);
		ncbsp->ncbs_stamp = now.tv_sec;
	}

	lck_mtx_lock(&nfs_global_mutex);
	ncbsp->ncbs_flags &= ~NCBSOCK_UPCALL;
	lck_mtx_unlock(&nfs_global_mutex);
	/* wake any thread waiting in the serialization loop above */
	wakeup(ncbsp);
}
2828
2829 /*
2830 * Handle an NFS callback channel request.
2831 */
2832 int
nfs4_cb_handler(struct nfs_callback_socket * ncbsp,mbuf_t mreq)2833 nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
2834 {
2835 socket_t so = ncbsp->ncbs_so;
2836 struct nfsm_chain nmreq, nmrep;
2837 mbuf_t mhead = NULL, mrest = NULL, m;
2838 struct msghdr msg;
2839 struct nfsmount *nmp;
2840 fhandle_t *fh;
2841 nfsnode_t np;
2842 nfs_stateid stateid;
2843 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], rbitmap[NFS_ATTR_BITMAP_LEN], bmlen, truncate, attrbytes;
2844 uint32_t val, xid, procnum, taglen, cbid, numops, op, status;
2845 uint32_t auth_type, auth_len;
2846 uint32_t numres, *pnumres;
2847 int error = 0, replen, len;
2848 size_t sentlen = 0;
2849
2850 xid = numops = op = status = procnum = taglen = cbid = 0;
2851 fh = zalloc(nfs_fhandle_zone);
2852
2853 nfsm_chain_dissect_init(error, &nmreq, mreq);
2854 nfsm_chain_get_32(error, &nmreq, xid); // RPC XID
2855 nfsm_chain_get_32(error, &nmreq, val); // RPC Call
2856 nfsm_assert(error, (val == RPC_CALL), EBADRPC);
2857 nfsm_chain_get_32(error, &nmreq, val); // RPC Version
2858 nfsm_assert(error, (val == RPC_VER2), ERPCMISMATCH);
2859 nfsm_chain_get_32(error, &nmreq, val); // RPC Program Number
2860 nfsm_assert(error, (val == NFS4_CALLBACK_PROG), EPROGUNAVAIL);
2861 nfsm_chain_get_32(error, &nmreq, val); // NFS Callback Program Version Number
2862 nfsm_assert(error, (val == NFS4_CALLBACK_PROG_VERSION), EPROGMISMATCH);
2863 nfsm_chain_get_32(error, &nmreq, procnum); // NFS Callback Procedure Number
2864 nfsm_assert(error, (procnum <= NFSPROC4_CB_COMPOUND), EPROCUNAVAIL);
2865
2866 /* Handle authentication */
2867 /* XXX just ignore auth for now - handling kerberos may be tricky */
2868 nfsm_chain_get_32(error, &nmreq, auth_type); // RPC Auth Flavor
2869 nfsm_chain_get_32(error, &nmreq, auth_len); // RPC Auth Length
2870 nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
2871 if (!error && (auth_len > 0)) {
2872 nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
2873 }
2874 nfsm_chain_adv(error, &nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE)
2875 nfsm_chain_get_32(error, &nmreq, auth_len); // verifier length
2876 nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
2877 if (!error && (auth_len > 0)) {
2878 nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
2879 }
2880 if (error) {
2881 status = error;
2882 error = 0;
2883 goto nfsmout;
2884 }
2885
2886 switch (procnum) {
2887 case NFSPROC4_CB_NULL:
2888 status = NFSERR_RETVOID;
2889 break;
2890 case NFSPROC4_CB_COMPOUND:
2891 /* tag, minorversion, cb ident, numops, op array */
2892 nfsm_chain_get_32(error, &nmreq, taglen); /* tag length */
2893 nfsm_assert(error, (val <= NFS4_OPAQUE_LIMIT), EBADRPC);
2894
2895 /* start building the body of the response */
2896 nfsm_mbuf_get(error, &mrest, nfsm_rndup(taglen) + 5 * NFSX_UNSIGNED);
2897 nfsm_chain_init(&nmrep, mrest);
2898
2899 /* copy tag from request to response */
2900 nfsm_chain_add_32(error, &nmrep, taglen); /* tag length */
2901 for (len = (int)taglen; !error && (len > 0); len -= NFSX_UNSIGNED) {
2902 nfsm_chain_get_32(error, &nmreq, val);
2903 nfsm_chain_add_32(error, &nmrep, val);
2904 }
2905
2906 /* insert number of results placeholder */
2907 numres = 0;
2908 nfsm_chain_add_32(error, &nmrep, numres);
2909 pnumres = (uint32_t*)(nmrep.nmc_ptr - NFSX_UNSIGNED);
2910
2911 nfsm_chain_get_32(error, &nmreq, val); /* minorversion */
2912 nfsm_assert(error, (val == 0), NFSERR_MINOR_VERS_MISMATCH);
2913 nfsm_chain_get_32(error, &nmreq, cbid); /* callback ID */
2914 nfsm_chain_get_32(error, &nmreq, numops); /* number of operations */
2915 if (error) {
2916 if ((error == EBADRPC) || (error == NFSERR_MINOR_VERS_MISMATCH)) {
2917 status = error;
2918 } else if ((error == ENOBUFS) || (error == ENOMEM)) {
2919 status = NFSERR_RESOURCE;
2920 } else {
2921 status = NFSERR_SERVERFAULT;
2922 }
2923 error = 0;
2924 nfsm_chain_null(&nmrep);
2925 goto nfsmout;
2926 }
2927 /* match the callback ID to a registered mount */
2928 lck_mtx_lock(&nfs_global_mutex);
2929 TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
2930 if (nmp->nm_cbid != cbid) {
2931 continue;
2932 }
2933 /* verify socket's source address matches this mount's server address */
2934 if (!nmp->nm_saddr) {
2935 continue;
2936 }
2937 if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) {
2938 break;
2939 }
2940 }
2941 /* mark the NFS mount as busy */
2942 if (nmp) {
2943 nmp->nm_cbrefs++;
2944 }
2945 lck_mtx_unlock(&nfs_global_mutex);
2946 if (!nmp) {
2947 /* if no mount match, just drop socket. */
2948 error = EPERM;
2949 nfsm_chain_null(&nmrep);
2950 goto out;
2951 }
2952
2953 /* process ops, adding results to mrest */
2954 while (numops > 0) {
2955 numops--;
2956 nfsm_chain_get_32(error, &nmreq, op);
2957 if (error) {
2958 break;
2959 }
2960 switch (op) {
2961 case NFS_OP_CB_GETATTR:
2962 // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS)
2963 np = NULL;
2964 nfsm_chain_get_fh(error, &nmreq, NFS_VER4, fh);
2965 bmlen = NFS_ATTR_BITMAP_LEN;
2966 nfsm_chain_get_bitmap(error, &nmreq, bitmap, bmlen);
2967 if (error) {
2968 status = error;
2969 error = 0;
2970 numops = 0; /* don't process any more ops */
2971 } else {
2972 /* find the node for the file handle */
2973 error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh->fh_data, fh->fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
2974 if (error || !np) {
2975 status = NFSERR_BADHANDLE;
2976 error = 0;
2977 np = NULL;
2978 numops = 0; /* don't process any more ops */
2979 }
2980 }
2981 nfsm_chain_add_32(error, &nmrep, op);
2982 nfsm_chain_add_32(error, &nmrep, status);
2983 if (!error && (status == EBADRPC)) {
2984 error = status;
2985 }
2986 if (np) {
2987 /* only allow returning size, change, and mtime attrs */
2988 NFS_CLEAR_ATTRIBUTES(&rbitmap);
2989 attrbytes = 0;
2990 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
2991 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_CHANGE);
2992 attrbytes += 2 * NFSX_UNSIGNED;
2993 }
2994 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
2995 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_SIZE);
2996 attrbytes += 2 * NFSX_UNSIGNED;
2997 }
2998 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
2999 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_TIME_MODIFY);
3000 attrbytes += 3 * NFSX_UNSIGNED;
3001 }
3002 nfsm_chain_add_bitmap(error, &nmrep, rbitmap, NFS_ATTR_BITMAP_LEN);
3003 nfsm_chain_add_32(error, &nmrep, attrbytes);
3004 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
3005 nfsm_chain_add_64(error, &nmrep,
3006 np->n_vattr.nva_change + ((np->n_flag & NMODIFIED) ? 1 : 0));
3007 }
3008 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
3009 nfsm_chain_add_64(error, &nmrep, np->n_size);
3010 }
3011 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
3012 nfsm_chain_add_64(error, &nmrep, np->n_vattr.nva_timesec[NFSTIME_MODIFY]);
3013 nfsm_chain_add_32(error, &nmrep, np->n_vattr.nva_timensec[NFSTIME_MODIFY]);
3014 }
3015 nfs_node_unlock(np);
3016 vnode_put(NFSTOV(np));
3017 np = NULL;
3018 }
3019 /*
3020 * If we hit an error building the reply, we can't easily back up.
3021 * So we'll just update the status and hope the server ignores the
3022 * extra garbage.
3023 */
3024 break;
3025 case NFS_OP_CB_RECALL:
3026 // (STATEID, TRUNCATE, FH) -> (STATUS)
3027 np = NULL;
3028 nfsm_chain_get_stateid(error, &nmreq, &stateid);
3029 nfsm_chain_get_32(error, &nmreq, truncate);
3030 nfsm_chain_get_fh(error, &nmreq, NFS_VER4, fh);
3031 if (error) {
3032 status = error;
3033 error = 0;
3034 numops = 0; /* don't process any more ops */
3035 } else {
3036 /* find the node for the file handle */
3037 error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh->fh_data, fh->fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
3038 if (error || !np) {
3039 status = NFSERR_BADHANDLE;
3040 error = 0;
3041 np = NULL;
3042 numops = 0; /* don't process any more ops */
3043 } else if (!(np->n_openflags & N_DELEG_MASK) ||
3044 bcmp(&np->n_dstateid, &stateid, sizeof(stateid))) {
3045 /* delegation stateid state doesn't match */
3046 status = NFSERR_BAD_STATEID;
3047 numops = 0; /* don't process any more ops */
3048 }
3049 if (!status) { /* add node to recall queue, and wake socket thread */
3050 nfs4_delegation_return_enqueue(np);
3051 }
3052 if (np) {
3053 nfs_node_unlock(np);
3054 vnode_put(NFSTOV(np));
3055 }
3056 }
3057 nfsm_chain_add_32(error, &nmrep, op);
3058 nfsm_chain_add_32(error, &nmrep, status);
3059 if (!error && (status == EBADRPC)) {
3060 error = status;
3061 }
3062 break;
3063 case NFS_OP_CB_ILLEGAL:
3064 default:
3065 nfsm_chain_add_32(error, &nmrep, NFS_OP_CB_ILLEGAL);
3066 status = NFSERR_OP_ILLEGAL;
3067 nfsm_chain_add_32(error, &nmrep, status);
3068 numops = 0; /* don't process any more ops */
3069 break;
3070 }
3071 numres++;
3072 }
3073
3074 if (!status && error) {
3075 if (error == EBADRPC) {
3076 status = error;
3077 } else if ((error == ENOBUFS) || (error == ENOMEM)) {
3078 status = NFSERR_RESOURCE;
3079 } else {
3080 status = NFSERR_SERVERFAULT;
3081 }
3082 error = 0;
3083 }
3084
3085 /* Now, set the numres field */
3086 *pnumres = txdr_unsigned(numres);
3087 nfsm_chain_build_done(error, &nmrep);
3088 nfsm_chain_null(&nmrep);
3089
3090 /* drop the callback reference on the mount */
3091 lck_mtx_lock(&nfs_global_mutex);
3092 nmp->nm_cbrefs--;
3093 if (!nmp->nm_cbid) {
3094 wakeup(&nmp->nm_cbrefs);
3095 }
3096 lck_mtx_unlock(&nfs_global_mutex);
3097 break;
3098 }
3099
3100 nfsmout:
3101 if (status == EBADRPC) {
3102 OSAddAtomic64(1, &nfsclntstats.rpcinvalid);
3103 }
3104
3105 /* build reply header */
3106 error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mhead);
3107 nfsm_chain_init(&nmrep, mhead);
3108 nfsm_chain_add_32(error, &nmrep, 0); /* insert space for an RPC record mark */
3109 nfsm_chain_add_32(error, &nmrep, xid);
3110 nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
3111 if ((status == ERPCMISMATCH) || (status & NFSERR_AUTHERR)) {
3112 nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
3113 if (status & NFSERR_AUTHERR) {
3114 nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
3115 nfsm_chain_add_32(error, &nmrep, (status & ~NFSERR_AUTHERR));
3116 } else {
3117 nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
3118 nfsm_chain_add_32(error, &nmrep, RPC_VER2);
3119 nfsm_chain_add_32(error, &nmrep, RPC_VER2);
3120 }
3121 } else {
3122 /* reply status */
3123 nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
3124 /* XXX RPCAUTH_NULL verifier */
3125 nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
3126 nfsm_chain_add_32(error, &nmrep, 0);
3127 /* accepted status */
3128 switch (status) {
3129 case EPROGUNAVAIL:
3130 nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
3131 break;
3132 case EPROGMISMATCH:
3133 nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
3134 nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
3135 nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
3136 break;
3137 case EPROCUNAVAIL:
3138 nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
3139 break;
3140 case EBADRPC:
3141 nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
3142 break;
3143 default:
3144 nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
3145 if (status != NFSERR_RETVOID) {
3146 nfsm_chain_add_32(error, &nmrep, status);
3147 }
3148 break;
3149 }
3150 }
3151 nfsm_chain_build_done(error, &nmrep);
3152 if (error) {
3153 nfsm_chain_null(&nmrep);
3154 goto out;
3155 }
3156 error = mbuf_setnext(nmrep.nmc_mcur, mrest);
3157 if (error) {
3158 printf("nfs cb: mbuf_setnext failed %d\n", error);
3159 goto out;
3160 }
3161 mrest = NULL;
3162 /* Calculate the size of the reply */
3163 replen = 0;
3164 for (m = nmrep.nmc_mhead; m; m = mbuf_next(m)) {
3165 replen += mbuf_len(m);
3166 }
3167 mbuf_pkthdr_setlen(mhead, replen);
3168 error = mbuf_pkthdr_setrcvif(mhead, NULL);
3169 nfsm_chain_set_recmark(error, &nmrep, (replen - NFSX_UNSIGNED) | 0x80000000);
3170 nfsm_chain_null(&nmrep);
3171
3172 /* send the reply */
3173 bzero(&msg, sizeof(msg));
3174 error = sock_sendmbuf(so, &msg, mhead, 0, &sentlen);
3175 mhead = NULL;
3176 if (!error && ((int)sentlen != replen)) {
3177 error = EWOULDBLOCK;
3178 }
3179 if (error == EWOULDBLOCK) { /* inability to send response is considered fatal */
3180 error = ETIMEDOUT;
3181 }
3182 out:
3183 if (error) {
3184 nfsm_chain_cleanup(&nmrep);
3185 }
3186 if (mhead) {
3187 mbuf_freem(mhead);
3188 }
3189 if (mrest) {
3190 mbuf_freem(mrest);
3191 }
3192 if (mreq) {
3193 mbuf_freem(mreq);
3194 }
3195 NFS_ZFREE(nfs_fhandle_zone, fh);
3196 return error;
3197 }
3198 #endif /* CONFIG_NFS4 */
3199
3200 /*
3201 * Initialize an nfs_rpc_record_state structure.
3202 */
3203 void
nfs_rpc_record_state_init(struct nfs_rpc_record_state * nrrsp)3204 nfs_rpc_record_state_init(struct nfs_rpc_record_state *nrrsp)
3205 {
3206 bzero(nrrsp, sizeof(*nrrsp));
3207 nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
3208 }
3209
3210 /*
3211 * Clean up an nfs_rpc_record_state structure.
3212 */
3213 void
nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state * nrrsp)3214 nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state *nrrsp)
3215 {
3216 if (nrrsp->nrrs_m) {
3217 mbuf_freem(nrrsp->nrrs_m);
3218 nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
3219 }
3220 }
3221
3222 /*
3223 * Read the next (marked) RPC record from the socket.
3224 *
3225 * *recvp returns if any data was received.
3226 * *mp returns the next complete RPC record
3227 */
int
nfs_rpc_record_read(socket_t so, struct nfs_rpc_record_state *nrrsp, int flags, int *recvp, mbuf_t *mp)
{
	struct iovec aio;
	struct msghdr msg;
	size_t rcvlen;
	int error = 0;
	mbuf_t m;

	*recvp = 0;
	*mp = NULL;

	/* read the TCP RPC record marker */
	while (!error && nrrsp->nrrs_markerleft) {
		/* point the iovec at the still-unread tail of the 4-byte marker */
		aio.iov_base = ((char*)&nrrsp->nrrs_fragleft +
		    sizeof(nrrsp->nrrs_fragleft) - nrrsp->nrrs_markerleft);
		aio.iov_len = nrrsp->nrrs_markerleft;
		bzero(&msg, sizeof(msg));
		msg.msg_iov = &aio;
		msg.msg_iovlen = 1;
		error = sock_receive(so, &msg, flags, &rcvlen);
		if (error || !rcvlen) {
			break;
		}
		*recvp = 1;
		nrrsp->nrrs_markerleft -= rcvlen;
		if (nrrsp->nrrs_markerleft) {
			/* marker still incomplete; try to read more of it */
			continue;
		}
		/* record marker complete */
		nrrsp->nrrs_fragleft = ntohl(nrrsp->nrrs_fragleft);
		if (nrrsp->nrrs_fragleft & 0x80000000) {
			/* high bit set: this is the record's last fragment */
			nrrsp->nrrs_lastfrag = 1;
			nrrsp->nrrs_fragleft &= ~0x80000000;
		}
		nrrsp->nrrs_reclen += nrrsp->nrrs_fragleft;
		if (nrrsp->nrrs_reclen > NFS_MAXPACKET) {
			/* This is SERIOUS! We are out of sync with the sender. */
			log(LOG_ERR, "impossible RPC record length (%d) on callback", nrrsp->nrrs_reclen);
			error = EFBIG;
		}
	}

	/* read the TCP RPC record fragment */
	while (!error && !nrrsp->nrrs_markerleft && nrrsp->nrrs_fragleft) {
		m = NULL;
		rcvlen = nrrsp->nrrs_fragleft;
		error = sock_receivembuf(so, NULL, &m, flags, &rcvlen);
		if (error || !rcvlen || !m) {
			break;
		}
		*recvp = 1;
		/* append mbufs to list */
		nrrsp->nrrs_fragleft -= rcvlen;
		if (!nrrsp->nrrs_m) {
			nrrsp->nrrs_m = m;
		} else {
			error = mbuf_setnext(nrrsp->nrrs_mlast, m);
			if (error) {
				printf("nfs tcp rcv: mbuf_setnext failed %d\n", error);
				mbuf_freem(m);
				break;
			}
		}
		/* walk to the chain's tail so nrrs_mlast stays the append point */
		while (mbuf_next(m)) {
			m = mbuf_next(m);
		}
		nrrsp->nrrs_mlast = m;
	}

	/* done reading fragment? */
	if (!error && !nrrsp->nrrs_markerleft && !nrrsp->nrrs_fragleft) {
		/* reset socket fragment parsing state */
		nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
		if (nrrsp->nrrs_lastfrag) {
			/* RPC record complete */
			*mp = nrrsp->nrrs_m;
			/* reset socket record parsing state */
			nrrsp->nrrs_reclen = 0;
			nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
			nrrsp->nrrs_lastfrag = 0;
		}
	}

	return error;
}
3314
3315
3316
3317 /*
3318 * The NFS client send routine.
3319 *
3320 * Send the given NFS request out the mount's socket.
3321 * Holds nfs_sndlock() for the duration of this call.
3322 *
3323 * - check for request termination (sigintr)
3324 * - wait for reconnect, if necessary
3325 * - UDP: check the congestion window
3326 * - make a copy of the request to send
3327 * - UDP: update the congestion window
3328 * - send the request
3329 *
3330 * If sent successfully, R_MUSTRESEND and R_RESENDERR are cleared.
3331 * rexmit count is also updated if this isn't the first send.
3332 *
3333 * If the send is not successful, make sure R_MUSTRESEND is set.
3334 * If this wasn't the first transmit, set R_RESENDERR.
3335 * Also, undo any UDP congestion window changes made.
3336 *
3337 * If the error appears to indicate that the socket should
3338 * be reconnected, mark the socket for reconnection.
3339 *
3340 * Only return errors when the request should be aborted.
3341 */
int
nfs_send(struct nfsreq *req, int wait)
{
	struct nfsmount *nmp;
	struct nfs_socket *nso;
	int error, error2, sotype, rexmit, slpflag = 0, needrecon;
	struct msghdr msg;
	struct sockaddr *sendnam;
	mbuf_t mreqcopy;
	size_t sentlen = 0;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

again:
	/* serialize senders on this mount's socket */
	error = nfs_sndlock(req);
	if (error) {
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return error;
	}

	error = nfs_sigintr(req->r_nmp, req, NULL, 0);
	if (error) {
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return error;
	}
	nmp = req->r_nmp;
	sotype = nmp->nm_sotype;

	/*
	 * If it's a setup RPC but we're not in SETUP... must need reconnect.
	 * If it's a recovery RPC but the socket's not ready... must need reconnect.
	 */
	if (((req->r_flags & R_SETUP) && !(nmp->nm_sockflags & NMSOCK_SETUP)) ||
	    ((req->r_flags & R_RECOVER) && !(nmp->nm_sockflags & NMSOCK_READY))) {
		error = ETIMEDOUT;
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return error;
	}

	/* If the socket needs reconnection, do that now. */
	/* wait until socket is ready - unless this request is part of setup */
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_sockflags & NMSOCK_READY) &&
	    !((nmp->nm_sockflags & NMSOCK_SETUP) && (req->r_flags & R_SETUP))) {
		if (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) {
			slpflag |= PCATCH;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		nfs_sndunlock(req);
		if (!wait) {
			/* async caller: just flag the request for resend and return */
			lck_mtx_lock(&req->r_mtx);
			req->r_flags &= ~R_SENDING;
			req->r_flags |= R_MUSTRESEND;
			req->r_rtt = 0;
			lck_mtx_unlock(&req->r_mtx);
			return 0;
		}
		NFS_SOCK_DBG("nfs_send: 0x%llx wait reconnect\n", req->r_xid);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		lck_mtx_lock(&nmp->nm_lock);
		while (!(nmp->nm_sockflags & NMSOCK_READY)) {
			/* don't bother waiting if the socket thread won't be reconnecting it */
			if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
				error = EIO;
				break;
			}
			if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (nmp->nm_reconnect_start > 0)) {
				struct timeval now;
				microuptime(&now);
				if ((now.tv_sec - nmp->nm_reconnect_start) >= 8) {
					/* soft mount in reconnect for a while... terminate ASAP */
					OSAddAtomic64(1, &nfsclntstats.rpctimeouts);
					req->r_flags |= R_SOFTTERM;
					req->r_error = error = ETIMEDOUT;
					break;
				}
			}
			/* make sure socket thread is running, then wait */
			nfs_mount_sock_thread_wake(nmp);
			if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) {
				break;
			}
			msleep(req, &nmp->nm_lock, slpflag | PSOCK, "nfsconnectwait", &ts);
			slpflag = 0;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		if (error) {
			lck_mtx_lock(&req->r_mtx);
			req->r_error = error;
			req->r_flags &= ~R_SENDING;
			lck_mtx_unlock(&req->r_mtx);
			return error;
		}
		/* socket may be ready now; retake the send lock and re-check */
		goto again;
	}
	nso = nmp->nm_nso;
	/* note that we're using the mount's socket to do the send */
	nmp->nm_state |= NFSSTA_SENDING; /* will be cleared by nfs_sndunlock() */
	lck_mtx_unlock(&nmp->nm_lock);
	if (!nso) {
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_SENDING;
		req->r_flags |= R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		return 0;
	}

	lck_mtx_lock(&req->r_mtx);
	rexmit = (req->r_flags & R_SENT);

	if (sotype == SOCK_DGRAM) {
		/* UDP: enforce the congestion window before sending */
		lck_mtx_lock(&nmp->nm_lock);
		if (!(req->r_flags & R_CWND) && (nmp->nm_sent >= nmp->nm_cwnd)) {
			/* if we can't send this out yet, wait on the cwnd queue */
			slpflag = (NMFLAG(nmp, INTR) && req->r_thread) ? PCATCH : 0;
			lck_mtx_unlock(&nmp->nm_lock);
			nfs_sndunlock(req);
			req->r_flags &= ~R_SENDING;
			req->r_flags |= R_MUSTRESEND;
			lck_mtx_unlock(&req->r_mtx);
			if (!wait) {
				req->r_rtt = 0;
				return 0;
			}
			lck_mtx_lock(&nmp->nm_lock);
			while (nmp->nm_sent >= nmp->nm_cwnd) {
				if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) {
					break;
				}
				TAILQ_INSERT_TAIL(&nmp->nm_cwndq, req, r_cchain);
				msleep(req, &nmp->nm_lock, slpflag | (PZERO - 1), "nfswaitcwnd", &ts);
				slpflag = 0;
				/* remove ourselves from the cwnd queue if still on it */
				if ((req->r_cchain.tqe_next != NFSREQNOLIST)) {
					TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
					req->r_cchain.tqe_next = NFSREQNOLIST;
				}
			}
			lck_mtx_unlock(&nmp->nm_lock);
			goto again;
		}
		/*
		 * We update these *before* the send to avoid racing
		 * against others who may be looking to send requests.
		 */
		if (!rexmit) {
			/* first transmit */
			req->r_flags |= R_CWND;
			nmp->nm_sent += NFS_CWNDSCALE;
		} else {
			/*
			 * When retransmitting, turn timing off
			 * and divide congestion window by 2.
			 */
			req->r_flags &= ~R_TIMING;
			nmp->nm_cwnd >>= 1;
			if (nmp->nm_cwnd < NFS_CWNDSCALE) {
				nmp->nm_cwnd = NFS_CWNDSCALE;
			}
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}

	req->r_flags &= ~R_MUSTRESEND;
	lck_mtx_unlock(&req->r_mtx);

	/* send a copy so the original request chain is preserved for retransmits */
	error = mbuf_copym(req->r_mhead, 0, MBUF_COPYALL,
	    wait ? MBUF_WAITOK : MBUF_DONTWAIT, &mreqcopy);
	if (error) {
		if (wait) {
			log(LOG_INFO, "nfs_send: mbuf copy failed %d\n", error);
		}
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_SENDING;
		req->r_flags |= R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		return 0;
	}

	bzero(&msg, sizeof(msg));
	/* unconnected datagram sockets need an explicit destination address */
	if ((sotype != SOCK_STREAM) && !sock_isconnected(nso->nso_so) && ((sendnam = nmp->nm_saddr))) {
		msg.msg_name = (caddr_t)sendnam;
		msg.msg_namelen = sendnam->sa_len;
	}
	NFS_SOCK_DUMP_MBUF("Sending mbuf\n", mreqcopy);
	error = sock_sendmbuf(nso->nso_so, &msg, mreqcopy, 0, &sentlen);
	if (error || (sentlen != req->r_mreqlen)) {
		NFS_SOCK_DBG("nfs_send: 0x%llx sent %d/%d error %d\n",
		    req->r_xid, (int)sentlen, (int)req->r_mreqlen, error);
	}

	if (!error && (sentlen != req->r_mreqlen)) {
		error = EWOULDBLOCK;
	}
	/* a partial send on TCP leaves the stream out of sync: must reconnect */
	needrecon = ((sotype == SOCK_STREAM) && sentlen && (sentlen != req->r_mreqlen));

	lck_mtx_lock(&req->r_mtx);
	req->r_flags &= ~R_SENDING;
	req->r_rtt = 0;
	if (rexmit && (++req->r_rexmit > NFS_MAXREXMIT)) {
		req->r_rexmit = NFS_MAXREXMIT;
	}

	if (!error) {
		/* SUCCESS */
		req->r_flags &= ~R_RESENDERR;
		if (rexmit) {
			OSAddAtomic64(1, &nfsclntstats.rpcretries);
		}
		req->r_flags |= R_SENT;
		if (req->r_flags & R_WAITSENT) {
			req->r_flags &= ~R_WAITSENT;
			wakeup(req);
		}
		nfs_sndunlock(req);
		lck_mtx_unlock(&req->r_mtx);
		return 0;
	}

	/* send failed */
	req->r_flags |= R_MUSTRESEND;
	if (rexmit) {
		req->r_flags |= R_RESENDERR;
	}
	if ((error == EINTR) || (error == ERESTART)) {
		req->r_error = error;
	}
	lck_mtx_unlock(&req->r_mtx);

	if (sotype == SOCK_DGRAM) {
		/*
		 * Note: even though a first send may fail, we consider
		 * the request sent for congestion window purposes.
		 * So we don't need to undo any of the changes made above.
		 */
		/*
		 * Socket errors ignored for connectionless sockets??
		 * For now, ignore them all
		 */
		if ((error != EINTR) && (error != ERESTART) &&
		    (error != EWOULDBLOCK) && (error != EIO) && (nso == nmp->nm_nso)) {
			int clearerror = 0, optlen = sizeof(clearerror);
			/* fetch (and thereby clear) the pending socket error */
			sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &clearerror, &optlen);
#ifdef NFS_SOCKET_DEBUGGING
			if (clearerror) {
				NFS_SOCK_DBG("nfs_send: ignoring UDP socket error %d so %d\n",
				    error, clearerror);
			}
#endif
		}
	}

	/* check if it appears we should reconnect the socket */
	switch (error) {
	case EWOULDBLOCK:
		/* if send timed out, reconnect if on TCP */
		if (sotype != SOCK_STREAM) {
			break;
		}
		OS_FALLTHROUGH;
	case EPIPE:
	case EADDRNOTAVAIL:
	case ENETDOWN:
	case ENETUNREACH:
	case ENETRESET:
	case ECONNABORTED:
	case ECONNRESET:
	case ENOTCONN:
	case ESHUTDOWN:
	case ECONNREFUSED:
	case EHOSTDOWN:
	case EHOSTUNREACH:
	/* case ECANCELED??? */
		needrecon = 1;
		break;
	}
	if (needrecon && (nso == nmp->nm_nso)) { /* mark socket as needing reconnect */
		NFS_SOCK_DBG("nfs_send: 0x%llx need reconnect %d\n", req->r_xid, error);
		nfs_need_reconnect(nmp);
	}

	nfs_sndunlock(req);

	if (nfs_is_dead(error, nmp)) {
		error = EIO;
	}

	/*
	 * Don't log some errors:
	 * EPIPE errors may be common with servers that drop idle connections.
	 * EADDRNOTAVAIL may occur on network transitions.
	 * ENOTCONN may occur under some network conditions.
	 */
	if ((error == EPIPE) || (error == EADDRNOTAVAIL) || (error == ENOTCONN)) {
		error = 0;
	}
	if (error && (error != EINTR) && (error != ERESTART)) {
		log(LOG_INFO, "nfs send error %d for server %s\n", error,
		    !req->r_nmp ? "<unmounted>" :
		    vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname);
	}

	/* prefer request termination error over other errors */
	error2 = nfs_sigintr(req->r_nmp, req, req->r_thread, 0);
	if (error2) {
		error = error2;
	}

	/* only allow the following errors to be returned */
	if ((error != EINTR) && (error != ERESTART) && (error != EIO) &&
	    (error != ENXIO) && (error != ETIMEDOUT)) {
		/*
		 * We got some error we don't know what do do with,
		 * i.e., we're not reconnecting, we map it to
		 * EIO. Presumably our send failed and we better tell
		 * the caller so they don't wait for a reply that is
		 * never going to come.  If we are reconnecting we
		 * return 0 and the request will be resent.
		 */
		error = needrecon ? 0 : EIO;
	}
	return error;
}
3681
3682 /*
3683 * NFS client socket upcalls
3684 *
3685 * Pull RPC replies out of an NFS mount's socket and match them
3686 * up with the pending request.
3687 *
3688 * The datagram code is simple because we always get whole
3689 * messages out of the socket.
3690 *
3691 * The stream code is more involved because we have to parse
3692 * the RPC records out of the stream.
3693 */
3694
3695 /* NFS client UDP socket upcall */
3696 void
nfs_udp_rcv(socket_t so,void * arg,__unused int waitflag)3697 nfs_udp_rcv(socket_t so, void *arg, __unused int waitflag)
3698 {
3699 struct nfsmount *nmp = arg;
3700 struct nfs_socket *nso = nmp->nm_nso;
3701 size_t rcvlen;
3702 mbuf_t m;
3703 int error = 0;
3704
3705 if (nmp->nm_sockflags & NMSOCK_CONNECTING) {
3706 return;
3707 }
3708
3709 do {
3710 /* make sure we're on the current socket */
3711 if (!nso || (nso->nso_so != so)) {
3712 return;
3713 }
3714
3715 m = NULL;
3716 rcvlen = 1000000;
3717 error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
3718 if (m) {
3719 nfs_request_match_reply(nmp, m);
3720 }
3721 } while (m && !error);
3722
3723 if (error && (error != EWOULDBLOCK)) {
3724 /* problems with the socket... mark for reconnection */
3725 NFS_SOCK_DBG("nfs_udp_rcv: need reconnect %d\n", error);
3726 nfs_need_reconnect(nmp);
3727 }
3728 }
3729
3730 /* NFS client TCP socket upcall */
void
nfs_tcp_rcv(socket_t so, void *arg, __unused int waitflag)
{
	struct nfsmount *nmp = arg;
	struct nfs_socket *nso = nmp->nm_nso;
	struct nfs_rpc_record_state nrrs;
	mbuf_t m;
	int error = 0;
	int recv = 1;
	int wup = 0;	/* set if a disconnecting thread must be woken when we finish */

	/* Ignore upcalls that race with an in-progress (re)connect. */
	if (nmp->nm_sockflags & NMSOCK_CONNECTING) {
		return;
	}

	/* make sure we're on the current socket */
	lck_mtx_lock(&nmp->nm_lock);
	nso = nmp->nm_nso;
	if (!nso || (nso->nso_so != so) || (nmp->nm_sockflags & (NMSOCK_DISCONNECTING))) {
		lck_mtx_unlock(&nmp->nm_lock);
		return;
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/* make sure this upcall should be trying to do work */
	lck_mtx_lock(&nso->nso_lock);
	if (nso->nso_flags & (NSO_UPCALL | NSO_DISCONNECTING | NSO_DEAD)) {
		lck_mtx_unlock(&nso->nso_lock);
		return;
	}
	/* claim the upcall; copy out the record parsing state so we can work unlocked */
	nso->nso_flags |= NSO_UPCALL;
	nrrs = nso->nso_rrs;
	lck_mtx_unlock(&nso->nso_lock);

	/* loop while we make error-free progress */
	while (!error && recv) {
		error = nfs_rpc_record_read(so, &nrrs, MSG_DONTWAIT, &recv, &m);
		if (m) { /* match completed response with request */
			nfs_request_match_reply(nmp, m);
		}
	}

	/* Update the socket's rpc parsing state */
	lck_mtx_lock(&nso->nso_lock);
	nso->nso_rrs = nrrs;
	if (nso->nso_flags & NSO_DISCONNECTING) {
		/* a disconnect is blocked waiting for this upcall to drop NSO_UPCALL */
		wup = 1;
	}
	nso->nso_flags &= ~NSO_UPCALL;
	lck_mtx_unlock(&nso->nso_lock);
	if (wup) {
		wakeup(&nso->nso_flags);
	}

#ifdef NFS_SOCKET_DEBUGGING
	if (!recv && (error != EWOULDBLOCK)) {
		NFS_SOCK_DBG("nfs_tcp_rcv: got nothing, error %d, got FIN?\n", error);
	}
#endif
	/* note: no error and no data indicates server closed its end */
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/* problems with the socket... mark for reconnection */
		NFS_SOCK_DBG("nfs_tcp_rcv: need reconnect %d\n", error);
		nfs_need_reconnect(nmp);
	}
}
3797
3798 /*
3799 * "poke" a socket to try to provoke any pending errors
3800 */
void
nfs_sock_poke(struct nfsmount *nmp)
{
	struct iovec aio;
	struct msghdr msg;
	size_t len;
	int error = 0;
	int dummy;

	lck_mtx_lock(&nmp->nm_lock);
	if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) ||
	    !(nmp->nm_sockflags & NMSOCK_READY) || !nmp->nm_nso || !nmp->nm_nso->nso_so) {
		/* Nothing to poke */
		nmp->nm_sockflags &= ~NMSOCK_POKE;
		wakeup(&nmp->nm_sockflags);
		lck_mtx_unlock(&nmp->nm_lock);
		return;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	/*
	 * Send a zero-length message: it can't perturb the stream but
	 * will surface any error already pending on the socket.
	 */
	aio.iov_base = &dummy;
	aio.iov_len = 0;
	len = 0;
	bzero(&msg, sizeof(msg));
	msg.msg_iov = &aio;
	msg.msg_iovlen = 1;
	error = sock_send(nmp->nm_nso->nso_so, &msg, MSG_DONTWAIT, &len);
	NFS_SOCK_DBG("nfs_sock_poke: error %d\n", error);
	/* done poking: clear the flag and wake anyone waiting for the poke to finish */
	lck_mtx_lock(&nmp->nm_lock);
	nmp->nm_sockflags &= ~NMSOCK_POKE;
	wakeup(&nmp->nm_sockflags);
	lck_mtx_unlock(&nmp->nm_lock);
	/* let the mount-health logic decide if this error means the server is dead */
	nfs_is_dead(error, nmp);
}
3834
3835 /*
3836 * Match an RPC reply with the corresponding request
3837 */
void
nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep)
{
	struct nfsreq *req;
	struct nfsm_chain nmrep;
	u_int32_t reply = 0, rxid = 0;
	int error = 0, asyncioq, t1;

	bzero(&nmrep, sizeof(nmrep));
	/* Get the xid and check that it is an rpc reply */
	nfsm_chain_dissect_init(error, &nmrep, mrep);
	nfsm_chain_get_32(error, &nmrep, rxid);
	nfsm_chain_get_32(error, &nmrep, reply);
	if (error || (reply != RPC_REPLY)) {
		OSAddAtomic64(1, &nfsclntstats.rpcinvalid);
		mbuf_freem(mrep);
		return;
	}

	/*
	 * Loop through the request list to match up the reply.
	 * If no match, just drop it.
	 */
	lck_mtx_lock(&nfs_request_mutex);
	TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
		/* skip requests that already have a reply or whose xid doesn't match */
		if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
			continue;
		}
		/* looks like we have it, grab lock and double check */
		lck_mtx_lock(&req->r_mtx);
		if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
			lck_mtx_unlock(&req->r_mtx);
			continue;
		}
		/* Found it.. */
		req->r_nmrep = nmrep;
		lck_mtx_lock(&nmp->nm_lock);
		if (nmp->nm_sotype == SOCK_DGRAM) {
			/*
			 * Update congestion window.
			 * Do the additive increase of one rpc/rtt.
			 */
			FSDBG(530, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
			if (nmp->nm_cwnd <= nmp->nm_sent) {
				nmp->nm_cwnd +=
				    ((NFS_CWNDSCALE * NFS_CWNDSCALE) +
				    (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
				if (nmp->nm_cwnd > NFS_MAXCWND) {
					nmp->nm_cwnd = NFS_MAXCWND;
				}
			}
			if (req->r_flags & R_CWND) {
				/* this request no longer counts against the window */
				nmp->nm_sent -= NFS_CWNDSCALE;
				req->r_flags &= ~R_CWND;
			}
			if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
				/* congestion window is open, poke the cwnd queue */
				struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
				TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
				req2->r_cchain.tqe_next = NFSREQNOLIST;
				wakeup(req2);
			}
		}
		/*
		 * Update rtt using a gain of 0.125 on the mean
		 * and a gain of 0.25 on the deviation.
		 */
		if (req->r_flags & R_TIMING) {
			/*
			 * Since the timer resolution of
			 * NFS_HZ is so coarse, it can often
			 * result in r_rtt == 0. Since
			 * r_rtt == N means that the actual
			 * rtt is between N+dt and N+2-dt ticks,
			 * add 1.
			 */
			if (proct[req->r_procnum] == 0) {
				panic("nfs_request_match_reply: proct[%d] is zero", req->r_procnum);
			}
			t1 = req->r_rtt + 1;
			t1 -= (NFS_SRTT(req) >> 3);
			NFS_SRTT(req) += t1;
			if (t1 < 0) {
				t1 = -t1;
			}
			t1 -= (NFS_SDRTT(req) >> 2);
			NFS_SDRTT(req) += t1;
		}
		nmp->nm_timeouts = 0;
		lck_mtx_unlock(&nmp->nm_lock);
		/* signal anyone waiting on this request */
		wakeup(req);
		asyncioq = (req->r_callback.rcb_func != NULL);
#if CONFIG_NFS_GSS
		if (nfs_request_using_gss(req)) {
			/* reply received: release this request's GSS sequence-window slot */
			nfs_gss_clnt_rpcdone(req);
		}
#endif /* CONFIG_NFS_GSS */
		lck_mtx_unlock(&req->r_mtx);
		lck_mtx_unlock(&nfs_request_mutex);
		/* if it's an async RPC with a callback, queue it up */
		if (asyncioq) {
			nfs_asyncio_finish(req);
		}
		break;
	}

	if (!req) {
		/* not matched to a request, so drop it. */
		lck_mtx_unlock(&nfs_request_mutex);
		OSAddAtomic64(1, &nfsclntstats.rpcunexpected);
		mbuf_freem(mrep);
	}
}
3952
3953 /*
3954 * Wait for the reply for a given request...
3955 * ...potentially resending the request if necessary.
3956 */
int
nfs_wait_reply(struct nfsreq *req)
{
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	int error = 0, slpflag, first = 1;

	/* interruptible sleep only for INTR mounts with a thread and no R_NOINTR */
	if (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) {
		slpflag = PCATCH;
	} else {
		slpflag = 0;
	}

	lck_mtx_lock(&req->r_mtx);
	while (!req->r_nmrep.nmc_mhead) {
		/* on the first pass the thread is not checked for pending signals (NULL thread) */
		if ((error = nfs_sigintr(req->r_nmp, req, first ? NULL : req->r_thread, 0))) {
			break;
		}
		/* request terminated with an error, or a reply raced in */
		if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) {
			break;
		}
		/* check if we need to resend */
		if (req->r_flags & R_MUSTRESEND) {
			NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d\n",
			    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
			req->r_flags |= R_SENDING;
			lck_mtx_unlock(&req->r_mtx);
			if (nfs_request_using_gss(req)) {
				/*
				 * It's an RPCSEC_GSS request.
				 * Can't just resend the original request
				 * without bumping the cred sequence number.
				 * Go back and re-build the request.
				 */
				lck_mtx_lock(&req->r_mtx);
				req->r_flags &= ~R_SENDING;
				lck_mtx_unlock(&req->r_mtx);
				return EAGAIN;
			}
			error = nfs_send(req, 1);
			lck_mtx_lock(&req->r_mtx);
			NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d err %d\n",
			    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt, error);
			if (error) {
				break;
			}
			/* re-check for termination or a reply that arrived during the resend */
			if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) {
				break;
			}
		}
		/* need to poll if we're P_NOREMOTEHANG */
		if (nfs_noremotehang(req->r_thread)) {
			ts.tv_sec = 1;
		}
		/* sleep until woken by a matched reply, timeout, or signal */
		msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitreply", &ts);
		first = slpflag = 0;
	}
	lck_mtx_unlock(&req->r_mtx);

	return error;
}
4017
4018 /*
4019 * An NFS request goes something like this:
4020 * (nb: always frees up mreq mbuf list)
4021 * nfs_request_create()
4022 * - allocates a request struct if one is not provided
4023 * - initial fill-in of the request struct
4024 * nfs_request_add_header()
4025 * - add the RPC header
4026 * nfs_request_send()
4027 * - link it into list
4028 * - call nfs_send() for first transmit
4029 * nfs_request_wait()
4030 * - call nfs_wait_reply() to wait for the reply
4031 * nfs_request_finish()
4032 * - break down rpc header and return with error or nfs reply
4033 * pointed to by nmrep.
4034 * nfs_request_rele()
4035 * nfs_request_destroy()
4036 * - clean up the request struct
4037 * - free the request struct if it was allocated by nfs_request_create()
4038 */
4039
4040 /*
4041 * Set up an NFS request struct (allocating if no request passed in).
4042 */
int
nfs_request_create(
	nfsnode_t np,
	mount_t mp, /* used only if !np */
	struct nfsm_chain *nmrest,
	int procnum,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq **reqp)
{
	struct nfsreq *req, *newreq = NULL;
	struct nfsmount *nmp;

	req = *reqp;
	if (!req) {
		/* allocate a new NFS request structure */
		req = newreq = zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO);
	} else {
		bzero(req, sizeof(*req));
	}
	if (req == newreq) {
		/* remember we own it so nfs_request_destroy() frees it */
		req->r_flags = R_ALLOCATED;
	}

	nmp = VFSTONFS(np ? NFSTOMP(np) : mp);
	if (nfs_mount_gone(nmp)) {
		if (newreq) {
			NFS_ZFREE(nfs_req_zone, newreq);
		}
		return ENXIO;
	}
	lck_mtx_lock(&nmp->nm_lock);
	/* refuse new requests on a dead/forced mount that's already timed out */
	if ((nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) &&
	    (nmp->nm_state & NFSSTA_TIMEO)) {
		lck_mtx_unlock(&nmp->nm_lock);
		mbuf_freem(nmrest->nmc_mhead);
		nmrest->nmc_mhead = NULL;
		if (newreq) {
			NFS_ZFREE(nfs_req_zone, newreq);
		}
		return ENXIO;
	}

	/* bump the per-procedure statistics counters */
	if ((nmp->nm_vers != NFS_VER4) && (procnum >= 0) && (procnum < NFS_NPROCS)) {
		OSAddAtomic64(1, &nfsclntstats.rpccntv3[procnum]);
	}
	if (nmp->nm_vers == NFS_VER4) {
		if (procnum == NFSPROC4_COMPOUND || procnum == NFSPROC4_NULL) {
			OSAddAtomic64(1, &nfsclntstats.opcntv4[procnum]);
		} else {
			panic("nfs_request: invalid NFSv4 RPC request %d", procnum);
		}
	}

	lck_mtx_init(&req->r_mtx, &nfs_request_grp, LCK_ATTR_NULL);
	req->r_nmp = nmp;
	nmp->nm_ref++;	/* request holds a reference on the mount */
	req->r_np = np;
	req->r_thread = thd;
	if (!thd) {
		/* no thread context: the request can't be interrupted by signals */
		req->r_flags |= R_NOINTR;
	}
	if (IS_VALID_CRED(cred)) {
		kauth_cred_ref(cred);
		req->r_cred = cred;
	}
	req->r_procnum = procnum;
	if (proct[procnum] > 0) {
		/* this procedure participates in RTT estimation */
		req->r_flags |= R_TIMING;
	}
	req->r_nmrep.nmc_mhead = NULL;
	SLIST_INIT(&req->r_gss_seqlist);
	/* mark the request as not on any async/resend/cwnd queue yet */
	req->r_achain.tqe_next = NFSREQNOLIST;
	req->r_rchain.tqe_next = NFSREQNOLIST;
	req->r_cchain.tqe_next = NFSREQNOLIST;

	/* set auth flavor to use for request */
	if (!req->r_cred) {
		req->r_auth = RPCAUTH_NONE;
	} else if (req->r_np && (req->r_np->n_auth != RPCAUTH_INVALID)) {
		req->r_auth = req->r_np->n_auth;
	} else {
		req->r_auth = nmp->nm_auth;
	}

	lck_mtx_unlock(&nmp->nm_lock);

	/* move the request mbuf chain to the nfsreq */
	req->r_mrest = nmrest->nmc_mhead;
	nmrest->nmc_mhead = NULL;

	req->r_flags |= R_INITTED;
	req->r_refs = 1;
	if (newreq) {
		*reqp = req;
	}
	return 0;
}
4141
4142 /*
4143 * Clean up and free an NFS request structure.
4144 */
void
nfs_request_destroy(struct nfsreq *req)
{
	struct nfsmount *nmp;
	int clearjbtimeo = 0;

#if CONFIG_NFS_GSS
	struct gss_seq *gsp, *ngsp;
#endif

	/* nothing to do for an uninitialized request */
	if (!req || !(req->r_flags & R_INITTED)) {
		return;
	}
	nmp = req->r_nmp;
	req->r_flags &= ~R_INITTED;
	if (req->r_lflags & RL_QUEUED) {
		nfs_reqdequeue(req);
	}

	if (req->r_achain.tqe_next != NFSREQNOLIST) {
		/*
		 * Still on an async I/O queue?
		 * %%% But which one, we may be on a local iod.
		 */
		lck_mtx_lock(&nfsiod_mutex);
		if (nmp && req->r_achain.tqe_next != NFSREQNOLIST) {
			TAILQ_REMOVE(&nmp->nm_iodq, req, r_achain);
			req->r_achain.tqe_next = NFSREQNOLIST;
		}
		lck_mtx_unlock(&nfsiod_mutex);
	}

	lck_mtx_lock(&req->r_mtx);
	if (nmp) {
		lck_mtx_lock(&nmp->nm_lock);
		if (req->r_flags & R_CWND) {
			/* Decrement the outstanding request count. */
			req->r_flags &= ~R_CWND;
			nmp->nm_sent -= NFS_CWNDSCALE;
			if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
				/* congestion window is open, poke the cwnd queue */
				struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
				TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
				req2->r_cchain.tqe_next = NFSREQNOLIST;
				wakeup(req2);
			}
		}
		/* XXX should we just remove this conditional, we should have a reference if we're resending */
		if ((req->r_flags & R_RESENDQ) && req->r_rchain.tqe_next != NFSREQNOLIST) {
			TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
			req->r_flags &= ~R_RESENDQ;
			req->r_rchain.tqe_next = NFSREQNOLIST;
		}
		if (req->r_cchain.tqe_next != NFSREQNOLIST) {
			/* still waiting on the congestion-window queue; unlink */
			TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
			req->r_cchain.tqe_next = NFSREQNOLIST;
		}
		if (req->r_flags & R_JBTPRINTFMSG) {
			/* this request contributed to the jukebox message count */
			req->r_flags &= ~R_JBTPRINTFMSG;
			nmp->nm_jbreqs--;
			clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}
	lck_mtx_unlock(&req->r_mtx);

	if (clearjbtimeo) {
		nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
	}
	/* r_mhead (full RPC message) includes r_mrest; free whichever we have */
	if (req->r_mhead) {
		mbuf_freem(req->r_mhead);
	} else if (req->r_mrest) {
		mbuf_freem(req->r_mrest);
	}
	if (req->r_nmrep.nmc_mhead) {
		mbuf_freem(req->r_nmrep.nmc_mhead);
	}
	if (IS_VALID_CRED(req->r_cred)) {
		kauth_cred_unref(&req->r_cred);
	}
#if CONFIG_NFS_GSS
	if (nfs_request_using_gss(req)) {
		nfs_gss_clnt_rpcdone(req);
	}
	SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp)
	kfree_type(struct gss_seq, gsp);
	if (req->r_gss_ctx) {
		nfs_gss_clnt_ctx_unref(req);
	}
#endif /* CONFIG_NFS_GSS */
	if (req->r_wrongsec) {
		kfree_data(req->r_wrongsec, NX_MAX_SEC_FLAVORS * sizeof(uint32_t));
	}
	if (nmp) {
		/* drop the mount reference taken in nfs_request_create() */
		nfs_mount_rele(nmp);
	}
	lck_mtx_destroy(&req->r_mtx, &nfs_request_grp);
	if (req->r_flags & R_ALLOCATED) {
		NFS_ZFREE(nfs_req_zone, req);
	}
}
4246
4247 void
nfs_request_ref(struct nfsreq * req,int locked)4248 nfs_request_ref(struct nfsreq *req, int locked)
4249 {
4250 if (!locked) {
4251 lck_mtx_lock(&req->r_mtx);
4252 }
4253 if (req->r_refs <= 0) {
4254 panic("nfsreq reference error");
4255 }
4256 req->r_refs++;
4257 if (!locked) {
4258 lck_mtx_unlock(&req->r_mtx);
4259 }
4260 }
4261
4262 void
nfs_request_rele(struct nfsreq * req)4263 nfs_request_rele(struct nfsreq *req)
4264 {
4265 int destroy;
4266
4267 lck_mtx_lock(&req->r_mtx);
4268 if (req->r_refs <= 0) {
4269 panic("nfsreq reference underflow");
4270 }
4271 req->r_refs--;
4272 destroy = (req->r_refs == 0);
4273 lck_mtx_unlock(&req->r_mtx);
4274 if (destroy) {
4275 nfs_request_destroy(req);
4276 }
4277 }
4278
4279
4280 /*
4281 * Add an (updated) RPC header with authorization to an NFS request.
4282 */
int
nfs_request_add_header(struct nfsreq *req)
{
	struct nfsmount *nmp;
	int error = 0;
	mbuf_t m;

	/* free up any previous header */
	if ((m = req->r_mhead)) {
		/* free only the header mbufs; stop at the body (r_mrest), which we keep */
		while (m && (m != req->r_mrest)) {
			m = mbuf_free(m);
		}
		req->r_mhead = NULL;
	}

	nmp = req->r_nmp;
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	/* build a fresh RPC header (assigns a new xid if r_xid is 0) onto the body */
	error = nfsm_rpchead(req, req->r_mrest, &req->r_xid, &req->r_mhead);
	if (error) {
		return error;
	}

	req->r_mreqlen = mbuf_pkthdr_len(req->r_mhead);
	nmp = req->r_nmp;
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	lck_mtx_lock(&nmp->nm_lock);
	/* soft mounts/requests give up after nm_retry attempts; hard mounts retry forever */
	if (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) {
		req->r_retry = nmp->nm_retry;
	} else {
		req->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */
	}
	lck_mtx_unlock(&nmp->nm_lock);

	return error;
}
4323
4324
4325 /*
4326 * Queue an NFS request up and send it out.
4327 */
int
nfs_request_send(struct nfsreq *req, int wait)
{
	struct nfsmount *nmp;
	struct timeval now;

	lck_mtx_lock(&req->r_mtx);
	req->r_flags |= R_SENDING;
	lck_mtx_unlock(&req->r_mtx);

	lck_mtx_lock(&nfs_request_mutex);

	nmp = req->r_nmp;
	if (nfs_mount_gone(nmp)) {
		lck_mtx_unlock(&nfs_request_mutex);
		return ENXIO;
	}

	microuptime(&now);
	if (!req->r_start) {
		req->r_start = now.tv_sec;
		/* backdate r_lastmsg so the first tprintf fires after the initial delay */
		req->r_lastmsg = now.tv_sec -
		    ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
	}

	OSAddAtomic64(1, &nfsclntstats.rpcrequests);

	/*
	 * Make sure the request is not in the queue.
	 */
	if (req->r_lflags & RL_QUEUED) {
#if DEVELOPMENT
		panic("nfs_request_send: req %p is already in global requests queue", req);
#else
		/* on release builds, quietly dequeue it rather than panic */
		TAILQ_REMOVE(&nfs_reqq, req, r_chain);
		req->r_lflags &= ~RL_QUEUED;
#endif /* DEVELOPMENT */
	}

	/*
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first.
	 * Make sure that the request queue timer is running
	 * to check for possible request timeout.
	 */
	TAILQ_INSERT_TAIL(&nfs_reqq, req, r_chain);
	req->r_lflags |= RL_QUEUED;
	if (!nfs_request_timer_on) {
		nfs_request_timer_on = 1;
		nfs_interval_timer_start(nfs_request_timer_call,
		    NFS_REQUESTDELAY);
	}
	lck_mtx_unlock(&nfs_request_mutex);

	/* Send the request... */
	return nfs_send(req, wait);
}
4385
4386 /*
4387 * Call nfs_wait_reply() to wait for the reply.
4388 */
4389 void
nfs_request_wait(struct nfsreq * req)4390 nfs_request_wait(struct nfsreq *req)
4391 {
4392 req->r_error = nfs_wait_reply(req);
4393 }
4394
4395 /*
4396 * Finish up an NFS request by dequeueing it and
4397 * doing the initial NFS request reply processing.
4398 */
int
nfs_request_finish(
	struct nfsreq *req,
	struct nfsm_chain *nmrepp,
	int *status)
{
	struct nfsmount *nmp;
	mbuf_t mrep;
	int verf_type = 0;
	uint32_t verf_len = 0;
	uint32_t reply_status = 0;
	uint32_t rejected_status = 0;
	uint32_t auth_status = 0;
	uint32_t accepted_status = 0;
	struct nfsm_chain nmrep;
	int error, clearjbtimeo;

	error = req->r_error;

	if (nmrepp) {
		nmrepp->nmc_mhead = NULL;
	}

	/* RPC done, unlink the request. */
	nfs_reqdequeue(req);

	mrep = req->r_nmrep.nmc_mhead;

	nmp = req->r_nmp;

	if ((req->r_flags & R_CWND) && nmp) {
		/*
		 * Decrement the outstanding request count.
		 */
		req->r_flags &= ~R_CWND;
		lck_mtx_lock(&nmp->nm_lock);
		FSDBG(273, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
		nmp->nm_sent -= NFS_CWNDSCALE;
		if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
			/* congestion window is open, poke the cwnd queue */
			struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
			TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
			req2->r_cchain.tqe_next = NFSREQNOLIST;
			wakeup(req2);
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}

#if CONFIG_NFS_GSS
	if (nfs_request_using_gss(req)) {
		/*
		 * If the request used an RPCSEC_GSS credential
		 * then reset its sequence number bit in the
		 * request window.
		 */
		nfs_gss_clnt_rpcdone(req);

		/*
		 * If we need to re-send, go back and re-build the
		 * request based on a new sequence number.
		 * Note that we're using the original XID.
		 */
		if (error == EAGAIN) {
			req->r_error = 0;
			if (mrep) {
				mbuf_freem(mrep);
			}
			error = nfs_gss_clnt_args_restore(req); // remove any trailer mbufs
			req->r_nmrep.nmc_mhead = NULL;
			req->r_flags |= R_RESTART;
			if (error == ENEEDAUTH) {
				req->r_xid = 0; // get a new XID
				error = 0;
			}
			goto nfsmout;
		}
	}
#endif /* CONFIG_NFS_GSS */

	/*
	 * If there was a successful reply, make sure to mark the mount as up.
	 * If a tprintf message was given (or if this is a timed-out soft mount)
	 * then post a tprintf message indicating the server is alive again.
	 */
	if (!error) {
		if ((req->r_flags & R_TPRINTFMSG) ||
		    (nmp && (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) &&
		    ((nmp->nm_state & (NFSSTA_TIMEO | NFSSTA_FORCE | NFSSTA_DEAD)) == NFSSTA_TIMEO))) {
			nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, "is alive again");
		} else {
			nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, NULL);
		}
	}
	if (!error && !nmp) {
		/* reply arrived but the mount went away */
		error = ENXIO;
	}
	nfsmout_if(error);

	/*
	 * break down the RPC header and check if ok
	 */
	nmrep = req->r_nmrep;
	nfsm_chain_get_32(error, &nmrep, reply_status);
	nfsmout_if(error);
	if (reply_status == RPC_MSGDENIED) {
		nfsm_chain_get_32(error, &nmrep, rejected_status);
		nfsmout_if(error);
		if (rejected_status == RPC_MISMATCH) {
			/* server doesn't speak our RPC version */
			error = ENOTSUP;
			goto nfsmout;
		}
		nfsm_chain_get_32(error, &nmrep, auth_status);
		nfsmout_if(error);
		switch (auth_status) {
#if CONFIG_NFS_GSS
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			/*
			 * An RPCSEC_GSS cred or context problem.
			 * We can't use it anymore.
			 * Restore the args, renew the context
			 * and set up for a resend.
			 */
			error = nfs_gss_clnt_args_restore(req);
			if (error && error != ENEEDAUTH) {
				break;
			}

			if (!error) {
				error = nfs_gss_clnt_ctx_renew(req);
				if (error) {
					break;
				}
			}
			mbuf_freem(mrep);
			req->r_nmrep.nmc_mhead = NULL;
			req->r_xid = 0; // get a new XID
			req->r_flags |= R_RESTART;
			goto nfsmout;
#endif /* CONFIG_NFS_GSS */
		default:
			/* all other auth failures map to EACCES */
			error = EACCES;
			break;
		}
		goto nfsmout;
	}

	/* Now check the verifier */
	nfsm_chain_get_32(error, &nmrep, verf_type); // verifier flavor
	nfsm_chain_get_32(error, &nmrep, verf_len); // verifier length
	nfsmout_if(error);

	switch (req->r_auth) {
	case RPCAUTH_NONE:
	case RPCAUTH_SYS:
		/* Any AUTH_SYS verifier is ignored */
		if (verf_len > 0) {
			nfsm_chain_adv(error, &nmrep, nfsm_rndup(verf_len));
		}
		nfsm_chain_get_32(error, &nmrep, accepted_status);
		break;
#if CONFIG_NFS_GSS
	case RPCAUTH_KRB5:
	case RPCAUTH_KRB5I:
	case RPCAUTH_KRB5P:
		/* GSS verifiers must be checked; this also reads accepted_status */
		error = nfs_gss_clnt_verf_get(req, &nmrep,
		    verf_type, verf_len, &accepted_status);
		break;
#endif /* CONFIG_NFS_GSS */
	}
	nfsmout_if(error);

	switch (accepted_status) {
	case RPC_SUCCESS:
		if (req->r_procnum == NFSPROC_NULL) {
			/*
			 * The NFS null procedure is unique,
			 * in not returning an NFS status.
			 */
			*status = NFS_OK;
		} else {
			nfsm_chain_get_32(error, &nmrep, *status);
			nfsmout_if(error);
		}

		if ((nmp->nm_vers != NFS_VER2) && (*status == NFSERR_TRYLATER)) {
			/*
			 * It's a JUKEBOX error - delay and try again
			 */
			int delay, slpflag = (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;

			mbuf_freem(mrep);
			req->r_nmrep.nmc_mhead = NULL;
			if ((req->r_delay >= 30) && !(nmp->nm_state & NFSSTA_MOUNTED)) {
				/* we're not yet completely mounted and */
				/* we can't complete an RPC, so we fail */
				OSAddAtomic64(1, &nfsclntstats.rpctimeouts);
				nfs_softterm(req);
				error = req->r_error;
				goto nfsmout;
			}
			/* exponential backoff: NFS_TRYLATERDEL, then doubling, capped at 30s */
			req->r_delay = !req->r_delay ? NFS_TRYLATERDEL : (req->r_delay * 2);
			if (req->r_delay > 30) {
				req->r_delay = 30;
			}
			if (nmp->nm_tprintf_initial_delay && (req->r_delay >= nmp->nm_tprintf_initial_delay)) {
				if (!(req->r_flags & R_JBTPRINTFMSG)) {
					req->r_flags |= R_JBTPRINTFMSG;
					lck_mtx_lock(&nmp->nm_lock);
					nmp->nm_jbreqs++;
					lck_mtx_unlock(&nmp->nm_lock);
				}
				nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_JUKEBOXTIMEO,
				    "resource temporarily unavailable (jukebox)", 0);
			}
			if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (req->r_delay == 30) &&
			    !(req->r_flags & R_NOINTR)) {
				/* for soft mounts, just give up after a short while */
				OSAddAtomic64(1, &nfsclntstats.rpctimeouts);
				nfs_softterm(req);
				error = req->r_error;
				goto nfsmout;
			}
			delay = req->r_delay;
			if (req->r_callback.rcb_func) {
				/* async request: schedule the resend instead of sleeping here */
				struct timeval now;
				microuptime(&now);
				req->r_resendtime = now.tv_sec + delay;
			} else {
				/* sync request: sleep here for the backoff period */
				do {
					if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
						goto nfsmout;
					}
					tsleep(nfs_request_finish, PSOCK | slpflag, "nfs_jukebox_trylater", hz);
					slpflag = 0;
				} while (--delay > 0);
			}
			req->r_xid = 0; // get a new XID
			req->r_flags |= R_RESTART;
			req->r_start = 0;
			FSDBG(273, R_XID32(req->r_xid), nmp, req, NFSERR_TRYLATER);
			return 0;
		}

		if (req->r_flags & R_JBTPRINTFMSG) {
			/* jukebox condition has cleared for this request */
			req->r_flags &= ~R_JBTPRINTFMSG;
			lck_mtx_lock(&nmp->nm_lock);
			nmp->nm_jbreqs--;
			clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
			lck_mtx_unlock(&nmp->nm_lock);
			nfs_up(nmp, req->r_thread, clearjbtimeo, "resource available again");
		}

#if CONFIG_NFS4
		if ((nmp->nm_vers >= NFS_VER4) && (*status == NFSERR_WRONGSEC)) {
			/*
			 * Hmmm... we need to try a different security flavor.
			 * The first time a request hits this, we will allocate an array
			 * to track flavors to try. We fill the array with the mount's
			 * preferred flavors or the server's preferred flavors or just the
			 * flavors we support.
			 */
			uint32_t srvflavors[NX_MAX_SEC_FLAVORS];
			int srvcount, i, j;

			/* Call SECINFO to try to get list of flavors from server. */
			srvcount = NX_MAX_SEC_FLAVORS;
			nfs4_secinfo_rpc(nmp, &req->r_secinfo, req->r_cred, srvflavors, &srvcount);

			if (!req->r_wrongsec) {
				/* first time... set up flavor array */
				req->r_wrongsec = kalloc_data(NX_MAX_SEC_FLAVORS * sizeof(uint32_t), Z_WAITOK);
				if (!req->r_wrongsec) {
					error = EACCES;
					goto nfsmout;
				}
				i = 0;
				if (nmp->nm_sec.count) { /* use the mount's preferred list of flavors */
					for (; i < nmp->nm_sec.count; i++) {
						req->r_wrongsec[i] = nmp->nm_sec.flavors[i];
					}
				} else if (srvcount) { /* otherwise use the server's list of flavors */
					for (; i < srvcount; i++) {
						req->r_wrongsec[i] = srvflavors[i];
					}
				} else { /* otherwise, just try the flavors we support. */
					req->r_wrongsec[i++] = RPCAUTH_KRB5P;
					req->r_wrongsec[i++] = RPCAUTH_KRB5I;
					req->r_wrongsec[i++] = RPCAUTH_KRB5;
					req->r_wrongsec[i++] = RPCAUTH_SYS;
					req->r_wrongsec[i++] = RPCAUTH_NONE;
				}
				for (; i < NX_MAX_SEC_FLAVORS; i++) { /* invalidate any remaining slots */
					req->r_wrongsec[i] = RPCAUTH_INVALID;
				}
			}

			/* clear the current flavor from the list */
			for (i = 0; i < NX_MAX_SEC_FLAVORS; i++) {
				if (req->r_wrongsec[i] == req->r_auth) {
					req->r_wrongsec[i] = RPCAUTH_INVALID;
				}
			}

			/* find the next flavor to try */
			for (i = 0; i < NX_MAX_SEC_FLAVORS; i++) {
				if (req->r_wrongsec[i] != RPCAUTH_INVALID) {
					if (!srvcount) { /* no server list, just try it */
						break;
					}
					/* check that it's in the server's list */
					for (j = 0; j < srvcount; j++) {
						if (req->r_wrongsec[i] == srvflavors[j]) {
							break;
						}
					}
					if (j < srvcount) { /* found */
						break;
					}
					/* not found in server list */
					req->r_wrongsec[i] = RPCAUTH_INVALID;
				}
			}
			if (i == NX_MAX_SEC_FLAVORS) {
				/* nothing left to try! */
				error = EACCES;
				goto nfsmout;
			}

			/* retry with the next auth flavor */
			req->r_auth = req->r_wrongsec[i];
			req->r_xid = 0; // get a new XID
			req->r_flags |= R_RESTART;
			req->r_start = 0;
			FSDBG(273, R_XID32(req->r_xid), nmp, req, NFSERR_WRONGSEC);
			return 0;
		}
		if ((nmp->nm_vers >= NFS_VER4) && req->r_wrongsec) {
			/*
			 * We renegotiated security for this request; so update the
			 * default security flavor for the associated node.
			 */
			if (req->r_np) {
				req->r_np->n_auth = req->r_auth;
			}
		}
#endif /* CONFIG_NFS4 */
		if (*status == NFS_OK) {
			/*
			 * Successful NFS request
			 */
			*nmrepp = nmrep;
			req->r_nmrep.nmc_mhead = NULL;
			break;
		}
		/* Got an NFS error of some kind */

		/*
		 * If the File Handle was stale, invalidate the
		 * lookup cache, just in case.
		 */
		if ((*status == ESTALE) && req->r_np) {
			cache_purge(NFSTOV(req->r_np));
			/* if monitored, also send delete event */
			if (vnode_ismonitored(NFSTOV(req->r_np))) {
				nfs_vnode_notify(req->r_np, (VNODE_EVENT_ATTRIB | VNODE_EVENT_DELETE));
			}
		}
		/* NOTE(review): the non-V2 path dereferences nmrepp without the NULL check done at entry — callers appear to always pass one; confirm */
		if (nmp->nm_vers == NFS_VER2) {
			mbuf_freem(mrep);
		} else {
			*nmrepp = nmrep;
		}
		req->r_nmrep.nmc_mhead = NULL;
		error = 0;
		break;
	case RPC_PROGUNAVAIL:
		error = EPROGUNAVAIL;
		break;
	case RPC_PROGMISMATCH:
		error = ERPCMISMATCH;
		break;
	case RPC_PROCUNAVAIL:
		error = EPROCUNAVAIL;
		break;
	case RPC_GARBAGE:
		error = EBADRPC;
		break;
	case RPC_SYSTEM_ERR:
	default:
		error = EIO;
		break;
	}
nfsmout:
	if (req->r_flags & R_JBTPRINTFMSG) {
		/* error path: make sure our jukebox message count contribution is dropped */
		req->r_flags &= ~R_JBTPRINTFMSG;
		lck_mtx_lock(&nmp->nm_lock);
		nmp->nm_jbreqs--;
		clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
		lck_mtx_unlock(&nmp->nm_lock);
		if (clearjbtimeo) {
			nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
		}
	}
	FSDBG(273, R_XID32(req->r_xid), nmp, req,
	    (!error && (*status == NFS_OK)) ? 0xf0f0f0f0 : error);
	return error;
}
4807
4808 /*
4809 * NFS request using a GSS/Kerberos security flavor?
4810 */
4811 int
nfs_request_using_gss(struct nfsreq * req)4812 nfs_request_using_gss(struct nfsreq *req)
4813 {
4814 if (!req->r_gss_ctx) {
4815 return 0;
4816 }
4817 switch (req->r_auth) {
4818 case RPCAUTH_KRB5:
4819 case RPCAUTH_KRB5I:
4820 case RPCAUTH_KRB5P:
4821 return 1;
4822 }
4823 return 0;
4824 }
4825
4826 /*
4827 * Perform an NFS request synchronously.
4828 */
4829
4830 int
nfs_request(nfsnode_t np,mount_t mp,struct nfsm_chain * nmrest,int procnum,vfs_context_t ctx,struct nfsreq_secinfo_args * si,struct nfsm_chain * nmrepp,u_int64_t * xidp,int * status)4831 nfs_request(
4832 nfsnode_t np,
4833 mount_t mp, /* used only if !np */
4834 struct nfsm_chain *nmrest,
4835 int procnum,
4836 vfs_context_t ctx,
4837 struct nfsreq_secinfo_args *si,
4838 struct nfsm_chain *nmrepp,
4839 u_int64_t *xidp,
4840 int *status)
4841 {
4842 return nfs_request2(np, mp, nmrest, procnum,
4843 vfs_context_thread(ctx), vfs_context_ucred(ctx),
4844 si, 0, nmrepp, xidp, status);
4845 }
4846
4847 int
nfs_request2(nfsnode_t np,mount_t mp,struct nfsm_chain * nmrest,int procnum,thread_t thd,kauth_cred_t cred,struct nfsreq_secinfo_args * si,int flags,struct nfsm_chain * nmrepp,u_int64_t * xidp,int * status)4848 nfs_request2(
4849 nfsnode_t np,
4850 mount_t mp, /* used only if !np */
4851 struct nfsm_chain *nmrest,
4852 int procnum,
4853 thread_t thd,
4854 kauth_cred_t cred,
4855 struct nfsreq_secinfo_args *si,
4856 int flags,
4857 struct nfsm_chain *nmrepp,
4858 u_int64_t *xidp,
4859 int *status)
4860 {
4861 struct nfsreq *req;
4862 int error;
4863
4864 req = zalloc_flags(nfs_req_zone, Z_WAITOK);
4865 if ((error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, &req))) {
4866 goto out_free;
4867 }
4868 req->r_flags |= (flags & (R_OPTMASK | R_SOFT));
4869 if (si) {
4870 req->r_secinfo = *si;
4871 }
4872
4873 FSDBG_TOP(273, R_XID32(req->r_xid), np, procnum, 0);
4874 do {
4875 req->r_error = 0;
4876 req->r_flags &= ~R_RESTART;
4877 if ((error = nfs_request_add_header(req))) {
4878 break;
4879 }
4880 if (xidp) {
4881 *xidp = req->r_xid;
4882 }
4883 if ((error = nfs_request_send(req, 1))) {
4884 break;
4885 }
4886 nfs_request_wait(req);
4887 if ((error = nfs_request_finish(req, nmrepp, status))) {
4888 break;
4889 }
4890 } while (req->r_flags & R_RESTART);
4891
4892 FSDBG_BOT(273, R_XID32(req->r_xid), np, procnum, error);
4893 nfs_request_rele(req);
4894 out_free:
4895 NFS_ZFREE(nfs_req_zone, req);
4896 return error;
4897 }
4898
4899
4900 #if CONFIG_NFS_GSS
4901 /*
4902 * Set up a new null proc request to exchange GSS context tokens with the
4903 * server. Associate the context that we are setting up with the request that we
4904 * are sending.
4905 */
4906
4907 int
nfs_request_gss(mount_t mp,struct nfsm_chain * nmrest,thread_t thd,kauth_cred_t cred,int flags,struct nfs_gss_clnt_ctx * cp,struct nfsm_chain * nmrepp,int * status)4908 nfs_request_gss(
4909 mount_t mp,
4910 struct nfsm_chain *nmrest,
4911 thread_t thd,
4912 kauth_cred_t cred,
4913 int flags,
4914 struct nfs_gss_clnt_ctx *cp, /* Set to gss context to renew or setup */
4915 struct nfsm_chain *nmrepp,
4916 int *status)
4917 {
4918 struct nfsreq *req;
4919 int error, wait = 1;
4920
4921 req = zalloc_flags(nfs_req_zone, Z_WAITOK);
4922 if ((error = nfs_request_create(NULL, mp, nmrest, NFSPROC_NULL, thd, cred, &req))) {
4923 goto out_free;
4924 }
4925 req->r_flags |= (flags & R_OPTMASK);
4926
4927 if (cp == NULL) {
4928 printf("nfs_request_gss request has no context\n");
4929 nfs_request_rele(req);
4930 error = NFSERR_EAUTH;
4931 goto out_free;
4932 }
4933 nfs_gss_clnt_ctx_ref(req, cp);
4934
4935 /*
4936 * Don't wait for a reply to a context destroy advisory
4937 * to avoid hanging on a dead server.
4938 */
4939 if (cp->gss_clnt_proc == RPCSEC_GSS_DESTROY) {
4940 wait = 0;
4941 }
4942
4943 FSDBG_TOP(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, 0);
4944 do {
4945 req->r_error = 0;
4946 req->r_flags &= ~R_RESTART;
4947 if ((error = nfs_request_add_header(req))) {
4948 break;
4949 }
4950
4951 if ((error = nfs_request_send(req, wait))) {
4952 break;
4953 }
4954 if (!wait) {
4955 break;
4956 }
4957
4958 nfs_request_wait(req);
4959 if ((error = nfs_request_finish(req, nmrepp, status))) {
4960 break;
4961 }
4962 } while (req->r_flags & R_RESTART);
4963
4964 FSDBG_BOT(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, error);
4965
4966 nfs_gss_clnt_ctx_unref(req);
4967 nfs_request_rele(req);
4968 out_free:
4969 NFS_ZFREE(nfs_req_zone, req);
4970 return error;
4971 }
4972 #endif /* CONFIG_NFS_GSS */
4973
4974 /*
4975 * Create and start an asynchronous NFS request.
4976 */
int
nfs_request_async(
	nfsnode_t np,
	mount_t mp, /* used only if !np */
	struct nfsm_chain *nmrest,
	int procnum,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_secinfo_args *si,
	int flags,
	struct nfsreq_cbinfo *cb,      /* optional completion callback */
	struct nfsreq **reqp)          /* out: the created request */
{
	struct nfsreq *req;
	struct nfsmount *nmp;
	int error, sent;

	error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, reqp);
	req = *reqp;
	FSDBG(274, (req ? R_XID32(req->r_xid) : 0), np, procnum, error);
	if (error) {
		return error;
	}
	req->r_flags |= (flags & R_OPTMASK);
	req->r_flags |= R_ASYNC;
	if (si) {
		req->r_secinfo = *si;
	}
	if (cb) {
		req->r_callback = *cb;
	}
	error = nfs_request_add_header(req);
	if (!error) {
		req->r_flags |= R_WAITSENT;
		if (req->r_callback.rcb_func) {
			/* extra ref for the callback path; dropped below on send failure */
			nfs_request_ref(req, 0);
		}
		error = nfs_request_send(req, 1);
		lck_mtx_lock(&req->r_mtx);
		if (!error && !(req->r_flags & R_SENT) && req->r_callback.rcb_func) {
			/* make sure to wait until this async I/O request gets sent */
			int slpflag = (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
			struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
			while (!(req->r_flags & R_SENT)) {
				nmp = req->r_nmp;
				if ((req->r_flags & R_RESENDQ) && !nfs_mount_gone(nmp)) {
					lck_mtx_lock(&nmp->nm_lock);
					if ((req->r_flags & R_RESENDQ) && (nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
						/*
						 * It's not going to get off the resend queue if we're in recovery.
						 * So, just take it off ourselves. We could be holding mount state
						 * busy and thus holding up the start of recovery.
						 */
						TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
						req->r_flags &= ~R_RESENDQ;
						req->r_rchain.tqe_next = NFSREQNOLIST;
						lck_mtx_unlock(&nmp->nm_lock);
						req->r_flags |= R_SENDING;
						/* drop r_mtx while sending; nfs_send takes its own locks */
						lck_mtx_unlock(&req->r_mtx);
						error = nfs_send(req, 1);
						/* Remove the R_RESENDQ reference */
						nfs_request_rele(req);
						lck_mtx_lock(&req->r_mtx);
						if (error) {
							break;
						}
						continue;
					}
					lck_mtx_unlock(&nmp->nm_lock);
				}
				if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
					break;
				}
				/* sleep up to 2s at a time, re-checking R_SENT each wakeup */
				msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitsent", &ts);
				slpflag = 0;
			}
		}
		sent = req->r_flags & R_SENT;
		lck_mtx_unlock(&req->r_mtx);
		if (error && req->r_callback.rcb_func && !sent) {
			/* send failed: drop the callback reference taken above */
			nfs_request_rele(req);
		}
	}
	FSDBG(274, R_XID32(req->r_xid), np, procnum, error);
	if (error || req->r_callback.rcb_func) {
		/* callback-style callers don't hold the creation ref themselves */
		nfs_request_rele(req);
	}

	return error;
}
5067
5068 /*
5069 * Wait for and finish an asynchronous NFS request.
5070 */
int
nfs_request_async_finish(
	struct nfsreq *req,
	struct nfsm_chain *nmrepp,     /* out: reply mbuf chain */
	u_int64_t *xidp,               /* out (optional): request XID */
	int *status)                   /* out: NFS status from the reply */
{
	/* asyncio != 0 means a completion callback drives this request (nfsiod path) */
	int error = 0, asyncio = req->r_callback.rcb_func ? 1 : 0;
	struct nfsmount *nmp;

	lck_mtx_lock(&req->r_mtx);
	if (!asyncio) {
		req->r_flags |= R_ASYNCWAIT;
	}
	while (req->r_flags & R_RESENDQ) {  /* wait until the request is off the resend queue */
		struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

		if ((nmp = req->r_nmp)) {
			lck_mtx_lock(&nmp->nm_lock);
			if ((req->r_flags & R_RESENDQ) && (nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
				/*
				 * It's not going to get off the resend queue if we're in recovery.
				 * So, just take it off ourselves. We could be holding mount state
				 * busy and thus holding up the start of recovery.
				 */
				TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
				req->r_flags &= ~R_RESENDQ;
				req->r_rchain.tqe_next = NFSREQNOLIST;
				/* Remove the R_RESENDQ reference */
				assert(req->r_refs > 0);
				req->r_refs--;
				lck_mtx_unlock(&nmp->nm_lock);
				break;
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}
		if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
			break;
		}
		msleep(req, &req->r_mtx, PZERO - 1, "nfsresendqwait", &ts);
	}
	lck_mtx_unlock(&req->r_mtx);

	if (!error) {
		nfs_request_wait(req);
		error = nfs_request_finish(req, nmrepp, status);
	}

	/* finish may ask for a restart (e.g. renegotiated security); redo the RPC */
	while (!error && (req->r_flags & R_RESTART)) {
		if (asyncio) {
			assert(req->r_achain.tqe_next == NFSREQNOLIST);
			lck_mtx_lock(&req->r_mtx);
			req->r_flags &= ~R_IOD;
			if (req->r_resendtime) {  /* send later */
				nfs_asyncio_resend(req);
				lck_mtx_unlock(&req->r_mtx);
				/* callback will be invoked when the resend completes */
				return EINPROGRESS;
			}
			lck_mtx_unlock(&req->r_mtx);
		}
		req->r_error = 0;
		req->r_flags &= ~R_RESTART;
		if ((error = nfs_request_add_header(req))) {
			break;
		}
		if ((error = nfs_request_send(req, !asyncio))) {
			break;
		}
		if (asyncio) {
			return EINPROGRESS;
		}
		nfs_request_wait(req);
		if ((error = nfs_request_finish(req, nmrepp, status))) {
			break;
		}
	}
	if (xidp) {
		*xidp = req->r_xid;
	}

	FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, error);
	/* drop the caller's reference; request is freed with the last ref */
	nfs_request_rele(req);
	return error;
}
5155
5156 /*
5157 * Cancel a pending asynchronous NFS request.
5158 */
void
nfs_request_async_cancel(struct nfsreq *req)
{
	/* 0xD1ED1E ("died") tags the cancellation in the FSDBG trace. */
	FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, 0xD1ED1E);
	/* Drop the caller's reference; the request is freed with the last ref. */
	nfs_request_rele(req);
}
5165
5166 /*
5167 * Flag a request as being terminated.
5168 */
void
nfs_softterm(struct nfsreq *req)
{
	struct nfsmount *nmp = req->r_nmp;
	/* Mark the request terminated; it will complete with ETIMEDOUT. */
	req->r_flags |= R_SOFTTERM;
	req->r_error = ETIMEDOUT;
	/* Nothing more to do unless this request holds congestion-window space. */
	if (!(req->r_flags & R_CWND) || nfs_mount_gone(nmp)) {
		return;
	}
	/* update congestion window */
	req->r_flags &= ~R_CWND;
	lck_mtx_lock(&nmp->nm_lock);
	FSDBG(532, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
	nmp->nm_sent -= NFS_CWNDSCALE;
	if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
		/* congestion window is open, poke the cwnd queue */
		struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
		TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
		req2->r_cchain.tqe_next = NFSREQNOLIST;
		wakeup(req2);
	}
	lck_mtx_unlock(&nmp->nm_lock);
}
5192
5193 /*
5194 * Ensure req isn't in use by the timer, then dequeue it.
5195 */
void
nfs_reqdequeue(struct nfsreq *req)
{
	lck_mtx_lock(&nfs_request_mutex);
	/* Wait for the timer to drop RL_BUSY before pulling the request off. */
	while (req->r_lflags & RL_BUSY) {
		req->r_lflags |= RL_WAITING;
		msleep(&req->r_lflags, &nfs_request_mutex, PSOCK, "reqdeq", NULL);
	}
	if (req->r_lflags & RL_QUEUED) {
		TAILQ_REMOVE(&nfs_reqq, req, r_chain);
		req->r_lflags &= ~RL_QUEUED;
	}
	lck_mtx_unlock(&nfs_request_mutex);
}
5210
5211 /*
5212 * Busy (lock) a nfsreq, used by the nfs timer to make sure it's not
5213 * free()'d out from under it.
5214 */
void
nfs_reqbusy(struct nfsreq *req)
{
	/*
	 * NOTE(review): r_lflags is not updated atomically here; callers in
	 * this file (e.g. nfs_request_timer) invoke this while holding
	 * nfs_request_mutex — presumably that lock is required. Confirm.
	 */
	if (req->r_lflags & RL_BUSY) {
		panic("req locked");
	}
	req->r_lflags |= RL_BUSY;
}
5223
5224 /*
5225 * Unbusy the nfsreq passed in, return the next nfsreq in the chain busied.
5226 */
5227 struct nfsreq *
nfs_reqnext(struct nfsreq * req)5228 nfs_reqnext(struct nfsreq *req)
5229 {
5230 struct nfsreq * nextreq;
5231
5232 if (req == NULL) {
5233 return NULL;
5234 }
5235 /*
5236 * We need to get and busy the next req before signalling the
5237 * current one, otherwise wakeup() may block us and we'll race to
5238 * grab the next req.
5239 */
5240 nextreq = TAILQ_NEXT(req, r_chain);
5241 if (nextreq != NULL) {
5242 nfs_reqbusy(nextreq);
5243 }
5244 /* unbusy and signal. */
5245 req->r_lflags &= ~RL_BUSY;
5246 if (req->r_lflags & RL_WAITING) {
5247 req->r_lflags &= ~RL_WAITING;
5248 wakeup(&req->r_lflags);
5249 }
5250 return nextreq;
5251 }
5252
5253 /*
5254 * NFS request queue timer routine
5255 *
5256 * Scan the NFS request queue for any requests that have timed out.
5257 *
5258 * Alert the system of unresponsive servers.
5259 * Mark expired requests on soft mounts as terminated.
5260 * For UDP, mark/signal requests for retransmission.
5261 */
void
nfs_request_timer(void *param0, __unused void *param1)
{
	struct nfsreq *req;
	/* param0, when non-NULL, restricts the scan to that one mount (unmount path) */
	struct nfsmount *nmp, *unmountp = param0;
	int timeo, maxtime, finish_asyncio, error;
	struct timeval now;
	/* mounts whose sockets need a poke, collected while locks are held */
	TAILQ_HEAD(nfs_mount_pokeq, nfsmount) nfs_mount_poke_queue;
	TAILQ_INIT(&nfs_mount_poke_queue);

restart:
	lck_mtx_lock(&nfs_request_mutex);
	req = TAILQ_FIRST(&nfs_reqq);
	if (req == NULL) {      /* no requests - turn timer off */
		nfs_request_timer_on = 0;
		lck_mtx_unlock(&nfs_request_mutex);
		return;
	}

	/* walk the queue busying each request so it can't be freed under us */
	nfs_reqbusy(req);

	microuptime(&now);
	for (; req != NULL; req = nfs_reqnext(req)) {
		nmp = req->r_nmp;
		if (nmp == NULL) {
			NFS_SOCK_DBG("Found a request with out a mount!\n");
			continue;
		}
		/* skip requests that already failed or already have a reply */
		if (req->r_error || req->r_nmrep.nmc_mhead) {
			continue;
		}
		if (unmountp && unmountp != nmp) {
			/* Ignore other mounts during unmount */
			continue;
		}
		if (unmountp && !req->r_callback.rcb_func) {
			/* just wakeup sync RPCs */
			wakeup(req);
			continue;
		}
		if ((error = nfs_sigintr(nmp, req, req->r_thread, 0))) {
			if (req->r_callback.rcb_func != NULL) {
				/* async I/O RPC needs to be finished */
				lck_mtx_lock(&req->r_mtx);
				req->r_error = error;
				finish_asyncio = !(req->r_flags & R_WAITSENT);
				wakeup(req);
				lck_mtx_unlock(&req->r_mtx);
				if (finish_asyncio) {
					nfs_asyncio_finish(req);
				}
			}
			continue;
		}

		if (unmountp) {
			/* Skip request processing */
			continue;
		}

		lck_mtx_lock(&req->r_mtx);

		/* time to alert the user that the server isn't responding? */
		if (nmp->nm_tprintf_initial_delay &&
		    ((req->r_rexmit > 2) || (req->r_flags & R_RESENDERR)) &&
		    ((req->r_lastmsg + nmp->nm_tprintf_delay) < now.tv_sec)) {
			req->r_lastmsg = now.tv_sec;
			nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
			    "not responding", 1);
			req->r_flags |= R_TPRINTFMSG;
			lck_mtx_lock(&nmp->nm_lock);
			if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
				lck_mtx_unlock(&nmp->nm_lock);
				/* we're not yet completely mounted and */
				/* we can't complete an RPC, so we fail */
				OSAddAtomic64(1, &nfsclntstats.rpctimeouts);
				nfs_softterm(req);
				finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT));
				wakeup(req);
				lck_mtx_unlock(&req->r_mtx);
				if (finish_asyncio) {
					nfs_asyncio_finish(req);
				}
				continue;
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}

		/*
		 * Put a reasonable limit on the maximum timeout,
		 * and reduce that limit when soft mounts get timeouts or are in reconnect.
		 */
		if (!(NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && !nfs_can_squish(nmp)) {
			maxtime = NFS_MAXTIMEO;
		} else if ((req->r_flags & (R_SETUP | R_RECOVER)) ||
		    ((nmp->nm_reconnect_start <= 0) || ((now.tv_sec - nmp->nm_reconnect_start) < 8))) {
			maxtime = (NFS_MAXTIMEO / (nmp->nm_timeouts + 1)) / 2;
		} else {
			maxtime = NFS_MINTIMEO / 4;
		}

		/*
		 * Check for request timeout.
		 */
		if (req->r_rtt >= 0) {
			req->r_rtt++;
			lck_mtx_lock(&nmp->nm_lock);
			if (req->r_flags & R_RESENDERR) {
				/* with resend errors, retry every few seconds */
				timeo = 4 * hz;
			} else {
				if (req->r_procnum == NFSPROC_NULL && req->r_gss_ctx != NULL) {
					timeo = NFS_MINIDEMTIMEO; // gss context setup
				} else if (NMFLAG(nmp, DUMBTIMER)) {
					timeo = nmp->nm_timeo;
				} else {
					timeo = NFS_RTO(nmp, proct[req->r_procnum]);
				}

				/* ensure 62.5 ms floor */
				while (16 * timeo < hz) {
					timeo *= 2;
				}
				/* exponential backoff after earlier timeouts */
				if (nmp->nm_timeouts > 0) {
					timeo *= nfs_backoff[nmp->nm_timeouts - 1];
				}
			}
			/* limit timeout to max */
			if (timeo > maxtime) {
				timeo = maxtime;
			}
			if (req->r_rtt <= timeo) {
				NFS_SOCK_DBG("nfs timeout: req time %d and timeo is %d continue\n", req->r_rtt, timeo);
				lck_mtx_unlock(&nmp->nm_lock);
				lck_mtx_unlock(&req->r_mtx);
				continue;
			}
			/* The request has timed out */
			NFS_SOCK_DBG("nfs timeout: proc %d %d xid %llx rtt %d to %d # %d, t %ld/%d\n",
			    req->r_procnum, proct[req->r_procnum],
			    req->r_xid, req->r_rtt, timeo, nmp->nm_timeouts,
			    (now.tv_sec - req->r_start) * NFS_HZ, maxtime);
			if (nmp->nm_timeouts < 8) {
				nmp->nm_timeouts++;
			}
			if (nfs_mount_check_dead_timeout(nmp)) {
				/* Unbusy this request */
				req->r_lflags &= ~RL_BUSY;
				if (req->r_lflags & RL_WAITING) {
					req->r_lflags &= ~RL_WAITING;
					wakeup(&req->r_lflags);
				}
				lck_mtx_unlock(&req->r_mtx);

				/* No need to poke this mount */
				if (nmp->nm_sockflags & NMSOCK_POKE) {
					nmp->nm_sockflags &= ~NMSOCK_POKE;
					TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
				}
				/* Release our lock state, so we can become a zombie */
				lck_mtx_unlock(&nfs_request_mutex);

				/*
				 * Note nfs_mount_make zombie(nmp) must be
				 * called with nm_lock held. After doing some
				 * work we release nm_lock in
				 * nfs_make_mount_zombie with out acquiring any
				 * other locks. (Later, in nfs_mount_zombie we
				 * will acquire &nfs_request_mutex, r_mtx,
				 * nm_lock in that order). So we should not be
				 * introducing deadlock here. We take a reference
				 * on the mount so that its still there when we
				 * release the lock.
				 */
				nmp->nm_ref++;
				nfs_mount_make_zombie(nmp);
				lck_mtx_unlock(&nmp->nm_lock);
				nfs_mount_rele(nmp);

				/*
				 * All the request for this mount have now been
				 * removed from the request queue. Restart to
				 * process the remaining mounts
				 */
				goto restart;
			}

			/* if it's been a few seconds, try poking the socket */
			if ((nmp->nm_sotype == SOCK_STREAM) &&
			    ((now.tv_sec - req->r_start) >= 3) &&
			    !(nmp->nm_sockflags & (NMSOCK_POKE | NMSOCK_UNMOUNT)) &&
			    (nmp->nm_sockflags & NMSOCK_READY)) {
				nmp->nm_sockflags |= NMSOCK_POKE;
				/*
				 * We take a ref on the mount so that we know the mount will still be there
				 * when we process the nfs_mount_poke_queue. An unmount request will block
				 * in nfs_mount_drain_and_cleanup until after the poke is finished. We release
				 * the reference after calling nfs_sock_poke below;
				 */
				nmp->nm_ref++;
				TAILQ_INSERT_TAIL(&nfs_mount_poke_queue, nmp, nm_pokeq);
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}

		/* For soft mounts (& SETUPs/RECOVERs), check for too many retransmits/timeout. */
		if ((NMFLAG(nmp, SOFT) || (req->r_flags & (R_SETUP | R_RECOVER | R_SOFT))) &&
		    ((req->r_rexmit >= req->r_retry) || /* too many */
		    ((now.tv_sec - req->r_start) * NFS_HZ > maxtime))) { /* too long */
			OSAddAtomic64(1, &nfsclntstats.rpctimeouts);
			lck_mtx_lock(&nmp->nm_lock);
			if (!(nmp->nm_state & NFSSTA_TIMEO)) {
				lck_mtx_unlock(&nmp->nm_lock);
				/* make sure we note the unresponsive server */
				/* (maxtime may be less than tprintf delay) */
				nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
				    "not responding", 1);
				req->r_lastmsg = now.tv_sec;
				req->r_flags |= R_TPRINTFMSG;
			} else {
				lck_mtx_unlock(&nmp->nm_lock);
			}
			if (req->r_flags & R_NOINTR) {
				/* don't terminate nointr requests on timeout */
				lck_mtx_unlock(&req->r_mtx);
				continue;
			}
			NFS_SOCK_DBG("nfs timer TERMINATE: p %d x 0x%llx f 0x%x rtt %d t %ld\n",
			    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt,
			    now.tv_sec - req->r_start);
			nfs_softterm(req);
			finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT));
			wakeup(req);
			lck_mtx_unlock(&req->r_mtx);
			if (finish_asyncio) {
				nfs_asyncio_finish(req);
			}
			continue;
		}

		/* for TCP, only resend if explicitly requested */
		if ((nmp->nm_sotype == SOCK_STREAM) && !(req->r_flags & R_MUSTRESEND)) {
			if (++req->r_rexmit > NFS_MAXREXMIT) {
				req->r_rexmit = NFS_MAXREXMIT;
			}
			req->r_rtt = 0;
			lck_mtx_unlock(&req->r_mtx);
			continue;
		}

		/*
		 * The request needs to be (re)sent. Kick the requester to resend it.
		 * (unless it's already marked as needing a resend)
		 */
		if ((req->r_flags & R_MUSTRESEND) && (req->r_rtt == -1)) {
			lck_mtx_unlock(&req->r_mtx);
			continue;
		}
		NFS_SOCK_DBG("nfs timer mark resend: p %d x 0x%llx f 0x%x rtt %d\n",
		    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
		req->r_flags |= R_MUSTRESEND;
		req->r_rtt = -1;
		wakeup(req);
		/* idle async requests (no iod, not being sent) go to the resend queue */
		if ((req->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
			nfs_asyncio_resend(req);
		}
		lck_mtx_unlock(&req->r_mtx);
	}

	lck_mtx_unlock(&nfs_request_mutex);

	/* poke any sockets */
	while ((nmp = TAILQ_FIRST(&nfs_mount_poke_queue))) {
		TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
		nfs_sock_poke(nmp);
		nfs_mount_rele(nmp);
	}

	/* rearm the timer for the next scan */
	nfs_interval_timer_start(nfs_request_timer_call, NFS_REQUESTDELAY);
}
5541
5542 /*
5543 * check a thread's proc for the "noremotehang" flag.
5544 */
5545 int
nfs_noremotehang(thread_t thd)5546 nfs_noremotehang(thread_t thd)
5547 {
5548 proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL;
5549 return p && proc_noremotehang(p);
5550 }
5551
5552 /*
5553 * Test for a termination condition pending on the process.
5554 * This is used to determine if we need to bail on a mount.
5555 * ETIMEDOUT is returned if there has been a soft timeout.
 * EINTR is returned if there is a signal pending that is not being ignored
 * and the mount is interruptable, or if we are a thread that is in the process
 * of cancellation (also SIGKILL posted).
 * ESHUTDOWN is returned if the system is in shutdown.
5560 */
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *req, thread_t thd, int nmplocked)
{
	proc_t p;
	int error = 0;

	if (!nmp) {
		return ENXIO;
	}

	/*
	 * If the mount is hung and we've requested shutdown, then bail.
	 * If reboot_kernel was called, no need to wait for mount to become unresponsive
	 * because network state may be unknown.
	 */
	if ((IOPMRootDomainGetWillShutdown() && (nmp->nm_state & NFSSTA_TIMEO)) ||
	    get_system_inshutdown()) {
		NFS_SOCK_DBG("Shutdown in progress\n");
		return ESHUTDOWN;
	}

	if (req && (req->r_flags & R_SOFTTERM)) {
		return ETIMEDOUT; /* request has been terminated. */
	}
	if (req && (req->r_flags & R_NOINTR)) {
		thd = NULL; /* don't check for signal on R_NOINTR */
	}
	/* nmplocked tells us whether the caller already holds nm_lock */
	if (!nmplocked) {
		lck_mtx_lock(&nmp->nm_lock);
	}
	if (nmp->nm_state & NFSSTA_FORCE) {
		/* If a force unmount is in progress then fail. */
		error = EIO;
	} else if (vfs_isforce(nmp->nm_mountp)) {
		/* Someone is unmounting us, go soft and mark it. */
		NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_SOFT);
		nmp->nm_state |= NFSSTA_FORCE;
	}

	/*
	 * Unmount in progress and mount is not responding.
	 * We should abort all "read" requests.
	 */
	if (req && vfs_isunmount(nmp->nm_mountp) &&
	    (nmp->nm_state & NFSSTA_TIMEO) && !(req->r_flags & R_NOUMOUNTINTR)) {
		error = EINTR;
	}
	/* Check if the mount is marked dead. */
	if (!error && (nmp->nm_state & NFSSTA_DEAD)) {
		error = ENXIO;
	}

	/*
	 * If the mount is hung and we've requested not to hang
	 * on remote filesystems, then bail now.
	 */
	if (current_proc() != kernproc &&
	    !error && (nmp->nm_state & NFSSTA_TIMEO) && nfs_noremotehang(thd)) {
		error = EIO;
	}

	if (!nmplocked) {
		lck_mtx_unlock(&nmp->nm_lock);
	}
	if (error) {
		return error;
	}

	/* may not have a thread for async I/O */
	if (thd == NULL || current_proc() == kernproc) {
		return 0;
	}

	/*
	 * Check if the process is aborted, but don't interrupt if we
	 * were killed by a signal and this is the exiting thread which
	 * is attempting to dump core.
	 */
	if (proc_isabortedsignal(current_proc())) {
		return EINTR;
	}

	/* mask off thread and process blocked signals. */
	if (NMFLAG(nmp, INTR) && ((p = get_bsdthreadtask_info(thd))) &&
	    proc_pendingsignals(p, NFSINT_SIGMASK)) {
		return EINTR;
	}
	return 0;
}
5650
5651 /*
5652 * Lock a socket against others.
5653 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
5654 * and also to avoid race conditions between the processes with nfs requests
5655 * in progress when a reconnect is necessary.
5656 */
int
nfs_sndlock(struct nfsreq *req)
{
	struct nfsmount *nmp = req->r_nmp;
	int *statep;
	int error = 0, slpflag = 0;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	lck_mtx_lock(&nmp->nm_lock);
	statep = &nmp->nm_state;

	/* interruptible sleep only for INTR mounts with a thread and no R_NOINTR */
	if (NMFLAG(nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) {
		slpflag = PCATCH;
	}
	while (*statep & NFSSTA_SNDLOCK) {
		if ((error = nfs_sigintr(nmp, req, req->r_thread, 1))) {
			break;
		}
		*statep |= NFSSTA_WANTSND;
		/* "noremotehang" threads sleep at most 1 second at a time */
		if (nfs_noremotehang(req->r_thread)) {
			ts.tv_sec = 1;
		}
		msleep(statep, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsndlck", &ts);
		if (slpflag == PCATCH) {
			/* after one catchable sleep, fall back to 2s uninterruptible naps */
			slpflag = 0;
			ts.tv_sec = 2;
		}
	}
	if (!error) {
		*statep |= NFSSTA_SNDLOCK;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	return error;
}
5695
5696 /*
5697 * Unlock the stream socket for others.
5698 */
5699 void
nfs_sndunlock(struct nfsreq * req)5700 nfs_sndunlock(struct nfsreq *req)
5701 {
5702 struct nfsmount *nmp = req->r_nmp;
5703 int *statep, wake = 0;
5704
5705 if (!nmp) {
5706 return;
5707 }
5708 lck_mtx_lock(&nmp->nm_lock);
5709 statep = &nmp->nm_state;
5710 if ((*statep & NFSSTA_SNDLOCK) == 0) {
5711 panic("nfs sndunlock");
5712 }
5713 *statep &= ~(NFSSTA_SNDLOCK | NFSSTA_SENDING);
5714 if (*statep & NFSSTA_WANTSND) {
5715 *statep &= ~NFSSTA_WANTSND;
5716 wake = 1;
5717 }
5718 lck_mtx_unlock(&nmp->nm_lock);
5719 if (wake) {
5720 wakeup(statep);
5721 }
5722 }
5723
5724 int
nfs_aux_request(struct nfsmount * nmp,thread_t thd,struct sockaddr * saddr,socket_t so,int sotype,mbuf_t mreq,uint32_t xid,int bindresv,int timeo,struct nfsm_chain * nmrep)5725 nfs_aux_request(
5726 struct nfsmount *nmp,
5727 thread_t thd,
5728 struct sockaddr *saddr,
5729 socket_t so,
5730 int sotype,
5731 mbuf_t mreq,
5732 uint32_t xid,
5733 int bindresv,
5734 int timeo,
5735 struct nfsm_chain *nmrep)
5736 {
5737 int error = 0, on = 1, try, sendat = 2, soproto, recv, optlen, restoreto = 0;
5738 socket_t newso = NULL;
5739 struct sockaddr_storage ss;
5740 struct timeval orig_rcvto, orig_sndto, tv = { .tv_sec = 1, .tv_usec = 0 };
5741 mbuf_t m, mrep = NULL;
5742 struct msghdr msg;
5743 uint32_t rxid = 0, reply = 0, reply_status, rejected_status;
5744 uint32_t verf_type, verf_len, accepted_status;
5745 size_t readlen, sentlen;
5746 struct nfs_rpc_record_state nrrs;
5747
5748 if (!so) {
5749 /* create socket and set options */
5750 if (saddr->sa_family == AF_LOCAL) {
5751 soproto = 0;
5752 } else {
5753 soproto = (sotype == SOCK_DGRAM) ? IPPROTO_UDP : IPPROTO_TCP;
5754 }
5755 if ((error = sock_socket(saddr->sa_family, sotype, soproto, NULL, NULL, &newso))) {
5756 goto nfsmout;
5757 }
5758
5759 if (bindresv && saddr->sa_family != AF_LOCAL) {
5760 int level = (saddr->sa_family == AF_INET) ? IPPROTO_IP : IPPROTO_IPV6;
5761 int optname = (saddr->sa_family == AF_INET) ? IP_PORTRANGE : IPV6_PORTRANGE;
5762 int portrange = IP_PORTRANGE_LOW;
5763 error = sock_setsockopt(newso, level, optname, &portrange, sizeof(portrange));
5764 nfsmout_if(error);
5765 ss.ss_len = saddr->sa_len;
5766 ss.ss_family = saddr->sa_family;
5767 if (ss.ss_family == AF_INET) {
5768 ((struct sockaddr_in*)&ss)->sin_addr.s_addr = INADDR_ANY;
5769 ((struct sockaddr_in*)&ss)->sin_port = htons(0);
5770 } else if (ss.ss_family == AF_INET6) {
5771 ((struct sockaddr_in6*)&ss)->sin6_addr = in6addr_any;
5772 ((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
5773 } else {
5774 error = EINVAL;
5775 }
5776 if (!error) {
5777 error = sock_bind(newso, (struct sockaddr *)&ss);
5778 }
5779 nfsmout_if(error);
5780 }
5781
5782 if (sotype == SOCK_STREAM) {
5783 # define NFS_AUX_CONNECTION_TIMEOUT 4 /* 4 second timeout for connections */
5784 int count = 0;
5785
5786 error = sock_connect(newso, saddr, MSG_DONTWAIT);
5787 if (error == EINPROGRESS) {
5788 error = 0;
5789 }
5790 nfsmout_if(error);
5791
5792 while ((error = sock_connectwait(newso, &tv)) == EINPROGRESS) {
5793 /* After NFS_AUX_CONNECTION_TIMEOUT bail */
5794 if (++count >= NFS_AUX_CONNECTION_TIMEOUT) {
5795 error = ETIMEDOUT;
5796 break;
5797 }
5798 }
5799 nfsmout_if(error);
5800 }
5801 if (((error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))) ||
5802 ((error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)))) ||
5803 ((error = sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on))))) {
5804 goto nfsmout;
5805 }
5806 so = newso;
5807 } else {
5808 /* make sure socket is using a one second timeout in this function */
5809 optlen = sizeof(orig_rcvto);
5810 error = sock_getsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &orig_rcvto, &optlen);
5811 if (!error) {
5812 optlen = sizeof(orig_sndto);
5813 error = sock_getsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &orig_sndto, &optlen);
5814 }
5815 if (!error) {
5816 sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
5817 sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
5818 restoreto = 1;
5819 }
5820 }
5821
5822 if (sotype == SOCK_STREAM) {
5823 sendat = 0; /* we only resend the request for UDP */
5824 nfs_rpc_record_state_init(&nrrs);
5825 }
5826
5827 for (try = 0; try < timeo; try++) {
5828 if ((error = nfs_sigintr(nmp, NULL, !try ? NULL : thd, 0))) {
5829 break;
5830 }
5831 if (!try || (try == sendat)) {
5832 /* send the request (resending periodically for UDP) */
5833 if ((error = mbuf_copym(mreq, 0, MBUF_COPYALL, MBUF_WAITOK, &m))) {
5834 goto nfsmout;
5835 }
5836 bzero(&msg, sizeof(msg));
5837 if ((sotype == SOCK_DGRAM) && !sock_isconnected(so)) {
5838 msg.msg_name = saddr;
5839 msg.msg_namelen = saddr->sa_len;
5840 }
5841 if ((error = sock_sendmbuf(so, &msg, m, 0, &sentlen))) {
5842 goto nfsmout;
5843 }
5844 sendat *= 2;
5845 if (sendat > 30) {
5846 sendat = 30;
5847 }
5848 }
5849 /* wait for the response */
5850 if (sotype == SOCK_STREAM) {
5851 /* try to read (more of) record */
5852 error = nfs_rpc_record_read(so, &nrrs, 0, &recv, &mrep);
5853 /* if we don't have the whole record yet, we'll keep trying */
5854 } else {
5855 readlen = 1 << 18;
5856 bzero(&msg, sizeof(msg));
5857 error = sock_receivembuf(so, &msg, &mrep, 0, &readlen);
5858 }
5859 if (error == EWOULDBLOCK) {
5860 continue;
5861 }
5862 nfsmout_if(error);
5863 /* parse the response */
5864 nfsm_chain_dissect_init(error, nmrep, mrep);
5865 nfsm_chain_get_32(error, nmrep, rxid);
5866 nfsm_chain_get_32(error, nmrep, reply);
5867 nfsmout_if(error);
5868 if ((rxid != xid) || (reply != RPC_REPLY)) {
5869 error = EBADRPC;
5870 }
5871 nfsm_chain_get_32(error, nmrep, reply_status);
5872 nfsmout_if(error);
5873 if (reply_status == RPC_MSGDENIED) {
5874 nfsm_chain_get_32(error, nmrep, rejected_status);
5875 nfsmout_if(error);
5876 error = (rejected_status == RPC_MISMATCH) ? ERPCMISMATCH : EACCES;
5877 goto nfsmout;
5878 }
5879 nfsm_chain_get_32(error, nmrep, verf_type); /* verifier flavor */
5880 nfsm_chain_get_32(error, nmrep, verf_len); /* verifier length */
5881 nfsmout_if(error);
5882 if (verf_len) {
5883 nfsm_chain_adv(error, nmrep, nfsm_rndup(verf_len));
5884 }
5885 nfsm_chain_get_32(error, nmrep, accepted_status);
5886 nfsmout_if(error);
5887 switch (accepted_status) {
5888 case RPC_SUCCESS:
5889 error = 0;
5890 break;
5891 case RPC_PROGUNAVAIL:
5892 error = EPROGUNAVAIL;
5893 break;
5894 case RPC_PROGMISMATCH:
5895 error = EPROGMISMATCH;
5896 break;
5897 case RPC_PROCUNAVAIL:
5898 error = EPROCUNAVAIL;
5899 break;
5900 case RPC_GARBAGE:
5901 error = EBADRPC;
5902 break;
5903 case RPC_SYSTEM_ERR:
5904 default:
5905 error = EIO;
5906 break;
5907 }
5908 break;
5909 }
5910 nfsmout:
5911 if (restoreto) {
5912 sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &orig_rcvto, sizeof(tv));
5913 sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &orig_sndto, sizeof(tv));
5914 }
5915 if (newso) {
5916 sock_shutdown(newso, SHUT_RDWR);
5917 sock_close(newso);
5918 }
5919 mbuf_freem(mreq);
5920 return error;
5921 }
5922
5923 int
nfs_portmap_lookup(struct nfsmount * nmp,vfs_context_t ctx,struct sockaddr * sa,socket_t so,uint32_t protocol,uint32_t vers,uint32_t stype,int timeo)5924 nfs_portmap_lookup(
5925 struct nfsmount *nmp,
5926 vfs_context_t ctx,
5927 struct sockaddr *sa,
5928 socket_t so,
5929 uint32_t protocol,
5930 uint32_t vers,
5931 uint32_t stype,
5932 int timeo)
5933 {
5934 thread_t thd = vfs_context_thread(ctx);
5935 kauth_cred_t cred = vfs_context_ucred(ctx);
5936 struct sockaddr_storage ss;
5937 struct sockaddr *saddr = (struct sockaddr*)&ss;
5938 static struct sockaddr_un rpcbind_cots = {
5939 sizeof(struct sockaddr_un),
5940 AF_LOCAL,
5941 RPCB_TICOTSORD_PATH
5942 };
5943 static struct sockaddr_un rpcbind_clts = {
5944 sizeof(struct sockaddr_un),
5945 AF_LOCAL,
5946 RPCB_TICLTS_PATH
5947 };
5948 struct nfsm_chain nmreq, nmrep;
5949 mbuf_t mreq;
5950 int error = 0, ip, pmprog, pmvers, pmproc;
5951 uint32_t ualen = 0, scopeid = 0, port32;
5952 uint64_t xid = 0;
5953 char uaddr[MAX_IPv6_STR_LEN + 16];
5954
5955 bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
5956 if (saddr->sa_family == AF_INET) {
5957 ip = 4;
5958 pmprog = PMAPPROG;
5959 pmvers = PMAPVERS;
5960 pmproc = PMAPPROC_GETPORT;
5961 } else if (saddr->sa_family == AF_INET6) {
5962 ip = 6;
5963 pmprog = RPCBPROG;
5964 pmvers = RPCBVERS4;
5965 pmproc = RPCBPROC_GETVERSADDR;
5966 } else if (saddr->sa_family == AF_LOCAL) {
5967 ip = 0;
5968 pmprog = RPCBPROG;
5969 pmvers = RPCBVERS4;
5970 pmproc = RPCBPROC_GETVERSADDR;
5971 NFS_SOCK_DBG("%s\n", ((struct sockaddr_un*)sa)->sun_path);
5972 saddr = (struct sockaddr*)((stype == SOCK_STREAM) ? &rpcbind_cots : &rpcbind_clts);
5973 } else {
5974 return EINVAL;
5975 }
5976 nfsm_chain_null(&nmreq);
5977 nfsm_chain_null(&nmrep);
5978
5979 tryagain:
5980 /* send portmapper request to get port/uaddr */
5981 if (ip == 4) {
5982 ((struct sockaddr_in*)saddr)->sin_port = htons(PMAPPORT);
5983 } else if (ip == 6) {
5984 ((struct sockaddr_in6*)saddr)->sin6_port = htons(PMAPPORT);
5985 }
5986 nfsm_chain_build_alloc_init(error, &nmreq, 8 * NFSX_UNSIGNED);
5987 nfsm_chain_add_32(error, &nmreq, protocol);
5988 nfsm_chain_add_32(error, &nmreq, vers);
5989 if (ip == 4) {
5990 nfsm_chain_add_32(error, &nmreq, stype == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP);
5991 nfsm_chain_add_32(error, &nmreq, 0);
5992 } else {
5993 if (stype == SOCK_STREAM) {
5994 if (ip == 6) {
5995 nfsm_chain_add_string(error, &nmreq, "tcp6", 4);
5996 } else {
5997 nfsm_chain_add_string(error, &nmreq, "ticotsord", 9);
5998 }
5999 } else {
6000 if (ip == 6) {
6001 nfsm_chain_add_string(error, &nmreq, "udp6", 4);
6002 } else {
6003 nfsm_chain_add_string(error, &nmreq, "ticlts", 6);
6004 }
6005 }
6006 nfsm_chain_add_string(error, &nmreq, "", 0); /* uaddr */
6007 nfsm_chain_add_string(error, &nmreq, "", 0); /* owner */
6008 }
6009 nfsm_chain_build_done(error, &nmreq);
6010 nfsmout_if(error);
6011 error = nfsm_rpchead2(nmp, stype, pmprog, pmvers, pmproc,
6012 RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq);
6013 nfsmout_if(error);
6014 nmreq.nmc_mhead = NULL;
6015
6016 NFS_SOCK_DUMP_MBUF("nfs_portmap_loockup request", mreq);
6017 error = nfs_aux_request(nmp, thd, saddr, so,
6018 stype, mreq, R_XID32(xid), 0, timeo, &nmrep);
6019 NFS_SOCK_DUMP_MBUF("nfs_portmap_lookup reply", nmrep.nmc_mhead);
6020 NFS_SOCK_DBG("rpcbind request returned %d for program %u vers %u: %s, socktype %d\n", error, protocol, vers,
6021 (saddr->sa_family == AF_LOCAL) ? ((struct sockaddr_un *)saddr)->sun_path :
6022 (saddr->sa_family == AF_INET6) ? "INET6 socket" : "INET socket", stype);
6023
6024 /* grab port from portmap response */
6025 if (ip == 4) {
6026 nfsm_chain_get_32(error, &nmrep, port32);
6027 if (!error) {
6028 if (NFS_PORT_INVALID(port32)) {
6029 error = EBADRPC;
6030 } else {
6031 ((struct sockaddr_in*)sa)->sin_port = htons((in_port_t)port32);
6032 }
6033 }
6034 } else {
6035 /* get uaddr string and convert to sockaddr */
6036 nfsm_chain_get_32(error, &nmrep, ualen);
6037 if (!error) {
6038 if (ualen > (sizeof(uaddr) - 1)) {
6039 error = EIO;
6040 }
6041 if (ualen < 1) {
6042 /* program is not available, just return a zero port */
6043 bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
6044 if (ip == 6) {
6045 ((struct sockaddr_in6*)saddr)->sin6_port = htons(0);
6046 } else {
6047 ((struct sockaddr_un*)saddr)->sun_path[0] = '\0';
6048 }
6049 NFS_SOCK_DBG("Program %u version %u unavailable", protocol, vers);
6050 } else {
6051 nfsm_chain_get_opaque(error, &nmrep, ualen, uaddr);
6052 NFS_SOCK_DBG("Got uaddr %s\n", uaddr);
6053 if (!error) {
6054 uaddr[ualen] = '\0';
6055 if (ip == 6) {
6056 scopeid = ((struct sockaddr_in6*)saddr)->sin6_scope_id;
6057 }
6058 if (!nfs_uaddr2sockaddr(uaddr, saddr)) {
6059 error = EIO;
6060 }
6061 if (ip == 6 && scopeid != ((struct sockaddr_in6*)saddr)->sin6_scope_id) {
6062 NFS_SOCK_DBG("Setting scope_id from %u to %u\n", ((struct sockaddr_in6*)saddr)->sin6_scope_id, scopeid);
6063 ((struct sockaddr_in6*)saddr)->sin6_scope_id = scopeid;
6064 }
6065 }
6066 }
6067 }
6068 if ((error == EPROGMISMATCH) || (error == EPROCUNAVAIL) || (error == EIO) || (error == EBADRPC)) {
6069 /* remote doesn't support rpcbind version or proc (or we couldn't parse uaddr) */
6070 if (pmvers == RPCBVERS4) {
6071 /* fall back to v3 and GETADDR */
6072 pmvers = RPCBVERS3;
6073 pmproc = RPCBPROC_GETADDR;
6074 nfsm_chain_cleanup(&nmreq);
6075 nfsm_chain_cleanup(&nmrep);
6076 bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
6077 xid = 0;
6078 error = 0;
6079 goto tryagain;
6080 }
6081 }
6082 if (!error) {
6083 bcopy(saddr, sa, min(saddr->sa_len, sa->sa_len));
6084 }
6085 }
6086 nfsmout:
6087 nfsm_chain_cleanup(&nmreq);
6088 nfsm_chain_cleanup(&nmrep);
6089 NFS_SOCK_DBG("Returned %d\n", error);
6090
6091 return error;
6092 }
6093
6094 int
nfs_msg(thread_t thd,const char * server,const char * msg,int error)6095 nfs_msg(thread_t thd,
6096 const char *server,
6097 const char *msg,
6098 int error)
6099 {
6100 proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL;
6101 tpr_t tpr;
6102
6103 if (p) {
6104 tpr = tprintf_open(p);
6105 } else {
6106 tpr = NULL;
6107 }
6108 if (error) {
6109 tprintf(tpr, "nfs server %s: %s, error %d\n", server, msg, error);
6110 } else {
6111 tprintf(tpr, "nfs server %s: %s\n", server, msg);
6112 }
6113 tprintf_close(tpr);
6114 return 0;
6115 }
6116
/* nfs_squishy_flags bits (settable via sysctl / nfs.conf) */
#define NFS_SQUISH_MOBILE_ONLY          0x0001          /* Squish mounts only on mobile machines */
#define NFS_SQUISH_AUTOMOUNTED_ONLY     0x0002          /* Squish mounts only if they are automounted */
#define NFS_SQUISH_SOFT                 0x0004          /* Treat all soft mounts as though they were on a mobile machine */
#define NFS_SQUISH_QUICK                0x0008          /* Try to squish mounts more quickly. */
#define NFS_SQUISH_SHUTDOWN             0x1000          /* Squish all mounts on shutdown. Currently not implemented */

uint32_t nfs_squishy_flags = NFS_SQUISH_MOBILE_ONLY | NFS_SQUISH_AUTOMOUNTED_ONLY | NFS_SQUISH_QUICK;
uint32_t nfs_tcp_sockbuf = 128 * 1024; /* Default value of tcp_sendspace and tcp_recvspace */
int32_t nfs_is_mobile; /* nonzero when this machine is considered mobile */

#define NFS_SQUISHY_DEADTIMEOUT         8       /* Dead time out for squishy mounts */
#define NFS_SQUISHY_QUICKTIMEOUT        4       /* Quicker dead time out when nfs_squishy_flags NFS_SQUISH_QUICK bit is set */
6129
6130 /*
6131 * Could this mount be squished?
6132 */
6133 int
nfs_can_squish(struct nfsmount * nmp)6134 nfs_can_squish(struct nfsmount *nmp)
6135 {
6136 uint64_t flags = vfs_flags(nmp->nm_mountp);
6137 int softsquish = ((nfs_squishy_flags & NFS_SQUISH_SOFT) & NMFLAG(nmp, SOFT));
6138
6139 if (!softsquish && (nfs_squishy_flags & NFS_SQUISH_MOBILE_ONLY) && nfs_is_mobile == 0) {
6140 return 0;
6141 }
6142
6143 if ((nfs_squishy_flags & NFS_SQUISH_AUTOMOUNTED_ONLY) && (flags & MNT_AUTOMOUNTED) == 0) {
6144 return 0;
6145 }
6146
6147 return 1;
6148 }
6149
6150 /*
6151 * NFS mounts default to "rw,hard" - but frequently on mobile clients
6152 * the mount may become "not responding". It's desirable to be able
6153 * to unmount these dead mounts, but only if there is no risk of
6154 * losing data or crashing applications. A "squishy" NFS mount is one
6155 * that can be force unmounted with little risk of harm.
6156 *
6157 * nfs_is_squishy checks if a mount is in a squishy state. A mount is
6158 * in a squishy state iff it is allowed to be squishy and there are no
6159 * dirty pages and there are no mmapped files and there are no files
 * open for write. Whether a mount is allowed to be squishy is
 * controlled by the nfs_squishy_flags settings and the machine's
 * mobility state. These flags can be set by sysctls.
6163 *
6164 * If nfs_is_squishy determines that we are in a squishy state we will
6165 * update the current dead timeout to at least NFS_SQUISHY_DEADTIMEOUT
6166 * (or NFS_SQUISHY_QUICKTIMEOUT if NFS_SQUISH_QUICK is set) (see
6167 * above) or 1/8th of the mount's nm_deadtimeout value, otherwise we just
6168 * update the current dead timeout with the mount's nm_deadtimeout
6169 * value set at mount time.
6170 *
6171 * Assumes that nm_lock is held.
6172 *
 * Note this routine is racy, but its effects on setting the
 * dead timeout only have effects when we're in trouble and are likely
 * to stay that way. Since by default it's only for automounted
 * volumes on mobile machines, this is a reasonable trade off between
 * data integrity and user experience. It can be disabled or set via
 * the nfs.conf file.
6179 */
6180
/*
 * Check whether the mount is currently squishable and update
 * nm_curdeadtimeout accordingly (see the block comment above).
 * Returns 1 if the mount is squishy.  Assumes nm_lock is held.
 */
int
nfs_is_squishy(struct nfsmount *nmp)
{
	mount_t mp = nmp->nm_mountp;
	int squishy = 0;
	/* base dead timeout: shorter when NFS_SQUISH_QUICK is configured */
	int timeo = (nfs_squishy_flags & NFS_SQUISH_QUICK) ? NFS_SQUISHY_QUICKTIMEOUT : NFS_SQUISHY_DEADTIMEOUT;

	NFS_SOCK_DBG("%s: nm_curdeadtimeout = %d, nfs_is_mobile = %d\n",
	    vfs_statfs(mp)->f_mntfromname, nmp->nm_curdeadtimeout, nfs_is_mobile);

	if (!nfs_can_squish(nmp)) {
		goto out;
	}

	/* use at least 1/8th of the mount's own dead timeout, if that's larger */
	timeo = (nmp->nm_deadtimeout > timeo) ? max(nmp->nm_deadtimeout / 8, timeo) : timeo;
	NFS_SOCK_DBG("nm_writers = %d nm_mappers = %d timeo = %d\n", nmp->nm_writers, nmp->nm_mappers, timeo);

	/* squishy only when nothing is open for write and nothing is mmapped */
	if (nmp->nm_writers == 0 && nmp->nm_mappers == 0) {
		uint64_t flags = mp ? vfs_flags(mp) : 0;
		squishy = 1;

		/*
		 * Walk the nfs nodes and check for dirty buffers if we're not
		 * RDONLY and we've not already been declared as squishy since
		 * this can be a bit expensive.
		 */
		if (!(flags & MNT_RDONLY) && !(nmp->nm_state & NFSSTA_SQUISHY)) {
			squishy = !nfs_mount_is_dirty(mp);
		}
	}

out:
	if (squishy) {
		nmp->nm_state |= NFSSTA_SQUISHY;
	} else {
		nmp->nm_state &= ~NFSSTA_SQUISHY;
	}

	/* squishy mounts get the (shorter) squishy timeout, otherwise the mount's own */
	nmp->nm_curdeadtimeout = squishy ? timeo : nmp->nm_deadtimeout;

	NFS_SOCK_DBG("nm_curdeadtimeout = %d\n", nmp->nm_curdeadtimeout);

	return squishy;
}
6225
/*
 * On a send operation, if we can't reach the server and we've got only one server to talk to
 * and NFS_SQUISH_QUICK flag is set and we are in a squishy state then mark the mount as dead
 * and ask to be forcibly unmounted. Return 1 if we're dead and 0 otherwise.
 */
int
nfs_is_dead(int error, struct nfsmount *nmp)
{
	fsid_t fsid;

	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_state & NFSSTA_DEAD) {
		/* already declared dead */
		lck_mtx_unlock(&nmp->nm_lock);
		return 1;
	}

	/*
	 * Only consider killing the mount for unreachability errors, and only
	 * when there is a single location with a single server (no failover
	 * alternative to try).
	 */
	if ((error != ENETUNREACH && error != EHOSTUNREACH && error != EADDRNOTAVAIL) ||
	    !(nmp->nm_locations.nl_numlocs == 1 && nmp->nm_locations.nl_locations[0]->nl_servcount == 1)) {
		lck_mtx_unlock(&nmp->nm_lock);
		return 0;
	}

	if ((nfs_squishy_flags & NFS_SQUISH_QUICK) && nfs_is_squishy(nmp)) {
		printf("nfs_is_dead: nfs server %s: unreachable. Squished dead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
		/* snapshot the fsid before dropping the lock and zombifying */
		fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
		lck_mtx_unlock(&nmp->nm_lock);
		nfs_mount_zombie(nmp, NFSSTA_DEAD);
		vfs_event_signal(&fsid, VQ_DEAD, 0);
		return 1;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	return 0;
}
6259
6260 /*
6261 * If we've experienced timeouts and we're not really a
6262 * classic hard mount, then just return cached data to
6263 * the caller instead of likely hanging on an RPC.
6264 */
6265 int
nfs_use_cache(struct nfsmount * nmp)6266 nfs_use_cache(struct nfsmount *nmp)
6267 {
6268 /*
6269 *%%% We always let mobile users goto the cache,
6270 * perhaps we should not even require them to have
6271 * a timeout?
6272 */
6273 int cache_ok = (nfs_is_mobile || NMFLAG(nmp, SOFT) ||
6274 nfs_can_squish(nmp) || nmp->nm_deadtimeout);
6275
6276 int timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
6277
6278 /*
6279 * So if we have a timeout and we're not really a hard hard-mount,
6280 * return 1 to not get things out of the cache.
6281 */
6282
6283 return (nmp->nm_state & timeoutmask) && cache_ok;
6284 }
6285
/*
 * Log a message that nfs or lockd server is unresponsive. Check if we
 * can be squished and if we can, or that our dead timeout has
 * expired, and we're not holding state, set our mount as dead, remove
 * our mount state and ask to be unmounted. If we are holding state
 * we're being called from the nfs_request_timer and will soon detect
 * that we need to unmount.
 */
void
nfs_down(struct nfsmount *nmp, thread_t thd, int error, int flags, const char *msg, int holding_state)
{
	int timeoutmask, wasunresponsive, unresponsive, softnobrowse;
	uint32_t do_vfs_signal = 0;
	struct timeval now;

	if (nfs_mount_gone(nmp)) {
		return;
	}

	lck_mtx_lock(&nmp->nm_lock);

	timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
	if (NMFLAG(nmp, MUTEJUKEBOX)) { /* jukebox timeouts don't count as unresponsive if muted */
		timeoutmask &= ~NFSSTA_JUKEBOXTIMEO;
	}
	/* remember whether we were already unresponsive, to detect the transition */
	wasunresponsive = (nmp->nm_state & timeoutmask);

	/* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
	softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));

	/* set whichever timeout flags the caller passed that aren't already set */
	if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
		nmp->nm_state |= NFSSTA_TIMEO;
	}
	if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		nmp->nm_state |= NFSSTA_LOCKTIMEO;
	}
	if ((flags & NFSSTA_JUKEBOXTIMEO) && !(nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) {
		nmp->nm_state |= NFSSTA_JUKEBOXTIMEO;
	}

	unresponsive = (nmp->nm_state & timeoutmask);

	/* may update nm_curdeadtimeout and the NFSSTA_SQUISHY state bit */
	nfs_is_squishy(nmp);

	if (unresponsive && (nmp->nm_curdeadtimeout > 0)) {
		microuptime(&now);
		if (!wasunresponsive) {
			/* just became unresponsive: start the dead timer */
			nmp->nm_deadto_start = now.tv_sec;
			nfs_mount_sock_thread_wake(nmp);
		} else if ((now.tv_sec - nmp->nm_deadto_start) > nmp->nm_curdeadtimeout && !holding_state) {
			if (!(nmp->nm_state & NFSSTA_DEAD)) {
				printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
				    (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
			}
			do_vfs_signal = VQ_DEAD;
		}
	}
	lck_mtx_unlock(&nmp->nm_lock);

	if (do_vfs_signal == VQ_DEAD && !(nmp->nm_state & NFSSTA_DEAD)) {
		nfs_mount_zombie(nmp, NFSSTA_DEAD);
	} else if (softnobrowse || wasunresponsive || !unresponsive) {
		/* no user-visible responsive->unresponsive transition to report */
		do_vfs_signal = 0;
	} else {
		do_vfs_signal = VQ_NOTRESP;
	}
	if (do_vfs_signal) {
		vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, do_vfs_signal, 0);
	}

	nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, error);
}
6358
6359 void
nfs_up(struct nfsmount * nmp,thread_t thd,int flags,const char * msg)6360 nfs_up(struct nfsmount *nmp, thread_t thd, int flags, const char *msg)
6361 {
6362 int timeoutmask, wasunresponsive, unresponsive, softnobrowse;
6363 int do_vfs_signal;
6364
6365 if (nfs_mount_gone(nmp)) {
6366 return;
6367 }
6368
6369 if (msg) {
6370 nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, 0);
6371 }
6372
6373 lck_mtx_lock(&nmp->nm_lock);
6374
6375 timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
6376 if (NMFLAG(nmp, MUTEJUKEBOX)) { /* jukebox timeouts don't count as unresponsive if muted */
6377 timeoutmask &= ~NFSSTA_JUKEBOXTIMEO;
6378 }
6379 wasunresponsive = (nmp->nm_state & timeoutmask);
6380
6381 /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
6382 softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));
6383
6384 if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
6385 nmp->nm_state &= ~NFSSTA_TIMEO;
6386 }
6387 if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
6388 nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
6389 }
6390 if ((flags & NFSSTA_JUKEBOXTIMEO) && (nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) {
6391 nmp->nm_state &= ~NFSSTA_JUKEBOXTIMEO;
6392 }
6393
6394 unresponsive = (nmp->nm_state & timeoutmask);
6395
6396 nmp->nm_deadto_start = 0;
6397 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
6398 nmp->nm_state &= ~NFSSTA_SQUISHY;
6399 lck_mtx_unlock(&nmp->nm_lock);
6400
6401 if (softnobrowse) {
6402 do_vfs_signal = 0;
6403 } else {
6404 do_vfs_signal = (wasunresponsive && !unresponsive);
6405 }
6406 if (do_vfs_signal) {
6407 vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_NOTRESP, 1);
6408 }
6409 }
6410
6411
6412 #endif /* CONFIG_NFS_CLIENT */
6413
6414 #if CONFIG_NFS_SERVER
6415
/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 *
 * On success the built header chain is returned through *nmrepp.
 * nd->nd_repstat supplies the status being reported; ERPCMISMATCH and
 * NFSERR_AUTHERR-tagged values turn into RPC-level rejections, other
 * values into accepted-status codes.
 */
int
nfsrv_rephead(
	struct nfsrv_descript *nd,
	__unused struct nfsrv_sock *slp,
	struct nfsm_chain *nmrepp,
	size_t siz)
{
	mbuf_t mrep;
	u_int32_t *tl;
	struct nfsm_chain nmrep;
	int err, error;

	err = nd->nd_repstat;
	/* NFSv2 error replies carry no body beyond the header */
	if (err && (nd->nd_vers == NFS_VER2)) {
		siz = 0;
	}

	/*
	 * If this is a big reply, use a cluster else
	 * try and leave leading space for the lower level headers.
	 */
	siz += RPC_REPLYSIZ;
	if (siz >= nfs_mbuf_minclsize) {
		error = mbuf_getpacket(MBUF_WAITOK, &mrep);
	} else {
		error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mrep);
	}
	if (error) {
		/* unable to allocate packet */
		/* XXX should we keep statistics for these errors? */
		return error;
	}
	if (siz < nfs_mbuf_minclsize) {
		/* leave space for lower level headers */
		tl = mbuf_data(mrep);
		tl += 80 / sizeof(*tl); /* XXX max_hdr? XXX */
		mbuf_setdata(mrep, tl, 6 * NFSX_UNSIGNED);
	}
	/* the nfsm_chain_add_* macros thread 'error' and no-op once it is set */
	nfsm_chain_init(&nmrep, mrep);
	nfsm_chain_add_32(error, &nmrep, nd->nd_retxid);
	nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		/* the request is being rejected outright */
		nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
		if (err & NFSERR_AUTHERR) {
			nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
			nfsm_chain_add_32(error, &nmrep, (err & ~NFSERR_AUTHERR));
		} else {
			nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
			/* lowest and highest RPC version we support (both v2) */
			nfsm_chain_add_32(error, &nmrep, RPC_VER2);
			nfsm_chain_add_32(error, &nmrep, RPC_VER2);
		}
	} else {
		/* reply status */
		nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
		if (nd->nd_gss_context != NULL) {
			/* RPCSEC_GSS verifier */
			error = nfs_gss_svc_verf_put(nd, &nmrep);
			if (error) {
				nfsm_chain_add_32(error, &nmrep, RPC_SYSTEM_ERR);
				goto done;
			}
		} else {
			/* RPCAUTH_NULL verifier */
			nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
			nfsm_chain_add_32(error, &nmrep, 0);
		}
		/* accepted status */
		switch (err) {
		case EPROGUNAVAIL:
			nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
			/* XXX hard coded versions? */
			nfsm_chain_add_32(error, &nmrep, NFS_VER2);
			nfsm_chain_add_32(error, &nmrep, NFS_VER3);
			break;
		case EPROCUNAVAIL:
			nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
			break;
		default:
			/* RPC-level success; any NFS-level error goes in the reply body */
			nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
			if (nd->nd_gss_context != NULL) {
				error = nfs_gss_svc_prepare_reply(nd, &nmrep);
			}
			if (err != NFSERR_RETVOID) {
				nfsm_chain_add_32(error, &nmrep,
				    (err ? nfsrv_errmap(nd, err) : 0));
			}
			break;
		}
	}

done:
	nfsm_chain_build_done(error, &nmrep);
	if (error) {
		/* error composing reply header */
		/* XXX should we keep statistics for these errors? */
		mbuf_freem(mrep);
		return error;
	}

	*nmrepp = nmrep;
	if ((err != 0) && (err != NFSERR_RETVOID)) {
		OSAddAtomic64(1, &nfsrvstats.srvrpc_errs);
	}
	return 0;
}
6531
6532 /*
6533 * The nfs server send routine.
6534 *
6535 * - return EINTR or ERESTART if interrupted by a signal
6536 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
6537 * - do any cleanup required by recoverable socket errors (???)
6538 */
6539 int
nfsrv_send(struct nfsrv_sock * slp,mbuf_t nam,mbuf_t top)6540 nfsrv_send(struct nfsrv_sock *slp, mbuf_t nam, mbuf_t top)
6541 {
6542 int error;
6543 socket_t so = slp->ns_so;
6544 struct sockaddr *sendnam;
6545 struct msghdr msg;
6546
6547 bzero(&msg, sizeof(msg));
6548 if (nam && !sock_isconnected(so) && (slp->ns_sotype != SOCK_STREAM)) {
6549 if ((sendnam = mbuf_data(nam))) {
6550 msg.msg_name = (caddr_t)sendnam;
6551 msg.msg_namelen = sendnam->sa_len;
6552 }
6553 }
6554 if (NFSRV_IS_DBG(NFSRV_FAC_SRV, 15)) {
6555 nfs_dump_mbuf(__func__, __LINE__, "nfsrv_send\n", top);
6556 }
6557 error = sock_sendmbuf(so, &msg, top, 0, NULL);
6558 if (!error) {
6559 return 0;
6560 }
6561 log(LOG_INFO, "nfsd send error %d\n", error);
6562
6563 if ((error == EWOULDBLOCK) && (slp->ns_sotype == SOCK_STREAM)) {
6564 error = EPIPE; /* zap TCP sockets if they time out on send */
6565 }
6566 /* Handle any recoverable (soft) socket errors here. (???) */
6567 if (error != EINTR && error != ERESTART && error != EIO &&
6568 error != EWOULDBLOCK && error != EPIPE) {
6569 error = 0;
6570 }
6571
6572 return error;
6573 }
6574
6575 /*
6576 * Socket upcall routine for the nfsd sockets.
6577 * The caddr_t arg is a pointer to the "struct nfsrv_sock".
6578 * Essentially do as much as possible non-blocking, else punt and it will
6579 * be called with MBUF_WAITOK from an nfsd.
6580 */
6581 void
nfsrv_rcv(socket_t so,void * arg,int waitflag)6582 nfsrv_rcv(socket_t so, void *arg, int waitflag)
6583 {
6584 struct nfsrv_sock *slp = arg;
6585
6586 if (!nfsd_thread_count || !(slp->ns_flag & SLP_VALID)) {
6587 return;
6588 }
6589
6590 lck_rw_lock_exclusive(&slp->ns_rwlock);
6591 nfsrv_rcv_locked(so, slp, waitflag);
6592 /* Note: ns_rwlock gets dropped when called with MBUF_DONTWAIT */
6593 }
/*
 * Receive data on an nfsd socket.  Called with ns_rwlock held exclusive;
 * when called with MBUF_DONTWAIT (from the socket upcall) the lock is
 * dropped before returning and an nfsd is woken if there is work to do.
 */
void
nfsrv_rcv_locked(socket_t so, struct nfsrv_sock *slp, int waitflag)
{
	mbuf_t m, mp, mhck, m2;
	int ns_flag = 0, error;
	struct msghdr msg;
	size_t bytes_read;

	if ((slp->ns_flag & SLP_VALID) == 0) {
		if (waitflag == MBUF_DONTWAIT) {
			lck_rw_done(&slp->ns_rwlock);
		}
		return;
	}

#ifdef notdef
	/*
	 * Define this to test for nfsds handling this under heavy load.
	 */
	if (waitflag == MBUF_DONTWAIT) {
		ns_flag = SLP_NEEDQ;
		goto dorecs;
	}
#endif
	if (slp->ns_sotype == SOCK_STREAM) {
		/*
		 * If there are already records on the queue, defer soreceive()
		 * to an(other) nfsd so that there is feedback to the TCP layer that
		 * the nfs servers are heavily loaded.
		 */
		if (slp->ns_rec) {
			ns_flag = SLP_NEEDQ;
			goto dorecs;
		}

		/*
		 * Do soreceive().
		 */
		bytes_read = 1000000000;
		error = sock_receivembuf(so, NULL, &mp, MSG_DONTWAIT, &bytes_read);
		if (error || mp == NULL) {
			/* EWOULDBLOCK from an upcall means requeue; anything else, disconnect */
			if (error == EWOULDBLOCK) {
				ns_flag = (waitflag == MBUF_DONTWAIT) ? SLP_NEEDQ : 0;
			} else {
				ns_flag = SLP_DISCONN;
			}
			goto dorecs;
		}
		/* append the new data to the raw stream chain (ns_raw .. ns_rawend) */
		m = mp;
		if (slp->ns_rawend) {
			if ((error = mbuf_setnext(slp->ns_rawend, m))) {
				panic("nfsrv_rcv: mbuf_setnext failed %d", error);
			}
			slp->ns_cc += bytes_read;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = bytes_read;
		}
		/* walk to the last mbuf so ns_rawend stays current */
		while ((m2 = mbuf_next(m))) {
			m = m2;
		}
		slp->ns_rawend = m;

		/*
		 * Now try and parse record(s) out of the raw stream data.
		 */
		error = nfsrv_getstream(slp, waitflag);
		if (error) {
			if (error == EPERM) {
				/* bogus record mark: drop the connection */
				ns_flag = SLP_DISCONN;
			} else {
				ns_flag = SLP_NEEDQ;
			}
		}
	} else {
		struct sockaddr_storage nam;

		if (slp->ns_reccnt >= nfsrv_sock_max_rec_queue_length) {
			/* already have max # RPC records queued on this socket */
			ns_flag = SLP_NEEDQ;
			goto dorecs;
		}

		bzero(&msg, sizeof(msg));
		msg.msg_name = (caddr_t)&nam;
		msg.msg_namelen = sizeof(nam);

		/* drain all waiting datagrams, queueing each one as its own record */
		do {
			bytes_read = 1000000000;
			error = sock_receivembuf(so, &msg, &mp, MSG_DONTWAIT | MSG_NEEDSA, &bytes_read);
			if (mp) {
				/* prepend the sender's address in an MBUF_TYPE_SONAME mbuf */
				if (msg.msg_name && (mbuf_get(MBUF_WAITOK, MBUF_TYPE_SONAME, &mhck) == 0)) {
					mbuf_setlen(mhck, nam.ss_len);
					bcopy(&nam, mbuf_data(mhck), nam.ss_len);
					m = mhck;
					if (mbuf_setnext(m, mp)) {
						/* trouble... just drop it */
						printf("nfsrv_rcv: mbuf_setnext failed\n");
						mbuf_free(mhck);
						m = mp;
					}
				} else {
					m = mp;
				}
				/* append the record to the ns_rec packet queue */
				if (slp->ns_recend) {
					mbuf_setnextpkt(slp->ns_recend, m);
				} else {
					slp->ns_rec = m;
					slp->ns_flag |= SLP_DOREC;
				}
				slp->ns_recend = m;
				mbuf_setnextpkt(m, NULL);
				slp->ns_reccnt++;
			}
		} while (mp);
	}

	/*
	 * Now try and process the request records, non-blocking.
	 */
dorecs:
	if (ns_flag) {
		slp->ns_flag |= ns_flag;
	}
	if (waitflag == MBUF_DONTWAIT) {
		int wake = (slp->ns_flag & SLP_WORKTODO);
		lck_rw_done(&slp->ns_rwlock);
		if (wake && nfsd_thread_count) {
			lck_mtx_lock(&nfsd_mutex);
			nfsrv_wakenfsd(slp);
			lck_mtx_unlock(&nfsd_mutex);
		}
	}
}
6728
/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket. The "waitflag" argument indicates whether or not it
 * can sleep.
 *
 * Parses RPC record marking (4-byte marks with the high bit flagging the
 * last fragment) out of the raw byte stream (ns_raw/ns_cc), accumulates
 * fragments in ns_frag, and moves completed records onto the ns_rec
 * queue.  Returns 0 when it runs out of buffered data, EPERM on a bogus
 * record mark (caller disconnects), or EWOULDBLOCK on mbuf trouble.
 */
int
nfsrv_getstream(struct nfsrv_sock *slp, int waitflag)
{
	mbuf_t m;
	char *cp1, *cp2, *mdata;
	int error;
	size_t len, mlen;
	mbuf_t om, m2, recm;
	u_int32_t recmark;

	/* only one thread may parse the stream at a time */
	if (slp->ns_flag & SLP_GETSTREAM) {
		panic("nfs getstream");
	}
	slp->ns_flag |= SLP_GETSTREAM;
	for (;;) {
		if (slp->ns_reclen == 0) {
			/* no current fragment: read the next 4-byte record mark */
			if (slp->ns_cc < NFSX_UNSIGNED) {
				/* not enough buffered data for a record mark yet */
				slp->ns_flag &= ~SLP_GETSTREAM;
				return 0;
			}
			m = slp->ns_raw;
			mdata = mbuf_data(m);
			mlen = mbuf_len(m);
			if (mlen >= NFSX_UNSIGNED) {
				/* record mark is contained in the first mbuf */
				bcopy(mdata, (caddr_t)&recmark, NFSX_UNSIGNED);
				mdata += NFSX_UNSIGNED;
				mlen -= NFSX_UNSIGNED;
				mbuf_setdata(m, mdata, mlen);
			} else {
				/* record mark straddles mbufs: gather it byte by byte */
				cp1 = (caddr_t)&recmark;
				cp2 = mdata;
				while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
					while (mlen == 0) {
						m = mbuf_next(m);
						cp2 = mbuf_data(m);
						mlen = mbuf_len(m);
					}
					*cp1++ = *cp2++;
					mlen--;
					mbuf_setdata(m, cp2, mlen);
				}
			}
			slp->ns_cc -= NFSX_UNSIGNED;
			recmark = ntohl(recmark);
			/* low 31 bits: fragment length; high bit: last fragment */
			slp->ns_reclen = recmark & ~0x80000000;
			if (recmark & 0x80000000) {
				slp->ns_flag |= SLP_LASTFRAG;
			} else {
				slp->ns_flag &= ~SLP_LASTFRAG;
			}
			if (slp->ns_reclen <= 0 || slp->ns_reclen > NFS_MAXPACKET) {
				/* bogus length: caller will disconnect the socket */
				slp->ns_flag &= ~SLP_GETSTREAM;
				return EPERM;
			}
		}

		/*
		 * Now get the record part.
		 *
		 * Note that slp->ns_reclen may be 0. Linux sometimes
		 * generates 0-length RPCs
		 */
		recm = NULL;
		if (slp->ns_cc == slp->ns_reclen) {
			/* buffered data is exactly one fragment: take the whole chain */
			recm = slp->ns_raw;
			slp->ns_raw = slp->ns_rawend = NULL;
			slp->ns_cc = slp->ns_reclen = 0;
		} else if (slp->ns_cc > slp->ns_reclen) {
			/* split the fragment off the front of the raw chain */
			len = 0;
			m = slp->ns_raw;
			mlen = mbuf_len(m);
			mdata = mbuf_data(m);
			om = NULL;
			while (len < slp->ns_reclen) {
				if ((len + mlen) > slp->ns_reclen) {
					/* fragment ends mid-mbuf: copy just the part we need */
					if (mbuf_copym(m, 0, slp->ns_reclen - len, waitflag, &m2)) {
						slp->ns_flag &= ~SLP_GETSTREAM;
						return EWOULDBLOCK;
					}
					if (om) {
						if (mbuf_setnext(om, m2)) {
							/* trouble... just drop it */
							printf("nfsrv_getstream: mbuf_setnext failed\n");
							mbuf_freem(m2);
							slp->ns_flag &= ~SLP_GETSTREAM;
							return EWOULDBLOCK;
						}
						recm = slp->ns_raw;
					} else {
						recm = m2;
					}
					/* advance the raw chain past the copied bytes */
					mdata += slp->ns_reclen - len;
					mlen -= slp->ns_reclen - len;
					mbuf_setdata(m, mdata, mlen);
					len = slp->ns_reclen;
				} else if ((len + mlen) == slp->ns_reclen) {
					/* fragment ends exactly at an mbuf boundary */
					om = m;
					len += mlen;
					m = mbuf_next(m);
					recm = slp->ns_raw;
					if (mbuf_setnext(om, NULL)) {
						printf("nfsrv_getstream: mbuf_setnext failed 2\n");
						slp->ns_flag &= ~SLP_GETSTREAM;
						return EWOULDBLOCK;
					}
					mlen = mbuf_len(m);
					mdata = mbuf_data(m);
				} else {
					/* whole mbuf belongs to the fragment; keep walking */
					om = m;
					len += mlen;
					m = mbuf_next(m);
					mlen = mbuf_len(m);
					mdata = mbuf_data(m);
				}
			}
			slp->ns_raw = m;
			slp->ns_cc -= len;
			slp->ns_reclen = 0;
		} else {
			/* don't have the whole fragment yet; wait for more data */
			slp->ns_flag &= ~SLP_GETSTREAM;
			return 0;
		}

		/*
		 * Accumulate the fragments into a record.
		 */
		if (slp->ns_frag == NULL) {
			slp->ns_frag = recm;
		} else {
			m = slp->ns_frag;
			while ((m2 = mbuf_next(m))) {
				m = m2;
			}
			if ((error = mbuf_setnext(m, recm))) {
				panic("nfsrv_getstream: mbuf_setnext failed 3, %d", error);
			}
		}
		if (slp->ns_flag & SLP_LASTFRAG) {
			/* record complete: move it onto the request queue */
			if (slp->ns_recend) {
				mbuf_setnextpkt(slp->ns_recend, slp->ns_frag);
			} else {
				slp->ns_rec = slp->ns_frag;
				slp->ns_flag |= SLP_DOREC;
			}
			slp->ns_recend = slp->ns_frag;
			slp->ns_frag = NULL;
		}
	}
}
6883
6884 /*
6885 * Parse an RPC header.
6886 */
6887 int
nfsrv_dorec(struct nfsrv_sock * slp,struct nfsd * nfsd,struct nfsrv_descript ** ndp)6888 nfsrv_dorec(
6889 struct nfsrv_sock *slp,
6890 struct nfsd *nfsd,
6891 struct nfsrv_descript **ndp)
6892 {
6893 mbuf_t m;
6894 mbuf_t nam;
6895 struct nfsrv_descript *nd;
6896 int error = 0;
6897
6898 *ndp = NULL;
6899 if (!(slp->ns_flag & (SLP_VALID | SLP_DOREC)) || (slp->ns_rec == NULL)) {
6900 return ENOBUFS;
6901 }
6902 nd = zalloc(nfsrv_descript_zone);
6903 m = slp->ns_rec;
6904 slp->ns_rec = mbuf_nextpkt(m);
6905 if (slp->ns_rec) {
6906 mbuf_setnextpkt(m, NULL);
6907 } else {
6908 slp->ns_flag &= ~SLP_DOREC;
6909 slp->ns_recend = NULL;
6910 }
6911 slp->ns_reccnt--;
6912 if (mbuf_type(m) == MBUF_TYPE_SONAME) {
6913 nam = m;
6914 m = mbuf_next(m);
6915 if ((error = mbuf_setnext(nam, NULL))) {
6916 panic("nfsrv_dorec: mbuf_setnext failed %d", error);
6917 }
6918 } else {
6919 nam = NULL;
6920 }
6921 nd->nd_nam2 = nam;
6922 nfsm_chain_dissect_init(error, &nd->nd_nmreq, m);
6923 if (!error) {
6924 error = nfsrv_getreq(nd);
6925 }
6926 if (error) {
6927 if (nam) {
6928 mbuf_freem(nam);
6929 }
6930 if (nd->nd_gss_context) {
6931 nfs_gss_svc_ctx_deref(nd->nd_gss_context);
6932 }
6933 NFS_ZFREE(nfsrv_descript_zone, nd);
6934 return error;
6935 }
6936 nd->nd_mrep = NULL;
6937 *ndp = nd;
6938 nfsd->nfsd_nd = nd;
6939 return 0;
6940 }
6941
6942 /*
6943 * Parse an RPC request
6944 * - verify it
6945 * - fill in the cred struct.
6946 */
6947 int
nfsrv_getreq(struct nfsrv_descript * nd)6948 nfsrv_getreq(struct nfsrv_descript *nd)
6949 {
6950 struct nfsm_chain *nmreq;
6951 int len, i;
6952 u_int32_t nfsvers, auth_type;
6953 int error = 0;
6954 uid_t user_id;
6955 gid_t group_id;
6956 short ngroups;
6957 uint32_t val;
6958
6959 nd->nd_cr = NULL;
6960 nd->nd_gss_context = NULL;
6961 nd->nd_gss_seqnum = 0;
6962 nd->nd_gss_mb = NULL;
6963
6964 user_id = group_id = -2;
6965 val = auth_type = len = 0;
6966
6967 nmreq = &nd->nd_nmreq;
6968 nfsm_chain_get_32(error, nmreq, nd->nd_retxid); // XID
6969 nfsm_chain_get_32(error, nmreq, val); // RPC Call
6970 if (!error && (val != RPC_CALL)) {
6971 error = EBADRPC;
6972 }
6973 nfsmout_if(error);
6974 nd->nd_repstat = 0;
6975 nfsm_chain_get_32(error, nmreq, val); // RPC Version
6976 nfsmout_if(error);
6977 if (val != RPC_VER2) {
6978 nd->nd_repstat = ERPCMISMATCH;
6979 nd->nd_procnum = NFSPROC_NOOP;
6980 return 0;
6981 }
6982 nfsm_chain_get_32(error, nmreq, val); // RPC Program Number
6983 nfsmout_if(error);
6984 if (val != NFS_PROG) {
6985 nd->nd_repstat = EPROGUNAVAIL;
6986 nd->nd_procnum = NFSPROC_NOOP;
6987 return 0;
6988 }
6989 nfsm_chain_get_32(error, nmreq, nfsvers);// NFS Version Number
6990 nfsmout_if(error);
6991 if ((nfsvers < NFS_VER2) || (nfsvers > NFS_VER3)) {
6992 nd->nd_repstat = EPROGMISMATCH;
6993 nd->nd_procnum = NFSPROC_NOOP;
6994 return 0;
6995 }
6996 nd->nd_vers = nfsvers;
6997 nfsm_chain_get_32(error, nmreq, nd->nd_procnum);// NFS Procedure Number
6998 nfsmout_if(error);
6999 if ((nd->nd_procnum >= NFS_NPROCS) ||
7000 ((nd->nd_vers == NFS_VER2) && (nd->nd_procnum > NFSV2PROC_STATFS))) {
7001 nd->nd_repstat = EPROCUNAVAIL;
7002 nd->nd_procnum = NFSPROC_NOOP;
7003 return 0;
7004 }
7005 if (nfsvers != NFS_VER3) {
7006 nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
7007 }
7008 nfsm_chain_get_32(error, nmreq, auth_type); // Auth Flavor
7009 nfsm_chain_get_32(error, nmreq, len); // Auth Length
7010 if (!error && (len < 0 || len > RPCAUTH_MAXSIZ)) {
7011 error = EBADRPC;
7012 }
7013 nfsmout_if(error);
7014
7015 /* Handle authentication */
7016 if (auth_type == RPCAUTH_SYS) {
7017 struct posix_cred temp_pcred;
7018 if (nd->nd_procnum == NFSPROC_NULL) {
7019 return 0;
7020 }
7021 nd->nd_sec = RPCAUTH_SYS;
7022 nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // skip stamp
7023 nfsm_chain_get_32(error, nmreq, len); // hostname length
7024 if (len < 0 || len > NFS_MAXNAMLEN) {
7025 error = EBADRPC;
7026 }
7027 nfsm_chain_adv(error, nmreq, nfsm_rndup(len)); // skip hostname
7028 nfsmout_if(error);
7029
7030 /* create a temporary credential using the bits from the wire */
7031 bzero(&temp_pcred, sizeof(temp_pcred));
7032 nfsm_chain_get_32(error, nmreq, user_id);
7033 nfsm_chain_get_32(error, nmreq, group_id);
7034 temp_pcred.cr_groups[0] = group_id;
7035 nfsm_chain_get_32(error, nmreq, len); // extra GID count
7036 if ((len < 0) || (len > RPCAUTH_UNIXGIDS)) {
7037 error = EBADRPC;
7038 }
7039 nfsmout_if(error);
7040 for (i = 1; i <= len; i++) {
7041 if (i < NGROUPS) {
7042 nfsm_chain_get_32(error, nmreq, temp_pcred.cr_groups[i]);
7043 } else {
7044 nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED);
7045 }
7046 }
7047 nfsmout_if(error);
7048 ngroups = (len >= NGROUPS) ? NGROUPS : (short)(len + 1);
7049 if (ngroups > 1) {
7050 nfsrv_group_sort(&temp_pcred.cr_groups[0], ngroups);
7051 }
7052 nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE)
7053 nfsm_chain_get_32(error, nmreq, len); // verifier length
7054 if (len < 0 || len > RPCAUTH_MAXSIZ) {
7055 error = EBADRPC;
7056 }
7057 if (len > 0) {
7058 nfsm_chain_adv(error, nmreq, nfsm_rndup(len));
7059 }
7060
7061 /* request creation of a real credential */
7062 temp_pcred.cr_uid = user_id;
7063 temp_pcred.cr_ngroups = ngroups;
7064 nd->nd_cr = posix_cred_create(&temp_pcred);
7065 if (nd->nd_cr == NULL) {
7066 nd->nd_repstat = ENOMEM;
7067 nd->nd_procnum = NFSPROC_NOOP;
7068 return 0;
7069 }
7070 } else if (auth_type == RPCSEC_GSS) {
7071 error = nfs_gss_svc_cred_get(nd, nmreq);
7072 if (error) {
7073 if (error == EINVAL) {
7074 goto nfsmout; // drop the request
7075 }
7076 nd->nd_repstat = error;
7077 nd->nd_procnum = NFSPROC_NOOP;
7078 return 0;
7079 }
7080 } else {
7081 if (nd->nd_procnum == NFSPROC_NULL) { // assume it's AUTH_NONE
7082 return 0;
7083 }
7084 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
7085 nd->nd_procnum = NFSPROC_NOOP;
7086 return 0;
7087 }
7088 return 0;
7089 nfsmout:
7090 if (IS_VALID_CRED(nd->nd_cr)) {
7091 kauth_cred_unref(&nd->nd_cr);
7092 }
7093 nfsm_chain_cleanup(nmreq);
7094 return error;
7095 }
7096
7097 /*
7098 * Search for a sleeping nfsd and wake it up.
7099 * SIDE EFFECT: If none found, make sure the socket is queued up so that one
7100 * of the running nfsds will go look for the work in the nfsrv_sockwait list.
7101 * Note: Must be called with nfsd_mutex held.
7102 */
7103 void
nfsrv_wakenfsd(struct nfsrv_sock * slp)7104 nfsrv_wakenfsd(struct nfsrv_sock *slp)
7105 {
7106 struct nfsd *nd;
7107
7108 if ((slp->ns_flag & SLP_VALID) == 0) {
7109 return;
7110 }
7111
7112 lck_rw_lock_exclusive(&slp->ns_rwlock);
7113 /* if there's work to do on this socket, make sure it's queued up */
7114 if ((slp->ns_flag & SLP_WORKTODO) && !(slp->ns_flag & SLP_QUEUED)) {
7115 TAILQ_INSERT_TAIL(&nfsrv_sockwait, slp, ns_svcq);
7116 slp->ns_flag |= SLP_WAITQ;
7117 }
7118 lck_rw_done(&slp->ns_rwlock);
7119
7120 /* wake up a waiting nfsd, if possible */
7121 nd = TAILQ_FIRST(&nfsd_queue);
7122 if (!nd) {
7123 return;
7124 }
7125
7126 TAILQ_REMOVE(&nfsd_queue, nd, nfsd_queue);
7127 nd->nfsd_flag &= ~NFSD_WAITING;
7128 wakeup(nd);
7129 }
7130
7131 #endif /* CONFIG_NFS_SERVER */
7132
7133 #endif /* CONFIG_NFS */
7134