/*
 * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28
29 #include <nfs/nfs_conf.h>
30 #if CONFIG_NFS_CLIENT
31
32 /*
33 * vnode op calls for NFS version 4
34 */
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/systm.h>
38 #include <sys/resourcevar.h>
39 #include <sys/proc_internal.h>
40 #include <sys/kauth.h>
41 #include <sys/mount_internal.h>
42 #include <sys/malloc.h>
43 #include <sys/kpi_mbuf.h>
44 #include <sys/conf.h>
45 #include <sys/vnode_internal.h>
46 #include <sys/dirent.h>
47 #include <sys/fcntl.h>
48 #include <sys/lockf.h>
49 #include <sys/ubc_internal.h>
50 #include <sys/attr.h>
51 #include <sys/signalvar.h>
52 #include <sys/uio_internal.h>
53 #include <sys/xattr.h>
54 #include <sys/paths.h>
55
56 #include <vfs/vfs_support.h>
57
58 #include <sys/vm.h>
59
60 #include <sys/time.h>
61 #include <kern/clock.h>
62 #include <libkern/OSAtomic.h>
63
64 #include <miscfs/fifofs/fifo.h>
65 #include <miscfs/specfs/specdev.h>
66
67 #include <nfs/rpcv2.h>
68 #include <nfs/nfsproto.h>
69 #include <nfs/nfs.h>
70 #include <nfs/nfsnode.h>
71 #include <nfs/nfs_gss.h>
72 #include <nfs/nfsmount.h>
73 #include <nfs/nfs_lock.h>
74 #include <nfs/xdr_subs.h>
75 #include <nfs/nfsm_subs.h>
76
77 #include <net/if.h>
78 #include <netinet/in.h>
79 #include <netinet/in_var.h>
80 #include <vm/vm_kern.h>
81
82 #include <kern/task.h>
83 #include <kern/sched_prim.h>
84
85 #if CONFIG_NFS4
86
87 int
nfs4_access_rpc(nfsnode_t np,u_int32_t * access,int rpcflags,vfs_context_t ctx)88 nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx)
89 {
90 int error = 0, lockerror = ENOENT, status, numops, slot;
91 u_int64_t xid;
92 struct nfsm_chain nmreq, nmrep;
93 struct timeval now;
94 uint32_t access_result = 0, supported = 0, missing;
95 struct nfsmount *nmp = NFSTONMP(np);
96 int nfsvers = nmp->nm_vers;
97 uid_t uid;
98 struct nfsreq_secinfo_args si;
99
100 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
101 return 0;
102 }
103
104 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
105 nfsm_chain_null(&nmreq);
106 nfsm_chain_null(&nmrep);
107
108 // PUTFH, ACCESS, GETATTR
109 numops = 3;
110 nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED);
111 nfsm_chain_add_compound_header(error, &nmreq, "access", nmp->nm_minor_vers, numops);
112 numops--;
113 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
114 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
115 numops--;
116 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_ACCESS);
117 nfsm_chain_add_32(error, &nmreq, *access);
118 numops--;
119 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
120 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
121 nfsm_chain_build_done(error, &nmreq);
122 nfsm_assert(error, (numops == 0), EPROTO);
123 nfsmout_if(error);
124 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
125 vfs_context_thread(ctx), vfs_context_ucred(ctx),
126 &si, rpcflags, &nmrep, &xid, &status);
127
128 if ((lockerror = nfs_node_lock(np))) {
129 error = lockerror;
130 }
131 nfsm_chain_skip_tag(error, &nmrep);
132 nfsm_chain_get_32(error, &nmrep, numops);
133 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
134 nfsm_chain_op_check(error, &nmrep, NFS_OP_ACCESS);
135 nfsm_chain_get_32(error, &nmrep, supported);
136 nfsm_chain_get_32(error, &nmrep, access_result);
137 nfsmout_if(error);
138 if ((missing = (*access & ~supported))) {
139 /* missing support for something(s) we wanted */
140 if (missing & NFS_ACCESS_DELETE) {
141 /*
142 * If the server doesn't report DELETE (possible
143 * on UNIX systems), we'll assume that it is OK
144 * and just let any subsequent delete action fail
145 * if it really isn't deletable.
146 */
147 access_result |= NFS_ACCESS_DELETE;
148 }
149 }
150 /* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
151 if (nfs_access_dotzfs) {
152 vnode_t dvp = NULLVP;
153 if (np->n_flag & NISDOTZFSCHILD) { /* may be able to create/delete snapshot dirs */
154 access_result |= (NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE);
155 } else if (((dvp = vnode_getparent(NFSTOV(np))) != NULLVP) && (VTONFS(dvp)->n_flag & NISDOTZFSCHILD)) {
156 access_result |= NFS_ACCESS_DELETE; /* may be able to delete snapshot dirs */
157 }
158 if (dvp != NULLVP) {
159 vnode_put(dvp);
160 }
161 }
162 /* Some servers report DELETE support but erroneously give a denied answer. */
163 if (nfs_access_delete && (*access & NFS_ACCESS_DELETE) && !(access_result & NFS_ACCESS_DELETE)) {
164 access_result |= NFS_ACCESS_DELETE;
165 }
166 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
167 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
168 nfsmout_if(error);
169
170 if (nfs_mount_gone(nmp)) {
171 error = ENXIO;
172 }
173 nfsmout_if(error);
174
175 if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) {
176 uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
177 } else {
178 uid = kauth_cred_getuid(vfs_context_ucred(ctx));
179 }
180 slot = nfs_node_access_slot(np, uid, 1);
181 np->n_accessuid[slot] = uid;
182 microuptime(&now);
183 np->n_accessstamp[slot] = now.tv_sec;
184 np->n_access[slot] = access_result;
185
186 /* pass back the access returned with this request */
187 *access = np->n_access[slot];
188 nfsmout:
189 if (!lockerror) {
190 nfs_node_unlock(np);
191 }
192 nfsm_chain_cleanup(&nmreq);
193 nfsm_chain_cleanup(&nmrep);
194 return error;
195 }
196
/*
 * NFSv4 getattr RPC: fetch the attributes for file handle (fhp, fhsize)
 * into *nvap, returning the request's transaction id via *xidp.
 *
 * 'np' may be NULL when only a mount ('mp') and raw file handle are known;
 * at least one of np/mp must identify a live mount.  'flags' (NGA_*) select
 * soft/monitor RPC behavior and whether to request the ACL.
 * Returns 0 or an errno.
 *
 * Compound sent: PUTFH, GETATTR.
 */
int
nfs4_getattr_rpc(
	nfsnode_t np,
	mount_t mp,
	u_char *fhp,
	size_t fhsize,
	int flags,
	vfs_context_t ctx,
	struct nfs_vattr *nvap,
	u_int64_t *xidp)
{
	struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
	int error = 0, status, nfsvers, numops, rpcflags = 0, acls;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* does this file system support ACLs? */
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	/* Referral triggers get synthesized attributes; no RPC needed. */
	if (np && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)) {
		nfs4_default_attrs_for_referral_trigger(VTONFS(np->n_parent), NULL, 0, nvap, NULL);
		return 0;
	}

	if (flags & NGA_MONITOR) { /* vnode monitor requests should be soft */
		rpcflags = R_RECOVER;
	}

	if (flags & NGA_SOFT) { /* Return ETIMEDOUT if server not responding */
		rpcflags |= R_SOFT;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "getattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fhp, fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	/* only ask for the ACL if requested and the server supports ACLs */
	if ((flags & NGA_ACL) && acls) {
		NFS_BITMAP_SET(bitmap, NFS_FATTR_ACL);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, mp, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    NULL, rpcflags, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((flags & NGA_ACL) && acls && !NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
		/* we asked for the ACL but didn't get one... assume there isn't one */
		NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_ACL);
		nvap->nva_acl = NULL;
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
275
/*
 * NFSv4 readlink RPC: read the target of symlink 'np' into 'buf'.
 *
 * *buflenp is the buffer size on entry and is updated to the number of
 * bytes actually copied on success.  The target is truncated (never
 * overflows buf) if it does not fit.  Returns 0 or an errno.
 *
 * Compound sent: PUTFH, GETATTR, READLINK.
 */
int
nfs4_readlink_rpc(nfsnode_t np, char *buf, size_t *buflenp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, status, numops;
	size_t len = 0;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* a referral trigger isn't a readable symlink */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, READLINK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_READLINK);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	/* lock the node while we update cached attributes from the reply */
	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
	nfsm_chain_get_32(error, &nmrep, len);
	nfsmout_if(error);
	/*
	 * Clamp the returned length to the caller's buffer.  Prefer the
	 * cached n_size if it's a tighter bound; otherwise leave room for
	 * a terminator by copying at most *buflenp - 1 bytes.
	 */
	if (len >= *buflenp) {
		if (np->n_size && (np->n_size < *buflenp)) {
			len = np->n_size;
		} else {
			len = *buflenp - 1;
		}
	}
	nfsm_chain_get_opaque(error, &nmrep, len, buf);
	if (!error) {
		*buflenp = len;
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
344
/*
 * Start an asynchronous NFSv4 READ RPC for 'len' bytes at 'offset' of 'np'.
 *
 * Builds and submits the request; the caller completes it later with
 * nfs4_read_rpc_async_finish().  *reqp receives the in-flight request.
 * Returns 0 or an errno.
 *
 * Compound sent: PUTFH, READ.
 */
int
nfs4_read_rpc_async(
	nfsnode_t np,
	off_t offset,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* referral triggers have no data to read */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, READ
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "read", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_READ);
	/* last arg 0 = read access; pick the stateid for this thread/cred */
	nfs_get_stateid(np, thd, cred, &stateid, 0);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, len);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}
394
/*
 * Complete an asynchronous NFSv4 READ RPC started by nfs4_read_rpc_async().
 *
 * On entry *lenp is the maximum number of bytes to copy into 'uio'; on
 * return it holds the number actually transferred.  If 'eofp' is non-NULL
 * it is set to the EOF indication from the server (forced to 1 when the
 * server returned no data and no EOF flag).  Returns 0, EINPROGRESS if the
 * request was restarted, or an errno.
 */
int
nfs4_read_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	uio_t uio,
	size_t *lenp,
	int *eofp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, numops, status, eof = 0;
	size_t retlen = 0;
	u_int64_t xid;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		/* mount is gone: abandon the in-flight request */
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
	nfsm_chain_get_32(error, &nmrep, eof);
	nfsm_chain_get_32(error, &nmrep, retlen);
	if (!error) {
		/* never copy more than the caller asked for */
		*lenp = MIN(retlen, *lenp);
		error = nfsm_chain_get_uio(&nmrep, *lenp, uio);
	}
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	if (eofp) {
		/* a zero-byte read with no EOF flag is treated as EOF */
		if (!eof && !retlen) {
			eof = 1;
		}
		*eofp = eof;
	}
	nfsm_chain_cleanup(&nmrep);
	/* track last I/O time on named-attribute nodes */
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		microuptime(&np->n_lastio);
	}
	return error;
}
451
/*
 * Start an asynchronous NFSv4 WRITE RPC for 'len' bytes of 'uio' to 'np'.
 *
 * 'iomode' is the requested stability (UNSTABLE/DATASYNC/FILESYNC); it is
 * downgraded to UNSTABLE on MNT_ASYNC mounts when async writes are allowed.
 * The caller completes the request with nfs4_write_rpc_async_finish().
 * Returns 0 or an errno.
 *
 * Compound sent: PUTFH, WRITE, GETATTR.
 */
int
nfs4_write_rpc_async(
	nfsnode_t np,
	uio_t uio,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	int iomode,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	mount_t mp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* referral triggers aren't writable objects */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/* for async mounts, don't bother sending sync write requests */
	if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		iomode = NFS_WRITE_UNSTABLE;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, WRITE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED + len);
	nfsm_chain_add_compound_header(error, &nmreq, "write", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_WRITE);
	/* last arg 1 = write access; pick the stateid for this thread/cred */
	nfs_get_stateid(np, thd, cred, &stateid, 1);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
	nfsm_chain_add_32(error, &nmreq, iomode);
	nfsm_chain_add_32(error, &nmreq, len);
	if (!error) {
		error = nfsm_chain_add_uio(&nmreq, uio, len);
	}
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs4_getattr_write_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	/* R_NOUMOUNTINTR: don't let an unmount interrupt a pending write */
	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    thd, cred, &si, R_NOUMOUNTINTR, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}
518
/*
 * Complete an asynchronous NFSv4 WRITE RPC started by nfs4_write_rpc_async().
 *
 * *rlenp receives the number of bytes the server wrote, *iomodep the
 * committed stability level, and *wverfp (if non-NULL) the server's write
 * verifier.  The mount's cached write verifier is updated so a verifier
 * change can later force a re-commit.  Returns 0, EINPROGRESS if the
 * request was restarted, or an errno.
 */
int
nfs4_write_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	int *iomodep,
	size_t *rlenp,
	uint64_t *wverfp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, nfsvers, numops, status;
	int committed = NFS_WRITE_FILESYNC;
	size_t rlen = 0;
	u_int64_t xid, wverf;
	mount_t mp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		/* mount is gone: abandon the in-flight request */
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}
	/* re-check the mount; it may have died while we waited */
	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
	}
	if (!error && (lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_WRITE);
	nfsm_chain_get_32(error, &nmrep, rlen);
	nfsmout_if(error);
	*rlenp = rlen;
	/* a successful WRITE must make progress; zero bytes is an error */
	if (rlen <= 0) {
		error = NFSERR_IO;
	}
	nfsm_chain_get_32(error, &nmrep, committed);
	nfsm_chain_get_64(error, &nmrep, wverf);
	nfsmout_if(error);
	if (wverfp) {
		*wverfp = wverf;
	}
	/* record/update the mount's write verifier under the mount lock */
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		nmp->nm_verf = wverf;
		nmp->nm_state |= NFSSTA_HASWRITEVERF;
	} else if (nmp->nm_verf != wverf) {
		nmp->nm_verf = wverf;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);

	/*
	 * NFSv4 WRITE RPCs contain partial GETATTR requests - only type, change, size, metadatatime and modifytime are requested.
	 * In such cases, we do not update the time stamp - but the requested attributes.
	 */
	np->n_vattr.nva_flags |= NFS_FFLAG_PARTIAL_WRITE;
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	np->n_vattr.nva_flags &= ~NFS_FFLAG_PARTIAL_WRITE;

nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmrep);
	/* on MNT_ASYNC mounts we pretend the write was FILESYNC (no commit needed) */
	if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		committed = NFS_WRITE_FILESYNC;
	}
	*iomodep = committed;
	/* track last I/O time on named-attribute nodes */
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		microuptime(&np->n_lastio);
	}
	return error;
}
604
/*
 * NFSv4 REMOVE RPC: delete entry 'name' (length 'namelen') from directory
 * 'dnp'.
 *
 * Returns the REMOVE operation's result (remove_error), not any later
 * attribute-parsing error — a failure to decode the trailing GETATTR must
 * not make a successful unlink look failed.  Retries after a short sleep
 * while the server is in its grace period (NFSERR_GRACE).
 *
 * Compound sent: PUTFH, REMOVE, GETATTR.
 */
int
nfs4_remove_rpc(
	nfsnode_t dnp,
	char *name,
	int namelen,
	thread_t thd,
	kauth_cred_t cred)
{
	int error = 0, lockerror = ENOENT, remove_error = 0, status;
	struct nfsmount *nmp;
	int nfsvers, numops;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* can't remove entries from a referral trigger */
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
restart:
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, REMOVE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "remove", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_REMOVE);
	nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	/* R_NOUMOUNTINTR: don't let an unmount interrupt a pending remove */
	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
	    thd, cred, &si, R_NOUMOUNTINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_REMOVE);
	/* snapshot the REMOVE result before parsing the rest of the reply */
	remove_error = error;
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
	if (error && !lockerror) {
		/* couldn't refresh the directory's attributes; drop the cache */
		NATTRINVALIDATE(dnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror) {
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
	}
	if (error == NFSERR_GRACE) {
		/* server in grace period: wait a couple seconds and retry */
		tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
		goto restart;
	}

	return remove_error;
}
682
/*
 * NFSv4 RENAME RPC: rename (fdnp, fnameptr) to (tdnp, tnameptr).
 *
 * Both directories are locked (nfs_node_lock2) while the reply is parsed
 * and have their cached attributes refreshed from the trailing GETATTRs;
 * on a parse failure the respective directory's attribute cache is
 * invalidated instead.  Returns 0 or an errno.
 *
 * Compound sent: PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO),
 * RESTOREFH, GETATTR(FROM).
 */
int
nfs4_rename_rpc(
	nfsnode_t fdnp,
	char *fnameptr,
	int fnamelen,
	nfsnode_t tdnp,
	char *tnameptr,
	int tnamelen,
	vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsmount *nmp;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(fdnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* neither directory may be a referral trigger */
	if (fdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, fdnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 30 * NFSX_UNSIGNED + fnamelen + tnamelen);
	nfsm_chain_add_compound_header(error, &nmreq, "rename", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_RENAME);
	nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp);
	nfsm_chain_add_name(error, &nmreq, tnameptr, tnamelen, nmp);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, fdnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	/* R_NOUMOUNTINTR: don't let an unmount interrupt a pending rename */
	error = nfs_request2(fdnp, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, R_NOUMOUNTINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock2(fdnp, tdnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RENAME);
	nfsm_chain_check_change_info(error, &nmrep, fdnp);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	/* both GETATTR replies belong to the same request; reuse its xid */
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(tdnp);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, fdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(fdnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		fdnp->n_flag |= NMODIFIED;
		tdnp->n_flag |= NMODIFIED;
		nfs_node_unlock2(fdnp, tdnp);
	}
	return error;
}
781
782 /*
783 * NFS V4 readdir RPC.
784 */
785 int
nfs4_readdir_rpc(nfsnode_t dnp,struct nfsbuf * bp,vfs_context_t ctx)786 nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
787 {
788 struct nfsmount *nmp;
789 int error = 0, lockerror, nfsvers, namedattr, rdirplus, bigcookies, numops;
790 int i, status, more_entries = 1, eof, bp_dropped = 0;
791 uint16_t namlen, reclen;
792 uint32_t nmreaddirsize, nmrsize;
793 uint32_t namlen32, skiplen, fhlen, xlen, attrlen;
794 uint64_t padlen, cookie, lastcookie, xid, savedxid, space_free, space_needed;
795 struct nfsm_chain nmreq, nmrep, nmrepsave;
796 fhandle_t *fh;
797 struct nfs_vattr *nvattr, *nvattrp;
798 struct nfs_dir_buf_header *ndbhp;
799 struct direntry *dp;
800 char *padstart;
801 const char *tag;
802 uint32_t entry_attrs[NFS_ATTR_BITMAP_LEN];
803 struct timeval now;
804 struct nfsreq_secinfo_args si;
805
806 nmp = NFSTONMP(dnp);
807 if (nfs_mount_gone(nmp)) {
808 return ENXIO;
809 }
810 nfsvers = nmp->nm_vers;
811 nmreaddirsize = nmp->nm_readdirsize;
812 nmrsize = nmp->nm_rsize;
813 bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
814 namedattr = (dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) ? 1 : 0;
815 rdirplus = (NMFLAG(nmp, RDIRPLUS) || namedattr) ? 1 : 0;
816 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
817 return EINVAL;
818 }
819 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
820
821 /*
822 * Set up attribute request for entries.
823 * For READDIRPLUS functionality, get everything.
824 * Otherwise, just get what we need for struct direntry.
825 */
826 if (rdirplus) {
827 tag = "readdirplus";
828 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, entry_attrs);
829 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEHANDLE);
830 } else {
831 tag = "readdir";
832 NFS_CLEAR_ATTRIBUTES(entry_attrs);
833 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_TYPE);
834 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEID);
835 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_MOUNTED_ON_FILEID);
836 }
837 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_RDATTR_ERROR);
838
839 /* lock to protect access to cookie verifier */
840 if ((lockerror = nfs_node_lock(dnp))) {
841 return lockerror;
842 }
843
844 fh = zalloc(nfs_fhandle_zone);
845 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
846
847 /* determine cookie to use, and move dp to the right offset */
848 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
849 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
850 if (ndbhp->ndbh_count) {
851 for (i = 0; i < ndbhp->ndbh_count - 1; i++) {
852 dp = NFS_DIRENTRY_NEXT(dp);
853 }
854 cookie = dp->d_seekoff;
855 dp = NFS_DIRENTRY_NEXT(dp);
856 } else {
857 cookie = bp->nb_lblkno;
858 /* increment with every buffer read */
859 OSAddAtomic64(1, &nfsclntstats.readdir_bios);
860 }
861 lastcookie = cookie;
862
863 /*
864 * The NFS client is responsible for the "." and ".." entries in the
865 * directory. So, we put them at the start of the first buffer.
866 * Don't bother for attribute directories.
867 */
868 if (((bp->nb_lblkno == 0) && (ndbhp->ndbh_count == 0)) &&
869 !(dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
870 fh->fh_len = 0;
871 fhlen = rdirplus ? fh->fh_len + 1 : 0;
872 xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
873 /* "." */
874 namlen = 1;
875 reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
876 if (xlen) {
877 bzero(&dp->d_name[namlen + 1], xlen);
878 }
879 dp->d_namlen = namlen;
880 strlcpy(dp->d_name, ".", namlen + 1);
881 dp->d_fileno = dnp->n_vattr.nva_fileid;
882 dp->d_type = DT_DIR;
883 dp->d_reclen = reclen;
884 dp->d_seekoff = 1;
885 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
886 dp = NFS_DIRENTRY_NEXT(dp);
887 padlen = (char*)dp - padstart;
888 if (padlen > 0) {
889 bzero(padstart, padlen);
890 }
891 if (rdirplus) { /* zero out attributes */
892 bzero(NFS_DIR_BUF_NVATTR(bp, 0), sizeof(struct nfs_vattr));
893 }
894
895 /* ".." */
896 namlen = 2;
897 reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
898 if (xlen) {
899 bzero(&dp->d_name[namlen + 1], xlen);
900 }
901 dp->d_namlen = namlen;
902 strlcpy(dp->d_name, "..", namlen + 1);
903 if (dnp->n_parent) {
904 dp->d_fileno = VTONFS(dnp->n_parent)->n_vattr.nva_fileid;
905 } else {
906 dp->d_fileno = dnp->n_vattr.nva_fileid;
907 }
908 dp->d_type = DT_DIR;
909 dp->d_reclen = reclen;
910 dp->d_seekoff = 2;
911 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
912 dp = NFS_DIRENTRY_NEXT(dp);
913 padlen = (char*)dp - padstart;
914 if (padlen > 0) {
915 bzero(padstart, padlen);
916 }
917 if (rdirplus) { /* zero out attributes */
918 bzero(NFS_DIR_BUF_NVATTR(bp, 1), sizeof(struct nfs_vattr));
919 }
920
921 ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
922 ndbhp->ndbh_count = 2;
923 }
924
925 /*
926 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
927 * the buffer is full (or we hit EOF). Then put the remainder of the
928 * results in the next buffer(s).
929 */
930 nfsm_chain_null(&nmreq);
931 nfsm_chain_null(&nmrep);
932 while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {
933 // PUTFH, GETATTR, READDIR
934 numops = 3;
935 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
936 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
937 numops--;
938 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
939 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
940 numops--;
941 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
942 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
943 numops--;
944 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_READDIR);
945 nfsm_chain_add_64(error, &nmreq, (cookie <= 2) ? 0 : cookie);
946 nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
947 nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
948 nfsm_chain_add_32(error, &nmreq, nmrsize);
949 nfsm_chain_add_bitmap_supported(error, &nmreq, entry_attrs, nmp, dnp);
950 nfsm_chain_build_done(error, &nmreq);
951 nfsm_assert(error, (numops == 0), EPROTO);
952 nfs_node_unlock(dnp);
953 nfsmout_if(error);
954 error = nfs_request(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
955
956 if ((lockerror = nfs_node_lock(dnp))) {
957 error = lockerror;
958 }
959
960 savedxid = xid;
961 nfsm_chain_skip_tag(error, &nmrep);
962 nfsm_chain_get_32(error, &nmrep, numops);
963 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
964 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
965 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
966 nfsm_chain_op_check(error, &nmrep, NFS_OP_READDIR);
967 nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
968 nfsm_chain_get_32(error, &nmrep, more_entries);
969
970 if (!lockerror) {
971 nfs_node_unlock(dnp);
972 lockerror = ENOENT;
973 }
974 nfsmout_if(error);
975
976 if (rdirplus) {
977 microuptime(&now);
978 if (lastcookie == 0) {
979 dnp->n_rdirplusstamp_sof = now.tv_sec;
980 dnp->n_rdirplusstamp_eof = 0;
981 }
982 }
983
984 /* loop through the entries packing them into the buffer */
985 while (more_entries) {
986 /* Entry: COOKIE, NAME, FATTR */
987 nfsm_chain_get_64(error, &nmrep, cookie);
988 nfsm_chain_get_32(error, &nmrep, namlen32);
989 if (namlen32 > UINT16_MAX) {
990 error = EBADRPC;
991 goto nfsmout;
992 }
993 namlen = (uint16_t)namlen32;
994 nfsmout_if(error);
995 if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
996 /* we've got a big cookie, make sure flag is set */
997 lck_mtx_lock(&nmp->nm_lock);
998 nmp->nm_state |= NFSSTA_BIGCOOKIES;
999 lck_mtx_unlock(&nmp->nm_lock);
1000 bigcookies = 1;
1001 }
1002 /* just truncate names that don't fit in direntry.d_name */
1003 if (namlen <= 0) {
1004 error = EBADRPC;
1005 goto nfsmout;
1006 }
1007 if (namlen > (sizeof(dp->d_name) - 1)) {
1008 skiplen = namlen - sizeof(dp->d_name) + 1;
1009 namlen = sizeof(dp->d_name) - 1;
1010 } else {
1011 skiplen = 0;
1012 }
1013 /* guess that fh size will be same as parent */
1014 fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
1015 xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
1016 attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
1017 reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
1018 space_needed = reclen + attrlen;
1019 space_free = nfs_dir_buf_freespace(bp, rdirplus);
1020 if (space_needed > space_free) {
1021 /*
1022 * We still have entries to pack, but we've
1023 * run out of room in the current buffer.
1024 * So we need to move to the next buffer.
1025 * The block# for the next buffer is the
1026 * last cookie in the current buffer.
1027 */
1028 nextbuffer:
1029 ndbhp->ndbh_flags |= NDB_FULL;
1030 nfs_buf_release(bp, 0);
1031 bp_dropped = 1;
1032 bp = NULL;
1033 error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
1034 nfsmout_if(error);
1035 /* initialize buffer */
1036 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
1037 ndbhp->ndbh_flags = 0;
1038 ndbhp->ndbh_count = 0;
1039 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
1040 ndbhp->ndbh_ncgen = dnp->n_ncgen;
1041 space_free = nfs_dir_buf_freespace(bp, rdirplus);
1042 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
1043 /* increment with every buffer read */
1044 OSAddAtomic64(1, &nfsclntstats.readdir_bios);
1045 }
1046 nmrepsave = nmrep;
1047 dp->d_fileno = cookie; /* placeholder */
1048 dp->d_seekoff = cookie;
1049 dp->d_namlen = namlen;
1050 dp->d_reclen = reclen;
1051 dp->d_type = DT_UNKNOWN;
1052 nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
1053 nfsmout_if(error);
1054 dp->d_name[namlen] = '\0';
1055 if (skiplen) {
1056 nfsm_chain_adv(error, &nmrep,
1057 nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
1058 }
1059 nfsmout_if(error);
1060 nvattrp = rdirplus ? NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : nvattr;
1061 error = nfs4_parsefattr(&nmrep, NULL, nvattrp, fh, NULL, NULL);
1062 if (!error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_ACL)) {
1063 /* we do NOT want ACLs returned to us here */
1064 NFS_BITMAP_CLR(nvattrp->nva_bitmap, NFS_FATTR_ACL);
1065 if (nvattrp->nva_acl) {
1066 kauth_acl_free(nvattrp->nva_acl);
1067 nvattrp->nva_acl = NULL;
1068 }
1069 }
1070 if (error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_RDATTR_ERROR)) {
1071 /* OK, we may not have gotten all of the attributes but we will use what we can. */
1072 if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
1073 /* set this up to look like a referral trigger */
1074 nfs4_default_attrs_for_referral_trigger(dnp, dp->d_name, namlen, nvattrp, fh);
1075 }
1076 error = 0;
1077 }
1078 /* check for more entries after this one */
1079 nfsm_chain_get_32(error, &nmrep, more_entries);
1080 nfsmout_if(error);
1081
1082 /* Skip any "." and ".." entries returned from server. */
1083 /* Also skip any bothersome named attribute entries. */
1084 if (((dp->d_name[0] == '.') && ((namlen == 1) || ((namlen == 2) && (dp->d_name[1] == '.')))) ||
1085 (namedattr && (namlen == 11) && (!strcmp(dp->d_name, "SUNWattr_ro") || !strcmp(dp->d_name, "SUNWattr_rw")))) {
1086 lastcookie = cookie;
1087 continue;
1088 }
1089
1090 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE)) {
1091 dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
1092 }
1093 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID)) {
1094 dp->d_fileno = nvattrp->nva_fileid;
1095 }
1096 if (rdirplus) {
1097 /* fileid is already in d_fileno, so stash xid in attrs */
1098 nvattrp->nva_fileid = savedxid;
1099 nvattrp->nva_flags |= NFS_FFLAG_FILEID_CONTAINS_XID;
1100 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
1101 fhlen = fh->fh_len + 1;
1102 xlen = fhlen + sizeof(time_t);
1103 reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
1104 space_needed = reclen + attrlen;
1105 if (space_needed > space_free) {
1106 /* didn't actually have the room... move on to next buffer */
1107 nmrep = nmrepsave;
1108 goto nextbuffer;
1109 }
1110 /* pack the file handle into the record */
1111 dp->d_name[dp->d_namlen + 1] = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
1112 bcopy(fh->fh_data, &dp->d_name[dp->d_namlen + 2], fh->fh_len);
1113 } else {
1114 /* mark the file handle invalid */
1115 fh->fh_len = 0;
1116 fhlen = fh->fh_len + 1;
1117 xlen = fhlen + sizeof(time_t);
1118 reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
1119 bzero(&dp->d_name[dp->d_namlen + 1], fhlen);
1120 }
1121 *(time_t*)(&dp->d_name[dp->d_namlen + 1 + fhlen]) = now.tv_sec;
1122 dp->d_reclen = reclen;
1123 nfs_rdirplus_update_node_attrs(dnp, dp, fh, nvattrp, &savedxid);
1124 }
1125 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
1126 ndbhp->ndbh_count++;
1127 lastcookie = cookie;
1128
1129 /* advance to next direntry in buffer */
1130 dp = NFS_DIRENTRY_NEXT(dp);
1131 ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
1132 /* zero out the pad bytes */
1133 padlen = (char*)dp - padstart;
1134 if (padlen > 0) {
1135 bzero(padstart, padlen);
1136 }
1137 }
1138 /* Finally, get the eof boolean */
1139 nfsm_chain_get_32(error, &nmrep, eof);
1140 nfsmout_if(error);
1141 if (eof) {
1142 ndbhp->ndbh_flags |= (NDB_FULL | NDB_EOF);
1143 nfs_node_lock_force(dnp);
1144 dnp->n_eofcookie = lastcookie;
1145 if (rdirplus) {
1146 dnp->n_rdirplusstamp_eof = now.tv_sec;
1147 }
1148 nfs_node_unlock(dnp);
1149 } else {
1150 more_entries = 1;
1151 }
1152 if (bp_dropped) {
1153 nfs_buf_release(bp, 0);
1154 bp = NULL;
1155 break;
1156 }
1157 if ((lockerror = nfs_node_lock(dnp))) {
1158 error = lockerror;
1159 }
1160 nfsmout_if(error);
1161 nfsm_chain_cleanup(&nmrep);
1162 nfsm_chain_null(&nmreq);
1163 }
1164 nfsmout:
1165 if (bp_dropped && bp) {
1166 nfs_buf_release(bp, 0);
1167 }
1168 if (!lockerror) {
1169 nfs_node_unlock(dnp);
1170 }
1171 nfsm_chain_cleanup(&nmreq);
1172 nfsm_chain_cleanup(&nmrep);
1173 NFS_ZFREE(nfs_fhandle_zone, fh);
1174 zfree(KT_NFS_VATTR, nvattr);
1175 return bp_dropped ? NFSERR_DIRBUFDROPPED : error;
1176 }
1177
/*
 * Send an asynchronous NFSv4 LOOKUP request for "name" in directory "dnp".
 * A name of ".." is sent as LOOKUPP instead.  The compound sent is:
 * PUTFH, GETATTR(dir), LOOKUP(P), GETFH, GETATTR(result).
 * On success the in-flight request is returned through *reqp and the
 * caller collects the reply with nfs4_lookup_rpc_async_finish().
 */
int
nfs4_lookup_rpc_async(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq **reqp)
{
	int error = 0, isdotdot = 0, nfsvers, numops;
	struct nfsm_chain nmreq;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsmount *nmp;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* referral trigger nodes are not looked up here */
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		/* ".." is sent as LOOKUPP, which takes no name argument */
		isdotdot = 1;
		NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	} else {
		NFSREQ_SECINFO_SET(&si, dnp, dnp->n_fhp, dnp->n_fhsize, name, namelen);
	}

	nfsm_chain_null(&nmreq);

	// PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
	numops = 5;
	nfsm_chain_build_alloc_init(error, &nmreq, 20 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "lookup", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	numops--;
	if (isdotdot) {
		nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_LOOKUPP);
	} else {
		nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_LOOKUP);
		nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	}
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETFH);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	/* some ".zfs" directories can't handle being asked for some attributes */
	if ((dnp->n_flag & NISDOTZFS) && !isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if ((dnp->n_flag & NISDOTZFSCHILD) && isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if (((namelen == 4) && (name[0] == '.') && (name[1] == 'z') && (name[2] == 'f') && (name[3] == 's'))) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}
1252
1253
/*
 * Collect the reply for an asynchronous NFSv4 LOOKUP/LOOKUPP request
 * started by nfs4_lookup_rpc_async().  On success, returns the looked-up
 * file handle in *fhp and its attributes in *nvap (both optional) and
 * the reply XID in *xidp.  NFSERR_MOVED/NFSERR_INVAL from the final
 * GETATTR is converted into referral-trigger placeholder attributes
 * instead of being treated as an error.
 */
int
nfs4_lookup_rpc_async_finish(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq *req,
	u_int64_t *xidp,
	fhandle_t *fhp,
	struct nfs_vattr *nvap)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops, isdotdot = 0;
	/*
	 * NOTE(review): op is never reassigned, so the SECINFO check near the
	 * end always sees NFS_OP_LOOKUP (even for ".." LOOKUPP replies) --
	 * confirm whether LOOKUPP was meant to be excluded there.
	 */
	uint32_t op = NFS_OP_LOOKUP;
	u_int64_t xid;
	struct nfsmount *nmp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(dnp);
	if (nmp == NULL) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		isdotdot = 1;
	}

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if (xidp) {
		*xidp = xid;
	}
	/* load the directory's post-op attributes into its cache */
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);

	nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP));
	nfsmout_if(error || !fhp || !nvap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
	nfsm_chain_get_32(error, &nmrep, fhp->fh_len);
	/* reject file handles too large for our buffer */
	if (error == 0 && fhp->fh_len > sizeof(fhp->fh_data)) {
		error = EBADRPC;
	}
	nfsmout_if(error);
	nfsm_chain_get_opaque(error, &nmrep, fhp->fh_len, fhp->fh_data);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
		/* set this up to look like a referral trigger */
		nfs4_default_attrs_for_referral_trigger(dnp, name, namelen, nvap, fhp);
		error = 0;
	} else {
		nfsmout_if(error);
		error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(dnp);
	}
	nfsm_chain_cleanup(&nmrep);
	if (!error && (op == NFS_OP_LOOKUP) && (nmp->nm_state & NFSSTA_NEEDSECINFO)) {
		/* We still need to get SECINFO to set default for mount. */
		/* Do so for the first LOOKUP that returns successfully. */
		struct nfs_sec sec;

		sec.count = NX_MAX_SEC_FLAVORS;
		error = nfs4_secinfo_rpc(nmp, &req->r_secinfo, vfs_context_ucred(ctx), sec.flavors, &sec.count);
		/* [sigh] some implementations return "illegal" error for unsupported ops */
		if (error == NFSERR_OP_ILLEGAL) {
			error = 0;
		}
		if (!error) {
			/* set our default security flavor to the first in the list */
			lck_mtx_lock(&nmp->nm_lock);
			if (sec.count) {
				nmp->nm_auth = sec.flavors[0];
			}
			nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}
	return error;
}
1342
/*
 * Send an NFSv4 COMMIT (compound: PUTFH, COMMIT, GETATTR) for the given
 * byte range of "np" so the server flushes previously UNSTABLE writes to
 * stable storage.  If the server's write verifier no longer matches the
 * one the data was written under ("wverf"), NFSERR_STALEWRITEVERF is
 * returned so the caller knows the dirty data must be rewritten.
 */
int
nfs4_commit_rpc(
	nfsnode_t np,
	uint64_t offset,
	uint64_t count,
	kauth_cred_t cred,
	uint64_t wverf)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, status, nfsvers, numops;
	u_int64_t xid, newwverf;
	uint32_t count32;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	/* no write verifier yet means there are no unstable writes to commit */
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		return 0;
	}
	nfsvers = nmp->nm_vers;
	/* the on-the-wire count is 32 bits; send 0 (commit to EOF per the NFSv4 protocol) if it doesn't fit */
	count32 = count > UINT32_MAX ? 0 : (uint32_t)count;

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, COMMIT, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 19 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "commit", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_COMMIT);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, count32);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    current_thread(), cred, &si, R_NOUMOUNTINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_COMMIT);
	nfsm_chain_get_64(error, &nmrep, newwverf);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsmout_if(error);
	/* remember the server's current verifier and compare with the caller's */
	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_verf != newwverf) {
		nmp->nm_verf = newwverf;
	}
	if (wverf != newwverf) {
		error = NFSERR_STALEWRITEVERF;
	}
	lck_mtx_unlock(&nmp->nm_lock);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
1423
/*
 * Fetch "pathconf" information for "np".  NFSv4 has no PATHCONF
 * operation, so this sends PUTFH+GETATTR requesting the pathconf-related
 * attributes (maxlink, maxname, no_trunc, chown_restricted,
 * case_insensitive, case_preserving); nfs4_parsefattr() fills them into
 * *nfsap and the remaining attributes are loaded into the node's cache.
 */
int
nfs4_pathconf_rpc(
	nfsnode_t np,
	struct nfs_fsattr *nfsap,
	vfs_context_t ctx)
{
	u_int64_t xid;
	int error = 0, lockerror, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	struct nfsmount *nmp = NFSTONMP(np);
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfs_vattr *nvattr;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
	NVATTR_INIT(nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* NFSv4: fetch "pathconf" info for this node */
	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "pathconf", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	/* ask for the usual attributes plus the pathconf-specific ones */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXLINK);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXNAME);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_NO_TRUNC);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CHOWN_RESTRICTED);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_INSENSITIVE);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_PRESERVING);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, nfsap, nvattr, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	if (!error) {
		/* cache whatever regular attributes came back too */
		nfs_loadattrcache(np, nvattr, &xid, 0);
	}
	if (!lockerror) {
		nfs_node_unlock(np);
	}
nfsmout:
	NVATTR_CLEANUP(nvattr);
	zfree(KT_NFS_VATTR, nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
1498
/*
 * NFSv4 getattr vnode op.  Fetches (possibly cached) attributes for the
 * node via nfs_getattr() -- including the ACL when requested and the
 * server supports ACLs -- and copies the ones the caller asked for into
 * the vnode_attr structure.
 */
int
nfs4_vnop_getattr(
	struct vnop_getattr_args /* {
                                  *  struct vnodeop_desc *a_desc;
                                  *  vnode_t a_vp;
                                  *  struct vnode_attr *a_vap;
                                  *  vfs_context_t a_context;
                                  *  } */*ap)
{
	struct vnode_attr *vap = ap->a_vap;
	struct nfsmount *nmp;
	struct nfs_vattr *nva;
	int error, acls, ngaflags;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* does this mount support ACLs? */
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	ngaflags = NGA_CACHED;
	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		ngaflags |= NGA_ACL;
	}
	nva = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
	error = nfs_getattr(VTONFS(ap->a_vp), nva, ap->a_context, ngaflags);
	if (error) {
		goto out;
	}

	/* copy what we have in nva to *a_vap */
	if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_RAWDEV)) {
		dev_t rdev = makedev(nva->nva_rawdev.specdata1, nva->nva_rawdev.specdata2);
		VATTR_RETURN(vap, va_rdev, rdev);
	}
	if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_NUMLINKS)) {
		VATTR_RETURN(vap, va_nlink, nva->nva_nlink);
	}
	if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_SIZE)) {
		VATTR_RETURN(vap, va_data_size, nva->nva_size);
	}
	// VATTR_RETURN(vap, va_data_alloc, ???);
	// VATTR_RETURN(vap, va_total_size, ???);
	if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_SPACE_USED)) {
		VATTR_RETURN(vap, va_total_alloc, nva->nva_bytes);
	}
	if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER)) {
		VATTR_RETURN(vap, va_uid, nva->nva_uid);
	}
	if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER)) {
		VATTR_RETURN(vap, va_uuuid, nva->nva_uuuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
		VATTR_RETURN(vap, va_gid, nva->nva_gid);
	}
	if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
		VATTR_RETURN(vap, va_guuid, nva->nva_guuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		/* with no mode (ACL-only or mode unsupported), report full access permissions */
		if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_MODE)) {
			VATTR_RETURN(vap, va_mode, ACCESSPERMS);
		} else {
			VATTR_RETURN(vap, va_mode, nva->nva_mode);
		}
	}
	/* map NFS archive/hidden attributes to BSD file flags */
	if (VATTR_IS_ACTIVE(vap, va_flags) &&
	    (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_ARCHIVE) ||
	    NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_HIDDEN) ||
	    (nva->nva_flags & NFS_FFLAG_TRIGGER))) {
		uint32_t flags = 0;
		if (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_ARCHIVE) &&
		    (nva->nva_flags & NFS_FFLAG_ARCHIVED)) {
			flags |= SF_ARCHIVED;
		}
		if (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_HIDDEN) &&
		    (nva->nva_flags & NFS_FFLAG_HIDDEN)) {
			flags |= UF_HIDDEN;
		}
		VATTR_RETURN(vap, va_flags, flags);
	}
	if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_CREATE)) {
		vap->va_create_time.tv_sec = nva->nva_timesec[NFSTIME_CREATE];
		vap->va_create_time.tv_nsec = nva->nva_timensec[NFSTIME_CREATE];
		VATTR_SET_SUPPORTED(vap, va_create_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_access_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_ACCESS)) {
		vap->va_access_time.tv_sec = nva->nva_timesec[NFSTIME_ACCESS];
		vap->va_access_time.tv_nsec = nva->nva_timensec[NFSTIME_ACCESS];
		VATTR_SET_SUPPORTED(vap, va_access_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_modify_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_MODIFY)) {
		vap->va_modify_time.tv_sec = nva->nva_timesec[NFSTIME_MODIFY];
		vap->va_modify_time.tv_nsec = nva->nva_timensec[NFSTIME_MODIFY];
		VATTR_SET_SUPPORTED(vap, va_modify_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_change_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_METADATA)) {
		vap->va_change_time.tv_sec = nva->nva_timesec[NFSTIME_CHANGE];
		vap->va_change_time.tv_nsec = nva->nva_timensec[NFSTIME_CHANGE];
		VATTR_SET_SUPPORTED(vap, va_change_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_backup_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_BACKUP)) {
		vap->va_backup_time.tv_sec = nva->nva_timesec[NFSTIME_BACKUP];
		vap->va_backup_time.tv_nsec = nva->nva_timensec[NFSTIME_BACKUP];
		VATTR_SET_SUPPORTED(vap, va_backup_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_FILEID)) {
		VATTR_RETURN(vap, va_fileid, nva->nva_fileid);
	}
	if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TYPE)) {
		VATTR_RETURN(vap, va_type, nva->nva_type);
	}
	if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_CHANGE)) {
		VATTR_RETURN(vap, va_filerev, nva->nva_change);
	}

	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		/* hand the ACL to the caller; clearing nva_acl transfers ownership (NVATTR_CLEANUP won't free it) */
		VATTR_RETURN(vap, va_acl, nva->nva_acl);
		nva->nva_acl = NULL;
	}

	// other attrs we might support someday:
	// VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);

	NVATTR_CLEANUP(nva);
out:
	zfree(KT_NFS_VATTR, nva);
	return NFS_MAPERR(error);
}
1627
/*
 * Set attributes on "np" with an NFSv4 SETATTR compound
 * (PUTFH, SETATTR, GETATTR).  Marks which attributes the server
 * actually accepted (via the returned attrsset bitmap), invalidates
 * cached attributes on failure, and retries without the mode if the
 * server appears to have rejected an ACL+mode combination.
 */
int
nfs4_setattr_rpc(
	nfsnode_t np,
	struct vnode_attr *vap,
	vfs_context_t ctx)
{
	struct nfsmount *nmp = NFSTONMP(np);
	int error = 0, setattr_error = 0, lockerror = ENOENT, status, nfsvers, numops;
	u_int64_t xid, nextxid;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
	uint32_t getbitmap[NFS_ATTR_BITMAP_LEN];
	uint32_t setbitmap[NFS_ATTR_BITMAP_LEN];
	nfs_stateid stateid;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	bzero(&setbitmap, sizeof(setbitmap));
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED | UF_HIDDEN))) {
		/* we don't support setting unsupported flags (duh!) */
		if (vap->va_active & ~VNODE_ATTR_va_flags) {
			return EINVAL;        /* return EINVAL if other attributes also set */
		} else {
			return ENOTSUP;        /* return ENOTSUP for chflags(2) */
		}
	}

	/* don't bother requesting some changes if they don't look like they are changing */
	if (VATTR_IS_ACTIVE(vap, va_uid) && (vap->va_uid == np->n_vattr.nva_uid)) {
		VATTR_CLEAR_ACTIVE(vap, va_uid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid) && (vap->va_gid == np->n_vattr.nva_gid)) {
		VATTR_CLEAR_ACTIVE(vap, va_gid);
	}
	if (VATTR_IS_ACTIVE(vap, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &np->n_vattr.nva_uuuid)) {
		VATTR_CLEAR_ACTIVE(vap, va_uuuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_guuid) && kauth_guid_equal(&vap->va_guuid, &np->n_vattr.nva_guuid)) {
		VATTR_CLEAR_ACTIVE(vap, va_guuid);
	}

tryagain:
	/* do nothing if no attributes will be sent */
	nfs_vattr_set_bitmap(nmp, bitmap, vap);
	if (!bitmap[0] && !bitmap[1]) {
		return 0;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/*
	 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
	 * need to invalidate any cached ACL. And if we had an ACL cached,
	 * we might as well also fetch the new value.
	 */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, getbitmap);
	if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL) ||
	    NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) {
		if (NACLVALID(np)) {
			NFS_BITMAP_SET(getbitmap, NFS_FATTR_ACL);
		}
		NACLINVALIDATE(np);
	}

	// PUTFH, SETATTR, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 40 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "setattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_SETATTR);
	/* a size change (truncate) needs a real open/lock stateid; other changes use the zero stateid */
	if (VATTR_IS_ACTIVE(vap, va_data_size)) {
		nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid, 1);
	} else {
		stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
	}
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, getbitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, R_NOUMOUNTINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SETATTR);
	nfsmout_if(error == EBADRPC);
	/* stash the SETATTR result so we can still parse the attrsset bitmap that follows it */
	setattr_error = error;
	error = 0;
	bmlen = NFS_ATTR_BITMAP_LEN;
	nfsm_chain_get_bitmap(error, &nmrep, setbitmap, bmlen);
	if (!error) {
		if (VATTR_IS_ACTIVE(vap, va_data_size) && (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
			microuptime(&np->n_lastio);
		}
		nfs_vattr_set_supported(setbitmap, vap);
		error = setattr_error;
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (error) {
		NATTRINVALIDATE(np);
	}
	/*
	 * We just changed the attributes and we want to make sure that we
	 * see the latest attributes.  Get the next XID.  If it's not the
	 * next XID after the SETATTR XID, then it's possible that another
	 * RPC was in flight at the same time and it might put stale attributes
	 * in the cache.  In that case, we invalidate the attributes and set
	 * the attribute cache XID to guarantee that newer attributes will
	 * get loaded next.
	 */
	nextxid = 0;
	nfs_get_xid(&nextxid);
	if (nextxid != (xid + 1)) {
		np->n_xid = nextxid;
		NATTRINVALIDATE(np);
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if ((setattr_error == EINVAL) && VATTR_IS_ACTIVE(vap, va_acl) && VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) {
		/*
		 * Some server's may not like ACL/mode combos that get sent.
		 * If it looks like that's what the server choked on, try setting
		 * just the ACL and not the mode (unless it looks like everything
		 * but mode was already successfully set).
		 */
		/*
		 * NOTE(review): NFS_FATTR_MODE is a bit *number* used with the
		 * NFS_BITMAP_* macros, yet here it is OR'd into setbitmap[1] as
		 * if it were a mask -- compare with NFS_BITMAP_SET usage and
		 * confirm this is intended.
		 */
		if (((bitmap[0] & setbitmap[0]) != bitmap[0]) ||
		    ((bitmap[1] & (setbitmap[1] | NFS_FATTR_MODE)) != bitmap[1])) {
			VATTR_CLEAR_ACTIVE(vap, va_mode);
			error = 0;
			goto tryagain;
		}
	}
	return error;
}
1788 #endif /* CONFIG_NFS4 */
1789
1790 /*
1791 * Wait for any pending recovery to complete.
1792 */
1793 int
nfs_mount_state_wait_for_recovery(struct nfsmount * nmp)1794 nfs_mount_state_wait_for_recovery(struct nfsmount *nmp)
1795 {
1796 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
1797 int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
1798
1799 lck_mtx_lock(&nmp->nm_lock);
1800 while (nmp->nm_state & NFSSTA_RECOVER) {
1801 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1))) {
1802 break;
1803 }
1804 nfs_mount_sock_thread_wake(nmp);
1805 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
1806 slpflag = 0;
1807 }
1808 lck_mtx_unlock(&nmp->nm_lock);
1809
1810 return error;
1811 }
1812
1813 /*
1814 * We're about to use/manipulate NFS mount's open/lock state.
1815 * Wait for any pending state recovery to complete, then
1816 * mark the state as being in use (which will hold off
1817 * the recovery thread until we're done).
1818 */
int
nfs_mount_state_in_use_start(struct nfsmount *nmp, thread_t thd)
{
	/* Re-check the recovery flag every second while waiting. */
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	int error = 0, slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	lck_mtx_lock(&nmp->nm_lock);
	/* A force-unmounted or dead mount has no state worth using. */
	if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
		lck_mtx_unlock(&nmp->nm_lock);
		return ENXIO;
	}
	/* Wait for any in-progress state recovery to finish first. */
	while (nmp->nm_state & NFSSTA_RECOVER) {
		if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
			break;
		}
		nfs_mount_sock_thread_wake(nmp);
		msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
		slpflag = 0;
	}
	if (!error) {
		/* Nonzero nm_stateinuse holds off the recovery thread. */
		nmp->nm_stateinuse++;
	}
	lck_mtx_unlock(&nmp->nm_lock);

	return error;
}
1848
1849 /*
1850 * We're done using/manipulating the NFS mount's open/lock
1851 * state. If the given error indicates that recovery should
1852 * be performed, we'll initiate recovery.
1853 */
int
nfs_mount_state_in_use_end(struct nfsmount *nmp, int error)
{
	int restart = nfs_mount_state_error_should_restart(error);

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	lck_mtx_lock(&nmp->nm_lock);
	/*
	 * OLD_STATEID and GRACE are transient: the caller just retries,
	 * so don't kick off full state recovery for those.
	 */
	if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) {
		printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
		    error, vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid);
		nfs_need_recover(nmp, error);
	}
	/* Drop the in-use count taken in nfs_mount_state_in_use_start(). */
	if (nmp->nm_stateinuse > 0) {
		nmp->nm_stateinuse--;
	} else {
		panic("NFS mount state in use count underrun");
	}
	/* Last user out wakes the recovery thread if it's waiting on us. */
	if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER)) {
		wakeup(&nmp->nm_stateinuse);
	}
	lck_mtx_unlock(&nmp->nm_lock);
	/* Give the server's grace period a couple seconds before retrying. */
	if (error == NFSERR_GRACE) {
		tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
	}

	return restart;
}
1883
1884 /*
1885 * Does the error mean we should restart/redo a state-related operation?
1886 */
1887 int
nfs_mount_state_error_should_restart(int error)1888 nfs_mount_state_error_should_restart(int error)
1889 {
1890 switch (error) {
1891 case NFSERR_STALE_STATEID:
1892 case NFSERR_STALE_CLIENTID:
1893 case NFSERR_ADMIN_REVOKED:
1894 case NFSERR_EXPIRED:
1895 case NFSERR_OLD_STATEID:
1896 case NFSERR_BAD_STATEID:
1897 case NFSERR_GRACE:
1898 return 1;
1899 }
1900 return 0;
1901 }
1902
1903 /*
1904 * In some cases we may want to limit how many times we restart a
1905 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1906 * Base the limit on the lease (as long as it's not too short).
1907 */
1908 uint
nfs_mount_state_max_restarts(struct nfsmount * nmp)1909 nfs_mount_state_max_restarts(struct nfsmount *nmp)
1910 {
1911 return MAX(nmp->nm_fsattr.nfsa_lease, 60);
1912 }
1913
1914 /*
1915 * Does the error mean we probably lost a delegation?
1916 */
1917 int
nfs_mount_state_error_delegation_lost(int error)1918 nfs_mount_state_error_delegation_lost(int error)
1919 {
1920 switch (error) {
1921 case NFSERR_STALE_STATEID:
1922 case NFSERR_ADMIN_REVOKED:
1923 case NFSERR_EXPIRED:
1924 case NFSERR_OLD_STATEID:
1925 case NFSERR_BAD_STATEID:
1926 case NFSERR_GRACE: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
1927 return 1;
1928 }
1929 return 0;
1930 }
1931
1932
1933 /*
1934 * Mark an NFS node's open state as busy.
1935 */
int
nfs_open_state_set_busy(nfsnode_t np, thread_t thd)
{
	struct nfsmount *nmp;
	/* Re-check the busy flag every couple of seconds while waiting. */
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	int error = 0, slpflag;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;

	lck_mtx_lock(&np->n_openlock);
	while (np->n_openflags & N_OPENBUSY) {
		if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
			break;
		}
		/* Tell the current holder someone is waiting to be woken. */
		np->n_openflags |= N_OPENWANT;
		msleep(&np->n_openflags, &np->n_openlock, slpflag, "nfs_open_state_set_busy", &ts);
		slpflag = 0;
	}
	if (!error) {
		np->n_openflags |= N_OPENBUSY;
	}
	lck_mtx_unlock(&np->n_openlock);

	return error;
}
1965
1966 /*
1967 * Clear an NFS node's open state busy flag and wake up
1968 * anyone wanting it.
1969 */
1970 void
nfs_open_state_clear_busy(nfsnode_t np)1971 nfs_open_state_clear_busy(nfsnode_t np)
1972 {
1973 int wanted;
1974
1975 lck_mtx_lock(&np->n_openlock);
1976 if (!(np->n_openflags & N_OPENBUSY)) {
1977 panic("nfs_open_state_clear_busy");
1978 }
1979 wanted = (np->n_openflags & N_OPENWANT);
1980 np->n_openflags &= ~(N_OPENBUSY | N_OPENWANT);
1981 lck_mtx_unlock(&np->n_openlock);
1982 if (wanted) {
1983 wakeup(&np->n_openflags);
1984 }
1985 }
1986
/*
 * Decide whether an existing open owner (uid2/pid2) matches the
 * requesting credential (uid1/pid1).  The uid must always match;
 * the pid only matters when open owners are split per process.
 */
static int
use_open_owner(uid_t uid1, pid_t pid1, uid_t uid2, pid_t pid2, int split_open_owner)
{
	if (uid1 != uid2) {
		return 0;
	}
	return split_open_owner ? (pid1 == pid2) : 1;
}
1999 /*
2000 * Search a mount's open owner list for the owner for this credential.
2001 * If not found and "alloc" is set, then allocate a new one.
2002 */
struct nfs_open_owner *
nfs_open_owner_find(struct nfsmount *nmp, kauth_cred_t cred, proc_t p, int alloc)
{
	pid_t pid = proc_pid(p);
	uid_t uid = kauth_cred_getuid(cred);
	int split_open_owner = ISSET(nmp->nm_state, NFSSTA_SPLIT_OPEN_OWNER);
	struct nfs_open_owner *noop, *newnoop = NULL;

tryagain:
	lck_mtx_lock(&nmp->nm_lock);
	TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) {
		if (use_open_owner(uid, pid, kauth_cred_getuid(noop->noo_cred), noop->noo_pid, split_open_owner)) {
			break;
		}
	}

	if (!noop && !newnoop && alloc) {
		/*
		 * Not found: allocate a candidate outside the mount lock,
		 * then retry the search in case someone else raced us in.
		 */
		lck_mtx_unlock(&nmp->nm_lock);
		newnoop = kalloc_type(struct nfs_open_owner,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
		lck_mtx_init(&newnoop->noo_lock, &nfs_open_grp, LCK_ATTR_NULL);
		newnoop->noo_mount = nmp;
		/* The owner holds a reference on the credential. */
		kauth_cred_ref(cred);
		newnoop->noo_cred = cred;
		newnoop->noo_pid = pid;
		newnoop->noo_name = OSAddAtomic(1, &nfs_open_owner_seqnum);
		TAILQ_INIT(&newnoop->noo_opens);
		goto tryagain;
	}
	if (!noop && newnoop) {
		/* Still no match on retry: link our new owner into the mount. */
		newnoop->noo_flags |= NFS_OPEN_OWNER_LINK;
		os_ref_init(&newnoop->noo_refcnt, NULL);
		TAILQ_INSERT_HEAD(&nmp->nm_open_owners, newnoop, noo_link);
		noop = newnoop;
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/* We lost the race: discard the candidate we allocated. */
	if (newnoop && (noop != newnoop)) {
		nfs_open_owner_destroy(newnoop);
	}

	/* Return the owner with a reference taken for the caller. */
	if (noop) {
		nfs_open_owner_ref(noop);
	}

	return noop;
}
2050
2051 /*
2052 * destroy an open owner that's no longer needed
2053 */
2054 void
nfs_open_owner_destroy(struct nfs_open_owner * noop)2055 nfs_open_owner_destroy(struct nfs_open_owner *noop)
2056 {
2057 if (noop->noo_cred) {
2058 kauth_cred_unref(&noop->noo_cred);
2059 }
2060 lck_mtx_destroy(&noop->noo_lock, &nfs_open_grp);
2061 kfree_type(struct nfs_open_owner, noop);
2062 }
2063
2064 /*
2065 * acquire a reference count on an open owner
2066 */
void
nfs_open_owner_ref(struct nfs_open_owner *noop)
{
	/* The refcount is protected by the owner's own lock. */
	lck_mtx_lock(&noop->noo_lock);
	os_ref_retain_locked(&noop->noo_refcnt);
	lck_mtx_unlock(&noop->noo_lock);
}
2074
2075 /*
2076 * drop a reference count on an open owner and destroy it if
2077 * it is no longer referenced and no longer on the mount's list.
2078 */
void
nfs_open_owner_rele(struct nfs_open_owner *noop)
{
	os_ref_count_t newcount;

	lck_mtx_lock(&noop->noo_lock);
	if (os_ref_get_count(&noop->noo_refcnt) < 1) {
		panic("nfs_open_owner_rele: no refcnt");
	}
	newcount = os_ref_release_locked(&noop->noo_refcnt);
	/* The busy flag implies an active user, so the count can't hit zero. */
	if (!newcount && (noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
		panic("nfs_open_owner_rele: busy");
	}
	/* XXX we may potentially want to clean up idle/unused open owner structures */
	/* Keep the owner while referenced or still on the mount's list. */
	if (newcount || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) {
		lck_mtx_unlock(&noop->noo_lock);
		return;
	}
	/* owner is no longer referenced or linked to mount, so destroy it */
	lck_mtx_unlock(&noop->noo_lock);
	nfs_open_owner_destroy(noop);
}
2101
2102 /*
2103 * Mark an open owner as busy because we are about to
2104 * start an operation that uses and updates open owner state.
2105 */
int
nfs_open_owner_set_busy(struct nfs_open_owner *noop, thread_t thd)
{
	struct nfsmount *nmp;
	/* Re-check the busy flag every couple of seconds while waiting. */
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	int error = 0, slpflag;

	nmp = noop->noo_mount;
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;

	lck_mtx_lock(&noop->noo_lock);
	while (noop->noo_flags & NFS_OPEN_OWNER_BUSY) {
		if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
			break;
		}
		/* Tell the current holder someone is waiting to be woken. */
		noop->noo_flags |= NFS_OPEN_OWNER_WANT;
		msleep(noop, &noop->noo_lock, slpflag, "nfs_open_owner_set_busy", &ts);
		slpflag = 0;
	}
	if (!error) {
		noop->noo_flags |= NFS_OPEN_OWNER_BUSY;
	}
	lck_mtx_unlock(&noop->noo_lock);

	return error;
}
2135
2136 /*
2137 * Clear the busy flag on an open owner and wake up anyone waiting
2138 * to mark it busy.
2139 */
2140 void
nfs_open_owner_clear_busy(struct nfs_open_owner * noop)2141 nfs_open_owner_clear_busy(struct nfs_open_owner *noop)
2142 {
2143 int wanted;
2144
2145 lck_mtx_lock(&noop->noo_lock);
2146 if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
2147 panic("nfs_open_owner_clear_busy");
2148 }
2149 wanted = (noop->noo_flags & NFS_OPEN_OWNER_WANT);
2150 noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY | NFS_OPEN_OWNER_WANT);
2151 lck_mtx_unlock(&noop->noo_lock);
2152 if (wanted) {
2153 wakeup(noop);
2154 }
2155 }
2156
2157 /*
2158 * Given an open/lock owner and an error code, increment the
2159 * sequence ID if appropriate.
2160 */
2161 void
nfs_owner_seqid_increment(struct nfs_open_owner * noop,struct nfs_lock_owner * nlop,int error)2162 nfs_owner_seqid_increment(struct nfs_open_owner *noop, struct nfs_lock_owner *nlop, int error)
2163 {
2164 switch (error) {
2165 case NFSERR_STALE_CLIENTID:
2166 case NFSERR_STALE_STATEID:
2167 case NFSERR_OLD_STATEID:
2168 case NFSERR_BAD_STATEID:
2169 case NFSERR_BAD_SEQID:
2170 case NFSERR_BADXDR:
2171 case NFSERR_RESOURCE:
2172 case NFSERR_NOFILEHANDLE:
2173 /* do not increment the open seqid on these errors */
2174 return;
2175 }
2176 if (noop) {
2177 noop->noo_seqid++;
2178 }
2179 if (nlop) {
2180 nlop->nlo_seqid++;
2181 }
2182 }
2183
2184 /*
2185 * Search a node's open file list for any conflicts with this request.
2186 * Also find this open owner's open file structure.
2187 * If not found and "alloc" is set, then allocate one.
2188 */
int
nfs_open_file_find(
	nfsnode_t np,
	struct nfs_open_owner *noop,
	struct nfs_open_file **nofpp,
	uint32_t accessMode,
	uint32_t denyMode,
	int alloc)
{
	/*
	 * Public entry point: clear *nofpp so the internal routine never
	 * sees a caller-provided provisional open file structure.
	 */
	*nofpp = NULL;
	return nfs_open_file_find_internal(np, noop, nofpp, accessMode, denyMode, alloc);
}
2201
2202 /*
2203 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
2204 * if an existing one is not found. This is used in "create" scenarios to
2205 * officially add the provisional nofp to the node once the node is created.
2206 */
int
nfs_open_file_find_internal(
	nfsnode_t np,
	struct nfs_open_owner *noop,
	struct nfs_open_file **nofpp,
	uint32_t accessMode,
	uint32_t denyMode,
	int alloc)
{
	struct nfs_open_file *nofp = NULL, *nofp2, *newnofp = NULL;

	/* With no node (create case), skip the search and just allocate. */
	if (!np) {
		goto alloc;
	}
tryagain:
	lck_mtx_lock(&np->n_openlock);
	TAILQ_FOREACH(nofp2, &np->n_opens, nof_link) {
		if (nofp2->nof_owner == noop) {
			nofp = nofp2;
			/* With no mode to check, the owner match is enough. */
			if (!accessMode) {
				break;
			}
		}
		if ((accessMode & nofp2->nof_deny) || (denyMode & nofp2->nof_access)) {
			/* This request conflicts with an existing open on this client. */
			lck_mtx_unlock(&np->n_openlock);
			return EACCES;
		}
	}

	/*
	 * If this open owner doesn't have an open
	 * file structure yet, we create one for it.
	 */
	if (!nofp && !*nofpp && !newnofp && alloc) {
		/* Allocate outside the node lock, then retry the search. */
		lck_mtx_unlock(&np->n_openlock);
alloc:
		newnofp = kalloc_type(struct nfs_open_file,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
		lck_mtx_init(&newnofp->nof_lock, &nfs_open_grp, LCK_ATTR_NULL);
		newnofp->nof_owner = noop;
		nfs_open_owner_ref(noop);
		newnofp->nof_np = np;
		lck_mtx_lock(&noop->noo_lock);
		TAILQ_INSERT_HEAD(&noop->noo_opens, newnofp, nof_oolink);
		lck_mtx_unlock(&noop->noo_lock);
		if (np) {
			goto tryagain;
		}
	}
	if (!nofp) {
		if (*nofpp) {
			/* Adopt the caller's provisional nofp for this node. */
			(*nofpp)->nof_np = np;
			nofp = *nofpp;
		} else {
			nofp = newnofp;
		}
		if (nofp && np) {
			TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link);
		}
	}
	if (np) {
		lck_mtx_unlock(&np->n_openlock);
	}

	/* We lost the race: discard the structure we allocated. */
	if (alloc && newnofp && (nofp != newnofp)) {
		nfs_open_file_destroy(newnofp);
	}

	*nofpp = nofp;
	return nofp ? 0 : ESRCH;
}
2279
2280 /*
2281 * Destroy an open file structure.
2282 */
2283 void
nfs_open_file_destroy(struct nfs_open_file * nofp)2284 nfs_open_file_destroy(struct nfs_open_file *nofp)
2285 {
2286 lck_mtx_lock(&nofp->nof_owner->noo_lock);
2287 TAILQ_REMOVE(&nofp->nof_owner->noo_opens, nofp, nof_oolink);
2288 lck_mtx_unlock(&nofp->nof_owner->noo_lock);
2289 nfs_open_owner_rele(nofp->nof_owner);
2290 lck_mtx_destroy(&nofp->nof_lock, &nfs_open_grp);
2291 kfree_type(struct nfs_open_file, nofp);
2292 }
2293
2294 /*
2295 * Mark an open file as busy because we are about to
2296 * start an operation that uses and updates open file state.
2297 */
int
nfs_open_file_set_busy(struct nfs_open_file *nofp, thread_t thd)
{
	struct nfsmount *nmp;
	/* Re-check the busy flag every couple of seconds while waiting. */
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	int error = 0, slpflag;

	nmp = nofp->nof_owner->noo_mount;
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;

	lck_mtx_lock(&nofp->nof_lock);
	while (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
		if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
			break;
		}
		/* Tell the current holder someone is waiting to be woken. */
		nofp->nof_flags |= NFS_OPEN_FILE_WANT;
		msleep(nofp, &nofp->nof_lock, slpflag, "nfs_open_file_set_busy", &ts);
		slpflag = 0;
	}
	if (!error) {
		nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
	}
	lck_mtx_unlock(&nofp->nof_lock);

	return error;
}
2327
2328 /*
2329 * Clear the busy flag on an open file and wake up anyone waiting
2330 * to mark it busy.
2331 */
2332 void
nfs_open_file_clear_busy(struct nfs_open_file * nofp)2333 nfs_open_file_clear_busy(struct nfs_open_file *nofp)
2334 {
2335 int wanted;
2336
2337 lck_mtx_lock(&nofp->nof_lock);
2338 if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY)) {
2339 panic("nfs_open_file_clear_busy");
2340 }
2341 wanted = (nofp->nof_flags & NFS_OPEN_FILE_WANT);
2342 nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY | NFS_OPEN_FILE_WANT);
2343 lck_mtx_unlock(&nofp->nof_lock);
2344 if (wanted) {
2345 wakeup(nofp);
2346 }
2347 }
2348
2349 /*
2350 * Add the open state for the given access/deny modes to this open file.
2351 */
2352 void
nfs_open_file_add_open(struct nfs_open_file * nofp,uint32_t accessMode,uint32_t denyMode,int delegated)2353 nfs_open_file_add_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode, int delegated)
2354 {
2355 lck_mtx_lock(&nofp->nof_lock);
2356 nofp->nof_access |= accessMode;
2357 nofp->nof_deny |= denyMode;
2358
2359 if (delegated) {
2360 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2361 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2362 nofp->nof_d_r++;
2363 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2364 nofp->nof_d_w++;
2365 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2366 nofp->nof_d_rw++;
2367 }
2368 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2369 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2370 nofp->nof_d_r_dw++;
2371 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2372 nofp->nof_d_w_dw++;
2373 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2374 nofp->nof_d_rw_dw++;
2375 }
2376 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2377 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2378 nofp->nof_d_r_drw++;
2379 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2380 nofp->nof_d_w_drw++;
2381 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2382 nofp->nof_d_rw_drw++;
2383 }
2384 }
2385 } else {
2386 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2387 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2388 nofp->nof_r++;
2389 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2390 nofp->nof_w++;
2391 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2392 nofp->nof_rw++;
2393 }
2394 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2395 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2396 nofp->nof_r_dw++;
2397 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2398 nofp->nof_w_dw++;
2399 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2400 nofp->nof_rw_dw++;
2401 }
2402 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2403 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2404 nofp->nof_r_drw++;
2405 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2406 nofp->nof_w_drw++;
2407 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2408 nofp->nof_rw_drw++;
2409 }
2410 }
2411 }
2412
2413 nofp->nof_opencnt++;
2414 lck_mtx_unlock(&nofp->nof_lock);
2415 }
2416
2417 /*
2418 * Find which particular open combo will be closed and report what
2419 * the new modes will be and whether the open was delegated.
2420 */
2421 void
nfs_open_file_remove_open_find(struct nfs_open_file * nofp,uint32_t accessMode,uint32_t denyMode,uint8_t * newAccessMode,uint8_t * newDenyMode,int * delegated)2422 nfs_open_file_remove_open_find(
2423 struct nfs_open_file *nofp,
2424 uint32_t accessMode,
2425 uint32_t denyMode,
2426 uint8_t *newAccessMode,
2427 uint8_t *newDenyMode,
2428 int *delegated)
2429 {
2430 /*
2431 * Calculate new modes: a mode bit gets removed when there's only
2432 * one count in all the corresponding counts
2433 */
2434 *newAccessMode = nofp->nof_access;
2435 *newDenyMode = nofp->nof_deny;
2436
2437 if ((accessMode & NFS_OPEN_SHARE_ACCESS_READ) &&
2438 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) &&
2439 ((nofp->nof_r + nofp->nof_d_r +
2440 nofp->nof_rw + nofp->nof_d_rw +
2441 nofp->nof_r_dw + nofp->nof_d_r_dw +
2442 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2443 nofp->nof_r_drw + nofp->nof_d_r_drw +
2444 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
2445 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2446 }
2447 if ((accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2448 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2449 ((nofp->nof_w + nofp->nof_d_w +
2450 nofp->nof_rw + nofp->nof_d_rw +
2451 nofp->nof_w_dw + nofp->nof_d_w_dw +
2452 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2453 nofp->nof_w_drw + nofp->nof_d_w_drw +
2454 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
2455 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_WRITE;
2456 }
2457 if ((denyMode & NFS_OPEN_SHARE_DENY_READ) &&
2458 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) &&
2459 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2460 nofp->nof_w_drw + nofp->nof_d_w_drw +
2461 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
2462 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_READ;
2463 }
2464 if ((denyMode & NFS_OPEN_SHARE_DENY_WRITE) &&
2465 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) &&
2466 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2467 nofp->nof_w_drw + nofp->nof_d_w_drw +
2468 nofp->nof_rw_drw + nofp->nof_d_rw_drw +
2469 nofp->nof_r_dw + nofp->nof_d_r_dw +
2470 nofp->nof_w_dw + nofp->nof_d_w_dw +
2471 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
2472 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_WRITE;
2473 }
2474
2475 /* Find the corresponding open access/deny mode counter. */
2476 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2477 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2478 *delegated = (nofp->nof_d_r != 0);
2479 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2480 *delegated = (nofp->nof_d_w != 0);
2481 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2482 *delegated = (nofp->nof_d_rw != 0);
2483 } else {
2484 *delegated = 0;
2485 }
2486 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2487 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2488 *delegated = (nofp->nof_d_r_dw != 0);
2489 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2490 *delegated = (nofp->nof_d_w_dw != 0);
2491 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2492 *delegated = (nofp->nof_d_rw_dw != 0);
2493 } else {
2494 *delegated = 0;
2495 }
2496 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2497 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2498 *delegated = (nofp->nof_d_r_drw != 0);
2499 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2500 *delegated = (nofp->nof_d_w_drw != 0);
2501 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2502 *delegated = (nofp->nof_d_rw_drw != 0);
2503 } else {
2504 *delegated = 0;
2505 }
2506 }
2507 }
2508
2509 /*
2510 * Remove the open state for the given access/deny modes to this open file.
2511 */
void
nfs_open_file_remove_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode)
{
	uint8_t newAccessMode, newDenyMode;
	int delegated = 0;

	lck_mtx_lock(&nofp->nof_lock);
	/* Work out the resulting modes and whether this open was delegated. */
	nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);

	/* Decrement the corresponding open access/deny mode counter. */
	if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
		if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
			if (delegated) {
				if (nofp->nof_d_r == 0) {
					NP(nofp->nof_np, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_r--;
				}
			} else {
				if (nofp->nof_r == 0) {
					NP(nofp->nof_np, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_r--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
			if (delegated) {
				if (nofp->nof_d_w == 0) {
					NP(nofp->nof_np, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_w--;
				}
			} else {
				if (nofp->nof_w == 0) {
					NP(nofp->nof_np, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_w--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
			if (delegated) {
				if (nofp->nof_d_rw == 0) {
					NP(nofp->nof_np, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_rw--;
				}
			} else {
				if (nofp->nof_rw == 0) {
					NP(nofp->nof_np, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_rw--;
				}
			}
		}
	} else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
		if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
			if (delegated) {
				if (nofp->nof_d_r_dw == 0) {
					NP(nofp->nof_np, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_r_dw--;
				}
			} else {
				if (nofp->nof_r_dw == 0) {
					NP(nofp->nof_np, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_r_dw--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
			if (delegated) {
				if (nofp->nof_d_w_dw == 0) {
					NP(nofp->nof_np, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_w_dw--;
				}
			} else {
				if (nofp->nof_w_dw == 0) {
					NP(nofp->nof_np, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_w_dw--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
			if (delegated) {
				if (nofp->nof_d_rw_dw == 0) {
					NP(nofp->nof_np, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_rw_dw--;
				}
			} else {
				if (nofp->nof_rw_dw == 0) {
					NP(nofp->nof_np, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_rw_dw--;
				}
			}
		}
	} else { /* NFS_OPEN_SHARE_DENY_BOTH */
		if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
			if (delegated) {
				if (nofp->nof_d_r_drw == 0) {
					NP(nofp->nof_np, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_r_drw--;
				}
			} else {
				if (nofp->nof_r_drw == 0) {
					NP(nofp->nof_np, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_r_drw--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
			if (delegated) {
				if (nofp->nof_d_w_drw == 0) {
					NP(nofp->nof_np, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_w_drw--;
				}
			} else {
				if (nofp->nof_w_drw == 0) {
					NP(nofp->nof_np, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_w_drw--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
			if (delegated) {
				if (nofp->nof_d_rw_drw == 0) {
					NP(nofp->nof_np, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_rw_drw--;
				}
			} else {
				if (nofp->nof_rw_drw == 0) {
					NP(nofp->nof_np, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_rw_drw--;
				}
			}
		}
	}

	/* update the modes */
	nofp->nof_access = newAccessMode;
	nofp->nof_deny = newDenyMode;
	nofp->nof_opencnt--;
	lck_mtx_unlock(&nofp->nof_lock);
}
2662
2663 #if CONFIG_NFS4
2664 /*
2665 * Get the current (delegation, lock, open, default) stateid for this node.
2666 * If node has a delegation, use that stateid.
2667 * If pid has a lock, use the lockowner's stateid.
2668 * Or use the open file's stateid.
2669 * If no open file, use a default stateid of all ones.
2670 */
void
nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid, int writeaccess)
{
	struct nfsmount *nmp = NFSTONMP(np);
	proc_t p = thd ? get_bsdthreadtask_info(thd) : current_proc(); // XXX async I/O requests don't have a thread
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *nofp = NULL;
	struct nfs_lock_owner *nlop = NULL;
	nfs_stateid *s = NULL;
	int readaccess = !writeaccess;

	/* Priority 1: delegation stateid (write delegation required for writes). */
	if ((readaccess && (np->n_openflags & N_DELEG_MASK)) || (writeaccess && (np->n_openflags & N_DELEG_WRITE))) {
		s = &np->n_dstateid;
	} else {
		if (p) {
			nlop = nfs_lock_owner_find(np, p, 0, 0);
		}
		if (nlop && !TAILQ_EMPTY(&nlop->nlo_locks)) {
			/* we hold locks, use lock stateid */
			s = &nlop->nlo_stateid;
		} else if (((noop = nfs_open_owner_find(nmp, cred, p, 0))) &&
		    (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) &&
		    !(nofp->nof_flags & NFS_OPEN_FILE_LOST) &&
		    nofp->nof_access) {
			/* we (should) have the file open, use open stateid */
			if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
				nfs4_reopen(nofp, thd);
			}
			/* Re-check: the reopen attempt may have lost the open. */
			if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
				s = &nofp->nof_stateid;
			}
		}
	}

	if (s) {
		/* Copy the selected stateid out field by field. */
		sid->seqid = s->seqid;
		sid->other[0] = s->other[0];
		sid->other[1] = s->other[1];
		sid->other[2] = s->other[2];
	} else {
		/* named attributes may not have a stateid for reads, so don't complain for them */
		if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
			NP(np, "nfs_get_stateid: no stateid");
		}
		/* Fall back to the special all-ones "anonymous" stateid. */
		sid->seqid = sid->other[0] = sid->other[1] = sid->other[2] = 0xffffffff;
	}
	if (nlop) {
		nfs_lock_owner_rele(np, nlop, thd, cred);
	}
	if (noop) {
		nfs_open_owner_rele(noop);
	}
}
2724
2725
2726 /*
2727 * When we have a delegation, we may be able to perform the OPEN locally.
2728 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2729 */
int
nfs4_open_delegated(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	uint32_t accessMode,
	uint32_t denyMode,
	vfs_context_t ctx)
{
	int error = 0, ismember, readtoo = 0, authorized = 0;
	uint32_t action;
	struct kauth_acl_eval eval;
	kauth_cred_t cred = vfs_context_ucred(ctx);

	if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
		/*
		 * Try to open it for read access too,
		 * so the buffer cache can read data.
		 */
		readtoo = 1;
		accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
	}

tryagain:
	/* Map the requested share access to kauth action bits. */
	action = 0;
	if (accessMode & NFS_OPEN_SHARE_ACCESS_READ) {
		action |= KAUTH_VNODE_READ_DATA;
	}
	if (accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) {
		action |= KAUTH_VNODE_WRITE_DATA;
	}

	/* evaluate ACE (if we have one) */
	if (np->n_dace.ace_flags) {
		eval.ae_requested = action;
		eval.ae_acl = &np->n_dace;
		eval.ae_count = 1;
		eval.ae_options = 0;
		if (np->n_vattr.nva_uid == kauth_cred_getuid(cred)) {
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		}
		error = kauth_cred_ismember_gid(cred, np->n_vattr.nva_gid, &ismember);
		if (!error && ismember) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		}

		/* Expand generic rights into concrete vnode rights. */
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		error = kauth_acl_evaluate(cred, &eval);

		if (!error && (eval.ae_result == KAUTH_RESULT_ALLOW)) {
			authorized = 1;
		}
	}

	if (!authorized) {
		/* need to ask the server via ACCESS */
		struct vnop_access_args naa;
		naa.a_desc = &vnop_access_desc;
		naa.a_vp = NFSTOV(np);
		naa.a_action = action;
		naa.a_context = ctx;
		if (!(error = nfs_vnop_access(&naa))) {
			authorized = 1;
		}
	}

	if (!authorized) {
		if (readtoo) {
			/* try again without the extra read access */
			accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
			readtoo = 0;
			goto tryagain;
		}
		return error ? error : EACCES;
	}

	/* Record this as a delegated open on the open file. */
	nfs_open_file_add_open(nofp, accessMode, denyMode, 1);

	return 0;
}
2813
2814
2815 /*
2816 * Open a file with the given access/deny modes.
2817 *
2818 * If we have a delegation, we may be able to handle the open locally.
2819 * Otherwise, we will always send the open RPC even if this open's mode is
2820 * a subset of all the existing opens. This makes sure that we will always
2821 * be able to do a downgrade to any of the open modes.
2822 *
2823 * Note: local conflicts should have already been checked in nfs_open_file_find().
2824 */
int
nfs4_open(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	uint32_t accessMode,
	uint32_t denyMode,
	vfs_context_t ctx)
{
	vnode_t vp = NFSTOV(np);
	vnode_t dvp = NULL;
	struct componentname cn;
	const char *vname = NULL;
	uint32_t namelen = 0;
	char smallname[128];        /* stack buffer for the common short-name case */
	char *filename = NULL;
	int error = 0, readtoo = 0;

	/*
	 * We can handle the OPEN ourselves if we have a delegation,
	 * unless it's a read delegation and the open is asking for
	 * either write access or deny read.  We also don't bother to
	 * use the delegation if it's being returned.
	 */
	if (np->n_openflags & N_DELEG_MASK) {
		if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
			return error;
		}
		/* re-check the delegation now that we hold the open state busy */
		if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN) &&
		    (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ||
		    (!(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && !(denyMode & NFS_OPEN_SHARE_DENY_READ)))) {
			error = nfs4_open_delegated(np, nofp, accessMode, denyMode, ctx);
			nfs_open_state_clear_busy(np);
			return error;
		}
		nfs_open_state_clear_busy(np);
	}

	/*
	 * [sigh] We can't trust VFS to get the parent right for named
	 * attribute nodes. (It likes to reparent the nodes after we've
	 * created them.) Luckily we can probably get the right parent
	 * from the n_parent we have stashed away.
	 */
	if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
	    (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
		dvp = NULL;
	}
	if (!dvp) {
		dvp = vnode_getparent(vp);
	}
	vname = vnode_getname(vp);
	if (!dvp || !vname) {
		/* can't build the name needed for the OPEN RPC */
		if (!error) {
			error = EIO;
		}
		goto out;
	}
	/* copy the node's name, spilling to a heap buffer if it won't fit */
	filename = &smallname[0];
	namelen = snprintf(filename, sizeof(smallname), "%s", vname);
	if (namelen >= sizeof(smallname)) {
		filename = kalloc_data(namelen + 1, Z_WAITOK);
		if (!filename) {
			error = ENOMEM;
			goto out;
		}
		snprintf(filename, namelen + 1, "%s", vname);
	}
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = filename;
	cn.cn_namelen = namelen;

	if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
		/*
		 * Try to open it for read access too,
		 * so the buffer cache can read data.
		 */
		readtoo = 1;
		accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
	}
tryagain:
	error = nfs4_open_rpc(nofp, ctx, &cn, NULL, dvp, &vp, NFS_OPEN_NOCREATE, accessMode, denyMode);
	if (error) {
		if (!nfs_mount_state_error_should_restart(error) &&
		    (error != EINTR) && (error != ERESTART) && readtoo) {
			/* try again without the extra read access */
			accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
			readtoo = 0;
			goto tryagain;
		}
		goto out;
	}
	/* record the open; not delegated (0) since we went to the server */
	nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
out:
	if (filename && (filename != &smallname[0])) {
		kfree_data(filename, namelen + 1);
	}
	if (vname) {
		vnode_putname(vname);
	}
	if (dvp != NULLVP) {
		vnode_put(dvp);
	}
	return error;
}
2929 #endif /* CONFIG_NFS4 */
2930
int
nfs_vnop_mmap(
	struct vnop_mmap_args /* {
	                       *  struct vnodeop_desc *a_desc;
	                       *  vnode_t a_vp;
	                       *  int a_fflags;
	                       *  vfs_context_t a_context;
	                       *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	nfsnode_t np = VTONFS(vp);
	int error = 0, delegated = 0;
	uint8_t accessMode, denyMode;
	struct nfsmount *nmp;
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *nofp = NULL;

	nmp = VTONMP(vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	/* mmap only makes sense on regular files with some access requested */
	if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ | PROT_WRITE))) {
		return EINVAL;
	}
	if (np->n_flag & NREVOKE) {
		return EIO;
	}

	/*
	 * fflags contains some combination of: PROT_READ, PROT_WRITE
	 * Since it's not possible to mmap() without having the file open for reading,
	 * read access is always there (regardless if PROT_READ is not set).
	 */
	accessMode = NFS_OPEN_SHARE_ACCESS_READ;
	if (ap->a_fflags & PROT_WRITE) {
		accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
	}
	denyMode = NFS_OPEN_SHARE_DENY_NONE;

	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), vfs_context_proc(ctx), 1);
	if (!noop) {
		return ENOMEM;
	}

restart:
	error = nfs_mount_state_in_use_start(nmp, NULL);
	if (error) {
		nfs_open_owner_rele(noop);
		return NFS_MAPERR(error);
	}
	/* re-check for revocation after acquiring mount state use */
	if (np->n_flag & NREVOKE) {
		error = EIO;
		nfs_mount_state_in_use_end(nmp, 0);
		nfs_open_owner_rele(noop);
		return NFS_MAPERR(error);
	}

	/* look up (without allocating) the open file for this owner */
	error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
	if (error || (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST))) {
		NP(np, "nfs_vnop_mmap: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
		error = EPERM;
	}
#if CONFIG_NFS4
	/* reclaim the open after a server restart, then start over */
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		error = nfs4_reopen(nofp, NULL);
		nofp = NULL;
		if (!error) {
			nfs_mount_state_in_use_end(nmp, 0);
			goto restart;
		}
	}
#endif
	if (!error) {
		error = nfs_open_file_set_busy(nofp, NULL);
	}
	if (error) {
		nofp = NULL;
		goto out;
	}

	/*
	 * The open reference for mmap must mirror an existing open because
	 * we may need to reclaim it after the file is closed.
	 * So grab another open count matching the accessMode passed in.
	 * If we already had an mmap open, prefer read/write without deny mode.
	 * This means we may have to drop the current mmap open first.
	 *
	 * N.B. We should have an open for the mmap, because, mmap was
	 * called on an open descriptor, or we've created an open for read
	 * from reading the first page for execve. However, if we piggy
	 * backed on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE
	 * that open may have closed.
	 */

	if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
		if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
			/* We shouldn't get here. We've already open the file for execve */
			NP(np, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
			    nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
		}
		/*
		 * mmapings for execve are just for read. Get out with EPERM if the accessMode is not ACCESS_READ
		 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
		 */
		if (accessMode != NFS_OPEN_SHARE_ACCESS_READ || (accessMode & nofp->nof_deny)) {
			/* not asking for just read access -> fail */
			error = EPERM;
			goto out;
		}
		/* we don't have the file open, so open it for read access */
		if (nmp->nm_vers < NFS_VER4) {
			/* NFS v2/v3 opens are always allowed - so just add it. */
			nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
			error = 0;
		}
#if CONFIG_NFS4
		else {
			error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
		}
#endif
		if (!error) {
			/* remember to close this open when the file is no longer in use */
			nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
		}
		if (error) {
			goto out;
		}
	}

	/*
	 * Determine the deny mode for the mmap open by matching it to an
	 * existing (possibly delegated) open with the same access mode.
	 * Prefer delegated opens, then the least-restrictive deny mode.
	 */
	if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
		if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
			delegated = 1;
			if (nofp->nof_d_rw) {
				denyMode = NFS_OPEN_SHARE_DENY_NONE;
			} else if (nofp->nof_d_rw_dw) {
				denyMode = NFS_OPEN_SHARE_DENY_WRITE;
			} else if (nofp->nof_d_rw_drw) {
				denyMode = NFS_OPEN_SHARE_DENY_BOTH;
			}
		} else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
			delegated = 0;
			if (nofp->nof_rw) {
				denyMode = NFS_OPEN_SHARE_DENY_NONE;
			} else if (nofp->nof_rw_dw) {
				denyMode = NFS_OPEN_SHARE_DENY_WRITE;
			} else if (nofp->nof_rw_drw) {
				denyMode = NFS_OPEN_SHARE_DENY_BOTH;
			}
		} else {
			error = EPERM;
		}
	} else { /* NFS_OPEN_SHARE_ACCESS_READ */
		if (nofp->nof_d_r || nofp->nof_d_r_dw || nofp->nof_d_r_drw) {
			delegated = 1;
			if (nofp->nof_d_r) {
				denyMode = NFS_OPEN_SHARE_DENY_NONE;
			} else if (nofp->nof_d_r_dw) {
				denyMode = NFS_OPEN_SHARE_DENY_WRITE;
			} else if (nofp->nof_d_r_drw) {
				denyMode = NFS_OPEN_SHARE_DENY_BOTH;
			}
		} else if (nofp->nof_r || nofp->nof_r_dw || nofp->nof_r_drw) {
			delegated = 0;
			if (nofp->nof_r) {
				denyMode = NFS_OPEN_SHARE_DENY_NONE;
			} else if (nofp->nof_r_dw) {
				denyMode = NFS_OPEN_SHARE_DENY_WRITE;
			} else if (nofp->nof_r_drw) {
				denyMode = NFS_OPEN_SHARE_DENY_BOTH;
			}
		} else if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
			/*
			 * This clause and the one below is to co-opt a read write access
			 * for a read only mmaping. We probably got here in that an
			 * existing rw open for an executable file already exists.
			 */
			delegated = 1;
			accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
			if (nofp->nof_d_rw) {
				denyMode = NFS_OPEN_SHARE_DENY_NONE;
			} else if (nofp->nof_d_rw_dw) {
				denyMode = NFS_OPEN_SHARE_DENY_WRITE;
			} else if (nofp->nof_d_rw_drw) {
				denyMode = NFS_OPEN_SHARE_DENY_BOTH;
			}
		} else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
			delegated = 0;
			accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
			if (nofp->nof_rw) {
				denyMode = NFS_OPEN_SHARE_DENY_NONE;
			} else if (nofp->nof_rw_dw) {
				denyMode = NFS_OPEN_SHARE_DENY_WRITE;
			} else if (nofp->nof_rw_drw) {
				denyMode = NFS_OPEN_SHARE_DENY_BOTH;
			}
		} else {
			error = EPERM;
		}
	}
	if (error) { /* mmap mode without proper open mode */
		goto out;
	}

	/*
	 * If the existing mmap access is more than the new access OR the
	 * existing access is the same and the existing deny mode is less,
	 * then we'll stick with the existing mmap open mode.
	 */
	if ((nofp->nof_mmap_access > accessMode) ||
	    ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode))) {
		goto out;
	}

	/* update mmap open mode: first close any previous mmap open */
	if (nofp->nof_mmap_access) {
		error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
		if (error) {
			if (!nfs_mount_state_error_should_restart(error)) {
				NP(np, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
			}
			NP(np, "nfs_vnop_mmap: update, close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
			goto out;
		}
		nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
	}

	/* record the new mmap open */
	nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
	nofp->nof_mmap_access = accessMode;
	nofp->nof_mmap_deny = denyMode;

out:
	if (nofp) {
		nfs_open_file_clear_busy(nofp);
	}
	/* a recoverable state error means drop everything and start over */
	if (nfs_mount_state_in_use_end(nmp, error)) {
		nofp = NULL;
		goto restart;
	}
	if (noop) {
		nfs_open_owner_rele(noop);
	}

	if (!error) {
		/*
		 * On the node's first mapping, bump the mount's mapper count
		 * and reset dead-timeout bookkeeping.
		 */
		int ismapped = 0;
		nfs_node_lock_force(np);
		if ((np->n_flag & NISMAPPED) == 0) {
			np->n_flag |= NISMAPPED;
			ismapped = 1;
		}
		nfs_node_unlock(np);
		if (ismapped) {
			lck_mtx_lock(&nmp->nm_lock);
			nmp->nm_state &= ~NFSSTA_SQUISHY;
			nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
			if (nmp->nm_curdeadtimeout <= 0) {
				nmp->nm_deadto_start = 0;
			}
			nmp->nm_mappers++;
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}

	return NFS_MAPERR(error);
}
3197
3198 int
nfs_vnop_mmap_check(struct vnop_mmap_check_args * ap)3199 nfs_vnop_mmap_check(
3200 struct vnop_mmap_check_args /* {
3201 * struct vnodeop_desc *a_desc;
3202 * vnode_t a_vp;
3203 * int a_flags;
3204 * vfs_context_t a_context;
3205 * } */*ap)
3206 {
3207 vfs_context_t ctx = ap->a_context;
3208 vnode_t vp = ap->a_vp;
3209 struct nfsmount *nmp = VTONMP(vp);
3210 struct vnop_access_args naa;
3211 int error = 0;
3212
3213 if (nfs_mount_gone(nmp)) {
3214 return ENXIO;
3215 }
3216
3217 if (vnode_isreg(vp)) {
3218 /*
3219 * We only need to ensure that a page-in will be
3220 * possible with these credentials. Everything
3221 * else has been checked at other layers.
3222 */
3223 naa.a_desc = &vnop_access_desc;
3224 naa.a_vp = vp;
3225 naa.a_action = KAUTH_VNODE_READ_DATA;
3226 naa.a_context = ctx;
3227
3228 /* compute actual success/failure based on accessibility */
3229 error = nfs_vnop_access(&naa);
3230 }
3231
3232 return NFS_MAPERR(error);
3233 }
3234
int
nfs_vnop_mnomap(
	struct vnop_mnomap_args /* {
	                         *  struct vnodeop_desc *a_desc;
	                         *  vnode_t a_vp;
	                         *  vfs_context_t a_context;
	                         *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	nfsnode_t np = VTONFS(vp);
	struct nfsmount *nmp;
	struct nfs_open_file *nofp = NULL;
	off_t size;
	int error;
	int is_mapped_flag = 0;

	nmp = VTONMP(vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	/* clear the node's mapped flag and drop the mount's mapper count */
	nfs_node_lock_force(np);
	if (np->n_flag & NISMAPPED) {
		is_mapped_flag = 1;
		np->n_flag &= ~NISMAPPED;
	}
	nfs_node_unlock(np);
	if (is_mapped_flag) {
		lck_mtx_lock(&nmp->nm_lock);
		if (nmp->nm_mappers) {
			nmp->nm_mappers--;
		} else {
			NP(np, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}

	/* flush buffers/ubc before we drop the open (in case it's our last open) */
	nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
	if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp))) {
		ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC);
	}

	/*
	 * Walk all open files and close all mmap opens.  The open list is
	 * re-scanned from the top each time the open lock is dropped to do
	 * real work, so the loop restarts via "goto loop" after each close.
	 */
loop:
	error = nfs_mount_state_in_use_start(nmp, NULL);
	if (error) {
		return NFS_MAPERR(error);
	}
	lck_mtx_lock(&np->n_openlock);
	TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
		if (!nofp->nof_mmap_access) {
			continue;
		}
		/* drop the open lock before calling out; we'll rescan */
		lck_mtx_unlock(&np->n_openlock);
#if CONFIG_NFS4
		if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
			error = nfs4_reopen(nofp, NULL);
			if (!error) {
				nfs_mount_state_in_use_end(nmp, 0);
				goto loop;
			}
		}
#endif
		if (!error) {
			error = nfs_open_file_set_busy(nofp, NULL);
		}
		if (error) {
			lck_mtx_lock(&np->n_openlock);
			break;
		}
		/* re-check: the mmap open may have been dropped while unlocked */
		if (nofp->nof_mmap_access) {
			error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
			if (!nfs_mount_state_error_should_restart(error)) {
				if (error) { /* not a state-operation-restarting error, so just clear the access */
					NP(np, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
				}
				nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
			}
			if (error) {
				NP(np, "nfs_vnop_mnomap: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
			}
		}
		nfs_open_file_clear_busy(nofp);
		nfs_mount_state_in_use_end(nmp, error);
		goto loop;
	}
	lck_mtx_unlock(&np->n_openlock);
	nfs_mount_state_in_use_end(nmp, error);
	return NFS_MAPERR(error);
}
3327
3328 /*
3329 * Search a node's lock owner list for the owner for this process.
3330 * If not found and "alloc" is set, then allocate a new one.
3331 */
struct nfs_lock_owner *
nfs_lock_owner_find(nfsnode_t np, proc_t p, caddr_t lockid, int flags)
{
	pid_t pid = proc_pid(p);
	struct timeval ptv;
	int alloc = flags & NFS_LOCK_OWNER_FIND_ALLOC;
	int dequeue = flags & NFS_LOCK_OWNER_FIND_DEQUEUE;
	struct nfs_lock_owner *nlop, *newnlop = NULL;

tryagain:
	lck_mtx_lock(&np->n_openlock);
	TAILQ_FOREACH(nlop, &np->n_lock_owners, nlo_link) {
		os_ref_count_t newcount;

		/* an explicit lockid match wins outright */
		if (lockid != 0 && lockid == nlop->nlo_lockid) {
			break;
		}
		if (nlop->nlo_pid != pid) {
			continue;
		}
		/* same pid: confirm it's the same process incarnation via start time */
		proc_starttime(p, &ptv);
		if (timevalcmp(&nlop->nlo_pid_start, &ptv, ==)) {
			break;
		}
		/* stale lock owner... reuse it if we can */
		if (os_ref_get_count(&nlop->nlo_refcnt)) {
			/* still referenced: unlink it, drop our list ref, and rescan */
			TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
			nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK;
			newcount = os_ref_release_locked(&nlop->nlo_refcnt);
			lck_mtx_unlock(&np->n_openlock);
			goto tryagain;
		}
		/* unreferenced: recycle it for this process */
		proc_starttime(p, &nlop->nlo_pid_start);
		nlop->nlo_seqid = 0;
		nlop->nlo_stategenid = 0;
		break;
	}

	/* caller asked us to unlink the owner from the node's list */
	if (dequeue && nlop && (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) {
		TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
		nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK;
	}

	if (!nlop && !newnlop && alloc) {
		/* not found: allocate a candidate (unlocked) and retry the search */
		lck_mtx_unlock(&np->n_openlock);
		newnlop = kalloc_type(struct nfs_lock_owner,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
		lck_mtx_init(&newnlop->nlo_lock, &nfs_open_grp, LCK_ATTR_NULL);
		newnlop->nlo_pid = pid;
		newnlop->nlo_lockid = lockid;
		proc_starttime(p, &newnlop->nlo_pid_start);
		newnlop->nlo_name = OSAddAtomic(1, &nfs_lock_owner_seqnum);
		TAILQ_INIT(&newnlop->nlo_locks);
		goto tryagain;
	}
	if (!nlop && newnlop) {
		/* still not found on the retry: install our new lock owner */
		newnlop->nlo_flags |= NFS_LOCK_OWNER_LINK;
		os_ref_init(&newnlop->nlo_refcnt, NULL);
		TAILQ_INSERT_HEAD(&np->n_lock_owners, newnlop, nlo_link);
		nlop = newnlop;
	}
	lck_mtx_unlock(&np->n_openlock);

	/* someone else won the race; discard our unused allocation */
	if (newnlop && (nlop != newnlop)) {
		nfs_lock_owner_destroy(newnlop);
	}

	/* return a referenced owner (unless the caller is dequeueing it) */
	if (nlop && !dequeue) {
		nfs_lock_owner_ref(nlop);
	}

	return nlop;
}
3405
3406 /*
3407 * destroy a lock owner that's no longer needed
3408 */
3409 void
nfs_lock_owner_destroy(struct nfs_lock_owner * nlop)3410 nfs_lock_owner_destroy(struct nfs_lock_owner *nlop)
3411 {
3412 if (nlop->nlo_open_owner) {
3413 nfs_open_owner_rele(nlop->nlo_open_owner);
3414 nlop->nlo_open_owner = NULL;
3415 }
3416 lck_mtx_destroy(&nlop->nlo_lock, &nfs_open_grp);
3417 kfree_type(struct nfs_lock_owner, nlop);
3418 }
3419
3420 /*
3421 * acquire a reference count on a lock owner
3422 */
void
nfs_lock_owner_ref(struct nfs_lock_owner *nlop)
{
	/* the refcount is protected by the lock owner's mutex */
	lck_mtx_lock(&nlop->nlo_lock);
	os_ref_retain_locked(&nlop->nlo_refcnt);
	lck_mtx_unlock(&nlop->nlo_lock);
}
3430
3431 #if !CONFIG_NFS4
3432 #define __no_nfsv4_unused __unused
3433 #else
3434 #define __no_nfsv4_unused /* nothing */
3435 #endif
3436
3437 /*
3438 * drop a reference count on a lock owner and destroy it if
3439 * it is no longer referenced and no longer on the mount's list.
3440 */
void
nfs_lock_owner_rele(nfsnode_t np __no_nfsv4_unused, struct nfs_lock_owner *nlop, thread_t thd __no_nfsv4_unused, kauth_cred_t cred __no_nfsv4_unused)
{
	os_ref_count_t newcount;

	lck_mtx_lock(&nlop->nlo_lock);
	if (os_ref_get_count(&nlop->nlo_refcnt) < 1) {
		panic("nfs_lock_owner_rele: no refcnt");
	}
	newcount = os_ref_release_locked(&nlop->nlo_refcnt);
	/* a busy owner must still be referenced by whoever marked it busy */
	if (!newcount && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
		panic("nfs_lock_owner_rele: busy");
	}
	/* XXX we may potentially want to clean up idle/unused lock owner structures */
	if (newcount || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) {
		/* still referenced or still on the node's list: keep it */
		lck_mtx_unlock(&nlop->nlo_lock);
		return;
	}


#if CONFIG_NFS4
	/* tell an NFSv4 server it can release state for this lock owner */
	if (NFSTONMP(np)->nm_vers >= NFS_VER4) {
		int error = nfs4_release_lockowner_rpc(np, nlop, thd, cred);
		if (error) {
			NP(np, "nfs_lock_owner_rele: was not able to release lock owner. error %d", error);
		}
	}
#endif /* CONFIG_NFS4 */

	/* owner is no longer referenced or linked to mount, so destroy it */
	lck_mtx_unlock(&nlop->nlo_lock);
	nfs_lock_owner_destroy(nlop);
}
3474
3475 /*
3476 * Mark a lock owner as busy because we are about to
3477 * start an operation that uses and updates lock owner state.
3478 */
int
nfs_lock_owner_set_busy(struct nfs_lock_owner *nlop, thread_t thd)
{
	struct nfsmount *nmp;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	int error = 0, slpflag;

	nmp = nlop->nlo_open_owner->noo_mount;
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* allow the wait to be interrupted only on interruptible mounts */
	slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;

	lck_mtx_lock(&nlop->nlo_lock);
	/* wait for any other thread that holds this owner busy */
	while (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY) {
		if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
			break;
		}
		nlop->nlo_flags |= NFS_LOCK_OWNER_WANT;
		msleep(nlop, &nlop->nlo_lock, slpflag, "nfs_lock_owner_set_busy", &ts);
		/* only catch signals on the first sleep */
		slpflag = 0;
	}
	if (!error) {
		nlop->nlo_flags |= NFS_LOCK_OWNER_BUSY;
	}
	lck_mtx_unlock(&nlop->nlo_lock);

	return error;
}
3508
3509 /*
3510 * Clear the busy flag on a lock owner and wake up anyone waiting
3511 * to mark it busy.
3512 */
3513 void
nfs_lock_owner_clear_busy(struct nfs_lock_owner * nlop)3514 nfs_lock_owner_clear_busy(struct nfs_lock_owner *nlop)
3515 {
3516 int wanted;
3517
3518 lck_mtx_lock(&nlop->nlo_lock);
3519 if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
3520 panic("nfs_lock_owner_clear_busy");
3521 }
3522 wanted = (nlop->nlo_flags & NFS_LOCK_OWNER_WANT);
3523 nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY | NFS_LOCK_OWNER_WANT);
3524 lck_mtx_unlock(&nlop->nlo_lock);
3525 if (wanted) {
3526 wakeup(nlop);
3527 }
3528 }
3529
3530 /*
3531 * Insert a held lock into a lock owner's sorted list.
3532 * (flock locks are always inserted at the head the list)
3533 */
3534 void
nfs_lock_owner_insert_held_lock(struct nfs_lock_owner * nlop,struct nfs_file_lock * newnflp)3535 nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *nlop, struct nfs_file_lock *newnflp)
3536 {
3537 struct nfs_file_lock *nflp;
3538
3539 /* insert new lock in lock owner's held lock list */
3540 lck_mtx_lock(&nlop->nlo_lock);
3541 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) {
3542 TAILQ_INSERT_HEAD(&nlop->nlo_locks, newnflp, nfl_lolink);
3543 } else {
3544 TAILQ_FOREACH(nflp, &nlop->nlo_locks, nfl_lolink) {
3545 if (newnflp->nfl_start < nflp->nfl_start) {
3546 break;
3547 }
3548 }
3549 if (nflp) {
3550 TAILQ_INSERT_BEFORE(nflp, newnflp, nfl_lolink);
3551 } else {
3552 TAILQ_INSERT_TAIL(&nlop->nlo_locks, newnflp, nfl_lolink);
3553 }
3554 }
3555 lck_mtx_unlock(&nlop->nlo_lock);
3556 }
3557
3558 /*
3559 * Get a file lock structure for this lock owner.
3560 */
3561 struct nfs_file_lock *
nfs_file_lock_alloc(struct nfs_lock_owner * nlop)3562 nfs_file_lock_alloc(struct nfs_lock_owner *nlop)
3563 {
3564 struct nfs_file_lock *nflp = NULL;
3565
3566 lck_mtx_lock(&nlop->nlo_lock);
3567 if (!nlop->nlo_alock.nfl_owner) {
3568 nflp = &nlop->nlo_alock;
3569 nflp->nfl_owner = nlop;
3570 }
3571 lck_mtx_unlock(&nlop->nlo_lock);
3572 if (!nflp) {
3573 nflp = kalloc_type(struct nfs_file_lock,
3574 Z_WAITOK | Z_ZERO | Z_NOFAIL);
3575 nflp->nfl_flags |= NFS_FILE_LOCK_ALLOC;
3576 nflp->nfl_owner = nlop;
3577 }
3578 nfs_lock_owner_ref(nlop);
3579 return nflp;
3580 }
3581
3582 /*
3583 * destroy the given NFS file lock structure
3584 */
3585 void
nfs_file_lock_destroy(nfsnode_t np,struct nfs_file_lock * nflp,thread_t thd,kauth_cred_t cred)3586 nfs_file_lock_destroy(nfsnode_t np, struct nfs_file_lock *nflp, thread_t thd, kauth_cred_t cred)
3587 {
3588 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3589
3590 if (nflp->nfl_flags & NFS_FILE_LOCK_ALLOC) {
3591 nflp->nfl_owner = NULL;
3592 kfree_type(struct nfs_file_lock, nflp);
3593 } else {
3594 lck_mtx_lock(&nlop->nlo_lock);
3595 bzero(nflp, sizeof(*nflp));
3596 lck_mtx_unlock(&nlop->nlo_lock);
3597 }
3598 nfs_lock_owner_rele(np, nlop, thd, cred);
3599 }
3600
3601 /*
3602 * Check if one file lock conflicts with another.
3603 * (nflp1 is the new lock. nflp2 is the existing lock.)
3604 */
3605 int
nfs_file_lock_conflict(struct nfs_file_lock * nflp1,struct nfs_file_lock * nflp2,int * willsplit)3606 nfs_file_lock_conflict(struct nfs_file_lock *nflp1, struct nfs_file_lock *nflp2, int *willsplit)
3607 {
3608 /* no conflict if lock is dead */
3609 if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD)) {
3610 return 0;
3611 }
3612 /* no conflict if it's ours - unless the lock style doesn't match */
3613 if ((nflp1->nfl_owner == nflp2->nfl_owner) &&
3614 ((nflp1->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == (nflp2->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))) {
3615 if (willsplit && (nflp1->nfl_type != nflp2->nfl_type) &&
3616 (nflp1->nfl_start > nflp2->nfl_start) &&
3617 (nflp1->nfl_end < nflp2->nfl_end)) {
3618 *willsplit = 1;
3619 }
3620 return 0;
3621 }
3622 /* no conflict if ranges don't overlap */
3623 if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start)) {
3624 return 0;
3625 }
3626 /* no conflict if neither lock is exclusive */
3627 if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK)) {
3628 return 0;
3629 }
3630 /* conflict */
3631 return 1;
3632 }
3633
3634 #if CONFIG_NFS4
3635 /*
3636 * Send an NFSv4 LOCK RPC to the server.
3637 */
int
nfs4_setlock_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	struct nfs_file_lock *nflp,
	int reclaim,
	int flags,
	thread_t thd,
	kauth_cred_t cred)
{
	struct nfs_lock_owner *nlop = nflp->nfl_owner;
	struct nfsmount *nmp;
	struct nfsm_chain nmreq, nmrep;
	uint64_t xid;
	uint32_t locktype;
	int error = 0, lockerror = ENOENT, newlocker, numops, status;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/*
	 * A "new locker" is a lock owner the server has no state for since
	 * the last state generation change; the LOCK request must then carry
	 * the open owner's seqid and stateid instead of a lock stateid.
	 */
	newlocker = (nlop->nlo_stategenid != nmp->nm_stategenid);
	/* map lock type + blocking flag to the NFSv4 lock type */
	locktype = (nflp->nfl_flags & NFS_FILE_LOCK_WAIT) ?
	    ((nflp->nfl_type == F_WRLCK) ?
	    NFS_LOCK_TYPE_WRITEW :
	    NFS_LOCK_TYPE_READW) :
	    ((nflp->nfl_type == F_WRLCK) ?
	    NFS_LOCK_TYPE_WRITE :
	    NFS_LOCK_TYPE_READ);
	if (newlocker) {
		/* need the open file and open owner busy to use their seqid/stateid */
		error = nfs_open_file_set_busy(nofp, thd);
		if (error) {
			return error;
		}
		error = nfs_open_owner_set_busy(nofp->nof_owner, thd);
		if (error) {
			nfs_open_file_clear_busy(nofp);
			return error;
		}
		if (!nlop->nlo_open_owner) {
			/* the lock owner keeps a reference on its open owner */
			nfs_open_owner_ref(nofp->nof_owner);
			nlop->nlo_open_owner = nofp->nof_owner;
		}
	}
	error = nfs_lock_owner_set_busy(nlop, thd);
	if (error) {
		if (newlocker) {
			nfs_open_owner_clear_busy(nofp->nof_owner);
			nfs_open_file_clear_busy(nofp);
		}
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, LOCK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 33 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "lock", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_LOCK);
	nfsm_chain_add_32(error, &nmreq, locktype);
	nfsm_chain_add_32(error, &nmreq, reclaim);
	nfsm_chain_add_64(error, &nmreq, nflp->nfl_start);
	nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(nflp->nfl_start, nflp->nfl_end));
	nfsm_chain_add_32(error, &nmreq, newlocker);
	if (newlocker) {
		/* open_to_lock_owner4: open seqid + open stateid + lock seqid + owner */
		nfsm_chain_add_32(error, &nmreq, nofp->nof_owner->noo_seqid);
		nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
		nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
		nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
	} else {
		/* exist_lock_owner4: lock stateid + lock seqid */
		nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
		nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
	}
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    thd, cred, &si, flags | R_NOINTR | R_NOUMOUNTINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCK);
	nfs_owner_seqid_increment(newlocker ? nofp->nof_owner : NULL, nlop, error);
	nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);

	/* Update the lock owner's stategenid once it appears the server has state for it. */
	/* We determine this by noting the request was successful (we got a stateid). */
	if (newlocker && !error) {
		nlop->nlo_stategenid = nmp->nm_stategenid;
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfs_lock_owner_clear_busy(nlop);
	if (newlocker) {
		nfs_open_owner_clear_busy(nofp->nof_owner);
		nfs_open_file_clear_busy(nofp);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
3765
3766 /*
3767 * Send an NFSv4 LOCKU RPC to the server.
3768 */
int
nfs4_unlock_rpc(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	int type,
	uint64_t start,
	uint64_t end,
	int flags,
	thread_t thd,
	kauth_cred_t cred)
{
	struct nfsmount *nmp;
	struct nfsm_chain nmreq, nmrep;
	uint64_t xid;
	int error = 0, lockerror = ENOENT, numops, status;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/* serialize use of this lock owner's seqid/stateid */
	error = nfs_lock_owner_set_busy(nlop, NULL);
	if (error) {
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, LOCKU
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "unlock", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_LOCKU);
	nfsm_chain_add_32(error, &nmreq, (type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
	nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
	nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
	nfsm_chain_add_64(error, &nmreq, start);
	nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    thd, cred, &si, flags | R_NOINTR | R_NOUMOUNTINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKU);
	/* bump the lock owner's seqid and pick up the new lock stateid */
	nfs_owner_seqid_increment(NULL, nlop, error);
	nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfs_lock_owner_clear_busy(nlop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
3849
3850 /*
3851 * Send an NFSv4 LOCKT RPC to the server.
3852 */
3853 int
nfs4_getlock_rpc(nfsnode_t np,struct nfs_lock_owner * nlop,struct flock * fl,uint64_t start,uint64_t end,vfs_context_t ctx)3854 nfs4_getlock_rpc(
3855 nfsnode_t np,
3856 struct nfs_lock_owner *nlop,
3857 struct flock *fl,
3858 uint64_t start,
3859 uint64_t end,
3860 vfs_context_t ctx)
3861 {
3862 struct nfsmount *nmp;
3863 struct nfsm_chain nmreq, nmrep;
3864 uint64_t xid, val64 = 0;
3865 uint32_t val = 0;
3866 int error = 0, lockerror, numops, status;
3867 struct nfsreq_secinfo_args si;
3868
3869 nmp = NFSTONMP(np);
3870 if (nfs_mount_gone(nmp)) {
3871 return ENXIO;
3872 }
3873 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3874 return EINVAL;
3875 }
3876
3877 lockerror = ENOENT;
3878 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3879 nfsm_chain_null(&nmreq);
3880 nfsm_chain_null(&nmrep);
3881
3882 // PUTFH, GETATTR, LOCKT
3883 numops = 3;
3884 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3885 nfsm_chain_add_compound_header(error, &nmreq, "locktest", nmp->nm_minor_vers, numops);
3886 numops--;
3887 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
3888 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3889 numops--;
3890 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
3891 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3892 numops--;
3893 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_LOCKT);
3894 nfsm_chain_add_32(error, &nmreq, (fl->l_type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3895 nfsm_chain_add_64(error, &nmreq, start);
3896 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3897 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3898 nfsm_chain_build_done(error, &nmreq);
3899 nfsm_assert(error, (numops == 0), EPROTO);
3900 nfsmout_if(error);
3901
3902 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
3903
3904 if ((lockerror = nfs_node_lock(np))) {
3905 error = lockerror;
3906 }
3907 nfsm_chain_skip_tag(error, &nmrep);
3908 nfsm_chain_get_32(error, &nmrep, numops);
3909 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3910 nfsmout_if(error);
3911 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3912 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3913 nfsmout_if(error);
3914 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKT);
3915 if (error == NFSERR_DENIED) {
3916 error = 0;
3917 nfsm_chain_get_64(error, &nmrep, fl->l_start);
3918 nfsm_chain_get_64(error, &nmrep, val64);
3919 fl->l_len = (val64 == UINT64_MAX) ? 0 : val64;
3920 nfsm_chain_get_32(error, &nmrep, val);
3921 fl->l_type = (val == NFS_LOCK_TYPE_WRITE) ? F_WRLCK : F_RDLCK;
3922 fl->l_pid = 0;
3923 fl->l_whence = SEEK_SET;
3924 } else if (!error) {
3925 fl->l_type = F_UNLCK;
3926 }
3927 nfsmout:
3928 if (!lockerror) {
3929 nfs_node_unlock(np);
3930 }
3931 nfsm_chain_cleanup(&nmreq);
3932 nfsm_chain_cleanup(&nmrep);
3933 return error;
3934 }
3935 #endif /* CONFIG_NFS4 */
3936
3937 /*
3938 * Check for any conflicts with the given lock.
3939 *
3940 * Checking for a lock doesn't require the file to be opened.
3941 * So we skip all the open owner, open file, lock owner work
3942 * and just check for a conflicting lock.
3943 */
3944 int
nfs_advlock_getlock(nfsnode_t np,struct nfs_lock_owner * nlop,struct flock * fl,uint64_t start,uint64_t end,vfs_context_t ctx)3945 nfs_advlock_getlock(
3946 nfsnode_t np,
3947 struct nfs_lock_owner *nlop,
3948 struct flock *fl,
3949 uint64_t start,
3950 uint64_t end,
3951 vfs_context_t ctx)
3952 {
3953 struct nfsmount *nmp;
3954 struct nfs_file_lock *nflp;
3955 int error = 0, answered = 0;
3956
3957 nmp = NFSTONMP(np);
3958 if (nfs_mount_gone(nmp)) {
3959 return ENXIO;
3960 }
3961
3962 restart:
3963 if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)))) {
3964 return error;
3965 }
3966
3967 lck_mtx_lock(&np->n_openlock);
3968 /* scan currently held locks for conflict */
3969 TAILQ_FOREACH(nflp, &np->n_locks, nfl_link) {
3970 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
3971 continue;
3972 }
3973 if ((start <= nflp->nfl_end) && (end >= nflp->nfl_start) &&
3974 ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK))) {
3975 break;
3976 }
3977 }
3978 if (nflp) {
3979 /* found a conflicting lock */
3980 fl->l_type = nflp->nfl_type;
3981 fl->l_pid = (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_FLOCK) ? -1 : nflp->nfl_owner->nlo_pid;
3982 fl->l_start = nflp->nfl_start;
3983 fl->l_len = NFS_FLOCK_LENGTH(nflp->nfl_start, nflp->nfl_end);
3984 fl->l_whence = SEEK_SET;
3985 answered = 1;
3986 } else if ((np->n_openflags & N_DELEG_WRITE) && !(np->n_openflags & N_DELEG_RETURN)) {
3987 /*
3988 * If we have a write delegation, we know there can't be other
3989 * locks on the server. So the answer is no conflicting lock found.
3990 */
3991 fl->l_type = F_UNLCK;
3992 answered = 1;
3993 }
3994 lck_mtx_unlock(&np->n_openlock);
3995 if (answered) {
3996 nfs_mount_state_in_use_end(nmp, 0);
3997 return 0;
3998 }
3999
4000 /* no conflict found locally, so ask the server */
4001 error = nmp->nm_funcs->nf_getlock_rpc(np, nlop, fl, start, end, ctx);
4002
4003 if (nfs_mount_state_in_use_end(nmp, error)) {
4004 goto restart;
4005 }
4006 return error;
4007 }
4008
4009 /*
4010 * Acquire a file lock for the given range.
4011 *
4012 * Add the lock (request) to the lock queue.
4013 * Scan the lock queue for any conflicting locks.
4014 * If a conflict is found, block or return an error.
4015 * Once end of queue is reached, send request to the server.
4016 * If the server grants the lock, scan the lock queue and
4017 * update any existing locks. Then (optionally) scan the
4018 * queue again to coalesce any locks adjacent to the new one.
4019 */
int
nfs_advlock_setlock(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	struct nfs_lock_owner *nlop,
	int op,
	uint64_t start,
	uint64_t end,
	int style,
	short type,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *newnflp, *nflp, *nflp2 = NULL, *nextnflp, *flocknflp = NULL;
	struct nfs_file_lock *coalnflp;
	/*
	 * busy   => we hold the node's N_OPENBUSY state
	 * inuse  => we hold the mount's "state in use" reference
	 * These are tracked so every exit path drops exactly what it holds.
	 */
	int error = 0, error2, willsplit = 0, delay, slpflag, busy = 0, inuse = 0, restart, inqueue = 0;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* allow signals to interrupt sleeps only for interruptible mounts */
	slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;

	if ((type != F_RDLCK) && (type != F_WRLCK)) {
		return EINVAL;
	}

	/* allocate a new lock */
	newnflp = nfs_file_lock_alloc(nlop);
	if (!newnflp) {
		return ENOLCK;
	}
	newnflp->nfl_start = start;
	newnflp->nfl_end = end;
	newnflp->nfl_type = type;
	if (op == F_SETLKW) {
		newnflp->nfl_flags |= NFS_FILE_LOCK_WAIT;
	}
	newnflp->nfl_flags |= style;
	/* marked BLOCKED until the server actually grants it */
	newnflp->nfl_flags |= NFS_FILE_LOCK_BLOCKED;

	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && (type == F_WRLCK)) {
		/*
		 * For exclusive flock-style locks, if we block waiting for the
		 * lock, we need to first release any currently held shared
		 * flock-style lock. So, the first thing we do is check if we
		 * have a shared flock-style lock.
		 */
		nflp = TAILQ_FIRST(&nlop->nlo_locks);
		if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK)) {
			nflp = NULL;
		}
		if (nflp && (nflp->nfl_type != F_RDLCK)) {
			nflp = NULL;
		}
		flocknflp = nflp;
	}

restart:
	/* NB: the local "restart" flag is distinct from this label; it requests a jump back here */
	restart = 0;
	error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
	if (error) {
		goto error_out;
	}
	inuse = 1;
	if (np->n_flag & NREVOKE) {
		error = EIO;
		nfs_mount_state_in_use_end(nmp, 0);
		inuse = 0;
		goto error_out;
	}
#if CONFIG_NFS4
	if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
		/* the open needs to be reestablished on the server first */
		nfs_mount_state_in_use_end(nmp, 0);
		inuse = 0;
		error = nfs4_reopen(nofp, vfs_context_thread(ctx));
		if (error) {
			goto error_out;
		}
		goto restart;
	}
#endif

	lck_mtx_lock(&np->n_openlock);
	if (!inqueue) {
		/* insert new lock at beginning of list */
		TAILQ_INSERT_HEAD(&np->n_locks, newnflp, nfl_link);
		inqueue = 1;
	}

	/* scan current list of locks (held and pending) for conflicts */
	for (nflp = TAILQ_NEXT(newnflp, nfl_link); nflp; nflp = nextnflp) {
		nextnflp = TAILQ_NEXT(nflp, nfl_link);
		if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit)) {
			continue;
		}
		/* Conflict */
		if (!(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
			/* non-blocking request: fail immediately */
			error = EAGAIN;
			break;
		}
		/* Block until this lock is no longer held. */
		if (nflp->nfl_blockcnt == UINT_MAX) {
			error = ENOLCK;
			break;
		}
		/* blockcnt keeps nflp from being destroyed while we're waiting on it */
		nflp->nfl_blockcnt++;
		do {
			if (flocknflp) {
				/* release any currently held shared lock before sleeping */
				lck_mtx_unlock(&np->n_openlock);
				nfs_mount_state_in_use_end(nmp, 0);
				inuse = 0;
				error = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
				flocknflp = NULL;
				if (!error) {
					error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
				}
				if (error) {
					/* retake the node lock so the post-loop cleanup runs under it */
					lck_mtx_lock(&np->n_openlock);
					break;
				}
				inuse = 1;
				lck_mtx_lock(&np->n_openlock);
				/* no need to block/sleep if the conflict is gone */
				if (!nfs_file_lock_conflict(newnflp, nflp, NULL)) {
					break;
				}
			}
			/* sleep (up to 1 sec at a time) waiting for the conflicting lock to be released */
			msleep(nflp, &np->n_openlock, slpflag, "nfs_advlock_setlock_blocked", &ts);
			slpflag = 0;
			error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
			if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
				/* looks like we have a recover pending... restart */
				restart = 1;
				lck_mtx_unlock(&np->n_openlock);
				nfs_mount_state_in_use_end(nmp, 0);
				inuse = 0;
				lck_mtx_lock(&np->n_openlock);
				break;
			}
			if (!error && (np->n_flag & NREVOKE)) {
				error = EIO;
			}
		} while (!error && nfs_file_lock_conflict(newnflp, nflp, NULL));
		nflp->nfl_blockcnt--;
		/* if we were the last waiter on a now-dead lock, reap it */
		if ((nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !nflp->nfl_blockcnt) {
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(np, nflp, vfs_context_thread(ctx), vfs_context_ucred(ctx));
		}
		if (error || restart) {
			break;
		}
		/* We have released n_openlock and we can't trust that nextnflp is still valid. */
		/* So, start this lock-scanning loop over from where it started. */
		nextnflp = TAILQ_NEXT(newnflp, nfl_link);
	}
	lck_mtx_unlock(&np->n_openlock);
	if (restart) {
		goto restart;
	}
	if (error) {
		goto error_out;
	}

	if (willsplit) {
		/*
		 * It looks like this operation is splitting a lock.
		 * We allocate a new lock now so we don't have to worry
		 * about the allocation failing after we've updated some state.
		 */
		nflp2 = nfs_file_lock_alloc(nlop);
		if (!nflp2) {
			error = ENOLCK;
			goto error_out;
		}
	}

	/* once scan for local conflicts is clear, send request to server */
	if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
		goto error_out;
	}
	busy = 1;
	delay = 0;
	do {
#if CONFIG_NFS4
		/* do we have a delegation? (that we're not returning?) */
		if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN)) {
			if (np->n_openflags & N_DELEG_WRITE) {
				/* with a write delegation, just take the lock delegated */
				newnflp->nfl_flags |= NFS_FILE_LOCK_DELEGATED;
				error = 0;
				/* make sure the lock owner knows its open owner */
				if (!nlop->nlo_open_owner) {
					nfs_open_owner_ref(nofp->nof_owner);
					nlop->nlo_open_owner = nofp->nof_owner;
				}
				break;
			} else {
				/*
				 * If we don't have any non-delegated opens but we do have
				 * delegated opens, then we need to first claim the delegated
				 * opens so that the lock request on the server can be associated
				 * with an open it knows about.
				 */
				if ((!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw &&
				    !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw &&
				    !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r) &&
				    (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
				    nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
				    nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r)) {
					error = nfs4_claim_delegated_state_for_open_file(nofp, 0);
					if (error) {
						break;
					}
				}
			}
		}
#endif
		if (np->n_flag & NREVOKE) {
			error = EIO;
		}
		if (!error) {
			/* drop the open-busy state around the (possibly long) RPC, then retake it */
			if (busy) {
				nfs_open_state_clear_busy(np);
				busy = 0;
			}
			error = nmp->nm_funcs->nf_setlock_rpc(np, nofp, newnflp, 0, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
			if (!busy) {
				error2 = nfs_open_state_set_busy(np, vfs_context_thread(ctx));
				if (error2) {
					error = error2;
				} else {
					busy = 1;
				}
			}
		}
		if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE))) {
			break;
		}
		/* request was denied due to either conflict or grace period */
		if ((error == NFSERR_DENIED) && !(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
			error = EAGAIN;
			break;
		}
		if (flocknflp) {
			/* release any currently held shared lock before sleeping */
			nfs_open_state_clear_busy(np);
			busy = 0;
			if (inuse) {
				nfs_mount_state_in_use_end(nmp, 0);
				inuse = 0;
			}
			error2 = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
			flocknflp = NULL;
			if (!error2) {
				error2 = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
			}
			if (!error2) {
				inuse = 1;
				error2 = nfs_open_state_set_busy(np, vfs_context_thread(ctx));
			}
			if (error2) {
				error = error2;
				break;
			}
			busy = 1;
		}
		/*
		 * Wait a little bit and send the request again.
		 * Except for retries of blocked v2/v3 request where we've already waited a bit.
		 */
		if ((nmp->nm_vers >= NFS_VER4) || (error == NFSERR_GRACE)) {
			if (error == NFSERR_GRACE) {
				delay = 4;
			}
			if (delay < 4) {
				delay++;
			}
			tsleep(newnflp, slpflag, "nfs_advlock_setlock_delay", delay * (hz / 2));
			slpflag = 0;
		}
		error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
		if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
			/* looks like we have a recover pending... restart */
			nfs_open_state_clear_busy(np);
			busy = 0;
			if (inuse) {
				nfs_mount_state_in_use_end(nmp, 0);
				inuse = 0;
			}
			goto restart;
		}
		if (!error && (np->n_flag & NREVOKE)) {
			error = EIO;
		}
	} while (!error);

error_out:
	if (nfs_mount_state_error_should_restart(error)) {
		/* looks like we need to restart this operation */
		if (busy) {
			nfs_open_state_clear_busy(np);
			busy = 0;
		}
		if (inuse) {
			nfs_mount_state_in_use_end(nmp, error);
			inuse = 0;
		}
		goto restart;
	}
	lck_mtx_lock(&np->n_openlock);
	newnflp->nfl_flags &= ~NFS_FILE_LOCK_BLOCKED;
	if (error) {
		newnflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
		if (newnflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(newnflp);
		} else {
			/* remove newnflp from lock list and destroy */
			if (inqueue) {
				TAILQ_REMOVE(&np->n_locks, newnflp, nfl_link);
			}
			nfs_file_lock_destroy(np, newnflp, vfs_context_thread(ctx), vfs_context_ucred(ctx));
		}
		lck_mtx_unlock(&np->n_openlock);
		if (busy) {
			nfs_open_state_clear_busy(np);
		}
		if (inuse) {
			nfs_mount_state_in_use_end(nmp, error);
		}
		if (nflp2) {
			nfs_file_lock_destroy(np, nflp2, vfs_context_thread(ctx), vfs_context_ucred(ctx));
		}
		return error;
	}

	/* server granted the lock */

	/*
	 * Scan for locks to update.
	 *
	 * Locks completely covered are killed.
	 * At most two locks may need to be clipped.
	 * It's possible that a single lock may need to be split.
	 */
	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		if (nflp == newnflp) {
			continue;
		}
		if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
			continue;
		}
		if (nflp->nfl_owner != nlop) {
			continue;
		}
		if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK)) {
			continue;
		}
		if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start)) {
			continue;
		}
		/* here's one to update */
		if ((newnflp->nfl_start <= nflp->nfl_start) && (newnflp->nfl_end >= nflp->nfl_end)) {
			/* The entire lock is being replaced. */
			nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
			lck_mtx_lock(&nlop->nlo_lock);
			TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
			lck_mtx_unlock(&nlop->nlo_lock);
			/* lock will be destroyed below, if no waiters */
		} else if ((newnflp->nfl_start > nflp->nfl_start) && (newnflp->nfl_end < nflp->nfl_end)) {
			/* We're replacing a range in the middle of a lock. */
			/* The current lock will be split into two locks. */
			/* Update locks and insert new lock after current lock. */
			nflp2->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
			nflp2->nfl_type = nflp->nfl_type;
			nflp2->nfl_start = newnflp->nfl_end + 1;
			nflp2->nfl_end = nflp->nfl_end;
			nflp->nfl_end = newnflp->nfl_start - 1;
			TAILQ_INSERT_AFTER(&np->n_locks, nflp, nflp2, nfl_link);
			nfs_lock_owner_insert_held_lock(nlop, nflp2);
			nextnflp = nflp2;
			/* nflp2 has been consumed; don't free it on the way out */
			nflp2 = NULL;
		} else if (newnflp->nfl_start > nflp->nfl_start) {
			/* We're replacing the end of a lock. */
			nflp->nfl_end = newnflp->nfl_start - 1;
		} else if (newnflp->nfl_end < nflp->nfl_end) {
			/* We're replacing the start of a lock. */
			nflp->nfl_start = newnflp->nfl_end + 1;
		}
		if (nflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(nflp);
		} else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
			/* remove nflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(np, nflp, vfs_context_thread(ctx), vfs_context_ucred(ctx));
		}
	}

	nfs_lock_owner_insert_held_lock(nlop, newnflp);

	/*
	 * POSIX locks should be coalesced when possible.
	 */
	if ((style == NFS_FILE_LOCK_STYLE_POSIX) && (nofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)) {
		/*
		 * Walk through the lock queue and check each of our held locks with
		 * the previous and next locks in the lock owner's "held lock list".
		 * If the two locks can be coalesced, we merge the current lock into
		 * the other (previous or next) lock. Merging this way makes sure that
		 * lock ranges are always merged forward in the lock queue. This is
		 * important because anyone blocked on the lock being "merged away"
		 * will still need to block on that range and it will simply continue
		 * checking locks that are further down the list.
		 */
		TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
			if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
				continue;
			}
			if (nflp->nfl_owner != nlop) {
				continue;
			}
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX) {
				continue;
			}
			/* try merging into the previous held lock (adjacent, same type) */
			if (((coalnflp = TAILQ_PREV(nflp, nfs_file_lock_queue, nfl_lolink))) &&
			    ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
			    (coalnflp->nfl_type == nflp->nfl_type) &&
			    (coalnflp->nfl_end == (nflp->nfl_start - 1))) {
				coalnflp->nfl_end = nflp->nfl_end;
				nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
				lck_mtx_lock(&nlop->nlo_lock);
				TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
				lck_mtx_unlock(&nlop->nlo_lock);
			} else if (((coalnflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
			    ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
			    (coalnflp->nfl_type == nflp->nfl_type) &&
			    (coalnflp->nfl_start == (nflp->nfl_end + 1))) {
				/* or merge into the next held lock */
				coalnflp->nfl_start = nflp->nfl_start;
				nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
				lck_mtx_lock(&nlop->nlo_lock);
				TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
				lck_mtx_unlock(&nlop->nlo_lock);
			}
			if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD)) {
				continue;
			}
			if (nflp->nfl_blockcnt) {
				/* wake up anyone blocked on this lock */
				wakeup(nflp);
			} else {
				/* remove nflp from lock list and destroy */
				TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
				nfs_file_lock_destroy(np, nflp, vfs_context_thread(ctx), vfs_context_ucred(ctx));
			}
		}
	}

	lck_mtx_unlock(&np->n_openlock);

	if (busy) {
		nfs_open_state_clear_busy(np);
	}
	if (inuse) {
		nfs_mount_state_in_use_end(nmp, error);
	}
	if (nflp2) {
		nfs_file_lock_destroy(np, nflp2, vfs_context_thread(ctx), vfs_context_ucred(ctx));
	}
	return error;
}
4494
4495 /*
4496 * Release all (same style) locks within the given range.
4497 */
int
nfs_advlock_unlock(
	nfsnode_t np,
	struct nfs_open_file *nofp
#if !CONFIG_NFS4
	__unused
#endif
	,
	struct nfs_lock_owner *nlop,
	uint64_t start,
	uint64_t end,
	int style,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *nflp, *nextnflp, *newnflp = NULL;
	int error = 0, willsplit = 0, send_unlock_rpcs = 1;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

restart:
	if ((error = nfs_mount_state_in_use_start(nmp, NULL))) {
		return error;
	}
#if CONFIG_NFS4
	if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
		/* the open needs to be reestablished on the server first */
		nfs_mount_state_in_use_end(nmp, 0);
		error = nfs4_reopen(nofp, NULL);
		if (error) {
			return error;
		}
		goto restart;
	}
#endif
	if ((error = nfs_open_state_set_busy(np, NULL))) {
		nfs_mount_state_in_use_end(nmp, error);
		return error;
	}

	lck_mtx_lock(&np->n_openlock);
	if ((start > 0) && (end < UINT64_MAX) && !willsplit) {
		/*
		 * We may need to allocate a new lock if an existing lock gets split.
		 * So, we first scan the list to check for a split, and if there's
		 * going to be one, we'll allocate one now.
		 */
		TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
			if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
				continue;
			}
			if (nflp->nfl_owner != nlop) {
				continue;
			}
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
				continue;
			}
			if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
				continue;
			}
			/* unlock range lies strictly inside this lock => it will split */
			if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
				willsplit = 1;
				break;
			}
		}
		if (willsplit) {
			/* drop everything, allocate, then restart the whole operation */
			lck_mtx_unlock(&np->n_openlock);
			nfs_open_state_clear_busy(np);
			nfs_mount_state_in_use_end(nmp, 0);
			newnflp = nfs_file_lock_alloc(nlop);
			if (!newnflp) {
				return ENOMEM;
			}
			goto restart;
		}
	}

	/*
	 * Free all of our locks in the given range.
	 *
	 * Note that this process requires sending requests to the server.
	 * Because of this, we will release the n_openlock while performing
	 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
	 * locks from changing underneath us. However, other entries in the
	 * list may be removed. So we need to be careful walking the list.
	 */

	/*
	 * Don't unlock ranges that are held by other-style locks.
	 * If style is posix, don't send any unlock rpcs if flock is held.
	 * If we unlock an flock, don't send unlock rpcs for any posix-style
	 * ranges held - instead send unlocks for the ranges not held.
	 */
	if ((style == NFS_FILE_LOCK_STYLE_POSIX) &&
	    ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK)) {
		send_unlock_rpcs = 0;
	}
	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) &&
	    ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) &&
	    ((nflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX)) {
		/*
		 * Unlocking an flock while posix locks are also held:
		 * walk the posix ranges and send unlocks only for the
		 * gaps between them (a delegated lock needs no RPCs).
		 */
		uint64_t s = 0;
		int type = TAILQ_FIRST(&nlop->nlo_locks)->nfl_type;
		int delegated = (TAILQ_FIRST(&nlop->nlo_locks)->nfl_flags & NFS_FILE_LOCK_DELEGATED);
		while (!delegated && nflp) {
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) {
				/* unlock the range preceding this lock */
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, nflp->nfl_start - 1, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
				if (error) {
					goto out;
				}
				s = nflp->nfl_end + 1;
			}
			nflp = TAILQ_NEXT(nflp, nfl_lolink);
		}
		if (!delegated) {
			/* unlock the final range past the last posix lock */
			lck_mtx_unlock(&np->n_openlock);
			error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, end, 0,
			    vfs_context_thread(ctx), vfs_context_ucred(ctx));
			if (nfs_mount_state_error_should_restart(error)) {
				nfs_open_state_clear_busy(np);
				nfs_mount_state_in_use_end(nmp, error);
				goto restart;
			}
			lck_mtx_lock(&np->n_openlock);
			if (error) {
				goto out;
			}
		}
		send_unlock_rpcs = 0;
	}

	/* now update the local lock list to match the requested unlock */
	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
			continue;
		}
		if (nflp->nfl_owner != nlop) {
			continue;
		}
		if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
			continue;
		}
		if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
			continue;
		}
		/* here's one to unlock */
		if ((start <= nflp->nfl_start) && (end >= nflp->nfl_end)) {
			/* The entire lock is being unlocked. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, nflp->nfl_end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			/* refetch the successor: the list may have changed while unlocked */
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error) {
				break;
			}
			nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
			lck_mtx_lock(&nlop->nlo_lock);
			TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
			lck_mtx_unlock(&nlop->nlo_lock);
			/* lock will be destroyed below, if no waiters */
		} else if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
			/* We're unlocking a range in the middle of a lock. */
			/* The current lock will be split into two locks. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			if (error) {
				break;
			}
			/* update locks and insert new lock after current lock */
			newnflp->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
			newnflp->nfl_type = nflp->nfl_type;
			newnflp->nfl_start = end + 1;
			newnflp->nfl_end = nflp->nfl_end;
			nflp->nfl_end = start - 1;
			TAILQ_INSERT_AFTER(&np->n_locks, nflp, newnflp, nfl_link);
			nfs_lock_owner_insert_held_lock(nlop, newnflp);
			nextnflp = newnflp;
			/* newnflp has been consumed; don't free it on the way out */
			newnflp = NULL;
		} else if (start > nflp->nfl_start) {
			/* We're unlocking the end of a lock. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, nflp->nfl_end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error) {
				break;
			}
			nflp->nfl_end = start - 1;
		} else if (end < nflp->nfl_end) {
			/* We're unlocking the start of a lock. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error) {
				break;
			}
			nflp->nfl_start = end + 1;
		}
		if (nflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(nflp);
		} else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
			/* remove nflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(np, nflp, vfs_context_thread(ctx), vfs_context_ucred(ctx));
		}
	}
out:
	lck_mtx_unlock(&np->n_openlock);
	nfs_open_state_clear_busy(np);
	nfs_mount_state_in_use_end(nmp, 0);

	if (newnflp) {
		/* preallocated split lock wasn't needed after all */
		nfs_file_lock_destroy(np, newnflp, vfs_context_thread(ctx), vfs_context_ucred(ctx));
	}
	return error;
}
4761
4762 /*
4763 * NFSv4 advisory file locking
4764 */
4765 int
nfs_vnop_advlock(struct vnop_advlock_args * ap)4766 nfs_vnop_advlock(
4767 struct vnop_advlock_args /* {
4768 * struct vnodeop_desc *a_desc;
4769 * vnode_t a_vp;
4770 * caddr_t a_id;
4771 * int a_op;
4772 * struct flock *a_fl;
4773 * int a_flags;
4774 * vfs_context_t a_context;
4775 * } */*ap)
4776 {
4777 vnode_t vp = ap->a_vp;
4778 nfsnode_t np = VTONFS(ap->a_vp);
4779 struct flock *fl = ap->a_fl;
4780 int op = ap->a_op;
4781 int flags = ap->a_flags;
4782 caddr_t lockid = ap->a_id;
4783 vfs_context_t ctx = ap->a_context;
4784 struct nfsmount *nmp;
4785 struct nfs_open_owner *noop = NULL;
4786 struct nfs_open_file *nofp = NULL;
4787 struct nfs_lock_owner *nlop = NULL;
4788 off_t lstart;
4789 uint64_t start, end;
4790 int error = 0, modified, style;
4791 enum vtype vtype;
4792 #define OFF_MAX QUAD_MAX
4793
4794 nmp = VTONMP(ap->a_vp);
4795 if (nfs_mount_gone(nmp)) {
4796 return ENXIO;
4797 }
4798 lck_mtx_lock(&nmp->nm_lock);
4799 if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) {
4800 lck_mtx_unlock(&nmp->nm_lock);
4801 return ENOTSUP;
4802 }
4803 lck_mtx_unlock(&nmp->nm_lock);
4804
4805 if (np->n_flag & NREVOKE) {
4806 return EIO;
4807 }
4808 vtype = vnode_vtype(ap->a_vp);
4809 if (vtype == VDIR) { /* ignore lock requests on directories */
4810 return 0;
4811 }
4812 if (vtype != VREG) { /* anything other than regular files is invalid */
4813 return EINVAL;
4814 }
4815
4816 /* Convert the flock structure into a start and end. */
4817 switch (fl->l_whence) {
4818 case SEEK_SET:
4819 case SEEK_CUR:
4820 /*
4821 * Caller is responsible for adding any necessary offset
4822 * to fl->l_start when SEEK_CUR is used.
4823 */
4824 lstart = fl->l_start;
4825 break;
4826 case SEEK_END:
4827 /* need to flush, and refetch attributes to make */
4828 /* sure we have the correct end of file offset */
4829 if ((error = nfs_node_lock(np))) {
4830 return NFS_MAPERR(error);
4831 }
4832 modified = (np->n_flag & NMODIFIED);
4833 nfs_node_unlock(np);
4834 if (modified && ((error = nfs_vinvalbuf1(vp, V_SAVE, ctx, 1)))) {
4835 return NFS_MAPERR(error);
4836 }
4837 if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED))) {
4838 return NFS_MAPERR(error);
4839 }
4840 nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
4841 if ((np->n_size > OFF_MAX) ||
4842 ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start)))) {
4843 error = EOVERFLOW;
4844 }
4845 lstart = np->n_size + fl->l_start;
4846 nfs_data_unlock(np);
4847 if (error) {
4848 return NFS_MAPERR(error);
4849 }
4850 break;
4851 default:
4852 return EINVAL;
4853 }
4854 if (lstart < 0) {
4855 return EINVAL;
4856 }
4857 start = lstart;
4858 if (fl->l_len == 0) {
4859 end = UINT64_MAX;
4860 } else if (fl->l_len > 0) {
4861 if ((fl->l_len - 1) > (OFF_MAX - lstart)) {
4862 return EOVERFLOW;
4863 }
4864 end = start - 1 + fl->l_len;
4865 } else { /* l_len is negative */
4866 if ((lstart + fl->l_len) < 0) {
4867 return EINVAL;
4868 }
4869 end = start - 1;
4870 start += fl->l_len;
4871 }
4872 if ((nmp->nm_vers == NFS_VER2) && ((start > INT32_MAX) || (fl->l_len && (end > INT32_MAX)))) {
4873 return EINVAL;
4874 }
4875
4876 style = (flags & F_FLOCK) ? NFS_FILE_LOCK_STYLE_FLOCK : NFS_FILE_LOCK_STYLE_POSIX;
4877 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX))) {
4878 return EINVAL;
4879 }
4880
4881 /* find the lock owner, alloc if not unlock */
4882 nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), lockid, (op != F_UNLCK) ? NFS_LOCK_OWNER_FIND_ALLOC : 0);
4883 if (!nlop) {
4884 error = (op == F_UNLCK) ? 0 : ENOMEM;
4885 if (error) {
4886 NP(np, "nfs_vnop_advlock: no lock owner, error %d", error);
4887 }
4888 goto out;
4889 }
4890
4891 if (op == F_GETLK) {
4892 error = nfs_advlock_getlock(np, nlop, fl, start, end, ctx);
4893 } else {
4894 /* find the open owner */
4895 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), vfs_context_proc(ctx), 0);
4896 if (!noop) {
4897 NP(np, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx)));
4898 error = EPERM;
4899 goto out;
4900 }
4901 /* find the open file */
4902 #if CONFIG_NFS4
4903 restart:
4904 #endif
4905 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
4906 if (error) {
4907 error = EBADF;
4908 }
4909 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
4910 NP(np, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
4911 error = EIO;
4912 }
4913 #if CONFIG_NFS4
4914 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
4915 error = nfs4_reopen(nofp, ((op == F_UNLCK) ? NULL : vfs_context_thread(ctx)));
4916 nofp = NULL;
4917 if (!error) {
4918 goto restart;
4919 }
4920 }
4921 #endif
4922 if (error) {
4923 NP(np, "nfs_vnop_advlock: no open file %d, %d", error, kauth_cred_getuid(noop->noo_cred));
4924 goto out;
4925 }
4926 if (op == F_UNLCK) {
4927 error = nfs_advlock_unlock(np, nofp, nlop, start, end, style, ctx);
4928 } else if ((op == F_SETLK) || (op == F_SETLKW)) {
4929 if ((op == F_SETLK) && (flags & F_WAIT)) {
4930 op = F_SETLKW;
4931 }
4932 error = nfs_advlock_setlock(np, nofp, nlop, op, start, end, style, fl->l_type, ctx);
4933 } else {
4934 /* not getlk, unlock or lock? */
4935 error = EINVAL;
4936 }
4937 }
4938
4939 out:
4940 if (nlop) {
4941 nfs_lock_owner_rele(np, nlop, vfs_context_thread(ctx), vfs_context_ucred(ctx));
4942 }
4943 if (noop) {
4944 nfs_open_owner_rele(noop);
4945 }
4946 return NFS_MAPERR(error);
4947 }
4948
4949 /*
4950 * Check if an open owner holds any locks on a file.
4951 */
4952 int
nfs_check_for_locks(struct nfs_open_owner * noop,struct nfs_open_file * nofp)4953 nfs_check_for_locks(struct nfs_open_owner *noop, struct nfs_open_file *nofp)
4954 {
4955 struct nfs_lock_owner *nlop;
4956
4957 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
4958 if (nlop->nlo_open_owner != noop) {
4959 continue;
4960 }
4961 if (!TAILQ_EMPTY(&nlop->nlo_locks)) {
4962 break;
4963 }
4964 }
4965 return nlop ? 1 : 0;
4966 }
4967
4968 #if CONFIG_NFS4
4969 /*
4970 * Reopen simple (no deny, no locks) open state that was lost.
4971 */
4972 int
nfs4_reopen(struct nfs_open_file * nofp,thread_t thd)4973 nfs4_reopen(struct nfs_open_file *nofp, thread_t thd)
4974 {
4975 struct nfs_open_owner *noop = nofp->nof_owner;
4976 struct nfsmount *nmp = NFSTONMP(nofp->nof_np);
4977 nfsnode_t np = nofp->nof_np;
4978 vnode_t vp = NFSTOV(np);
4979 vnode_t dvp = NULL;
4980 struct componentname cn;
4981 const char *vname = NULL;
4982 const char *name = NULL;
4983 uint32_t namelen = 0;
4984 char smallname[128];
4985 char *filename = NULL;
4986 int error = 0, done = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
4987 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
4988
4989 lck_mtx_lock(&nofp->nof_lock);
4990 while (nofp->nof_flags & NFS_OPEN_FILE_REOPENING) {
4991 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
4992 break;
4993 }
4994 msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag | (PZERO - 1), "nfsreopenwait", &ts);
4995 slpflag = 0;
4996 }
4997 if (error || !(nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
4998 lck_mtx_unlock(&nofp->nof_lock);
4999 return error;
5000 }
5001 nofp->nof_flags |= NFS_OPEN_FILE_REOPENING;
5002 lck_mtx_unlock(&nofp->nof_lock);
5003
5004 nfs_node_lock_force(np);
5005 if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
5006 /*
5007 * The node's been sillyrenamed, so we need to use
5008 * the sillyrename directory/name to do the open.
5009 */
5010 struct nfs_sillyrename *nsp = np->n_sillyrename;
5011 dvp = NFSTOV(nsp->nsr_dnp);
5012 if ((error = vnode_get(dvp))) {
5013 dvp = NULLVP;
5014 nfs_node_unlock(np);
5015 goto out;
5016 }
5017 name = nsp->nsr_name;
5018 } else {
5019 /*
5020 * [sigh] We can't trust VFS to get the parent right for named
5021 * attribute nodes. (It likes to reparent the nodes after we've
5022 * created them.) Luckily we can probably get the right parent
5023 * from the n_parent we have stashed away.
5024 */
5025 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
5026 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
5027 dvp = NULL;
5028 }
5029 if (!dvp) {
5030 dvp = vnode_getparent(vp);
5031 }
5032 vname = vnode_getname(vp);
5033 if (!dvp || !vname) {
5034 if (!error) {
5035 error = EIO;
5036 }
5037 nfs_node_unlock(np);
5038 goto out;
5039 }
5040 name = vname;
5041 }
5042 filename = &smallname[0];
5043 namelen = snprintf(filename, sizeof(smallname), "%s", name);
5044 if (namelen >= sizeof(smallname)) {
5045 filename = kalloc_data(namelen + 1, Z_WAITOK);
5046 if (!filename) {
5047 error = ENOMEM;
5048 goto out;
5049 }
5050 snprintf(filename, namelen + 1, "%s", name);
5051 }
5052 nfs_node_unlock(np);
5053 bzero(&cn, sizeof(cn));
5054 cn.cn_nameptr = filename;
5055 cn.cn_namelen = namelen;
5056
5057 restart:
5058 done = 0;
5059 if ((error = nfs_mount_state_in_use_start(nmp, thd))) {
5060 goto out;
5061 }
5062
5063 if (nofp->nof_rw) {
5064 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
5065 }
5066 if (!error && nofp->nof_w) {
5067 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE);
5068 }
5069 if (!error && nofp->nof_r) {
5070 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE);
5071 }
5072
5073 if (nfs_mount_state_in_use_end(nmp, error)) {
5074 if (error == NFSERR_GRACE) {
5075 goto restart;
5076 }
5077 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error,
5078 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
5079 error = 0;
5080 goto out;
5081 }
5082 done = 1;
5083 out:
5084 if (error && (error != EINTR) && (error != ERESTART)) {
5085 nfs_revoke_open_state_for_node(np);
5086 }
5087 lck_mtx_lock(&nofp->nof_lock);
5088 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPENING;
5089 if (done) {
5090 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
5091 } else if (error) {
5092 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error,
5093 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
5094 }
5095 lck_mtx_unlock(&nofp->nof_lock);
5096 if (filename && (filename != &smallname[0])) {
5097 kfree_data(filename, namelen + 1);
5098 }
5099 if (vname) {
5100 vnode_putname(vname);
5101 }
5102 if (dvp != NULLVP) {
5103 vnode_put(dvp);
5104 }
5105 return error;
5106 }
5107
5108 /*
5109 * Send a normal OPEN RPC to open/create a file.
5110 */
5111 int
nfs4_open_rpc(struct nfs_open_file * nofp,vfs_context_t ctx,struct componentname * cnp,struct vnode_attr * vap,vnode_t dvp,vnode_t * vpp,int create,int share_access,int share_deny)5112 nfs4_open_rpc(
5113 struct nfs_open_file *nofp,
5114 vfs_context_t ctx,
5115 struct componentname *cnp,
5116 struct vnode_attr *vap,
5117 vnode_t dvp,
5118 vnode_t *vpp,
5119 int create,
5120 int share_access,
5121 int share_deny)
5122 {
5123 return nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx),
5124 cnp, vap, dvp, vpp, create, share_access, share_deny);
5125 }
5126
5127 /*
5128 * Send an OPEN RPC to reopen a file.
5129 */
5130 int
nfs4_open_reopen_rpc(struct nfs_open_file * nofp,thread_t thd,kauth_cred_t cred,struct componentname * cnp,vnode_t dvp,vnode_t * vpp,int share_access,int share_deny)5131 nfs4_open_reopen_rpc(
5132 struct nfs_open_file *nofp,
5133 thread_t thd,
5134 kauth_cred_t cred,
5135 struct componentname *cnp,
5136 vnode_t dvp,
5137 vnode_t *vpp,
5138 int share_access,
5139 int share_deny)
5140 {
5141 return nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, share_access, share_deny);
5142 }
5143
5144 /*
5145 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
5146 */
5147 int
nfs4_open_confirm_rpc(struct nfsmount * nmp,nfsnode_t dnp,u_char * fhp,int fhlen,struct nfs_open_owner * noop,nfs_stateid * sid,thread_t thd,kauth_cred_t cred,struct nfs_vattr * nvap,uint64_t * xidp)5148 nfs4_open_confirm_rpc(
5149 struct nfsmount *nmp,
5150 nfsnode_t dnp,
5151 u_char *fhp,
5152 int fhlen,
5153 struct nfs_open_owner *noop,
5154 nfs_stateid *sid,
5155 thread_t thd,
5156 kauth_cred_t cred,
5157 struct nfs_vattr *nvap,
5158 uint64_t *xidp)
5159 {
5160 struct nfsm_chain nmreq, nmrep;
5161 int error = 0, status, numops;
5162 struct nfsreq_secinfo_args si;
5163
5164 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
5165 nfsm_chain_null(&nmreq);
5166 nfsm_chain_null(&nmrep);
5167
5168 // PUTFH, OPEN_CONFIRM, GETATTR
5169 numops = 3;
5170 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
5171 nfsm_chain_add_compound_header(error, &nmreq, "open_confirm", nmp->nm_minor_vers, numops);
5172 numops--;
5173 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
5174 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
5175 numops--;
5176 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_OPEN_CONFIRM);
5177 nfsm_chain_add_stateid(error, &nmreq, sid);
5178 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5179 numops--;
5180 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
5181 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
5182 nfsm_chain_build_done(error, &nmreq);
5183 nfsm_assert(error, (numops == 0), EPROTO);
5184 nfsmout_if(error);
5185 error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, &nmrep, xidp, &status);
5186
5187 nfsm_chain_skip_tag(error, &nmrep);
5188 nfsm_chain_get_32(error, &nmrep, numops);
5189 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5190 nfsmout_if(error);
5191 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_CONFIRM);
5192 nfs_owner_seqid_increment(noop, NULL, error);
5193 nfsm_chain_get_stateid(error, &nmrep, sid);
5194 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5195 nfsmout_if(error);
5196 error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
5197 nfsmout:
5198 nfsm_chain_cleanup(&nmreq);
5199 nfsm_chain_cleanup(&nmrep);
5200 return error;
5201 }
5202
5203 /*
5204 * common OPEN RPC code
5205 *
5206 * If create is set, ctx must be passed in.
5207 * Returns a node on success if no node passed in.
5208 */
5209 int
nfs4_open_rpc_internal(struct nfs_open_file * nofp,vfs_context_t ctx,thread_t thd,kauth_cred_t cred,struct componentname * cnp,struct vnode_attr * vap,vnode_t dvp,vnode_t * vpp,int create,int share_access,int share_deny)5210 nfs4_open_rpc_internal(
5211 struct nfs_open_file *nofp,
5212 vfs_context_t ctx,
5213 thread_t thd,
5214 kauth_cred_t cred,
5215 struct componentname *cnp,
5216 struct vnode_attr *vap,
5217 vnode_t dvp,
5218 vnode_t *vpp,
5219 int create,
5220 int share_access,
5221 int share_deny)
5222 {
5223 struct nfsmount *nmp;
5224 struct nfs_open_owner *noop = nofp->nof_owner;
5225 struct nfs_vattr *nvattr;
5226 int error = 0, open_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status, ciflag = 0;
5227 int nfsvers, namedattrs, numops, exclusive = 0, gotuid, gotgid, flags = R_NOINTR;
5228 u_int64_t xid, savedxid = 0;
5229 nfsnode_t dnp = VTONFS(dvp);
5230 nfsnode_t np, newnp = NULL;
5231 vnode_t newvp = NULL;
5232 struct nfsm_chain nmreq, nmrep;
5233 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5234 uint32_t rflags, delegation, recall;
5235 struct nfs_stateid stateid, dstateid, *sid;
5236 fhandle_t *fh;
5237 struct nfsreq *req;
5238 struct nfs_dulookup *dul;
5239 char sbuf[64], *s;
5240 uint32_t ace_type, ace_flags, ace_mask, len, slen;
5241 struct kauth_ace ace;
5242 struct nfsreq_secinfo_args si;
5243
5244 if (create && !ctx) {
5245 return EINVAL;
5246 }
5247
5248 nmp = VTONMP(dvp);
5249 if (nfs_mount_gone(nmp)) {
5250 return ENXIO;
5251 }
5252 nfsvers = nmp->nm_vers;
5253 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
5254 bzero(&dstateid, sizeof(dstateid));
5255 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
5256 return EINVAL;
5257 }
5258
5259 np = *vpp ? VTONFS(*vpp) : NULL;
5260 if (create && vap) {
5261 exclusive = (vap->va_vaflags & VA_EXCLUSIVE);
5262 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
5263 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
5264 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
5265 if (exclusive && (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time))) {
5266 vap->va_vaflags |= VA_UTIMES_NULL;
5267 }
5268 } else {
5269 exclusive = gotuid = gotgid = 0;
5270 }
5271 if (nofp) {
5272 sid = &nofp->nof_stateid;
5273 } else {
5274 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
5275 sid = &stateid;
5276 }
5277
5278 if ((error = nfs_open_owner_set_busy(noop, thd))) {
5279 return error;
5280 }
5281
5282 fh = zalloc(nfs_fhandle_zone);
5283 req = zalloc(nfs_req_zone);
5284 dul = kalloc_type(struct nfs_dulookup, Z_WAITOK);
5285 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
5286
5287 again:
5288 rflags = delegation = recall = 0;
5289 ace.ace_flags = 0;
5290 s = sbuf;
5291 slen = sizeof(sbuf);
5292 NVATTR_INIT(nvattr);
5293 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, cnp->cn_nameptr, cnp->cn_namelen);
5294
5295 nfsm_chain_null(&nmreq);
5296 nfsm_chain_null(&nmrep);
5297
5298 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
5299 numops = 6;
5300 nfsm_chain_build_alloc_init(error, &nmreq, 53 * NFSX_UNSIGNED + cnp->cn_namelen);
5301 nfsm_chain_add_compound_header(error, &nmreq, create ? "create" : "open", nmp->nm_minor_vers, numops);
5302 numops--;
5303 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
5304 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
5305 numops--;
5306 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_SAVEFH);
5307 numops--;
5308 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_OPEN);
5309 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5310 nfsm_chain_add_32(error, &nmreq, share_access);
5311 nfsm_chain_add_32(error, &nmreq, share_deny);
5312 nfsm_chain_add_openowner(error, &nmreq, nmp, noop);
5313 nfsm_chain_add_32(error, &nmreq, create);
5314 if (create) {
5315 flags |= R_NOUMOUNTINTR;
5316 if (exclusive) {
5317 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
5318 error = nfsm_chaim_add_exclusive_create_verifier(error, &nmreq, nmp);
5319 ciflag = NFS_CREATE_EXCLUSIVE;
5320 } else {
5321 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
5322 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
5323 ciflag = NFS_CREATE_UNCHECKED;
5324 }
5325 }
5326 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
5327 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
5328 numops--;
5329 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
5330 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5331 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5332 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5333 numops--;
5334 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_RESTOREFH);
5335 numops--;
5336 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
5337 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
5338 nfsm_chain_build_done(error, &nmreq);
5339 nfsm_assert(error, (numops == 0), EPROTO);
5340 if (!error) {
5341 error = busyerror = nfs_node_set_busy(dnp, thd);
5342 }
5343 nfsmout_if(error);
5344
5345 if (create && !namedattrs) {
5346 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
5347 }
5348
5349 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
5350 thd, cred, &si, flags, NULL, &req);
5351 if (!error) {
5352 if (create && !namedattrs) {
5353 nfs_dulookup_start(dul, dnp, ctx);
5354 }
5355 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
5356 savedxid = xid;
5357 }
5358
5359 if (create && !namedattrs) {
5360 nfs_dulookup_finish(dul, dnp, ctx);
5361 }
5362
5363 if ((lockerror = nfs_node_lock(dnp))) {
5364 error = lockerror;
5365 }
5366 nfsm_chain_skip_tag(error, &nmrep);
5367 nfsm_chain_get_32(error, &nmrep, numops);
5368 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5369 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
5370 nfsmout_if(error);
5371 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5372 nfs_owner_seqid_increment(noop, NULL, error);
5373 nfsm_chain_get_stateid(error, &nmrep, sid);
5374 nfsm_chain_check_change_info_open(error, &nmrep, dnp, ciflag);
5375 nfsm_chain_get_32(error, &nmrep, rflags);
5376 bmlen = NFS_ATTR_BITMAP_LEN;
5377 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5378 nfsm_chain_get_32(error, &nmrep, delegation);
5379 if (!error) {
5380 switch (delegation) {
5381 case NFS_OPEN_DELEGATE_NONE:
5382 break;
5383 case NFS_OPEN_DELEGATE_READ:
5384 case NFS_OPEN_DELEGATE_WRITE:
5385 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5386 nfsm_chain_get_32(error, &nmrep, recall);
5387 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
5388 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5389 }
5390 /* if we have any trouble accepting the ACE, just invalidate it */
5391 ace_type = ace_flags = ace_mask = len = 0;
5392 nfsm_chain_get_32(error, &nmrep, ace_type);
5393 nfsm_chain_get_32(error, &nmrep, ace_flags);
5394 nfsm_chain_get_32(error, &nmrep, ace_mask);
5395 nfsm_chain_get_32(error, &nmrep, len);
5396 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5397 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5398 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5399 if (!error && (len >= slen)) {
5400 s = kalloc_data(len + 1, Z_WAITOK);
5401 if (s) {
5402 slen = len + 1;
5403 } else {
5404 ace.ace_flags = 0;
5405 }
5406 }
5407 if (s) {
5408 nfsm_chain_get_opaque(error, &nmrep, len, s);
5409 } else {
5410 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5411 }
5412 if (!error && s) {
5413 s[len] = '\0';
5414 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
5415 ace.ace_flags = 0;
5416 }
5417 }
5418 if (error || !s) {
5419 ace.ace_flags = 0;
5420 }
5421 if (s && (s != sbuf)) {
5422 kfree_data(s, slen);
5423 }
5424 break;
5425 default:
5426 error = EBADRPC;
5427 break;
5428 }
5429 }
5430 /* At this point if we have no error, the object was created/opened. */
5431 open_error = error;
5432 nfsmout_if(error);
5433 if (create && vap && !exclusive) {
5434 nfs_vattr_set_supported(bitmap, vap);
5435 }
5436 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5437 nfsmout_if(error);
5438 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
5439 nfsmout_if(error);
5440 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5441 printf("nfs: open/create didn't return filehandle? %s\n", cnp->cn_nameptr);
5442 error = EBADRPC;
5443 goto nfsmout;
5444 }
5445 if (!create && np && !NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
5446 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
5447 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5448 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
5449 NP(np, "nfs4_open_rpc: warning: file handle mismatch");
5450 }
5451 }
5452 /* directory attributes: if we don't get them, make sure to invalidate */
5453 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
5454 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5455 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
5456 if (error) {
5457 NATTRINVALIDATE(dnp);
5458 }
5459 nfsmout_if(error);
5460
5461 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
5462 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5463 }
5464
5465 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
5466 nfs_node_unlock(dnp);
5467 lockerror = ENOENT;
5468 NVATTR_CLEANUP(nvattr);
5469 error = nfs4_open_confirm_rpc(nmp, dnp, fh->fh_data, fh->fh_len, noop, sid, thd, cred, nvattr, &xid);
5470 nfsmout_if(error);
5471 savedxid = xid;
5472 if ((lockerror = nfs_node_lock(dnp))) {
5473 error = lockerror;
5474 }
5475 }
5476
5477 nfsmout:
5478 nfsm_chain_cleanup(&nmreq);
5479 nfsm_chain_cleanup(&nmrep);
5480
5481 if (!lockerror && create) {
5482 if (!open_error && (dnp->n_flag & NNEGNCENTRIES)) {
5483 dnp->n_flag &= ~NNEGNCENTRIES;
5484 cache_purge_negatives(dvp);
5485 }
5486 dnp->n_flag |= NMODIFIED;
5487 nfs_node_unlock(dnp);
5488 lockerror = ENOENT;
5489 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
5490 }
5491 if (!lockerror) {
5492 nfs_node_unlock(dnp);
5493 }
5494 if (!error && !np && fh->fh_len) {
5495 /* create the vnode with the filehandle and attributes */
5496 xid = savedxid;
5497 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &newnp);
5498 if (!error) {
5499 newvp = NFSTOV(newnp);
5500 }
5501 }
5502 NVATTR_CLEANUP(nvattr);
5503 if (!busyerror) {
5504 nfs_node_clear_busy(dnp);
5505 }
5506 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5507 if (!np) {
5508 np = newnp;
5509 }
5510 if (!error && np && !recall) {
5511 /* stuff the delegation state in the node */
5512 lck_mtx_lock(&np->n_openlock);
5513 np->n_openflags &= ~N_DELEG_MASK;
5514 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5515 np->n_dstateid = dstateid;
5516 np->n_dace = ace;
5517 if (np->n_dlink.tqe_next == NFSNOLIST) {
5518 lck_mtx_lock(&nmp->nm_lock);
5519 if (np->n_dlink.tqe_next == NFSNOLIST) {
5520 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5521 }
5522 lck_mtx_unlock(&nmp->nm_lock);
5523 }
5524 lck_mtx_unlock(&np->n_openlock);
5525 } else {
5526 /* give the delegation back */
5527 if (np) {
5528 if (NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
5529 /* update delegation state and return it */
5530 lck_mtx_lock(&np->n_openlock);
5531 np->n_openflags &= ~N_DELEG_MASK;
5532 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5533 np->n_dstateid = dstateid;
5534 np->n_dace = ace;
5535 if (np->n_dlink.tqe_next == NFSNOLIST) {
5536 lck_mtx_lock(&nmp->nm_lock);
5537 if (np->n_dlink.tqe_next == NFSNOLIST) {
5538 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5539 }
5540 lck_mtx_unlock(&nmp->nm_lock);
5541 }
5542 lck_mtx_unlock(&np->n_openlock);
5543 /* don't need to send a separate delegreturn for fh */
5544 fh->fh_len = 0;
5545 }
5546 /* return np's current delegation */
5547 nfs4_delegation_return(np, 0, thd, cred);
5548 }
5549 if (fh->fh_len) { /* return fh's delegation if it wasn't for np */
5550 nfs4_delegreturn_rpc(nmp, fh->fh_data, fh->fh_len, &dstateid, 0, thd, cred);
5551 }
5552 }
5553 }
5554 if (error) {
5555 if (exclusive && (error == NFSERR_NOTSUPP)) {
5556 exclusive = 0;
5557 goto again;
5558 }
5559 if (newvp) {
5560 nfs_node_unlock(newnp);
5561 vnode_put(newvp);
5562 }
5563 } else if (create) {
5564 nfs_node_unlock(newnp);
5565 if (exclusive) {
5566 error = nfs4_setattr_rpc(newnp, vap, ctx);
5567 if (error && (gotuid || gotgid)) {
5568 /* it's possible the server didn't like our attempt to set IDs. */
5569 /* so, let's try it again without those */
5570 VATTR_CLEAR_ACTIVE(vap, va_uid);
5571 VATTR_CLEAR_ACTIVE(vap, va_gid);
5572 error = nfs4_setattr_rpc(newnp, vap, ctx);
5573 }
5574 }
5575 if (error) {
5576 vnode_put(newvp);
5577 } else {
5578 *vpp = newvp;
5579 }
5580 }
5581 nfs_open_owner_clear_busy(noop);
5582 NFS_ZFREE(nfs_fhandle_zone, fh);
5583 NFS_ZFREE(nfs_req_zone, req);
5584 kfree_type(struct nfs_dulookup, dul);
5585 zfree(KT_NFS_VATTR, nvattr);
5586 return error;
5587 }
5588
5589
5590 /*
5591 * Send an OPEN RPC to claim a delegated open for a file
5592 */
5593 int
nfs4_claim_delegated_open_rpc(struct nfs_open_file * nofp,int share_access,int share_deny,int flags)5594 nfs4_claim_delegated_open_rpc(
5595 struct nfs_open_file *nofp,
5596 int share_access,
5597 int share_deny,
5598 int flags)
5599 {
5600 struct nfsmount *nmp;
5601 struct nfs_open_owner *noop = nofp->nof_owner;
5602 struct nfs_vattr *nvattr;
5603 int error = 0, lockerror = ENOENT, status;
5604 int nfsvers, numops;
5605 u_int64_t xid;
5606 nfsnode_t np = nofp->nof_np;
5607 struct nfsm_chain nmreq, nmrep;
5608 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5609 uint32_t rflags = 0, delegation, recall = 0;
5610 fhandle_t *fh;
5611 struct nfs_stateid dstateid;
5612 char sbuf[64], *s = sbuf;
5613 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5614 struct kauth_ace ace;
5615 vnode_t dvp = NULL;
5616 const char *vname = NULL;
5617 const char *name = NULL;
5618 uint32_t namelen = 0;
5619 char smallname[128];
5620 char *filename = NULL;
5621 struct nfsreq_secinfo_args si;
5622
5623 nmp = NFSTONMP(np);
5624 if (nfs_mount_gone(nmp)) {
5625 return ENXIO;
5626 }
5627 nfsvers = nmp->nm_vers;
5628
5629 nfs_node_lock_force(np);
5630 if ((vnode_vtype(NFSTOV(np)) != VDIR) && np->n_sillyrename) {
5631 /*
5632 * The node's been sillyrenamed, so we need to use
5633 * the sillyrename directory/name to do the open.
5634 */
5635 struct nfs_sillyrename *nsp = np->n_sillyrename;
5636 dvp = NFSTOV(nsp->nsr_dnp);
5637 if ((error = vnode_get(dvp))) {
5638 dvp = NULLVP;
5639 nfs_node_unlock(np);
5640 goto out;
5641 }
5642 name = nsp->nsr_name;
5643 } else {
5644 /*
5645 * [sigh] We can't trust VFS to get the parent right for named
5646 * attribute nodes. (It likes to reparent the nodes after we've
5647 * created them.) Luckily we can probably get the right parent
5648 * from the n_parent we have stashed away.
5649 */
5650 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
5651 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
5652 dvp = NULL;
5653 }
5654 if (!dvp) {
5655 dvp = vnode_getparent(NFSTOV(np));
5656 }
5657 vname = vnode_getname(NFSTOV(np));
5658 if (!dvp || !vname) {
5659 if (!error) {
5660 error = EIO;
5661 }
5662 nfs_node_unlock(np);
5663 goto out;
5664 }
5665 name = vname;
5666 }
5667 filename = &smallname[0];
5668 namelen = snprintf(filename, sizeof(smallname), "%s", name);
5669 if (namelen >= sizeof(smallname)) {
5670 filename = kalloc_data(namelen + 1, Z_WAITOK);
5671 if (!filename) {
5672 error = ENOMEM;
5673 nfs_node_unlock(np);
5674 goto out;
5675 }
5676 snprintf(filename, namelen + 1, "%s", name);
5677 }
5678 nfs_node_unlock(np);
5679
5680 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
5681 goto out;
5682 }
5683
5684 fh = zalloc(nfs_fhandle_zone);
5685 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
5686 NVATTR_INIT(nvattr);
5687 delegation = NFS_OPEN_DELEGATE_NONE;
5688 dstateid = np->n_dstateid;
5689 NFSREQ_SECINFO_SET(&si, VTONFS(dvp), NULL, 0, filename, namelen);
5690
5691 nfsm_chain_null(&nmreq);
5692 nfsm_chain_null(&nmrep);
5693
5694 // PUTFH, OPEN, GETATTR(FH)
5695 numops = 3;
5696 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5697 nfsm_chain_add_compound_header(error, &nmreq, "open_claim_d", nmp->nm_minor_vers, numops);
5698 numops--;
5699 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
5700 nfsm_chain_add_fh(error, &nmreq, nfsvers, VTONFS(dvp)->n_fhp, VTONFS(dvp)->n_fhsize);
5701 numops--;
5702 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_OPEN);
5703 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5704 nfsm_chain_add_32(error, &nmreq, share_access);
5705 nfsm_chain_add_32(error, &nmreq, share_deny);
5706 // open owner: clientid + uid + pid?
5707 nfsm_chain_add_openowner(error, &nmreq, nmp, noop);
5708 // openflag4
5709 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5710 // open_claim4
5711 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_DELEGATE_CUR);
5712 nfsm_chain_add_stateid(error, &nmreq, &np->n_dstateid);
5713 nfsm_chain_add_name(error, &nmreq, filename, namelen, nmp);
5714 numops--;
5715 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
5716 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5717 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5718 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5719 nfsm_chain_build_done(error, &nmreq);
5720 nfsm_assert(error, (numops == 0), EPROTO);
5721 nfsmout_if(error);
5722
5723 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5724 noop->noo_cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
5725
5726 if ((lockerror = nfs_node_lock(np))) {
5727 error = lockerror;
5728 }
5729 nfsm_chain_skip_tag(error, &nmrep);
5730 nfsm_chain_get_32(error, &nmrep, numops);
5731 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5732 nfsmout_if(error);
5733 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5734 nfs_owner_seqid_increment(noop, NULL, error);
5735 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5736 nfsm_chain_check_change_info_open(error, &nmrep, VTONFS(dvp), 0);
5737 nfsm_chain_get_32(error, &nmrep, rflags);
5738 bmlen = NFS_ATTR_BITMAP_LEN;
5739 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5740 nfsm_chain_get_32(error, &nmrep, delegation);
5741 if (!error) {
5742 switch (delegation) {
5743 case NFS_OPEN_DELEGATE_NONE:
5744 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
5745 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
5746 break;
5747 case NFS_OPEN_DELEGATE_READ:
5748 case NFS_OPEN_DELEGATE_WRITE:
5749 if ((((np->n_openflags & N_DELEG_MASK) == N_DELEG_READ) &&
5750 (delegation == NFS_OPEN_DELEGATE_WRITE)) ||
5751 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) &&
5752 (delegation == NFS_OPEN_DELEGATE_READ))) {
5753 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
5754 ((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ? "W" : "R",
5755 (delegation == NFS_OPEN_DELEGATE_WRITE) ? "W" : "R", filename ? filename : "???");
5756 }
5757 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5758 nfsm_chain_get_32(error, &nmrep, recall);
5759 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
5760 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5761 }
5762 /* if we have any trouble accepting the ACE, just invalidate it */
5763 ace_type = ace_flags = ace_mask = len = 0;
5764 nfsm_chain_get_32(error, &nmrep, ace_type);
5765 nfsm_chain_get_32(error, &nmrep, ace_flags);
5766 nfsm_chain_get_32(error, &nmrep, ace_mask);
5767 nfsm_chain_get_32(error, &nmrep, len);
5768 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5769 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5770 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5771 if (!error && (len >= slen)) {
5772 s = kalloc_data(len + 1, Z_WAITOK);
5773 if (s) {
5774 slen = len + 1;
5775 } else {
5776 ace.ace_flags = 0;
5777 }
5778 }
5779 if (s) {
5780 nfsm_chain_get_opaque(error, &nmrep, len, s);
5781 } else {
5782 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5783 }
5784 if (!error && s) {
5785 s[len] = '\0';
5786 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
5787 ace.ace_flags = 0;
5788 }
5789 }
5790 if (error || !s) {
5791 ace.ace_flags = 0;
5792 }
5793 if (s && (s != sbuf)) {
5794 kfree_data(s, slen);
5795 }
5796 if (!error) {
5797 /* stuff the latest delegation state in the node */
5798 lck_mtx_lock(&np->n_openlock);
5799 np->n_openflags &= ~N_DELEG_MASK;
5800 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5801 np->n_dstateid = dstateid;
5802 np->n_dace = ace;
5803 if (np->n_dlink.tqe_next == NFSNOLIST) {
5804 lck_mtx_lock(&nmp->nm_lock);
5805 if (np->n_dlink.tqe_next == NFSNOLIST) {
5806 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5807 }
5808 lck_mtx_unlock(&nmp->nm_lock);
5809 }
5810 lck_mtx_unlock(&np->n_openlock);
5811 }
5812 break;
5813 default:
5814 error = EBADRPC;
5815 break;
5816 }
5817 }
5818 nfsmout_if(error);
5819 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5820 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
5821 nfsmout_if(error);
5822 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5823 printf("nfs: open reclaim didn't return filehandle? %s\n", filename ? filename : "???");
5824 error = EBADRPC;
5825 goto nfsmout;
5826 }
5827 if (!NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
5828 // XXX what if fh doesn't match the vnode we think we're re-opening?
5829 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5830 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
5831 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename ? filename : "???");
5832 }
5833 }
5834 error = nfs_loadattrcache(np, nvattr, &xid, 1);
5835 nfsmout_if(error);
5836 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
5837 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5838 }
5839 nfsmout:
5840 NVATTR_CLEANUP(nvattr);
5841 zfree(KT_NFS_VATTR, nvattr);
5842 NFS_ZFREE(nfs_fhandle_zone, fh);
5843 nfsm_chain_cleanup(&nmreq);
5844 nfsm_chain_cleanup(&nmrep);
5845 if (!lockerror) {
5846 nfs_node_unlock(np);
5847 }
5848 nfs_open_owner_clear_busy(noop);
5849 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5850 if (recall) {
5851 /*
5852 * We're making a delegated claim.
5853 * Don't return the delegation here in case we have more to claim.
5854 * Just make sure it's queued up to be returned.
5855 */
5856 nfs4_delegation_return_enqueue(np);
5857 }
5858 }
5859 out:
5860 // if (!error)
5861 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5862 if (filename && (filename != &smallname[0])) {
5863 kfree_data(filename, namelen + 1);
5864 }
5865 if (vname) {
5866 vnode_putname(vname);
5867 }
5868 if (dvp != NULLVP) {
5869 vnode_put(dvp);
5870 }
5871 return error;
5872 }
5873
5874 /*
5875 * Send an OPEN RPC to reclaim an open file.
5876 */
5877 int
nfs4_open_reclaim_rpc(struct nfs_open_file * nofp,int share_access,int share_deny)5878 nfs4_open_reclaim_rpc(
5879 struct nfs_open_file *nofp,
5880 int share_access,
5881 int share_deny)
5882 {
5883 struct nfsmount *nmp;
5884 struct nfs_open_owner *noop = nofp->nof_owner;
5885 struct nfs_vattr *nvattr;
5886 int error = 0, lockerror = ENOENT, status;
5887 int nfsvers, numops;
5888 u_int64_t xid;
5889 nfsnode_t np = nofp->nof_np;
5890 struct nfsm_chain nmreq, nmrep;
5891 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5892 uint32_t rflags = 0, delegation, recall = 0;
5893 fhandle_t *fh;
5894 struct nfs_stateid dstateid;
5895 char sbuf[64], *s = sbuf;
5896 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5897 struct kauth_ace ace;
5898 struct nfsreq_secinfo_args si;
5899
5900 nmp = NFSTONMP(np);
5901 if (nfs_mount_gone(nmp)) {
5902 return ENXIO;
5903 }
5904 nfsvers = nmp->nm_vers;
5905
5906 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
5907 return error;
5908 }
5909
5910 fh = zalloc(nfs_fhandle_zone);
5911 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
5912 NVATTR_INIT(nvattr);
5913 delegation = NFS_OPEN_DELEGATE_NONE;
5914 dstateid = np->n_dstateid;
5915 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5916
5917 nfsm_chain_null(&nmreq);
5918 nfsm_chain_null(&nmrep);
5919
5920 // PUTFH, OPEN, GETATTR(FH)
5921 numops = 3;
5922 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5923 nfsm_chain_add_compound_header(error, &nmreq, "open_reclaim", nmp->nm_minor_vers, numops);
5924 numops--;
5925 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
5926 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5927 numops--;
5928 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_OPEN);
5929 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5930 nfsm_chain_add_32(error, &nmreq, share_access);
5931 nfsm_chain_add_32(error, &nmreq, share_deny);
5932 // open owner: clientid + uid + pid?
5933 nfsm_chain_add_openowner(error, &nmreq, nmp, noop);
5934 // openflag4
5935 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5936 // open_claim4
5937 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_PREVIOUS);
5938 delegation = (np->n_openflags & N_DELEG_READ) ? NFS_OPEN_DELEGATE_READ :
5939 (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE :
5940 NFS_OPEN_DELEGATE_NONE;
5941 nfsm_chain_add_32(error, &nmreq, delegation);
5942 delegation = NFS_OPEN_DELEGATE_NONE;
5943 numops--;
5944 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
5945 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5946 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5947 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5948 nfsm_chain_build_done(error, &nmreq);
5949 nfsm_assert(error, (numops == 0), EPROTO);
5950 nfsmout_if(error);
5951
5952 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5953 noop->noo_cred, &si, R_RECOVER | R_NOINTR, &nmrep, &xid, &status);
5954
5955 if ((lockerror = nfs_node_lock(np))) {
5956 error = lockerror;
5957 }
5958 nfsm_chain_skip_tag(error, &nmrep);
5959 nfsm_chain_get_32(error, &nmrep, numops);
5960 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5961 nfsmout_if(error);
5962 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5963 nfs_owner_seqid_increment(noop, NULL, error);
5964 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5965 nfsm_chain_check_change_info_open(error, &nmrep, np, 0);
5966 nfsm_chain_get_32(error, &nmrep, rflags);
5967 bmlen = NFS_ATTR_BITMAP_LEN;
5968 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5969 nfsm_chain_get_32(error, &nmrep, delegation);
5970 if (!error) {
5971 switch (delegation) {
5972 case NFS_OPEN_DELEGATE_NONE:
5973 if (np->n_openflags & N_DELEG_MASK) {
5974 /*
5975 * Hey! We were supposed to get our delegation back even
5976 * if it was getting immediately recalled. Bad server!
5977 *
5978 * Just try to return the existing delegation.
5979 */
5980 // NP(np, "nfs: open reclaim didn't return delegation?");
5981 delegation = (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE : NFS_OPEN_DELEGATE_READ;
5982 recall = 1;
5983 }
5984 break;
5985 case NFS_OPEN_DELEGATE_READ:
5986 case NFS_OPEN_DELEGATE_WRITE:
5987 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5988 nfsm_chain_get_32(error, &nmrep, recall);
5989 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
5990 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5991 }
5992 /* if we have any trouble accepting the ACE, just invalidate it */
5993 ace_type = ace_flags = ace_mask = len = 0;
5994 nfsm_chain_get_32(error, &nmrep, ace_type);
5995 nfsm_chain_get_32(error, &nmrep, ace_flags);
5996 nfsm_chain_get_32(error, &nmrep, ace_mask);
5997 nfsm_chain_get_32(error, &nmrep, len);
5998 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5999 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
6000 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
6001 if (!error && (len >= slen)) {
6002 s = kalloc_data(len + 1, Z_WAITOK);
6003 if (s) {
6004 slen = len + 1;
6005 } else {
6006 ace.ace_flags = 0;
6007 }
6008 }
6009 if (s) {
6010 nfsm_chain_get_opaque(error, &nmrep, len, s);
6011 } else {
6012 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
6013 }
6014 if (!error && s) {
6015 s[len] = '\0';
6016 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
6017 ace.ace_flags = 0;
6018 }
6019 }
6020 if (error || !s) {
6021 ace.ace_flags = 0;
6022 }
6023 if (s && (s != sbuf)) {
6024 kfree_data(s, slen);
6025 }
6026 if (!error) {
6027 /* stuff the delegation state in the node */
6028 lck_mtx_lock(&np->n_openlock);
6029 np->n_openflags &= ~N_DELEG_MASK;
6030 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
6031 np->n_dstateid = dstateid;
6032 np->n_dace = ace;
6033 if (np->n_dlink.tqe_next == NFSNOLIST) {
6034 lck_mtx_lock(&nmp->nm_lock);
6035 if (np->n_dlink.tqe_next == NFSNOLIST) {
6036 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
6037 }
6038 lck_mtx_unlock(&nmp->nm_lock);
6039 }
6040 lck_mtx_unlock(&np->n_openlock);
6041 }
6042 break;
6043 default:
6044 error = EBADRPC;
6045 break;
6046 }
6047 }
6048 nfsmout_if(error);
6049 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6050 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
6051 nfsmout_if(error);
6052 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6053 NP(np, "nfs: open reclaim didn't return filehandle?");
6054 error = EBADRPC;
6055 goto nfsmout;
6056 }
6057 if (!NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
6058 // XXX what if fh doesn't match the vnode we think we're re-opening?
6059 // That should be pretty hard in this case, given that we are doing
6060 // the open reclaim using the file handle (and not a dir/name pair).
6061 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
6062 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6063 NP(np, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
6064 }
6065 }
6066 error = nfs_loadattrcache(np, nvattr, &xid, 1);
6067 nfsmout_if(error);
6068 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
6069 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
6070 }
6071 nfsmout:
6072 // if (!error)
6073 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
6074 NVATTR_CLEANUP(nvattr);
6075 zfree(KT_NFS_VATTR, nvattr);
6076 NFS_ZFREE(nfs_fhandle_zone, fh);
6077 nfsm_chain_cleanup(&nmreq);
6078 nfsm_chain_cleanup(&nmrep);
6079 if (!lockerror) {
6080 nfs_node_unlock(np);
6081 }
6082 nfs_open_owner_clear_busy(noop);
6083 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
6084 if (recall) {
6085 nfs4_delegation_return_enqueue(np);
6086 }
6087 }
6088 return error;
6089 }
6090
/*
 * Send an OPEN_DOWNGRADE RPC to reduce the server-side share modes
 * for this open file to the currently-held access/deny values
 * (nofp->nof_access / nofp->nof_deny).
 * Compound: PUTFH, OPEN_DOWNGRADE, GETATTR.
 */
int
nfs4_open_downgrade_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	vfs_context_t ctx)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp;
	int error, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	u_int64_t xid;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	/* serialize use of the open owner (and its seqid) */
	if ((error = nfs_open_owner_set_busy(noop, NULL))) {
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, OPEN_DOWNGRADE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
	nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
	nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    &si, R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
	/* advance the open owner's seqid per the OPEN_DOWNGRADE result */
	nfs_owner_seqid_increment(noop, NULL, error);
	/* the downgrade produces a new open stateid */
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfs_open_owner_clear_busy(noop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
6162
/*
 * Send a CLOSE RPC for this open file.
 * Compound: PUTFH, CLOSE, GETATTR.
 *
 * `flags` is OR'd into the request flags (R_NOINTR is always set so the
 * close cannot be interrupted and leave server state dangling).
 */
int
nfs4_close_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	thread_t thd,
	kauth_cred_t cred,
	int flags)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp;
	int error, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	u_int64_t xid;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	/* serialize use of the open owner (and its seqid) */
	if ((error = nfs_open_owner_set_busy(noop, NULL))) {
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, CLOSE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "close", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_CLOSE);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
	/* advance the open owner's seqid per the CLOSE result */
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfs_open_owner_clear_busy(noop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
6232
/*
 * Send a RELEASE_LOCKOWNER RPC telling the server it may discard any
 * state it holds for the given lock owner.
 * Compound: PUTFH, RELEASE_LOCKOWNER.
 */
int
nfs4_release_lockowner_rpc(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	thread_t thd,
	kauth_cred_t cred)
{
	struct nfsmount *nmp;
	int error, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	u_int64_t xid;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, RELEASE_LOCKOWNER
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "release lockowner", nmp->nm_minor_vers, numops);
	numops--;
	/*
	 * NOTE(review): ops here are added with nfsm_chain_add_32() rather
	 * than nfsm_chain_add_v4_op() as in the other compounds in this
	 * file — confirm whether that is intentional (RELEASE_LOCKOWNER
	 * exists only in NFSv4.0).
	 */
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RELEASE_LOCKOWNER);
	nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, &nmrep, &xid, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RELEASE_LOCKOWNER);
	nfsmout_if(error);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
6282
6283 /*
6284 * Claim the delegated open combinations this open file holds.
6285 */
6286 int
nfs4_claim_delegated_state_for_open_file(struct nfs_open_file * nofp,int flags)6287 nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags)
6288 {
6289 struct nfs_open_owner *noop = nofp->nof_owner;
6290 struct nfs_lock_owner *nlop;
6291 struct nfs_file_lock *nflp, *nextnflp;
6292 struct nfsmount *nmp;
6293 int error = 0, reopen = 0;
6294
6295 if (nofp->nof_d_rw_drw) {
6296 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_BOTH, flags);
6297 if (!error) {
6298 lck_mtx_lock(&nofp->nof_lock);
6299 nofp->nof_rw_drw += nofp->nof_d_rw_drw;
6300 nofp->nof_d_rw_drw = 0;
6301 lck_mtx_unlock(&nofp->nof_lock);
6302 }
6303 }
6304 if (!error && nofp->nof_d_w_drw) {
6305 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_BOTH, flags);
6306 if (!error) {
6307 lck_mtx_lock(&nofp->nof_lock);
6308 nofp->nof_w_drw += nofp->nof_d_w_drw;
6309 nofp->nof_d_w_drw = 0;
6310 lck_mtx_unlock(&nofp->nof_lock);
6311 }
6312 }
6313 if (!error && nofp->nof_d_r_drw) {
6314 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_BOTH, flags);
6315 if (!error) {
6316 lck_mtx_lock(&nofp->nof_lock);
6317 nofp->nof_r_drw += nofp->nof_d_r_drw;
6318 nofp->nof_d_r_drw = 0;
6319 lck_mtx_unlock(&nofp->nof_lock);
6320 }
6321 }
6322 if (!error && nofp->nof_d_rw_dw) {
6323 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_WRITE, flags);
6324 if (!error) {
6325 lck_mtx_lock(&nofp->nof_lock);
6326 nofp->nof_rw_dw += nofp->nof_d_rw_dw;
6327 nofp->nof_d_rw_dw = 0;
6328 lck_mtx_unlock(&nofp->nof_lock);
6329 }
6330 }
6331 if (!error && nofp->nof_d_w_dw) {
6332 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_WRITE, flags);
6333 if (!error) {
6334 lck_mtx_lock(&nofp->nof_lock);
6335 nofp->nof_w_dw += nofp->nof_d_w_dw;
6336 nofp->nof_d_w_dw = 0;
6337 lck_mtx_unlock(&nofp->nof_lock);
6338 }
6339 }
6340 if (!error && nofp->nof_d_r_dw) {
6341 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_WRITE, flags);
6342 if (!error) {
6343 lck_mtx_lock(&nofp->nof_lock);
6344 nofp->nof_r_dw += nofp->nof_d_r_dw;
6345 nofp->nof_d_r_dw = 0;
6346 lck_mtx_unlock(&nofp->nof_lock);
6347 }
6348 }
6349 /* non-deny-mode opens may be reopened if no locks are held */
6350 if (!error && nofp->nof_d_rw) {
6351 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, flags);
6352 /* for some errors, we should just try reopening the file */
6353 if (nfs_mount_state_error_delegation_lost(error)) {
6354 reopen = error;
6355 }
6356 if (!error || reopen) {
6357 lck_mtx_lock(&nofp->nof_lock);
6358 nofp->nof_rw += nofp->nof_d_rw;
6359 nofp->nof_d_rw = 0;
6360 lck_mtx_unlock(&nofp->nof_lock);
6361 }
6362 }
6363 /* if we've already set reopen, we should move these other two opens from delegated to not delegated */
6364 if ((!error || reopen) && nofp->nof_d_w) {
6365 if (!error) {
6366 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, flags);
6367 /* for some errors, we should just try reopening the file */
6368 if (nfs_mount_state_error_delegation_lost(error)) {
6369 reopen = error;
6370 }
6371 }
6372 if (!error || reopen) {
6373 lck_mtx_lock(&nofp->nof_lock);
6374 nofp->nof_w += nofp->nof_d_w;
6375 nofp->nof_d_w = 0;
6376 lck_mtx_unlock(&nofp->nof_lock);
6377 }
6378 }
6379 if ((!error || reopen) && nofp->nof_d_r) {
6380 if (!error) {
6381 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, flags);
6382 /* for some errors, we should just try reopening the file */
6383 if (nfs_mount_state_error_delegation_lost(error)) {
6384 reopen = error;
6385 }
6386 }
6387 if (!error || reopen) {
6388 lck_mtx_lock(&nofp->nof_lock);
6389 nofp->nof_r += nofp->nof_d_r;
6390 nofp->nof_d_r = 0;
6391 lck_mtx_unlock(&nofp->nof_lock);
6392 }
6393 }
6394
6395 if (reopen) {
6396 /*
6397 * Any problems with the delegation probably indicates that we
6398 * should review/return all of our current delegation state.
6399 */
6400 if ((nmp = NFSTONMP(nofp->nof_np))) {
6401 nfs4_delegation_return_enqueue(nofp->nof_np);
6402 lck_mtx_lock(&nmp->nm_lock);
6403 nfs_need_recover(nmp, NFSERR_EXPIRED);
6404 lck_mtx_unlock(&nmp->nm_lock);
6405 }
6406 if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) {
6407 /* just reopen the file on next access */
6408 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
6409 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6410 lck_mtx_lock(&nofp->nof_lock);
6411 nofp->nof_flags |= NFS_OPEN_FILE_REOPEN;
6412 lck_mtx_unlock(&nofp->nof_lock);
6413 return 0;
6414 }
6415 if (reopen) {
6416 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
6417 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6418 }
6419 }
6420
6421 if (!error && ((nmp = NFSTONMP(nofp->nof_np)))) {
6422 /* claim delegated locks */
6423 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
6424 if (nlop->nlo_open_owner != noop) {
6425 continue;
6426 }
6427 TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) {
6428 /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
6429 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
6430 continue;
6431 }
6432 /* skip non-delegated locks */
6433 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
6434 continue;
6435 }
6436 error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 0, flags, current_thread(), noop->noo_cred);
6437 if (error) {
6438 NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
6439 nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6440 break;
6441 }
6442 // else {
6443 // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
6444 // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6445 // }
6446 }
6447 if (error) {
6448 break;
6449 }
6450 }
6451 }
6452
6453 if (!error) { /* all state claimed successfully! */
6454 return 0;
6455 }
6456
6457 /* restart if it looks like a problem more than just losing the delegation */
6458 if (!nfs_mount_state_error_delegation_lost(error) &&
6459 ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error))) {
6460 NP(nofp->nof_np, "nfs delegated lock claim error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6461 if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np)))) {
6462 nfs_need_reconnect(nmp);
6463 }
6464 return error;
6465 }
6466
6467 /* delegated state lost (once held but now not claimable) */
6468 NP(nofp->nof_np, "nfs delegated state claim error %d, state lost, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6469
6470 /*
6471 * Any problems with the delegation probably indicates that we
6472 * should review/return all of our current delegation state.
6473 */
6474 if ((nmp = NFSTONMP(nofp->nof_np))) {
6475 nfs4_delegation_return_enqueue(nofp->nof_np);
6476 lck_mtx_lock(&nmp->nm_lock);
6477 nfs_need_recover(nmp, NFSERR_EXPIRED);
6478 lck_mtx_unlock(&nmp->nm_lock);
6479 }
6480
6481 /* revoke all open file state */
6482 nfs_revoke_open_state_for_node(nofp->nof_np);
6483
6484 return error;
6485 }
6486 #endif /* CONFIG_NFS4*/
6487
6488 /*
6489 * Release all open state for the given node.
6490 */
6491 void
nfs_release_open_state_for_node(nfsnode_t np,int force)6492 nfs_release_open_state_for_node(nfsnode_t np, int force)
6493 {
6494 struct nfsmount *nmp = NFSTONMP(np);
6495 struct nfs_open_file *nofp;
6496 struct nfs_file_lock *nflp, *nextnflp;
6497
6498 /* drop held locks */
6499 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
6500 /* skip dead & blocked lock requests */
6501 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
6502 continue;
6503 }
6504 /* send an unlock if not a delegated lock */
6505 if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
6506 nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
6507 NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
6508 }
6509 /* kill/remove the lock */
6510 lck_mtx_lock(&np->n_openlock);
6511 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
6512 lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
6513 TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
6514 lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
6515 if (nflp->nfl_blockcnt) {
6516 /* wake up anyone blocked on this lock */
6517 wakeup(nflp);
6518 } else {
6519 /* remove nflp from lock list and destroy */
6520 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
6521 nfs_file_lock_destroy(np, nflp, NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
6522 }
6523 lck_mtx_unlock(&np->n_openlock);
6524 }
6525
6526 lck_mtx_lock(&np->n_openlock);
6527
6528 /* drop all opens */
6529 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
6530 if (nofp->nof_flags & NFS_OPEN_FILE_LOST) {
6531 continue;
6532 }
6533 /* mark open state as lost */
6534 lck_mtx_lock(&nofp->nof_lock);
6535 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
6536 nofp->nof_flags |= NFS_OPEN_FILE_LOST;
6537
6538 lck_mtx_unlock(&nofp->nof_lock);
6539 #if CONFIG_NFS4
6540 if (!force && nmp && (nmp->nm_vers >= NFS_VER4)) {
6541 nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
6542 }
6543 #endif
6544 }
6545
6546 lck_mtx_unlock(&np->n_openlock);
6547 }
6548
6549 /*
6550 * State for a node has been lost, drop it, and revoke the node.
6551 * Attempt to return any state if possible in case the server
6552 * might somehow think we hold it.
6553 */
6554 void
nfs_revoke_open_state_for_node(nfsnode_t np)6555 nfs_revoke_open_state_for_node(nfsnode_t np)
6556 {
6557 struct nfsmount *nmp;
6558
6559 /* mark node as needing to be revoked */
6560 nfs_node_lock_force(np);
6561 if (np->n_flag & NREVOKE) { /* already revoked? */
6562 NP(np, "nfs_revoke_open_state_for_node(): already revoked");
6563 nfs_node_unlock(np);
6564 return;
6565 }
6566 np->n_flag |= NREVOKE;
6567 nfs_node_unlock(np);
6568
6569 nfs_release_open_state_for_node(np, 0);
6570 NP(np, "nfs: state lost for %p 0x%x", np, np->n_flag);
6571
6572 /* mark mount as needing a revoke scan and have the socket thread do it. */
6573 if ((nmp = NFSTONMP(np))) {
6574 lck_mtx_lock(&nmp->nm_lock);
6575 nmp->nm_state |= NFSSTA_REVOKE;
6576 nfs_mount_sock_thread_wake(nmp);
6577 lck_mtx_unlock(&nmp->nm_lock);
6578 }
6579 }
6580
6581 #if CONFIG_NFS4
6582 /*
6583 * Claim the delegated open combinations that each of this node's open files hold.
6584 */
6585 int
nfs4_claim_delegated_state_for_node(nfsnode_t np,int flags)6586 nfs4_claim_delegated_state_for_node(nfsnode_t np, int flags)
6587 {
6588 struct nfs_open_file *nofp;
6589 int error = 0;
6590
6591 lck_mtx_lock(&np->n_openlock);
6592
6593 /* walk the open file list looking for opens with delegated state to claim */
6594 restart:
6595 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
6596 if (!nofp->nof_d_rw_drw && !nofp->nof_d_w_drw && !nofp->nof_d_r_drw &&
6597 !nofp->nof_d_rw_dw && !nofp->nof_d_w_dw && !nofp->nof_d_r_dw &&
6598 !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r) {
6599 continue;
6600 }
6601 lck_mtx_unlock(&np->n_openlock);
6602 error = nfs4_claim_delegated_state_for_open_file(nofp, flags);
6603 lck_mtx_lock(&np->n_openlock);
6604 if (error) {
6605 break;
6606 }
6607 goto restart;
6608 }
6609
6610 lck_mtx_unlock(&np->n_openlock);
6611
6612 return error;
6613 }
6614
6615 /*
6616 * Mark a node as needed to have its delegation returned.
6617 * Queue it up on the delegation return queue.
6618 * Make sure the thread is running.
6619 */
void
nfs4_delegation_return_enqueue(nfsnode_t np)
{
	struct nfsmount *nmp;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return;
	}

	/* flag the node as having a delegation that needs returning */
	lck_mtx_lock(&np->n_openlock);
	np->n_openflags |= N_DELEG_RETURN;
	lck_mtx_unlock(&np->n_openlock);

	/*
	 * Queue the node on the mount's delegation return queue (if it isn't
	 * already on it — NFSNOLIST marks a node not currently queued) and
	 * wake the socket thread to process the queue.
	 */
	lck_mtx_lock(&nmp->nm_lock);
	if (np->n_dreturn.tqe_next == NFSNOLIST) {
		TAILQ_INSERT_TAIL(&nmp->nm_dreturnq, np, n_dreturn);
	}
	nfs_mount_sock_thread_wake(nmp);
	lck_mtx_unlock(&nmp->nm_lock);
}
6641
6642 /*
6643 * return any delegation we may have for the given node
6644 */
6645 int
nfs4_delegation_return(nfsnode_t np,int flags,thread_t thd,kauth_cred_t cred)6646 nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred)
6647 {
6648 struct nfsmount *nmp;
6649 fhandle_t *fh;
6650 nfs_stateid dstateid;
6651 int error;
6652
6653 nmp = NFSTONMP(np);
6654 if (nfs_mount_gone(nmp)) {
6655 return ENXIO;
6656 }
6657
6658 fh = zalloc(nfs_fhandle_zone);
6659
6660 /* first, make sure the node's marked for delegation return */
6661 lck_mtx_lock(&np->n_openlock);
6662 np->n_openflags |= (N_DELEG_RETURN | N_DELEG_RETURNING);
6663 lck_mtx_unlock(&np->n_openlock);
6664
6665 /* make sure nobody else is using the delegation state */
6666 if ((error = nfs_open_state_set_busy(np, NULL))) {
6667 goto out;
6668 }
6669
6670 /* claim any delegated state */
6671 if ((error = nfs4_claim_delegated_state_for_node(np, flags))) {
6672 goto out;
6673 }
6674
6675 /* return the delegation */
6676 lck_mtx_lock(&np->n_openlock);
6677 dstateid = np->n_dstateid;
6678 fh->fh_len = np->n_fhsize;
6679 bcopy(np->n_fhp, fh->fh_data, fh->fh_len);
6680 lck_mtx_unlock(&np->n_openlock);
6681 error = nfs4_delegreturn_rpc(NFSTONMP(np), fh->fh_data, fh->fh_len, &dstateid, flags, thd, cred);
6682 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
6683 if ((error != ETIMEDOUT) && (error != NFSERR_MOVED) && (error != NFSERR_LEASE_MOVED)) {
6684 lck_mtx_lock(&np->n_openlock);
6685 np->n_openflags &= ~N_DELEG_MASK;
6686 lck_mtx_lock(&nmp->nm_lock);
6687 if (np->n_dlink.tqe_next != NFSNOLIST) {
6688 TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
6689 np->n_dlink.tqe_next = NFSNOLIST;
6690 }
6691 lck_mtx_unlock(&nmp->nm_lock);
6692 lck_mtx_unlock(&np->n_openlock);
6693 }
6694
6695 out:
6696 /* make sure it's no longer on the return queue and clear the return flags */
6697 lck_mtx_lock(&nmp->nm_lock);
6698 if (np->n_dreturn.tqe_next != NFSNOLIST) {
6699 TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
6700 np->n_dreturn.tqe_next = NFSNOLIST;
6701 }
6702 lck_mtx_unlock(&nmp->nm_lock);
6703 lck_mtx_lock(&np->n_openlock);
6704 np->n_openflags &= ~(N_DELEG_RETURN | N_DELEG_RETURNING);
6705 lck_mtx_unlock(&np->n_openlock);
6706
6707 if (error) {
6708 NP(np, "nfs4_delegation_return, error %d", error);
6709 if (error == ETIMEDOUT) {
6710 nfs_need_reconnect(nmp);
6711 }
6712 if (nfs_mount_state_error_should_restart(error)) {
6713 /* make sure recovery happens */
6714 lck_mtx_lock(&nmp->nm_lock);
6715 nfs_need_recover(nmp, nfs_mount_state_error_delegation_lost(error) ? NFSERR_EXPIRED : 0);
6716 lck_mtx_unlock(&nmp->nm_lock);
6717 }
6718 }
6719
6720 nfs_open_state_clear_busy(np);
6721 NFS_ZFREE(nfs_fhandle_zone, fh);
6722 return error;
6723 }
6724
6725 /*
6726 * RPC to return a delegation for a file handle
6727 */
int
nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, int flags, thread_t thd, kauth_cred_t cred)
{
	int error = 0, status, numops;
	uint64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	NFSREQ_SECINFO_SET(&si, NULL, fhp, fhlen, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, DELEGRETURN
	numops = 2;
	/* build the COMPOUND request; the nfsm_* macros accumulate into 'error' */
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_DELEGRETURN);
	nfsm_chain_add_stateid(error, &nmreq, sid);
	nfsm_chain_build_done(error, &nmreq);
	/* every op added must have been accounted for in the header count */
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags, &nmrep, &xid, &status);
	/* parse the reply: verify each op's status in order */
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
6763 #endif /* CONFIG_NFS4 */
6764
6765 /*
6766 * NFS read call.
6767 * Just call nfs_bioread() to do the work.
6768 *
6769 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
6770 * without first calling VNOP_OPEN, so we make sure the file is open here.
6771 */
int
nfs_vnop_read(
	struct vnop_read_args /* {
                               *  struct vnodeop_desc *a_desc;
                               *  vnode_t a_vp;
                               *  struct uio *a_uio;
                               *  int a_ioflag;
                               *  vfs_context_t a_context;
                               *  } */*ap)
{
	vnode_t vp = ap->a_vp;
	vfs_context_t ctx = ap->a_context;
	nfsnode_t np;
	struct nfsmount *nmp;
	struct nfs_open_owner *noop;
	struct nfs_open_file *nofp;
	int error;

	/* reads are only supported on regular files */
	if (vnode_vtype(ap->a_vp) != VREG) {
		return (vnode_vtype(vp) == VDIR) ? EISDIR : EPERM;
	}

	np = VTONFS(vp);
	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* refuse to read from a node whose open state has been revoked */
	if (np->n_flag & NREVOKE) {
		return EIO;
	}

	/* find (or create) the open owner for this cred/proc */
	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), vfs_context_proc(ctx), 1);
	if (!noop) {
		return ENOMEM;
	}
restart:
	error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
		/* the open state for this file was lost and not recovered */
		NP(np, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop->noo_cred));
		error = EIO;
	}
#if CONFIG_NFS4
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		/* the open needs to be reestablished with the server first */
		error = nfs4_reopen(nofp, vfs_context_thread(ctx));
		nofp = NULL;
		if (!error) {
			goto restart;
		}
	}
#endif
	if (error) {
		nfs_open_owner_rele(noop);
		return NFS_MAPERR(error);
	}
	/*
	 * Since the read path is a hot path, if we already have
	 * read access, lets go and try and do the read, without
	 * busying the mount and open file node for this open owner.
	 *
	 * N.B. This is inherently racy w.r.t. an execve using
	 * an already open file, in that the read at the end of
	 * this routine will be racing with a potential close.
	 * The code below ultimately has the same problem. In practice
	 * this does not seem to be an issue.
	 */
	if (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) {
		nfs_open_owner_rele(noop);
		goto do_read;
	}
	error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
	if (error) {
		nfs_open_owner_rele(noop);
		return NFS_MAPERR(error);
	}
	/*
	 * If we don't have a file already open with the access we need (read) then
	 * we need to open one. Otherwise we just co-opt an open. We might not already
	 * have access because we're trying to read the first page of the
	 * file for execve.
	 */
	error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
	if (error) {
		nfs_mount_state_in_use_end(nmp, 0);
		nfs_open_owner_rele(noop);
		return NFS_MAPERR(error);
	}
	if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
		/* we don't have the file open, so open it for read access if we're not denied */
		if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
			NP(np, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
			    nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
		}
		if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) {
			/* an existing open denies read access — fail the read */
			nfs_open_file_clear_busy(nofp);
			nfs_mount_state_in_use_end(nmp, 0);
			nfs_open_owner_rele(noop);
			return EPERM;
		}
		if (np->n_flag & NREVOKE) {
			/* recheck revocation now that we hold the busy state */
			error = EIO;
			nfs_open_file_clear_busy(nofp);
			nfs_mount_state_in_use_end(nmp, 0);
			nfs_open_owner_rele(noop);
			return NFS_MAPERR(error);
		}
		if (nmp->nm_vers < NFS_VER4) {
			/* NFS v2/v3 opens are always allowed - so just add it. */
			nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
		}
#if CONFIG_NFS4
		else {
			error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
		}
#endif
		if (!error) {
			/* mark this implicit open so it gets closed later */
			nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
		}
	}
	if (nofp) {
		nfs_open_file_clear_busy(nofp);
	}
	if (nfs_mount_state_in_use_end(nmp, error)) {
		/* mount state changed (e.g. recovery) — retry from the top */
		nofp = NULL;
		goto restart;
	}
	nfs_open_owner_rele(noop);
	if (error) {
		return NFS_MAPERR(error);
	}
do_read:
	/* the actual read is handled by the buffer cache read path */
	error = nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context);
	return NFS_MAPERR(error);
}
6905
6906 #if CONFIG_NFS4
6907 /*
6908 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6909 * Files are created using the NFSv4 OPEN RPC. So we must open the
6910 * file to create it and then close it.
6911 */
int
nfs4_vnop_create(
	struct vnop_create_args /* {
                                 *  struct vnodeop_desc *a_desc;
                                 *  vnode_t a_dvp;
                                 *  vnode_t *a_vpp;
                                 *  struct componentname *a_cnp;
                                 *  struct vnode_attr *a_vap;
                                 *  vfs_context_t a_context;
                                 *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	struct componentname *cnp = ap->a_cnp;
	struct vnode_attr *vap = ap->a_vap;
	vnode_t dvp = ap->a_dvp;
	vnode_t *vpp = ap->a_vpp;
	struct nfsmount *nmp;
	nfsnode_t np;
	int error = 0, busyerror = 0, accessMode, denyMode;
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *newnofp = NULL, *nofp = NULL;

	nmp = VTONMP(dvp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	if (vap) {
		nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);
	}

	/* find (or create) the open owner for this cred/proc */
	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), vfs_context_proc(ctx), 1);
	if (!noop) {
		return ENOMEM;
	}

restart:
	error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
	if (error) {
		nfs_open_owner_rele(noop);
		return NFS_MAPERR(error);
	}

	/* grab a provisional, nodeless open file */
	error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
	if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
		printf("nfs_vnop_create: LOST\n");
		error = EIO;
	}
	if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		/* This shouldn't happen given that this is a new, nodeless nofp */
		error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
		nfs_open_file_destroy(newnofp);
		newnofp = NULL;
		if (!error) {
			nfs_mount_state_in_use_end(nmp, 0);
			goto restart;
		}
	}
	if (!error) {
		error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
	}
	if (error) {
		if (newnofp) {
			nfs_open_file_destroy(newnofp);
		}
		newnofp = NULL;
		goto out;
	}

	/*
	 * We're just trying to create the file.
	 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
	 */
	accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
	denyMode = NFS_OPEN_SHARE_DENY_NONE;

	/* Do the open/create */
	error = nfs4_open_rpc(newnofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE, accessMode, denyMode);
	if ((error == EACCES) && vap && !(vap->va_vaflags & VA_EXCLUSIVE) &&
	    VATTR_IS_ACTIVE(vap, va_mode) && !(vap->va_mode & S_IWUSR)) {
		/*
		 * Hmm... it looks like we may have a situation where the request was
		 * retransmitted because we didn't get the first response which successfully
		 * created/opened the file and then the second time we were denied the open
		 * because the mode the file was created with doesn't allow write access.
		 *
		 * We'll try to work around this by temporarily updating the mode and
		 * retrying the open.
		 */
		struct vnode_attr vattr;

		/* first make sure it's there */
		int error2 = nfs_lookitup(VTONFS(dvp), cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
		if (!error2 && np) {
			nfs_node_unlock(np);
			*vpp = NFSTOV(np);
			if (vnode_vtype(NFSTOV(np)) == VREG) {
				/* add S_IWUSR, retry the open, then restore the requested mode */
				VATTR_INIT(&vattr);
				VATTR_SET(&vattr, va_mode, (vap->va_mode | S_IWUSR));
				if (!nfs4_setattr_rpc(np, &vattr, ctx)) {
					error2 = nfs4_open_rpc(newnofp, ctx, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, accessMode, denyMode);
					VATTR_INIT(&vattr);
					VATTR_SET(&vattr, va_mode, vap->va_mode);
					nfs4_setattr_rpc(np, &vattr, ctx);
					if (!error2) {
						error = 0;
					}
				}
			}
			if (error) {
				/* workaround failed — drop the node we looked up */
				vnode_put(*vpp);
				*vpp = NULL;
			}
		}
	}
	if (!error && !*vpp) {
		printf("nfs4_open_rpc returned without a node?\n");
		/* Hmmm... with no node, we have no filehandle and can't close it */
		error = EIO;
	}
	if (error) {
		/* need to cleanup our temporary nofp */
		nfs_open_file_clear_busy(newnofp);
		nfs_open_file_destroy(newnofp);
		newnofp = NULL;
		goto out;
	}
	/* After we have a node, add our open file struct to the node */
	np = VTONFS(*vpp);
	nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
	nofp = newnofp;
	error = nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);
	if (error) {
		/* This shouldn't happen, because we passed in a new nofp to use. */
		printf("nfs_open_file_find_internal failed! %d\n", error);
		goto out;
	} else if (nofp != newnofp) {
		/*
		 * Hmm... an open file struct already exists.
		 * Mark the existing one busy and merge our open into it.
		 * Then destroy the one we created.
		 * Note: there's no chance of an open confict because the
		 * open has already been granted.
		 */
		busyerror = nfs_open_file_set_busy(nofp, NULL);
		nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
		nofp->nof_stateid = newnofp->nof_stateid;
		if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
			nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
		}
		nfs_open_file_clear_busy(newnofp);
		nfs_open_file_destroy(newnofp);
	}
	newnofp = NULL;
	/* mark the node as holding a create-initiated open */
	nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
	nofp->nof_creator = current_thread();
out:
	if (nofp && !busyerror) {
		nfs_open_file_clear_busy(nofp);
	}
	if (nfs_mount_state_in_use_end(nmp, error)) {
		/* mount state changed (e.g. recovery) — retry from the top */
		nofp = newnofp = NULL;
		busyerror = 0;
		goto restart;
	}
	if (noop) {
		nfs_open_owner_rele(noop);
	}
	return NFS_MAPERR(error);
}
7084
7085 /*
7086 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
7087 */
int
nfs4_create_rpc(
	vfs_context_t ctx,
	nfsnode_t dnp,
	struct componentname *cnp,
	struct vnode_attr *vap,
	int type,
	char *link,
	nfsnode_t *npp)
{
	struct nfsmount *nmp;
	struct nfs_vattr *nvattr;
	int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
	int nfsvers, namedattrs, numops;
	u_int64_t xid = 0, savedxid = 0;
	nfsnode_t np = NULL;
	vnode_t newvp = NULL;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
	const char *tag;
	nfs_specdata sd;
	fhandle_t *fh;
	struct nfsreq *req;
	struct nfs_dulookup *dul;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	sd.specdata1 = sd.specdata2 = 0;

	/* pick the compound tag and validate type-specific arguments */
	switch (type) {
	case NFLNK:
		tag = "symlink";
		break;
	case NFBLK:
	case NFCHR:
		tag = "mknod";
		/* block/char devices require a device number */
		if (!VATTR_IS_ACTIVE(vap, va_rdev)) {
			return EINVAL;
		}
		sd.specdata1 = major(vap->va_rdev);
		sd.specdata2 = minor(vap->va_rdev);
		break;
	case NFSOCK:
	case NFFIFO:
		tag = "mknod";
		break;
	case NFDIR:
		tag = "mkdir";
		break;
	default:
		return EINVAL;
	}

	fh = zalloc(nfs_fhandle_zone);
	req = zalloc(nfs_req_zone);
	dul = kalloc_type(struct nfs_dulookup, Z_WAITOK);
	nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
	nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);

	error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
	if (!namedattrs) {
		nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
	}

	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	NVATTR_INIT(nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
	numops = 6;
	nfsm_chain_build_alloc_init(error, &nmreq, 66 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_CREATE);
	nfsm_chain_add_32(error, &nmreq, type);
	if (type == NFLNK) {
		/* symlinks carry the link target */
		nfsm_chain_add_name(error, &nmreq, link, strlen(link), nmp);
	} else if ((type == NFBLK) || (type == NFCHR)) {
		/* block/char devices carry major/minor numbers */
		nfsm_chain_add_32(error, &nmreq, sd.specdata1);
		nfsm_chain_add_32(error, &nmreq, sd.specdata2);
	}
	nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
	nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	/* ask for the new object's attributes, including its filehandle */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, R_NOUMOUNTINTR, NULL, &req);
	if (!error) {
		if (!namedattrs) {
			nfs_dulookup_start(dul, dnp, ctx);
		}
		error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	}

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	/* parse the reply in op order */
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_CREATE);
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	bmlen = NFS_ATTR_BITMAP_LEN;
	nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
	/* At this point if we have no error, the object was created. */
	/* if we don't get attributes, then we should lookitup. */
	create_error = error;
	nfsmout_if(error);
	nfs_vattr_set_supported(bitmap, vap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
	nfsmout_if(error);
	if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
		printf("nfs: create/%s didn't return filehandle? %s\n", tag, cnp->cn_nameptr);
		error = EBADRPC;
		goto nfsmout;
	}
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
	if (error) {
		NATTRINVALIDATE(dnp);
	}

nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror) {
		if (!create_error && (dnp->n_flag & NNEGNCENTRIES)) {
			/* a new entry exists, so drop cached negative lookups */
			dnp->n_flag &= ~NNEGNCENTRIES;
			cache_purge_negatives(NFSTOV(dnp));
		}
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
		/* nfs_getattr() will check changed and purge caches */
		nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
	}

	if (!error && fh->fh_len) {
		/* create the vnode with the filehandle and attributes */
		xid = savedxid;
		error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &np);
		if (!error) {
			newvp = NFSTOV(np);
		}
	}

	if (!namedattrs) {
		nfs_dulookup_finish(dul, dnp, ctx);
	}

	NVATTR_CLEANUP(nvattr);
	NFS_ZFREE(nfs_fhandle_zone, fh);
	NFS_ZFREE(nfs_req_zone, req);
	kfree_type(struct nfs_dulookup, dul);
	zfree(KT_NFS_VATTR, nvattr);

	/*
	 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
	 * if we can succeed in looking up the object.
	 */
	if ((create_error == EEXIST) || (!create_error && !newvp)) {
		error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
		if (!error) {
			newvp = NFSTOV(np);
			/* an existing object of the wrong type is a genuine EEXIST */
			if (vnode_vtype(newvp) != nfstov_type(type, nfsvers)) {
				error = EEXIST;
			}
		}
	}
	if (!busyerror) {
		nfs_node_clear_busy(dnp);
	}
	if (error) {
		if (newvp) {
			nfs_node_unlock(np);
			vnode_put(newvp);
		}
	} else {
		/* return the new node locked->unlocked to the caller */
		nfs_node_unlock(np);
		*npp = np;
	}
	return error;
}
7305
7306 int
nfs4_vnop_mknod(struct vnop_mknod_args * ap)7307 nfs4_vnop_mknod(
7308 struct vnop_mknod_args /* {
7309 * struct vnodeop_desc *a_desc;
7310 * vnode_t a_dvp;
7311 * vnode_t *a_vpp;
7312 * struct componentname *a_cnp;
7313 * struct vnode_attr *a_vap;
7314 * vfs_context_t a_context;
7315 * } */*ap)
7316 {
7317 nfsnode_t np = NULL;
7318 struct nfsmount *nmp;
7319 int error;
7320
7321 nmp = VTONMP(ap->a_dvp);
7322 if (nfs_mount_gone(nmp)) {
7323 return ENXIO;
7324 }
7325
7326 if (!VATTR_IS_ACTIVE(ap->a_vap, va_type)) {
7327 return EINVAL;
7328 }
7329 switch (ap->a_vap->va_type) {
7330 case VBLK:
7331 case VCHR:
7332 case VFIFO:
7333 case VSOCK:
7334 break;
7335 default:
7336 return ENOTSUP;
7337 }
7338
7339 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7340 vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np);
7341 if (!error) {
7342 *ap->a_vpp = NFSTOV(np);
7343 }
7344 return NFS_MAPERR(error);
7345 }
7346
7347 int
nfs4_vnop_mkdir(struct vnop_mkdir_args * ap)7348 nfs4_vnop_mkdir(
7349 struct vnop_mkdir_args /* {
7350 * struct vnodeop_desc *a_desc;
7351 * vnode_t a_dvp;
7352 * vnode_t *a_vpp;
7353 * struct componentname *a_cnp;
7354 * struct vnode_attr *a_vap;
7355 * vfs_context_t a_context;
7356 * } */*ap)
7357 {
7358 nfsnode_t np = NULL;
7359 int error;
7360
7361 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7362 NFDIR, NULL, &np);
7363 if (!error) {
7364 *ap->a_vpp = NFSTOV(np);
7365 }
7366 return NFS_MAPERR(error);
7367 }
7368
7369 int
nfs4_vnop_symlink(struct vnop_symlink_args * ap)7370 nfs4_vnop_symlink(
7371 struct vnop_symlink_args /* {
7372 * struct vnodeop_desc *a_desc;
7373 * vnode_t a_dvp;
7374 * vnode_t *a_vpp;
7375 * struct componentname *a_cnp;
7376 * struct vnode_attr *a_vap;
7377 * char *a_target;
7378 * vfs_context_t a_context;
7379 * } */*ap)
7380 {
7381 nfsnode_t np = NULL;
7382 int error;
7383
7384 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7385 NFLNK, ap->a_target, &np);
7386 if (!error) {
7387 *ap->a_vpp = NFSTOV(np);
7388 }
7389 return NFS_MAPERR(error);
7390 }
7391
int
nfs4_vnop_link(
	struct vnop_link_args /* {
                               *  struct vnodeop_desc *a_desc;
                               *  vnode_t a_vp;
                               *  vnode_t a_tdvp;
                               *  struct componentname *a_cnp;
                               *  vfs_context_t a_context;
                               *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	vnode_t tdvp = ap->a_tdvp;
	struct componentname *cnp = ap->a_cnp;
	int error = 0, lockerror = ENOENT, status;
	struct nfsmount *nmp;
	nfsnode_t np = VTONFS(vp);
	nfsnode_t tdnp = VTONFS(tdvp);
	int nfsvers, numops;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	/* hard links can't cross mounts */
	if (vnode_mount(vp) != vnode_mount(tdvp)) {
		return EXDEV;
	}

	nmp = VTONMP(vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* refuse to operate on referral trigger nodes */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/*
	 * Push all writes to the server, so that the attribute cache
	 * doesn't get "out of sync" with the server.
	 * XXX There should be a better way!
	 */
	nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);

	if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx)))) {
		return NFS_MAPERR(error);
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 29 * NFSX_UNSIGNED + cnp->cn_namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "link", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_LINK);
	nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(tdnp, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, R_NOUMOUNTINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock2(tdnp, np))) {
		error = lockerror;
		goto nfsmout;
	}
	/* parse the reply in op order */
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LINK);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
	if (error) {
		NATTRINVALIDATE(tdnp);
	}
	/* link attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (error) {
		NATTRINVALIDATE(np);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		tdnp->n_flag |= NMODIFIED;
	}
	/* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
	if (error == EEXIST) {
		error = 0;
	}
	if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
		/* a new entry exists, so drop cached negative lookups */
		tdnp->n_flag &= ~NNEGNCENTRIES;
		cache_purge_negatives(tdvp);
	}
	if (!lockerror) {
		nfs_node_unlock2(tdnp, np);
	}
	nfs_node_clear_busy2(tdnp, np);
	return NFS_MAPERR(error);
}
7521
int
nfs4_vnop_rmdir(
	struct vnop_rmdir_args /* {
                                *  struct vnodeop_desc *a_desc;
                                *  vnode_t a_dvp;
                                *  vnode_t a_vp;
                                *  struct componentname *a_cnp;
                                *  vfs_context_t a_context;
                                *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	vnode_t dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	struct nfsmount *nmp;
	int error = 0, namedattrs;
	nfsnode_t np = VTONFS(vp);
	nfsnode_t dnp = VTONFS(dvp);
	struct nfs_dulookup *dul;

	/* only directories can be removed with rmdir */
	if (vnode_vtype(vp) != VDIR) {
		return EINVAL;
	}

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);

	if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)))) {
		return NFS_MAPERR(error);
	}

	/* kick off a delayed-update lookup alongside the remove (unless named attrs) */
	dul = kalloc_type(struct nfs_dulookup, Z_WAITOK);
	if (!namedattrs) {
		nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
		nfs_dulookup_start(dul, dnp, ctx);
	}

	error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx));

	nfs_name_cache_purge(dnp, np, cnp, ctx);
	/* nfs_getattr() will check changed and purge caches */
	nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
	if (!namedattrs) {
		nfs_dulookup_finish(dul, dnp, ctx);
	}
	nfs_node_clear_busy2(dnp, np);

	/*
	 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
	 */
	if (error == ENOENT) {
		error = 0;
	}
	if (!error) {
		/*
		 * remove nfsnode from hash now so we can't accidentally find it
		 * again if another object gets created with the same filehandle
		 * before this vnode gets reclaimed
		 */
		lck_mtx_lock(&nfs_node_hash_mutex);
		if (np->n_hflag & NHHASHED) {
			LIST_REMOVE(np, n_hash);
			np->n_hflag &= ~NHHASHED;
			FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
		}
		lck_mtx_unlock(&nfs_node_hash_mutex);
	}
	kfree_type(struct nfs_dulookup, dul);
	return NFS_MAPERR(error);
}
7596
7597 /*
7598 * NFSv4 Named Attributes
7599 *
7600 * Both the extended attributes interface and the named streams interface
7601 * are backed by NFSv4 named attributes. The implementations for both use
7602 * a common set of routines in an attempt to reduce code duplication, to
7603 * increase efficiency, to increase caching of both names and data, and to
7604 * confine the complexity.
7605 *
7606 * Each NFS node caches its named attribute directory's file handle.
7607 * The directory nodes for the named attribute directories are handled
7608 * exactly like regular directories (with a couple minor exceptions).
7609 * Named attribute nodes are also treated as much like regular files as
7610 * possible.
7611 *
7612 * Most of the heavy lifting is done by nfs4_named_attr_get().
7613 */
7614
7615 /*
7616 * Get the given node's attribute directory node.
7617 * If !fetch, then only return a cached node.
7618 * Otherwise, we will attempt to fetch the node from the server.
7619 * (Note: the node should be marked busy.)
7620 */
nfsnode_t
nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx)
{
	nfsnode_t adnp = NULL;
	struct nfsmount *nmp;
	int error = 0, status, numops;
	struct nfsm_chain nmreq, nmrep;
	u_int64_t xid;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	fhandle_t *fh;
	struct nfs_vattr *nvattr;
	struct componentname cn;
	struct nfsreq *req;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return NULL;
	}
	/* referral trigger nodes don't get attribute directories */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return NULL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	fh = zalloc(nfs_fhandle_zone);
	req = zalloc(nfs_req_zone);
	nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
	NVATTR_INIT(nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* the attribute directory is named with the special fork specifier */
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
	cn.cn_namelen = NFS_STRLEN_INT(_PATH_FORKSPECIFIER);
	cn.cn_nameiop = LOOKUP;

	if (np->n_attrdirfh) {
		/*
		 * We have a cached attrdir file handle (stored as a
		 * length-prefixed byte buffer: first byte is the length,
		 * the handle data follows).  Try to get its node without
		 * talking to the server (NG_NOCREATE).
		 */
		// XXX can't set parent correctly (to np) yet
		error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh + 1, *np->n_attrdirfh,
		    NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp);
		if (adnp) {
			goto nfsmout;
		}
	}
	if (!fetch) {
		/* caller only wanted a cached node and we don't have one */
		error = ENOENT;
		goto nfsmout;
	}

	// PUTFH, OPENATTR, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "openattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_OPENATTR);
	nfsm_chain_add_32(error, &nmreq, 0); /* createdir == FALSE: don't create the attrdir */
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	/* we need the file handle attribute to be able to build the node */
	NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
	nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
	    NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
	if (!error) {
		error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	}

	/* parse the reply: PUTFH, OPENATTR, GETATTR */
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
	nfsmout_if(error);
	if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh->fh_len) {
		/* server didn't give us a usable file handle for the attrdir */
		error = ENOENT;
		goto nfsmout;
	}
	if (!np->n_attrdirfh || (*np->n_attrdirfh != fh->fh_len)) {
		/* (re)allocate attrdir fh buffer */
		if (np->n_attrdirfh) {
			kfree_data(np->n_attrdirfh, *np->n_attrdirfh + 1);
		}
		np->n_attrdirfh = kalloc_data(fh->fh_len + 1, Z_WAITOK);
	}
	if (!np->n_attrdirfh) {
		error = ENOMEM;
		goto nfsmout;
	}
	/* cache the attrdir fh in the node */
	*np->n_attrdirfh = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
	bcopy(fh->fh_data, np->n_attrdirfh + 1, fh->fh_len);
	/* create node for attrdir */
	// XXX can't set parent correctly (to np) yet
	error = nfs_nget(NFSTOMP(np), NULL, &cn, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, 0, &adnp);
nfsmout:
	NVATTR_CLEANUP(nvattr);
	NFS_ZFREE(nfs_fhandle_zone, fh);
	NFS_ZFREE(nfs_req_zone, req);
	zfree(KT_NFS_VATTR, nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (adnp) {
		/* sanity check that this node is an attribute directory */
		if (adnp->n_vattr.nva_type != VDIR) {
			error = EINVAL;
		}
		if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
			error = EINVAL;
		}
		/* nfs_nget() returned the node locked */
		nfs_node_unlock(adnp);
		if (error) {
			vnode_put(NFSTOV(adnp));
		}
	}
	return error ? NULL : adnp;
}
7747
7748 /*
7749 * Get the given node's named attribute node for the name given.
7750 *
7751 * In an effort to increase the performance of named attribute access, we try
7752 * to reduce server requests by doing the following:
7753 *
7754 * - cache the node's named attribute directory file handle in the node
7755 * - maintain a directory vnode for the attribute directory
7756 * - use name cache entries (positive and negative) to speed up lookups
7757 * - optionally open the named attribute (with the given accessMode) in the same RPC
7758 * - combine attribute directory retrieval with the lookup/open RPC
7759 * - optionally prefetch the named attribute's first block of data in the same RPC
7760 *
7761 * Also, in an attempt to reduce the number of copies/variations of this code,
7762 * parts of the RPC building/processing code are conditionalized on what is
7763 * needed for any particular request (openattr, lookup vs. open, read).
7764 *
7765 * Note that because we may not have the attribute directory node when we start
7766 * the lookup/open, we lock both the node and the attribute directory node.
7767 */
7768
7769 #define NFS_GET_NAMED_ATTR_CREATE 0x1
7770 #define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
7771 #define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
7772 #define NFS_GET_NAMED_ATTR_PREFETCH 0x8
7773
7774 int
nfs4_named_attr_get(nfsnode_t np,struct componentname * cnp,uint32_t accessMode,int flags,vfs_context_t ctx,nfsnode_t * anpp,struct nfs_open_file ** nofpp)7775 nfs4_named_attr_get(
7776 nfsnode_t np,
7777 struct componentname *cnp,
7778 uint32_t accessMode,
7779 int flags,
7780 vfs_context_t ctx,
7781 nfsnode_t *anpp,
7782 struct nfs_open_file **nofpp)
7783 {
7784 struct nfsmount *nmp;
7785 int error = 0, open_error = EIO;
7786 int inuse = 0, adlockerror = ENOENT, busyerror = ENOENT, adbusyerror = ENOENT, nofpbusyerror = ENOENT;
7787 int create, guarded, prefetch, truncate, noopbusy = 0;
7788 int open, status, numops, hadattrdir, negnamecache;
7789 struct nfs_vattr *nvattr;
7790 struct vnode_attr vattr;
7791 nfsnode_t adnp = NULL, anp = NULL;
7792 vnode_t avp = NULL;
7793 u_int64_t xid = 0, savedxid = 0;
7794 struct nfsm_chain nmreq, nmrep;
7795 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
7796 uint32_t denyMode = 0, rflags, delegation, recall, eof, rlen, retlen;
7797 nfs_stateid stateid, dstateid;
7798 fhandle_t *fh;
7799 struct nfs_open_owner *noop = NULL;
7800 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
7801 struct vnop_access_args naa;
7802 thread_t thd;
7803 kauth_cred_t cred;
7804 struct timeval now;
7805 char sbuf[64], *s;
7806 uint32_t ace_type, ace_flags, ace_mask, len, slen;
7807 struct kauth_ace ace;
7808 struct nfsreq *req;
7809 struct nfsreq_secinfo_args si;
7810
7811 *anpp = NULL;
7812 rflags = delegation = recall = eof = rlen = retlen = 0;
7813 ace.ace_flags = 0;
7814 s = sbuf;
7815 slen = sizeof(sbuf);
7816
7817 nmp = NFSTONMP(np);
7818 if (nfs_mount_gone(nmp)) {
7819 return ENXIO;
7820 }
7821 fh = zalloc(nfs_fhandle_zone);
7822 req = zalloc(nfs_req_zone);
7823 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
7824 NVATTR_INIT(nvattr);
7825 fh->fh_len = 0;
7826 bzero(&dstateid, sizeof(dstateid));
7827 negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
7828 thd = vfs_context_thread(ctx);
7829 cred = vfs_context_ucred(ctx);
7830 create = (flags & NFS_GET_NAMED_ATTR_CREATE) ? NFS_OPEN_CREATE : NFS_OPEN_NOCREATE;
7831 guarded = (flags & NFS_GET_NAMED_ATTR_CREATE_GUARDED) ? NFS_CREATE_GUARDED : NFS_CREATE_UNCHECKED;
7832 truncate = (flags & NFS_GET_NAMED_ATTR_TRUNCATE);
7833 prefetch = (flags & NFS_GET_NAMED_ATTR_PREFETCH);
7834
7835 if (!create) {
7836 error = nfs_getattr(np, nvattr, ctx, NGA_CACHED);
7837 if (error) {
7838 goto out_free;
7839 }
7840 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
7841 !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
7842 error = ENOATTR;
7843 goto out_free;
7844 }
7845 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) {
7846 /* shouldn't happen... but just be safe */
7847 printf("nfs4_named_attr_get: create with no access %s\n", cnp->cn_nameptr);
7848 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
7849 }
7850 open = (accessMode != NFS_OPEN_SHARE_ACCESS_NONE);
7851 if (open) {
7852 /*
7853 * We're trying to open the file.
7854 * We'll create/open it with the given access mode,
7855 * and set NFS_OPEN_FILE_CREATE.
7856 */
7857 denyMode = NFS_OPEN_SHARE_DENY_NONE;
7858 if (prefetch && guarded) {
7859 prefetch = 0; /* no sense prefetching data that can't be there */
7860 }
7861 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), vfs_context_proc(ctx), 1);
7862 if (!noop) {
7863 error = ENOMEM;
7864 goto out_free;
7865 }
7866 }
7867
7868 if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
7869 goto out_free;
7870 }
7871
7872 adnp = nfs4_named_attr_dir_get(np, 0, ctx);
7873 hadattrdir = (adnp != NULL);
7874 if (prefetch) {
7875 microuptime(&now);
7876 /* use the special state ID because we don't have a real one to send */
7877 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
7878 rlen = MIN(nmp->nm_rsize, nmp->nm_biosize);
7879 }
7880 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7881 nfsm_chain_null(&nmreq);
7882 nfsm_chain_null(&nmrep);
7883
7884 if (hadattrdir) {
7885 if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx)))) {
7886 goto nfsmout;
7887 }
7888 /* nfs_getattr() will check changed and purge caches */
7889 error = nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7890 nfsmout_if(error);
7891 error = cache_lookup(NFSTOV(adnp), &avp, cnp);
7892 switch (error) {
7893 case ENOENT:
7894 /* negative cache entry */
7895 goto nfsmout;
7896 case 0:
7897 /* cache miss */
7898 /* try dir buf cache lookup */
7899 error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0, NULL);
7900 if (!error && anp) {
7901 /* dir buf cache hit */
7902 *anpp = anp;
7903 error = -1;
7904 }
7905 if (error != -1) { /* cache miss */
7906 break;
7907 }
7908 OS_FALLTHROUGH;
7909 case -1:
7910 /* cache hit, not really an error */
7911 OSAddAtomic64(1, &nfsclntstats.lookupcache_hits);
7912 if (!anp && avp) {
7913 *anpp = anp = VTONFS(avp);
7914 }
7915
7916 nfs_node_clear_busy(adnp);
7917 adbusyerror = ENOENT;
7918
7919 /* check for directory access */
7920 naa.a_desc = &vnop_access_desc;
7921 naa.a_vp = NFSTOV(adnp);
7922 naa.a_action = KAUTH_VNODE_SEARCH;
7923 naa.a_context = ctx;
7924
7925 /* compute actual success/failure based on accessibility */
7926 error = nfs_vnop_access(&naa);
7927 OS_FALLTHROUGH;
7928 default:
7929 /* we either found it, or hit an error */
7930 if (!error && guarded) {
7931 /* found cached entry but told not to use it */
7932 error = EEXIST;
7933 vnode_put(NFSTOV(anp));
7934 *anpp = anp = NULL;
7935 }
7936 /* we're done if error or we don't need to open */
7937 if (error || !open) {
7938 goto nfsmout;
7939 }
7940 /* no error and we need to open... */
7941 }
7942 }
7943
7944 if (open) {
7945 restart:
7946 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
7947 if (error) {
7948 nfs_open_owner_rele(noop);
7949 noop = NULL;
7950 goto nfsmout;
7951 }
7952 inuse = 1;
7953
7954 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7955 error = nfs_open_file_find(anp, noop, &newnofp, 0, 0, 1);
7956 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
7957 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop->noo_cred), cnp->cn_nameptr);
7958 error = EIO;
7959 }
7960 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
7961 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
7962 nfs_open_file_destroy(newnofp);
7963 newnofp = NULL;
7964 if (!error) {
7965 nfs_mount_state_in_use_end(nmp, 0);
7966 inuse = 0;
7967 goto restart;
7968 }
7969 }
7970 if (!error) {
7971 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
7972 }
7973 if (error) {
7974 if (newnofp) {
7975 nfs_open_file_destroy(newnofp);
7976 }
7977 newnofp = NULL;
7978 goto nfsmout;
7979 }
7980 if (anp) {
7981 /*
7982 * We already have the node. So we just need to open
7983 * it - which we may be able to do with a delegation.
7984 */
7985 open_error = error = nfs4_open(anp, newnofp, accessMode, denyMode, ctx);
7986 if (!error) {
7987 /* open succeeded, so our open file is no longer temporary */
7988 nofp = newnofp;
7989 nofpbusyerror = 0;
7990 newnofp = NULL;
7991 if (nofpp) {
7992 *nofpp = nofp;
7993 }
7994 }
7995 goto nfsmout;
7996 }
7997 }
7998
7999 /*
8000 * We either don't have the attrdir or we didn't find the attribute
8001 * in the name cache, so we need to talk to the server.
8002 *
8003 * If we don't have the attrdir, we'll need to ask the server for that too.
8004 * If the caller is requesting that the attribute be created, we need to
8005 * make sure the attrdir is created.
8006 * The caller may also request that the first block of an existing attribute
8007 * be retrieved at the same time.
8008 */
8009
8010 if (open) {
8011 /* need to mark the open owner busy during the RPC */
8012 if ((error = nfs_open_owner_set_busy(noop, thd))) {
8013 goto nfsmout;
8014 }
8015 noopbusy = 1;
8016 }
8017
8018 /*
8019 * We'd like to get updated post-open/lookup attributes for the
8020 * directory and we may also want to prefetch some data via READ.
8021 * We'd like the READ results to be last so that we can leave the
8022 * data in the mbufs until the end.
8023 *
8024 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
8025 */
8026 numops = 5;
8027 if (!hadattrdir) {
8028 numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR
8029 }
8030 if (prefetch) {
8031 numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
8032 }
8033 nfsm_chain_build_alloc_init(error, &nmreq, 64 * NFSX_UNSIGNED + cnp->cn_namelen);
8034 nfsm_chain_add_compound_header(error, &nmreq, "getnamedattr", nmp->nm_minor_vers, numops);
8035 if (hadattrdir) {
8036 numops--;
8037 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
8038 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
8039 } else {
8040 numops--;
8041 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
8042 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
8043 numops--;
8044 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_OPENATTR);
8045 nfsm_chain_add_32(error, &nmreq, create ? 1 : 0);
8046 numops--;
8047 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
8048 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
8049 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
8050 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
8051 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
8052 }
8053 if (open) {
8054 numops--;
8055 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_OPEN);
8056 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
8057 nfsm_chain_add_32(error, &nmreq, accessMode);
8058 nfsm_chain_add_32(error, &nmreq, denyMode);
8059 nfsm_chain_add_openowner(error, &nmreq, nmp, noop);
8060 nfsm_chain_add_32(error, &nmreq, create);
8061 if (create) {
8062 nfsm_chain_add_32(error, &nmreq, guarded);
8063 VATTR_INIT(&vattr);
8064 if (truncate) {
8065 VATTR_SET(&vattr, va_data_size, 0);
8066 }
8067 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
8068 }
8069 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
8070 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
8071 } else {
8072 numops--;
8073 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_LOOKUP);
8074 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
8075 }
8076 numops--;
8077 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
8078 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
8079 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
8080 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
8081 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
8082 if (prefetch) {
8083 numops--;
8084 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_SAVEFH);
8085 }
8086 if (hadattrdir) {
8087 numops--;
8088 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
8089 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
8090 } else {
8091 numops--;
8092 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
8093 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
8094 numops--;
8095 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_OPENATTR);
8096 nfsm_chain_add_32(error, &nmreq, 0);
8097 }
8098 numops--;
8099 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
8100 nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
8101 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
8102 if (prefetch) {
8103 numops--;
8104 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_RESTOREFH);
8105 numops--;
8106 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_NVERIFY);
8107 VATTR_INIT(&vattr);
8108 VATTR_SET(&vattr, va_data_size, 0);
8109 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
8110 numops--;
8111 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_READ);
8112 nfsm_chain_add_stateid(error, &nmreq, &stateid);
8113 nfsm_chain_add_64(error, &nmreq, 0);
8114 nfsm_chain_add_32(error, &nmreq, rlen);
8115 }
8116 nfsm_chain_build_done(error, &nmreq);
8117 nfsm_assert(error, (numops == 0), EPROTO);
8118 nfsmout_if(error);
8119 error = nfs_request_async(hadattrdir ? adnp : np, NULL, &nmreq, NFSPROC4_COMPOUND,
8120 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? R_NOINTR: 0, NULL, &req);
8121 if (!error) {
8122 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
8123 }
8124
8125 if (hadattrdir && ((adlockerror = nfs_node_lock(adnp)))) {
8126 error = adlockerror;
8127 }
8128 savedxid = xid;
8129 nfsm_chain_skip_tag(error, &nmrep);
8130 nfsm_chain_get_32(error, &nmrep, numops);
8131 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
8132 if (!hadattrdir) {
8133 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
8134 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8135 nfsmout_if(error);
8136 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
8137 nfsmout_if(error);
8138 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) && fh->fh_len) {
8139 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh->fh_len)) {
8140 /* (re)allocate attrdir fh buffer */
8141 if (np->n_attrdirfh) {
8142 kfree_data(np->n_attrdirfh, *np->n_attrdirfh + 1);
8143 }
8144 np->n_attrdirfh = kalloc_data(fh->fh_len + 1, Z_WAITOK);
8145 }
8146 if (np->n_attrdirfh) {
8147 /* remember the attrdir fh in the node */
8148 *np->n_attrdirfh = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
8149 bcopy(fh->fh_data, np->n_attrdirfh + 1, fh->fh_len);
8150 /* create busied node for attrdir */
8151 struct componentname cn;
8152 bzero(&cn, sizeof(cn));
8153 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
8154 cn.cn_namelen = NFS_STRLEN_INT(_PATH_FORKSPECIFIER);
8155 cn.cn_nameiop = LOOKUP;
8156 // XXX can't set parent correctly (to np) yet
8157 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, 0, &adnp);
8158 if (!error) {
8159 adlockerror = 0;
8160 /* set the node busy */
8161 SET(adnp->n_flag, NBUSY);
8162 adbusyerror = 0;
8163 }
8164 /* if no adnp, oh well... */
8165 error = 0;
8166 }
8167 }
8168 NVATTR_CLEANUP(nvattr);
8169 fh->fh_len = 0;
8170 }
8171 if (open) {
8172 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
8173 nfs_owner_seqid_increment(noop, NULL, error);
8174 nfsm_chain_get_stateid(error, &nmrep, &newnofp->nof_stateid);
8175 nfsm_chain_check_change_info_open(error, &nmrep, adnp, create ? NFS_CREATE_UNCHECKED : 0);
8176 nfsm_chain_get_32(error, &nmrep, rflags);
8177 bmlen = NFS_ATTR_BITMAP_LEN;
8178 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
8179 nfsm_chain_get_32(error, &nmrep, delegation);
8180 if (!error) {
8181 switch (delegation) {
8182 case NFS_OPEN_DELEGATE_NONE:
8183 break;
8184 case NFS_OPEN_DELEGATE_READ:
8185 case NFS_OPEN_DELEGATE_WRITE:
8186 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
8187 nfsm_chain_get_32(error, &nmrep, recall);
8188 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
8189 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
8190 }
8191 /* if we have any trouble accepting the ACE, just invalidate it */
8192 ace_type = ace_flags = ace_mask = len = 0;
8193 nfsm_chain_get_32(error, &nmrep, ace_type);
8194 nfsm_chain_get_32(error, &nmrep, ace_flags);
8195 nfsm_chain_get_32(error, &nmrep, ace_mask);
8196 nfsm_chain_get_32(error, &nmrep, len);
8197 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
8198 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
8199 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
8200 if (!error && (len >= slen)) {
8201 s = kalloc_data(len + 1, Z_WAITOK);
8202 if (s) {
8203 slen = len + 1;
8204 } else {
8205 ace.ace_flags = 0;
8206 }
8207 }
8208 if (s) {
8209 nfsm_chain_get_opaque(error, &nmrep, len, s);
8210 } else {
8211 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
8212 }
8213 if (!error && s) {
8214 s[len] = '\0';
8215 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
8216 ace.ace_flags = 0;
8217 }
8218 }
8219 if (error || !s) {
8220 ace.ace_flags = 0;
8221 }
8222 if (s && (s != sbuf)) {
8223 kfree_data(s, slen);
8224 }
8225 break;
8226 default:
8227 error = EBADRPC;
8228 break;
8229 }
8230 }
8231 /* At this point if we have no error, the object was created/opened. */
8232 open_error = error;
8233 } else {
8234 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOOKUP);
8235 }
8236 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8237 nfsmout_if(error);
8238 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
8239 nfsmout_if(error);
8240 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh->fh_len) {
8241 error = EIO;
8242 goto nfsmout;
8243 }
8244 if (prefetch) {
8245 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
8246 }
8247 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
8248 if (!hadattrdir) {
8249 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
8250 }
8251 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8252 nfsmout_if(error);
8253 xid = savedxid;
8254 nfsm_chain_loadattr(error, &nmrep, adnp, nmp->nm_vers, &xid);
8255 nfsmout_if(error);
8256
8257 if (open) {
8258 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
8259 newnofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
8260 }
8261 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
8262 if (adnp) {
8263 nfs_node_unlock(adnp);
8264 adlockerror = ENOENT;
8265 }
8266 NVATTR_CLEANUP(nvattr);
8267 error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh->fh_data, fh->fh_len, noop, &newnofp->nof_stateid, thd, cred, nvattr, &xid);
8268 nfsmout_if(error);
8269 savedxid = xid;
8270 if ((adlockerror = nfs_node_lock(adnp))) {
8271 error = adlockerror;
8272 }
8273 }
8274 }
8275
8276 nfsmout:
8277 if (open && adnp && !adlockerror) {
8278 if (!open_error && (adnp->n_flag & NNEGNCENTRIES)) {
8279 adnp->n_flag &= ~NNEGNCENTRIES;
8280 cache_purge_negatives(NFSTOV(adnp));
8281 }
8282 adnp->n_flag |= NMODIFIED;
8283 nfs_node_unlock(adnp);
8284 adlockerror = ENOENT;
8285 nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
8286 }
8287 if (adnp && !adlockerror && (error == ENOENT) &&
8288 (cnp->cn_flags & MAKEENTRY) && (cnp->cn_nameiop != CREATE) && negnamecache) {
8289 /* add a negative entry in the name cache */
8290 cache_enter(NFSTOV(adnp), NULL, cnp);
8291 adnp->n_flag |= NNEGNCENTRIES;
8292 }
8293 if (adnp && !adlockerror) {
8294 nfs_node_unlock(adnp);
8295 adlockerror = ENOENT;
8296 }
8297 if (!error && !anp && fh->fh_len) {
8298 /* create the vnode with the filehandle and attributes */
8299 xid = savedxid;
8300 error = nfs_nget(NFSTOMP(np), adnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &anp);
8301 if (!error) {
8302 *anpp = anp;
8303 nfs_node_unlock(anp);
8304 }
8305 if (!error && open) {
8306 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
8307 /* After we have a node, add our open file struct to the node */
8308 nofp = newnofp;
8309 error = nfs_open_file_find_internal(anp, noop, &nofp, 0, 0, 0);
8310 if (error) {
8311 /* This shouldn't happen, because we passed in a new nofp to use. */
8312 printf("nfs_open_file_find_internal failed! %d\n", error);
8313 nofp = NULL;
8314 } else if (nofp != newnofp) {
8315 /*
8316 * Hmm... an open file struct already exists.
8317 * Mark the existing one busy and merge our open into it.
8318 * Then destroy the one we created.
8319 * Note: there's no chance of an open confict because the
8320 * open has already been granted.
8321 */
8322 nofpbusyerror = nfs_open_file_set_busy(nofp, NULL);
8323 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
8324 nofp->nof_stateid = newnofp->nof_stateid;
8325 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
8326 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
8327 }
8328 nfs_open_file_clear_busy(newnofp);
8329 nfs_open_file_destroy(newnofp);
8330 newnofp = NULL;
8331 }
8332 if (!error) {
8333 newnofp = NULL;
8334 nofpbusyerror = 0;
8335 /* mark the node as holding a create-initiated open */
8336 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
8337 nofp->nof_creator = current_thread();
8338 if (nofpp) {
8339 *nofpp = nofp;
8340 }
8341 }
8342 }
8343 }
8344 NVATTR_CLEANUP(nvattr);
8345 if (open && ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE))) {
8346 if (!error && anp && !recall) {
8347 /* stuff the delegation state in the node */
8348 lck_mtx_lock(&anp->n_openlock);
8349 anp->n_openflags &= ~N_DELEG_MASK;
8350 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8351 anp->n_dstateid = dstateid;
8352 anp->n_dace = ace;
8353 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8354 lck_mtx_lock(&nmp->nm_lock);
8355 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8356 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
8357 }
8358 lck_mtx_unlock(&nmp->nm_lock);
8359 }
8360 lck_mtx_unlock(&anp->n_openlock);
8361 } else {
8362 /* give the delegation back */
8363 if (anp) {
8364 if (NFS_CMPFH(anp, fh->fh_data, fh->fh_len)) {
8365 /* update delegation state and return it */
8366 lck_mtx_lock(&anp->n_openlock);
8367 anp->n_openflags &= ~N_DELEG_MASK;
8368 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8369 anp->n_dstateid = dstateid;
8370 anp->n_dace = ace;
8371 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8372 lck_mtx_lock(&nmp->nm_lock);
8373 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8374 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
8375 }
8376 lck_mtx_unlock(&nmp->nm_lock);
8377 }
8378 lck_mtx_unlock(&anp->n_openlock);
8379 /* don't need to send a separate delegreturn for fh */
8380 fh->fh_len = 0;
8381 }
8382 /* return anp's current delegation */
8383 nfs4_delegation_return(anp, 0, thd, cred);
8384 }
8385 if (fh->fh_len) { /* return fh's delegation if it wasn't for anp */
8386 nfs4_delegreturn_rpc(nmp, fh->fh_data, fh->fh_len, &dstateid, 0, thd, cred);
8387 }
8388 }
8389 }
8390 if (open) {
8391 if (newnofp) {
8392 /* need to cleanup our temporary nofp */
8393 nfs_open_file_clear_busy(newnofp);
8394 nfs_open_file_destroy(newnofp);
8395 newnofp = NULL;
8396 } else if (nofp && !nofpbusyerror) {
8397 nfs_open_file_clear_busy(nofp);
8398 nofpbusyerror = ENOENT;
8399 }
8400 if (inuse && nfs_mount_state_in_use_end(nmp, error)) {
8401 inuse = 0;
8402 nofp = newnofp = NULL;
8403 rflags = delegation = recall = eof = rlen = retlen = 0;
8404 ace.ace_flags = 0;
8405 s = sbuf;
8406 slen = sizeof(sbuf);
8407 nfsm_chain_cleanup(&nmreq);
8408 nfsm_chain_cleanup(&nmrep);
8409 if (anp) {
8410 vnode_put(NFSTOV(anp));
8411 *anpp = anp = NULL;
8412 }
8413 hadattrdir = (adnp != NULL);
8414 if (noopbusy) {
8415 nfs_open_owner_clear_busy(noop);
8416 noopbusy = 0;
8417 }
8418 goto restart;
8419 }
8420 inuse = 0;
8421 if (noop) {
8422 if (noopbusy) {
8423 nfs_open_owner_clear_busy(noop);
8424 noopbusy = 0;
8425 }
8426 nfs_open_owner_rele(noop);
8427 }
8428 }
8429 if (!error && prefetch && nmrep.nmc_mhead) {
8430 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
8431 nfsm_chain_op_check(error, &nmrep, NFS_OP_NVERIFY);
8432 nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
8433 nfsm_chain_get_32(error, &nmrep, eof);
8434 nfsm_chain_get_32(error, &nmrep, retlen);
8435 if (!error && anp) {
8436 /*
8437 * There can be one problem with doing the prefetch.
8438 * Because we don't have the node before we start the RPC, we
8439 * can't have the buffer busy while the READ is performed.
8440 * So there is a chance that other I/O occured on the same
8441 * range of data while we were performing this RPC. If that
8442 * happens, then it's possible the data we have in the READ
8443 * response is no longer up to date.
8444 * Once we have the node and the buffer, we need to make sure
8445 * that there's no chance we could be putting stale data in
8446 * the buffer.
8447 * So, we check if the range read is dirty or if any I/O may
8448 * have occured on it while we were performing our RPC.
8449 */
8450 struct nfsbuf *bp = NULL;
8451 int lastpg;
8452 nfsbufpgs pagemask, pagemaskand;
8453
8454 retlen = MIN(retlen, rlen);
8455
8456 /* check if node needs size update or invalidation */
8457 if (ISSET(anp->n_flag, NUPDATESIZE)) {
8458 nfs_data_update_size(anp, 0);
8459 }
8460 if (!(error = nfs_node_lock(anp))) {
8461 if (anp->n_flag & NNEEDINVALIDATE) {
8462 anp->n_flag &= ~NNEEDINVALIDATE;
8463 nfs_node_unlock(anp);
8464 error = nfs_vinvalbuf1(NFSTOV(anp), V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
8465 if (!error) { /* lets play it safe and just drop the data */
8466 error = EIO;
8467 }
8468 } else {
8469 nfs_node_unlock(anp);
8470 }
8471 }
8472
8473 /* calculate page mask for the range of data read */
8474 lastpg = (retlen - 1) / PAGE_SIZE;
8475 nfs_buf_pgs_get_page_mask(&pagemask, lastpg + 1);
8476
8477 if (!error) {
8478 error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ | NBLK_NOWAIT, &bp);
8479 }
8480 /* don't save the data if dirty or potential I/O conflict */
8481 nfs_buf_pgs_bit_and(&bp->nb_dirty, &pagemask, &pagemaskand);
8482 if (!error && bp && !bp->nb_dirtyoff && !nfs_buf_pgs_is_set(&pagemaskand) &&
8483 timevalcmp(&anp->n_lastio, &now, <)) {
8484 OSAddAtomic64(1, &nfsclntstats.read_bios);
8485 CLR(bp->nb_flags, (NB_DONE | NB_ASYNC));
8486 SET(bp->nb_flags, NB_READ);
8487 NFS_BUF_MAP(bp);
8488 nfsm_chain_get_opaque(error, &nmrep, retlen, bp->nb_data);
8489 if (error) {
8490 bp->nb_error = error;
8491 SET(bp->nb_flags, NB_ERROR);
8492 } else {
8493 bp->nb_offio = 0;
8494 bp->nb_endio = rlen;
8495 if ((retlen > 0) && (bp->nb_endio < (int)retlen)) {
8496 bp->nb_endio = retlen;
8497 }
8498 if (eof || (retlen == 0)) {
8499 /* zero out the remaining data (up to EOF) */
8500 off_t rpcrem, eofrem, rem;
8501 rpcrem = (rlen - retlen);
8502 eofrem = anp->n_size - (NBOFF(bp) + retlen);
8503 rem = (rpcrem < eofrem) ? rpcrem : eofrem;
8504 if (rem > 0) {
8505 bzero(bp->nb_data + retlen, rem);
8506 }
8507 } else if ((retlen < rlen) && !ISSET(bp->nb_flags, NB_ERROR)) {
8508 /* ugh... short read ... just invalidate for now... */
8509 SET(bp->nb_flags, NB_INVAL);
8510 }
8511 }
8512 nfs_buf_read_finish(bp);
8513 microuptime(&anp->n_lastio);
8514 }
8515 if (bp) {
8516 nfs_buf_release(bp, 1);
8517 }
8518 }
8519 error = 0; /* ignore any transient error in processing the prefetch */
8520 }
8521 if (adnp && !adbusyerror) {
8522 nfs_node_clear_busy(adnp);
8523 adbusyerror = ENOENT;
8524 }
8525 if (!busyerror) {
8526 nfs_node_clear_busy(np);
8527 busyerror = ENOENT;
8528 }
8529 if (adnp) {
8530 vnode_put(NFSTOV(adnp));
8531 }
8532 if (inuse) {
8533 nfs_mount_state_in_use_end(nmp, error);
8534 }
8535 if (error && *anpp) {
8536 vnode_put(NFSTOV(*anpp));
8537 *anpp = NULL;
8538 }
8539 nfsm_chain_cleanup(&nmreq);
8540 nfsm_chain_cleanup(&nmrep);
8541 out_free:
8542 NFS_ZFREE(nfs_fhandle_zone, fh);
8543 NFS_ZFREE(nfs_req_zone, req);
8544 zfree(KT_NFS_VATTR, nvattr);
8545 return error;
8546 }
8547
8548 /*
8549 * Remove a named attribute.
8550 */
8551 int
nfs4_named_attr_remove(nfsnode_t np,nfsnode_t anp,const char * name,vfs_context_t ctx)8552 nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_context_t ctx)
8553 {
8554 nfsnode_t adnp = NULL;
8555 struct nfsmount *nmp;
8556 struct componentname cn;
8557 struct vnop_remove_args vra;
8558 int error, putanp = 0;
8559
8560 nmp = NFSTONMP(np);
8561 if (nfs_mount_gone(nmp)) {
8562 return ENXIO;
8563 }
8564
8565 bzero(&cn, sizeof(cn));
8566 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
8567 cn.cn_namelen = NFS_STRLEN_INT(name);
8568 cn.cn_nameiop = DELETE;
8569 cn.cn_flags = 0;
8570
8571 if (!anp) {
8572 error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8573 0, ctx, &anp, NULL);
8574 if ((!error && !anp) || (error == ENOATTR)) {
8575 error = ENOENT;
8576 }
8577 if (error) {
8578 if (anp) {
8579 vnode_put(NFSTOV(anp));
8580 anp = NULL;
8581 }
8582 goto out;
8583 }
8584 putanp = 1;
8585 }
8586
8587 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
8588 goto out;
8589 }
8590 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
8591 nfs_node_clear_busy(np);
8592 if (!adnp) {
8593 error = ENOENT;
8594 goto out;
8595 }
8596
8597 vra.a_desc = &vnop_remove_desc;
8598 vra.a_dvp = NFSTOV(adnp);
8599 vra.a_vp = NFSTOV(anp);
8600 vra.a_cnp = &cn;
8601 vra.a_flags = 0;
8602 vra.a_context = ctx;
8603 error = nfs_vnop_remove(&vra);
8604 out:
8605 if (adnp) {
8606 vnode_put(NFSTOV(adnp));
8607 }
8608 if (putanp) {
8609 vnode_put(NFSTOV(anp));
8610 }
8611 return error;
8612 }
8613
8614 int
nfs4_vnop_getxattr(struct vnop_getxattr_args * ap)8615 nfs4_vnop_getxattr(
8616 struct vnop_getxattr_args /* {
8617 * struct vnodeop_desc *a_desc;
8618 * vnode_t a_vp;
8619 * const char * a_name;
8620 * uio_t a_uio;
8621 * size_t *a_size;
8622 * int a_options;
8623 * vfs_context_t a_context;
8624 * } */*ap)
8625 {
8626 vfs_context_t ctx = ap->a_context;
8627 struct nfsmount *nmp;
8628 struct nfs_vattr *nvattr;
8629 struct componentname cn;
8630 nfsnode_t anp;
8631 int error = 0, isrsrcfork;
8632
8633 nmp = VTONMP(ap->a_vp);
8634 if (nfs_mount_gone(nmp)) {
8635 return ENXIO;
8636 }
8637
8638 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8639 return ENOTSUP;
8640 }
8641
8642 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
8643 error = nfs_getattr(VTONFS(ap->a_vp), nvattr, ctx, NGA_CACHED);
8644 if (error) {
8645 goto out;
8646 }
8647 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8648 !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8649 error = ENOATTR;
8650 goto out;
8651 }
8652
8653 bzero(&cn, sizeof(cn));
8654 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8655 cn.cn_namelen = NFS_STRLEN_INT(ap->a_name);
8656 cn.cn_nameiop = LOOKUP;
8657 cn.cn_flags = MAKEENTRY;
8658
8659 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
8660 isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
8661
8662 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8663 !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL);
8664 if ((!error && !anp) || (error == ENOENT)) {
8665 error = ENOATTR;
8666 }
8667 if (!error) {
8668 if (ap->a_uio) {
8669 error = nfs_bioread(anp, ap->a_uio, 0, ctx);
8670 } else {
8671 *ap->a_size = anp->n_size;
8672 }
8673 }
8674 if (anp) {
8675 vnode_put(NFSTOV(anp));
8676 }
8677 out:
8678 zfree(KT_NFS_VATTR, nvattr);
8679 return NFS_MAPERR(error);
8680 }
8681
/*
 * Set a named attribute.
 *
 * NFSv4 named attributes are files in a hidden attribute directory, so
 * "set" means create/open the attribute file, write the data, flush it
 * to the server, and close.  FinderInfo (fixed 32 bytes; all-zero means
 * remove) and the resource fork (writable at an offset, not truncated)
 * get special treatment to match local-filesystem xattr semantics.
 */
int
nfs4_vnop_setxattr(
	struct vnop_setxattr_args /* {
                                   *  struct vnodeop_desc *a_desc;
                                   *  vnode_t a_vp;
                                   *  const char * a_name;
                                   *  uio_t a_uio;
                                   *  int a_options;
                                   *  vfs_context_t a_context;
                                   *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	int options = ap->a_options;
	uio_t uio = ap->a_uio;
	const char *name = ap->a_name;
	struct nfsmount *nmp;
	struct componentname cn;
	nfsnode_t anp = NULL;
	int error = 0, closeerror = 0, flags, isrsrcfork, isfinderinfo, empty = 0, i;
#define FINDERINFOSIZE 32
	uint8_t finfo[FINDERINFOSIZE];
	uint32_t *finfop;
	struct nfs_open_file *nofp = NULL;
	uio_t auio = NULL;
	struct vnop_write_args vwa;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	/* Server must support NFSv4 named attributes. */
	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	/* XATTR_CREATE and XATTR_REPLACE are mutually exclusive. */
	if ((options & XATTR_CREATE) && (options & XATTR_REPLACE)) {
		return EINVAL;
	}

	/* XXX limitation based on need to back up uio on short write */
	if (uio_iovcnt(uio) > 1) {
		printf("nfs4_vnop_setxattr: iovcnt > 1\n");
		return EINVAL;
	}

	/* Set up a CREATE componentname for the attribute's name. */
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
	cn.cn_namelen = NFS_STRLEN_INT(name);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = MAKEENTRY;

	isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0);
	isrsrcfork = isfinderinfo ? 0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
	/* Only the resource fork supports writing at an offset; everything else starts at 0. */
	if (!isrsrcfork) {
		uio_setoffset(uio, 0);
	}
	if (isfinderinfo) {
		/* FinderInfo is fixed-size; copy it in up front so we can inspect it. */
		bzero(&finfo, sizeof(finfo));
		if (uio_resid(uio) != sizeof(finfo)) {
			return ERANGE;
		}
		error = uiomove((char*)&finfo, sizeof(finfo), uio);
		if (error) {
			return NFS_MAPERR(error);
		}
		/* setting a FinderInfo of all zeroes means remove the FinderInfo */
		empty = 1;
		for (i = 0, finfop = (uint32_t*)&finfo; i < (int)(sizeof(finfo) / sizeof(uint32_t)); i++) {
			if (finfop[i]) {
				empty = 0;
				break;
			}
		}
		if (empty && !(options & (XATTR_CREATE | XATTR_REPLACE))) {
			/* Plain "set empty FinderInfo" is a remove; ENOENT (already gone) is success. */
			error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
			if (error == ENOENT) {
				error = 0;
			}
			return NFS_MAPERR(error);
		}
		/* first, let's see if we get a create/replace error */
	}

	/*
	 * create/open the xattr
	 *
	 * We need to make sure not to create it if XATTR_REPLACE.
	 * For all xattrs except the resource fork, we also want to
	 * truncate the xattr to remove any current data.  We'll do
	 * that by setting the size to 0 on create/open.
	 */
	flags = 0;
	if (!(options & XATTR_REPLACE)) {
		flags |= NFS_GET_NAMED_ATTR_CREATE;
	}
	if (options & XATTR_CREATE) {
		flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED;
	}
	if (!isrsrcfork) {
		flags |= NFS_GET_NAMED_ATTR_TRUNCATE;
	}

	error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
	    flags, ctx, &anp, &nofp);
	if (!error && !anp) {
		error = ENOATTR;
	}
	if (error) {
		goto out;
	}
	/* grab the open state from the get/create/open */
	if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) {
		nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
		nofp->nof_creator = NULL;
		nfs_open_file_clear_busy(nofp);
	}

	/* Setting an empty FinderInfo really means remove it, skip to the close/remove */
	if (isfinderinfo && empty) {
		goto doclose;
	}

	/*
	 * Write the data out and flush.
	 *
	 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
	 */
	vwa.a_desc = &vnop_write_desc;
	vwa.a_vp = NFSTOV(anp);
	vwa.a_uio = NULL;
	vwa.a_ioflag = 0;
	vwa.a_context = ctx;
	if (isfinderinfo) {
		/* NOTE(review): uio_create() result is used unchecked — presumably it cannot fail for this small allocation; confirm. */
		auio = uio_create(1, 0, UIO_SYSSPACE, UIO_WRITE);
		uio_addiov(auio, (uintptr_t)&finfo, sizeof(finfo));
		vwa.a_uio = auio;
	} else if (uio_resid(uio) > 0) {
		vwa.a_uio = uio;
	}
	if (vwa.a_uio) {
		error = nfs_vnop_write(&vwa);
		if (!error) {
			/* Push the new data to the server before closing. */
			error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0);
		}
		if (isfinderinfo) {
			uio_free(auio);
		}
	}
doclose:
	/* Close the xattr. */
	if (nofp) {
		int busyerror = nfs_open_file_set_busy(nofp, NULL);
		closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
		if (!busyerror) {
			nfs_open_file_clear_busy(nofp);
		}
	}
	if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */
		error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
		if (error == ENOENT) {
			error = 0;
		}
	}
	if (!error) {
		/* No primary error; report any failure from the close. */
		error = closeerror;
	}
out:
	if (anp) {
		vnode_put(NFSTOV(anp));
	}
	/* A missing attribute surfaces to callers as ENOATTR. */
	if (error == ENOENT) {
		error = ENOATTR;
	}
	return NFS_MAPERR(error);
}
8857
8858 int
nfs4_vnop_removexattr(struct vnop_removexattr_args * ap)8859 nfs4_vnop_removexattr(
8860 struct vnop_removexattr_args /* {
8861 * struct vnodeop_desc *a_desc;
8862 * vnode_t a_vp;
8863 * const char * a_name;
8864 * int a_options;
8865 * vfs_context_t a_context;
8866 * } */*ap)
8867 {
8868 struct nfsmount *nmp = VTONMP(ap->a_vp);
8869 int error;
8870
8871 if (nfs_mount_gone(nmp)) {
8872 return ENXIO;
8873 }
8874 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8875 return ENOTSUP;
8876 }
8877
8878 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context);
8879 if (error == ENOENT) {
8880 error = ENOATTR;
8881 }
8882 return NFS_MAPERR(error);
8883 }
8884
/*
 * List named attributes.
 *
 * Implemented by reading the node's hidden named-attribute directory
 * through the directory buffer cache and copying out each entry name
 * (NUL-terminated).  With a NULL uio, only the total size is
 * accumulated into *a_size.
 */
int
nfs4_vnop_listxattr(
	struct vnop_listxattr_args /* {
                                    *  struct vnodeop_desc *a_desc;
                                    *  vnode_t a_vp;
                                    *  uio_t a_uio;
                                    *  size_t *a_size;
                                    *  int a_options;
                                    *  vfs_context_t a_context;
                                    *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	nfsnode_t np = VTONFS(ap->a_vp);
	uio_t uio = ap->a_uio;
	nfsnode_t adnp = NULL; /* named attribute directory node */
	struct nfsmount *nmp;
	int error, done, i;
	struct nfs_vattr *nvattr;
	uint64_t cookie, nextcookie, lbn = 0;
	struct nfsbuf *bp = NULL;
	struct nfs_dir_buf_header *ndbhp;
	struct direntry *dp;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	/* Server must support NFSv4 named attributes. */
	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
	error = nfs_getattr(np, nvattr, ctx, NGA_CACHED);
	if (error) {
		goto out_free;
	}
	/* If attributes say the node has no named attributes, the list is empty. */
	if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
	    !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
		error = 0;
		goto out_free;
	}

	if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
		goto out_free;
	}
	adnp = nfs4_named_attr_dir_get(np, 1, ctx);
	nfs_node_clear_busy(np);
	if (!adnp) {
		/* No attribute directory: nothing to list (error is still 0 here). */
		goto out;
	}

	if ((error = nfs_node_lock(adnp))) {
		goto out;
	}

	/* If the directory was marked for invalidation, flush its caches first. */
	if (adnp->n_flag & NNEEDINVALIDATE) {
		adnp->n_flag &= ~NNEEDINVALIDATE;
		nfs_invaldir(adnp);
		nfs_node_unlock(adnp);
		error = nfs_vinvalbuf1(NFSTOV(adnp), 0, ctx, 1);
		if (!error) {
			error = nfs_node_lock(adnp);
		}
		if (error) {
			goto out;
		}
	}

	/*
	 * check for need to invalidate when (re)starting at beginning
	 */
	if (adnp->n_flag & NMODIFIED) {
		nfs_invaldir(adnp);
		nfs_node_unlock(adnp);
		if ((error = nfs_vinvalbuf1(NFSTOV(adnp), 0, ctx, 1))) {
			goto out;
		}
	} else {
		nfs_node_unlock(adnp);
	}
	/* nfs_getattr() will check changed and purge caches */
	if ((error = nfs_getattr(adnp, nvattr, ctx, NGA_CACHED))) {
		goto out;
	}

	/* Nothing to do if the caller's buffer is already full. */
	if (uio && (uio_resid(uio) == 0)) {
		goto out;
	}

	done = 0;
	nextcookie = lbn = 0;

	/* Walk the directory one NFS_DIRBLKSIZ buffer at a time. */
	while (!error && !done) {
		OSAddAtomic64(1, &nfsclntstats.biocache_readdirs);
		cookie = nextcookie;
getbuffer:
		error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
		if (error) {
			goto out;
		}
		ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
		if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
			if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
				ndbhp->ndbh_flags = 0;
				ndbhp->ndbh_count = 0;
				ndbhp->ndbh_entry_end = sizeof(*ndbhp);
				ndbhp->ndbh_ncgen = adnp->n_ncgen;
			}
			error = nfs_buf_readdir(bp, ctx);
			if (error == NFSERR_DIRBUFDROPPED) {
				/* Buffer was dropped out from under us; redo this block. */
				goto getbuffer;
			}
			if (error) {
				nfs_buf_release(bp, 1);
			}
			if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
				/* Hard readdir failure: dump the directory caches. */
				if (!nfs_node_lock(adnp)) {
					nfs_invaldir(adnp);
					nfs_node_unlock(adnp);
				}
				nfs_vinvalbuf1(NFSTOV(adnp), 0, ctx, 1);
				if (error == NFSERR_BAD_COOKIE) {
					error = ENOENT;
				}
			}
			if (error) {
				goto out;
			}
		}

		/* go through all the entries copying/counting */
		dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
		for (i = 0; i < ndbhp->ndbh_count; i++) {
			if (!xattr_protected(dp->d_name)) {
				if (uio == NULL) {
					/* size-only request: count name plus NUL */
					*ap->a_size += dp->d_namlen + 1;
				} else if (uio_resid(uio) < (dp->d_namlen + 1)) {
					error = ERANGE;
				} else {
					error = uiomove(dp->d_name, dp->d_namlen + 1, uio);
					if (error && (error != EFAULT)) {
						error = ERANGE;
					}
				}
			}
			nextcookie = dp->d_seekoff;
			dp = NFS_DIRENTRY_NEXT(dp);
		}

		if (i == ndbhp->ndbh_count) {
			/* hit end of buffer, move to next buffer */
			lbn = nextcookie;
			/* if we also hit EOF, we're done */
			if (ISSET(ndbhp->ndbh_flags, NDB_EOF)) {
				done = 1;
			}
		}
		if (!error && !done && (nextcookie == cookie)) {
			/* Cookie didn't advance: bail rather than loop forever on a bad server. */
			printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
			error = EIO;
		}
		nfs_buf_release(bp, 1);
	}
out:
	if (adnp) {
		vnode_put(NFSTOV(adnp));
	}
out_free:
	zfree(KT_NFS_VATTR, nvattr);
	return NFS_MAPERR(error);
}
9057
9058 #if NAMEDSTREAMS
9059 int
nfs4_vnop_getnamedstream(struct vnop_getnamedstream_args * ap)9060 nfs4_vnop_getnamedstream(
9061 struct vnop_getnamedstream_args /* {
9062 * struct vnodeop_desc *a_desc;
9063 * vnode_t a_vp;
9064 * vnode_t *a_svpp;
9065 * const char *a_name;
9066 * enum nsoperation a_operation;
9067 * int a_flags;
9068 * vfs_context_t a_context;
9069 * } */*ap)
9070 {
9071 vfs_context_t ctx = ap->a_context;
9072 struct nfsmount *nmp;
9073 struct nfs_vattr *nvattr;
9074 struct componentname cn;
9075 nfsnode_t anp;
9076 int error = 0;
9077
9078 nmp = VTONMP(ap->a_vp);
9079 if (nfs_mount_gone(nmp)) {
9080 return ENXIO;
9081 }
9082
9083 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
9084 return ENOTSUP;
9085 }
9086
9087 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
9088 error = nfs_getattr(VTONFS(ap->a_vp), nvattr, ctx, NGA_CACHED);
9089 if (error) {
9090 goto out;
9091 }
9092 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
9093 !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
9094 error = ENOATTR;
9095 goto out;
9096 }
9097
9098 bzero(&cn, sizeof(cn));
9099 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
9100 cn.cn_namelen = NFS_STRLEN_INT(ap->a_name);
9101 cn.cn_nameiop = LOOKUP;
9102 cn.cn_flags = MAKEENTRY;
9103
9104 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
9105 0, ctx, &anp, NULL);
9106 if ((!error && !anp) || (error == ENOENT)) {
9107 error = ENOATTR;
9108 }
9109 if (!error && anp) {
9110 *ap->a_svpp = NFSTOV(anp);
9111 } else if (anp) {
9112 vnode_put(NFSTOV(anp));
9113 }
9114 out:
9115 zfree(KT_NFS_VATTR, nvattr);
9116 return NFS_MAPERR(error);
9117 }
9118
9119 int
nfs4_vnop_makenamedstream(struct vnop_makenamedstream_args * ap)9120 nfs4_vnop_makenamedstream(
9121 struct vnop_makenamedstream_args /* {
9122 * struct vnodeop_desc *a_desc;
9123 * vnode_t *a_svpp;
9124 * vnode_t a_vp;
9125 * const char *a_name;
9126 * int a_flags;
9127 * vfs_context_t a_context;
9128 * } */*ap)
9129 {
9130 vfs_context_t ctx = ap->a_context;
9131 struct nfsmount *nmp;
9132 struct componentname cn;
9133 nfsnode_t anp;
9134 int error = 0;
9135
9136 nmp = VTONMP(ap->a_vp);
9137 if (nfs_mount_gone(nmp)) {
9138 return ENXIO;
9139 }
9140
9141 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
9142 return ENOTSUP;
9143 }
9144
9145 bzero(&cn, sizeof(cn));
9146 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
9147 cn.cn_namelen = NFS_STRLEN_INT(ap->a_name);
9148 cn.cn_nameiop = CREATE;
9149 cn.cn_flags = MAKEENTRY;
9150
9151 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
9152 NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL);
9153 if ((!error && !anp) || (error == ENOENT)) {
9154 error = ENOATTR;
9155 }
9156 if (!error && anp) {
9157 *ap->a_svpp = NFSTOV(anp);
9158 } else if (anp) {
9159 vnode_put(NFSTOV(anp));
9160 }
9161 return NFS_MAPERR(error);
9162 }
9163
9164 int
nfs4_vnop_removenamedstream(struct vnop_removenamedstream_args * ap)9165 nfs4_vnop_removenamedstream(
9166 struct vnop_removenamedstream_args /* {
9167 * struct vnodeop_desc *a_desc;
9168 * vnode_t a_vp;
9169 * vnode_t a_svp;
9170 * const char *a_name;
9171 * int a_flags;
9172 * vfs_context_t a_context;
9173 * } */*ap)
9174 {
9175 int error;
9176 struct nfsmount *nmp = VTONMP(ap->a_vp);
9177 nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL;
9178 nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL;
9179
9180 if (nfs_mount_gone(nmp)) {
9181 return ENXIO;
9182 }
9183
9184 /*
9185 * Given that a_svp is a named stream, checking for
9186 * named attribute support is kinda pointless.
9187 */
9188 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
9189 return ENOTSUP;
9190 }
9191
9192 error = nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context);
9193 return NFS_MAPERR(error);
9194 }
9195
9196 #endif
9197 #endif /* CONFIG_NFS4 */
9198
9199 #endif /* CONFIG_NFS_CLIENT */
9200