1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Rick Macklem at The University of Guelph.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
65 * FreeBSD-Id: nfs_vnops.c,v 1.72 1997/11/07 09:20:48 phk Exp $
66 */
67
68 #include <nfs/nfs_conf.h>
69 #if CONFIG_NFS_CLIENT
70
71 /*
72 * vnode op calls for Sun NFS version 2 and 3
73 */
74 #include <sys/param.h>
75 #include <sys/kernel.h>
76 #include <sys/systm.h>
77 #include <sys/resourcevar.h>
78 #include <sys/proc_internal.h>
79 #include <sys/kauth.h>
80 #include <sys/mount_internal.h>
81 #include <sys/malloc.h>
82 #include <sys/kpi_mbuf.h>
83 #include <sys/conf.h>
84 #include <sys/vnode_internal.h>
85 #include <sys/dirent.h>
86 #include <sys/fcntl.h>
87 #include <sys/lockf.h>
88 #include <sys/ubc_internal.h>
89 #include <sys/attr.h>
90 #include <sys/signalvar.h>
91 #include <sys/uio_internal.h>
92 #include <sys/xattr.h>
93
94 #include <vfs/vfs_support.h>
95
96 #include <sys/vm.h>
97
98 #include <sys/time.h>
99 #include <kern/clock.h>
100 #include <libkern/OSAtomic.h>
101
102 #include <miscfs/fifofs/fifo.h>
103 #include <miscfs/specfs/specdev.h>
104
105 #include <nfs/rpcv2.h>
106 #include <nfs/nfsproto.h>
107 #include <nfs/nfs.h>
108 #include <nfs/nfsnode.h>
109 #include <nfs/nfs_gss.h>
110 #include <nfs/nfsmount.h>
111 #include <nfs/nfs_lock.h>
112 #include <nfs/xdr_subs.h>
113 #include <nfs/nfsm_subs.h>
114
115 #include <net/if.h>
116 #include <netinet/in.h>
117 #include <netinet/in_var.h>
118
119 #include <vm/vm_kern.h>
120 #include <vm/vm_pageout.h>
121
122 #include <kern/task.h>
123 #include <kern/sched_prim.h>
124
125 #define NFS_VNOP_DBG(...) NFSCLNT_DBG(NFSCLNT_FAC_VNOP, 7, ## __VA_ARGS__)
126 #define DEFAULT_READLINK_NOCACHE 0
127
128 KALLOC_TYPE_DEFINE(KT_NFS_VATTR, struct nfs_vattr, KT_DEFAULT);
129
130 /*
131 * NFS vnode ops
132 */
133 int nfs_vnop_lookup(struct vnop_lookup_args *);
134 int nfsspec_vnop_read(struct vnop_read_args *);
135 int nfsspec_vnop_write(struct vnop_write_args *);
136 int nfsspec_vnop_close(struct vnop_close_args *);
137 #if FIFO
138 int nfsfifo_vnop_read(struct vnop_read_args *);
139 int nfsfifo_vnop_write(struct vnop_write_args *);
140 int nfsfifo_vnop_close(struct vnop_close_args *);
141 #endif
142 int nfs_vnop_ioctl(struct vnop_ioctl_args *);
143 int nfs_vnop_select(struct vnop_select_args *);
144 int nfs_vnop_setattr(struct vnop_setattr_args *);
145 int nfs_vnop_fsync(struct vnop_fsync_args *);
146 int nfs_vnop_rename(struct vnop_rename_args *);
147 int nfs_vnop_readdir(struct vnop_readdir_args *);
148 int nfs_vnop_readlink(struct vnop_readlink_args *);
149 int nfs_vnop_pathconf(struct vnop_pathconf_args *);
150 int nfs_vnop_pagein(struct vnop_pagein_args *);
151 int nfs_vnop_pageout(struct vnop_pageout_args *);
152 int nfs_vnop_blktooff(struct vnop_blktooff_args *);
153 int nfs_vnop_offtoblk(struct vnop_offtoblk_args *);
154 int nfs_vnop_blockmap(struct vnop_blockmap_args *);
155 int nfs_vnop_monitor(struct vnop_monitor_args *);
156
157 int nfs3_vnop_create(struct vnop_create_args *);
158 int nfs3_vnop_mknod(struct vnop_mknod_args *);
159 int nfs3_vnop_getattr(struct vnop_getattr_args *);
160 int nfs3_vnop_link(struct vnop_link_args *);
161 int nfs3_vnop_mkdir(struct vnop_mkdir_args *);
162 int nfs3_vnop_rmdir(struct vnop_rmdir_args *);
163 int nfs3_vnop_symlink(struct vnop_symlink_args *);
164
165
166 vnop_t **nfsv2_vnodeop_p;
167 static const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
168 { .opve_op = &vnop_default_desc, .opve_impl = (vnop_t *)vn_default_error },
169 { .opve_op = &vnop_lookup_desc, .opve_impl = (vnop_t *)nfs_vnop_lookup }, /* lookup */
170 { .opve_op = &vnop_create_desc, .opve_impl = (vnop_t *)nfs3_vnop_create }, /* create */
171 { .opve_op = &vnop_mknod_desc, .opve_impl = (vnop_t *)nfs3_vnop_mknod }, /* mknod */
172 { .opve_op = &vnop_open_desc, .opve_impl = (vnop_t *)nfs_vnop_open }, /* open */
173 { .opve_op = &vnop_close_desc, .opve_impl = (vnop_t *)nfs_vnop_close }, /* close */
174 { .opve_op = &vnop_access_desc, .opve_impl = (vnop_t *)nfs_vnop_access }, /* access */
175 { .opve_op = &vnop_getattr_desc, .opve_impl = (vnop_t *)nfs3_vnop_getattr }, /* getattr */
176 { .opve_op = &vnop_setattr_desc, .opve_impl = (vnop_t *)nfs_vnop_setattr }, /* setattr */
177 { .opve_op = &vnop_read_desc, .opve_impl = (vnop_t *)nfs_vnop_read }, /* read */
178 { .opve_op = &vnop_write_desc, .opve_impl = (vnop_t *)nfs_vnop_write }, /* write */
179 { .opve_op = &vnop_ioctl_desc, .opve_impl = (vnop_t *)nfs_vnop_ioctl }, /* ioctl */
180 { .opve_op = &vnop_select_desc, .opve_impl = (vnop_t *)nfs_vnop_select }, /* select */
181 { .opve_op = &vnop_revoke_desc, .opve_impl = (vnop_t *)nfs_vnop_revoke }, /* revoke */
182 { .opve_op = &vnop_mmap_desc, .opve_impl = (vnop_t *)nfs_vnop_mmap }, /* mmap */
183 { .opve_op = &vnop_mmap_check_desc, .opve_impl = (vnop_t *)nfs_vnop_mmap_check }, /* mmap_check */
184 { .opve_op = &vnop_mnomap_desc, .opve_impl = (vnop_t *)nfs_vnop_mnomap }, /* mnomap */
185 { .opve_op = &vnop_fsync_desc, .opve_impl = (vnop_t *)nfs_vnop_fsync }, /* fsync */
186 { .opve_op = &vnop_remove_desc, .opve_impl = (vnop_t *)nfs_vnop_remove }, /* remove */
187 { .opve_op = &vnop_link_desc, .opve_impl = (vnop_t *)nfs3_vnop_link }, /* link */
188 { .opve_op = &vnop_rename_desc, .opve_impl = (vnop_t *)nfs_vnop_rename }, /* rename */
189 { .opve_op = &vnop_mkdir_desc, .opve_impl = (vnop_t *)nfs3_vnop_mkdir }, /* mkdir */
190 { .opve_op = &vnop_rmdir_desc, .opve_impl = (vnop_t *)nfs3_vnop_rmdir }, /* rmdir */
191 { .opve_op = &vnop_symlink_desc, .opve_impl = (vnop_t *)nfs3_vnop_symlink }, /* symlink */
192 { .opve_op = &vnop_readdir_desc, .opve_impl = (vnop_t *)nfs_vnop_readdir }, /* readdir */
193 { .opve_op = &vnop_readlink_desc, .opve_impl = (vnop_t *)nfs_vnop_readlink }, /* readlink */
194 { .opve_op = &vnop_inactive_desc, .opve_impl = (vnop_t *)nfs_vnop_inactive }, /* inactive */
195 { .opve_op = &vnop_reclaim_desc, .opve_impl = (vnop_t *)nfs_vnop_reclaim }, /* reclaim */
196 { .opve_op = &vnop_strategy_desc, .opve_impl = (vnop_t *)err_strategy }, /* strategy */
197 { .opve_op = &vnop_pathconf_desc, .opve_impl = (vnop_t *)nfs_vnop_pathconf }, /* pathconf */
198 { .opve_op = &vnop_advlock_desc, .opve_impl = (vnop_t *)nfs_vnop_advlock }, /* advlock */
199 { .opve_op = &vnop_bwrite_desc, .opve_impl = (vnop_t *)err_bwrite }, /* bwrite */
200 { .opve_op = &vnop_pagein_desc, .opve_impl = (vnop_t *)nfs_vnop_pagein }, /* Pagein */
201 { .opve_op = &vnop_pageout_desc, .opve_impl = (vnop_t *)nfs_vnop_pageout }, /* Pageout */
202 { .opve_op = &vnop_copyfile_desc, .opve_impl = (vnop_t *)err_copyfile }, /* Copyfile */
203 { .opve_op = &vnop_blktooff_desc, .opve_impl = (vnop_t *)nfs_vnop_blktooff }, /* blktooff */
204 { .opve_op = &vnop_offtoblk_desc, .opve_impl = (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */
205 { .opve_op = &vnop_blockmap_desc, .opve_impl = (vnop_t *)nfs_vnop_blockmap }, /* blockmap */
206 { .opve_op = &vnop_monitor_desc, .opve_impl = (vnop_t *)nfs_vnop_monitor }, /* monitor */
207 { .opve_op = NULL, .opve_impl = NULL }
208 };
209 const struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
210 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
211
212
213 #if CONFIG_NFS4
214 vnop_t **nfsv4_vnodeop_p;
215 static const struct vnodeopv_entry_desc nfsv4_vnodeop_entries[] = {
216 { &vnop_default_desc, (vnop_t *)vn_default_error },
217 { &vnop_lookup_desc, (vnop_t *)nfs_vnop_lookup }, /* lookup */
218 { &vnop_create_desc, (vnop_t *)nfs4_vnop_create }, /* create */
219 { &vnop_mknod_desc, (vnop_t *)nfs4_vnop_mknod }, /* mknod */
220 { &vnop_open_desc, (vnop_t *)nfs_vnop_open }, /* open */
221 { &vnop_close_desc, (vnop_t *)nfs_vnop_close }, /* close */
222 { &vnop_access_desc, (vnop_t *)nfs_vnop_access }, /* access */
223 { &vnop_getattr_desc, (vnop_t *)nfs4_vnop_getattr }, /* getattr */
224 { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */
225 { &vnop_read_desc, (vnop_t *)nfs_vnop_read }, /* read */
226 { &vnop_write_desc, (vnop_t *)nfs_vnop_write }, /* write */
227 { &vnop_ioctl_desc, (vnop_t *)nfs_vnop_ioctl }, /* ioctl */
228 { &vnop_select_desc, (vnop_t *)nfs_vnop_select }, /* select */
229 { &vnop_revoke_desc, (vnop_t *)nfs_vnop_revoke }, /* revoke */
230 { &vnop_mmap_desc, (vnop_t *)nfs_vnop_mmap }, /* mmap */
231 { &vnop_mmap_check_desc, (vnop_t *)nfs_vnop_mmap_check }, /* mmap_check */
232 { &vnop_mnomap_desc, (vnop_t *)nfs_vnop_mnomap }, /* mnomap */
233 { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */
234 { &vnop_remove_desc, (vnop_t *)nfs_vnop_remove }, /* remove */
235 { &vnop_link_desc, (vnop_t *)nfs4_vnop_link }, /* link */
236 { &vnop_rename_desc, (vnop_t *)nfs_vnop_rename }, /* rename */
237 { &vnop_mkdir_desc, (vnop_t *)nfs4_vnop_mkdir }, /* mkdir */
238 { &vnop_rmdir_desc, (vnop_t *)nfs4_vnop_rmdir }, /* rmdir */
239 { &vnop_symlink_desc, (vnop_t *)nfs4_vnop_symlink }, /* symlink */
240 { &vnop_readdir_desc, (vnop_t *)nfs_vnop_readdir }, /* readdir */
241 { &vnop_readlink_desc, (vnop_t *)nfs_vnop_readlink }, /* readlink */
242 { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */
243 { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */
244 { &vnop_strategy_desc, (vnop_t *)err_strategy }, /* strategy */
245 { &vnop_pathconf_desc, (vnop_t *)nfs_vnop_pathconf }, /* pathconf */
246 { &vnop_advlock_desc, (vnop_t *)nfs_vnop_advlock }, /* advlock */
247 { &vnop_bwrite_desc, (vnop_t *)err_bwrite }, /* bwrite */
248 { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */
249 { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */
250 { &vnop_copyfile_desc, (vnop_t *)err_copyfile }, /* Copyfile */
251 { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */
252 { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */
253 { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */
254 { &vnop_getxattr_desc, (vnop_t *)nfs4_vnop_getxattr }, /* getxattr */
255 { &vnop_setxattr_desc, (vnop_t *)nfs4_vnop_setxattr }, /* setxattr */
256 { &vnop_removexattr_desc, (vnop_t *)nfs4_vnop_removexattr }, /* removexattr */
257 { &vnop_listxattr_desc, (vnop_t *)nfs4_vnop_listxattr }, /* listxattr */
258 #if NAMEDSTREAMS
259 { &vnop_getnamedstream_desc, (vnop_t *)nfs4_vnop_getnamedstream }, /* getnamedstream */
260 { &vnop_makenamedstream_desc, (vnop_t *)nfs4_vnop_makenamedstream }, /* makenamedstream */
261 { &vnop_removenamedstream_desc, (vnop_t *)nfs4_vnop_removenamedstream }, /* removenamedstream */
262 #endif
263 { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */
264 { NULL, NULL }
265 };
266 const struct vnodeopv_desc nfsv4_vnodeop_opv_desc =
267 { &nfsv4_vnodeop_p, nfsv4_vnodeop_entries };
268 #endif
269
270 /*
271 * Special device vnode ops
272 */
273 vnop_t **spec_nfsv2nodeop_p;
274 static const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = {
275 { &vnop_default_desc, (vnop_t *)vn_default_error },
276 { &vnop_lookup_desc, (vnop_t *)spec_lookup }, /* lookup */
277 { &vnop_create_desc, (vnop_t *)spec_create }, /* create */
278 { &vnop_mknod_desc, (vnop_t *)spec_mknod }, /* mknod */
279 { &vnop_open_desc, (vnop_t *)spec_open }, /* open */
280 { &vnop_close_desc, (vnop_t *)nfsspec_vnop_close }, /* close */
281 { &vnop_getattr_desc, (vnop_t *)nfs3_vnop_getattr }, /* getattr */
282 { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */
283 { &vnop_read_desc, (vnop_t *)nfsspec_vnop_read }, /* read */
284 { &vnop_write_desc, (vnop_t *)nfsspec_vnop_write }, /* write */
285 { &vnop_ioctl_desc, (vnop_t *)spec_ioctl }, /* ioctl */
286 { &vnop_select_desc, (vnop_t *)spec_select }, /* select */
287 { &vnop_revoke_desc, (vnop_t *)spec_revoke }, /* revoke */
288 { &vnop_mmap_desc, (vnop_t *)spec_mmap }, /* mmap */
289 { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */
290 { &vnop_remove_desc, (vnop_t *)spec_remove }, /* remove */
291 { &vnop_link_desc, (vnop_t *)spec_link }, /* link */
292 { &vnop_rename_desc, (vnop_t *)spec_rename }, /* rename */
293 { &vnop_mkdir_desc, (vnop_t *)spec_mkdir }, /* mkdir */
294 { &vnop_rmdir_desc, (vnop_t *)spec_rmdir }, /* rmdir */
295 { &vnop_symlink_desc, (vnop_t *)spec_symlink }, /* symlink */
296 { &vnop_readdir_desc, (vnop_t *)spec_readdir }, /* readdir */
297 { &vnop_readlink_desc, (vnop_t *)spec_readlink }, /* readlink */
298 { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */
299 { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */
300 { &vnop_strategy_desc, (vnop_t *)spec_strategy }, /* strategy */
301 { &vnop_pathconf_desc, (vnop_t *)spec_pathconf }, /* pathconf */
302 { &vnop_advlock_desc, (vnop_t *)spec_advlock }, /* advlock */
303 { &vnop_bwrite_desc, (vnop_t *)vn_bwrite }, /* bwrite */
304 { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */
305 { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */
306 { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */
307 { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */
308 { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */
309 { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */
310 { NULL, NULL }
311 };
312 const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
313 { &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries };
314 #if CONFIG_NFS4
315 vnop_t **spec_nfsv4nodeop_p;
316 static const struct vnodeopv_entry_desc spec_nfsv4nodeop_entries[] = {
317 { &vnop_default_desc, (vnop_t *)vn_default_error },
318 { &vnop_lookup_desc, (vnop_t *)spec_lookup }, /* lookup */
319 { &vnop_create_desc, (vnop_t *)spec_create }, /* create */
320 { &vnop_mknod_desc, (vnop_t *)spec_mknod }, /* mknod */
321 { &vnop_open_desc, (vnop_t *)spec_open }, /* open */
322 { &vnop_close_desc, (vnop_t *)nfsspec_vnop_close }, /* close */
323 { &vnop_getattr_desc, (vnop_t *)nfs4_vnop_getattr }, /* getattr */
324 { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */
325 { &vnop_read_desc, (vnop_t *)nfsspec_vnop_read }, /* read */
326 { &vnop_write_desc, (vnop_t *)nfsspec_vnop_write }, /* write */
327 { &vnop_ioctl_desc, (vnop_t *)spec_ioctl }, /* ioctl */
328 { &vnop_select_desc, (vnop_t *)spec_select }, /* select */
329 { &vnop_revoke_desc, (vnop_t *)spec_revoke }, /* revoke */
330 { &vnop_mmap_desc, (vnop_t *)spec_mmap }, /* mmap */
331 { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */
332 { &vnop_remove_desc, (vnop_t *)spec_remove }, /* remove */
333 { &vnop_link_desc, (vnop_t *)spec_link }, /* link */
334 { &vnop_rename_desc, (vnop_t *)spec_rename }, /* rename */
335 { &vnop_mkdir_desc, (vnop_t *)spec_mkdir }, /* mkdir */
336 { &vnop_rmdir_desc, (vnop_t *)spec_rmdir }, /* rmdir */
337 { &vnop_symlink_desc, (vnop_t *)spec_symlink }, /* symlink */
338 { &vnop_readdir_desc, (vnop_t *)spec_readdir }, /* readdir */
339 { &vnop_readlink_desc, (vnop_t *)spec_readlink }, /* readlink */
340 { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */
341 { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */
342 { &vnop_strategy_desc, (vnop_t *)spec_strategy }, /* strategy */
343 { &vnop_pathconf_desc, (vnop_t *)spec_pathconf }, /* pathconf */
344 { &vnop_advlock_desc, (vnop_t *)spec_advlock }, /* advlock */
345 { &vnop_bwrite_desc, (vnop_t *)vn_bwrite }, /* bwrite */
346 { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */
347 { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */
348 { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */
349 { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */
350 { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */
351 { &vnop_getxattr_desc, (vnop_t *)nfs4_vnop_getxattr }, /* getxattr */
352 { &vnop_setxattr_desc, (vnop_t *)nfs4_vnop_setxattr }, /* setxattr */
353 { &vnop_removexattr_desc, (vnop_t *)nfs4_vnop_removexattr },/* removexattr */
354 { &vnop_listxattr_desc, (vnop_t *)nfs4_vnop_listxattr },/* listxattr */
355 #if NAMEDSTREAMS
356 { &vnop_getnamedstream_desc, (vnop_t *)nfs4_vnop_getnamedstream }, /* getnamedstream */
357 { &vnop_makenamedstream_desc, (vnop_t *)nfs4_vnop_makenamedstream }, /* makenamedstream */
358 { &vnop_removenamedstream_desc, (vnop_t *)nfs4_vnop_removenamedstream },/* removenamedstream */
359 #endif
360 { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */
361 { NULL, NULL }
362 };
363 const struct vnodeopv_desc spec_nfsv4nodeop_opv_desc =
364 { &spec_nfsv4nodeop_p, spec_nfsv4nodeop_entries };
365 #endif /* CONFIG_NFS4 */
366
367 #if FIFO
368 vnop_t **fifo_nfsv2nodeop_p;
369 static const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = {
370 { &vnop_default_desc, (vnop_t *)vn_default_error },
371 { &vnop_lookup_desc, (vnop_t *)fifo_lookup }, /* lookup */
372 { &vnop_create_desc, (vnop_t *)fifo_create }, /* create */
373 { &vnop_mknod_desc, (vnop_t *)fifo_mknod }, /* mknod */
374 { &vnop_open_desc, (vnop_t *)fifo_open }, /* open */
375 { &vnop_close_desc, (vnop_t *)nfsfifo_vnop_close }, /* close */
376 { &vnop_getattr_desc, (vnop_t *)nfs3_vnop_getattr }, /* getattr */
377 { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */
378 { &vnop_read_desc, (vnop_t *)nfsfifo_vnop_read }, /* read */
379 { &vnop_write_desc, (vnop_t *)nfsfifo_vnop_write }, /* write */
380 { &vnop_ioctl_desc, (vnop_t *)fifo_ioctl }, /* ioctl */
381 { &vnop_select_desc, (vnop_t *)fifo_select }, /* select */
382 { &vnop_revoke_desc, (vnop_t *)fifo_revoke }, /* revoke */
383 { &vnop_mmap_desc, (vnop_t *)fifo_mmap }, /* mmap */
384 { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */
385 { &vnop_remove_desc, (vnop_t *)fifo_remove }, /* remove */
386 { &vnop_link_desc, (vnop_t *)fifo_link }, /* link */
387 { &vnop_rename_desc, (vnop_t *)fifo_rename }, /* rename */
388 { &vnop_mkdir_desc, (vnop_t *)fifo_mkdir }, /* mkdir */
389 { &vnop_rmdir_desc, (vnop_t *)fifo_rmdir }, /* rmdir */
390 { &vnop_symlink_desc, (vnop_t *)fifo_symlink }, /* symlink */
391 { &vnop_readdir_desc, (vnop_t *)fifo_readdir }, /* readdir */
392 { &vnop_readlink_desc, (vnop_t *)fifo_readlink }, /* readlink */
393 { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */
394 { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */
395 { &vnop_strategy_desc, (vnop_t *)fifo_strategy }, /* strategy */
396 { &vnop_pathconf_desc, (vnop_t *)fifo_pathconf }, /* pathconf */
397 { &vnop_advlock_desc, (vnop_t *)fifo_advlock }, /* advlock */
398 { &vnop_bwrite_desc, (vnop_t *)vn_bwrite }, /* bwrite */
399 { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */
400 { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */
401 { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */
402 { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */
403 { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */
404 { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */
405 { NULL, NULL }
406 };
407 const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
408 { &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries };
409 #endif
410
411 #if CONFIG_NFS4
412 #if FIFO
413 vnop_t **fifo_nfsv4nodeop_p;
414 static const struct vnodeopv_entry_desc fifo_nfsv4nodeop_entries[] = {
415 { &vnop_default_desc, (vnop_t *)vn_default_error },
416 { &vnop_lookup_desc, (vnop_t *)fifo_lookup }, /* lookup */
417 { &vnop_create_desc, (vnop_t *)fifo_create }, /* create */
418 { &vnop_mknod_desc, (vnop_t *)fifo_mknod }, /* mknod */
419 { &vnop_open_desc, (vnop_t *)fifo_open }, /* open */
420 { &vnop_close_desc, (vnop_t *)nfsfifo_vnop_close }, /* close */
421 { &vnop_getattr_desc, (vnop_t *)nfs4_vnop_getattr }, /* getattr */
422 { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */
423 { &vnop_read_desc, (vnop_t *)nfsfifo_vnop_read }, /* read */
424 { &vnop_write_desc, (vnop_t *)nfsfifo_vnop_write }, /* write */
425 { &vnop_ioctl_desc, (vnop_t *)fifo_ioctl }, /* ioctl */
426 { &vnop_select_desc, (vnop_t *)fifo_select }, /* select */
427 { &vnop_revoke_desc, (vnop_t *)fifo_revoke }, /* revoke */
428 { &vnop_mmap_desc, (vnop_t *)fifo_mmap }, /* mmap */
429 { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */
430 { &vnop_remove_desc, (vnop_t *)fifo_remove }, /* remove */
431 { &vnop_link_desc, (vnop_t *)fifo_link }, /* link */
432 { &vnop_rename_desc, (vnop_t *)fifo_rename }, /* rename */
433 { &vnop_mkdir_desc, (vnop_t *)fifo_mkdir }, /* mkdir */
434 { &vnop_rmdir_desc, (vnop_t *)fifo_rmdir }, /* rmdir */
435 { &vnop_symlink_desc, (vnop_t *)fifo_symlink }, /* symlink */
436 { &vnop_readdir_desc, (vnop_t *)fifo_readdir }, /* readdir */
437 { &vnop_readlink_desc, (vnop_t *)fifo_readlink }, /* readlink */
438 { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */
439 { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */
440 { &vnop_strategy_desc, (vnop_t *)fifo_strategy }, /* strategy */
441 { &vnop_pathconf_desc, (vnop_t *)fifo_pathconf }, /* pathconf */
442 { &vnop_advlock_desc, (vnop_t *)fifo_advlock }, /* advlock */
443 { &vnop_bwrite_desc, (vnop_t *)vn_bwrite }, /* bwrite */
444 { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */
445 { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */
446 { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */
447 { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */
448 { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */
449 { &vnop_getxattr_desc, (vnop_t *)nfs4_vnop_getxattr }, /* getxattr */
450 { &vnop_setxattr_desc, (vnop_t *)nfs4_vnop_setxattr }, /* setxattr */
451 { &vnop_removexattr_desc, (vnop_t *)nfs4_vnop_removexattr },/* removexattr */
452 { &vnop_listxattr_desc, (vnop_t *)nfs4_vnop_listxattr },/* listxattr */
453 #if NAMEDSTREAMS
454 { &vnop_getnamedstream_desc, (vnop_t *)nfs4_vnop_getnamedstream }, /* getnamedstream */
455 { &vnop_makenamedstream_desc, (vnop_t *)nfs4_vnop_makenamedstream }, /* makenamedstream */
456 { &vnop_removenamedstream_desc, (vnop_t *)nfs4_vnop_removenamedstream },/* removenamedstream */
457 #endif
458 { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */
459 { NULL, NULL }
460 };
461 const struct vnodeopv_desc fifo_nfsv4nodeop_opv_desc =
462 { &fifo_nfsv4nodeop_p, fifo_nfsv4nodeop_entries };
463 #endif /* FIFO */
464 #endif /* CONFIG_NFS4 */
465
466 int nfs_sillyrename(nfsnode_t, nfsnode_t, struct componentname *, vfs_context_t);
467 int nfs_getattr_internal(nfsnode_t, struct nfs_vattr *, vfs_context_t, int);
468 int nfs_refresh_fh(nfsnode_t, vfs_context_t);
469
470
471 ZONE_VIEW_DEFINE(ZV_NFSDIROFF, "NFSV3 diroff",
472 KHEAP_ID_DATA_BUFFERS, sizeof(struct nfsdmap));
473
474 static void
nfs_dir_buf_cache_lookup_boundaries(struct nfsbuf * bp,int * sof,int * eof)475 nfs_dir_buf_cache_lookup_boundaries(struct nfsbuf *bp, int *sof, int *eof)
476 {
477 if (bp) {
478 struct nfs_dir_buf_header *ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
479 if (sof && bp->nb_lblkno == 0) {
480 *sof = 1;
481 }
482 if (eof && ISSET(ndbhp->ndbh_flags, NDB_EOF)) {
483 *eof = 1;
484 }
485 }
486 }
487
488 /*
489 * Update nfsnode attributes to avoid extra getattr calls for each direntry.
490 * This function should be called only if RDIRPLUS flag is enabled.
491 */
492 void
nfs_rdirplus_update_node_attrs(nfsnode_t dnp,struct direntry * dp,fhandle_t * fhp,struct nfs_vattr * nvattrp,uint64_t * savedxidp)493 nfs_rdirplus_update_node_attrs(nfsnode_t dnp, struct direntry *dp, fhandle_t *fhp, struct nfs_vattr *nvattrp, uint64_t *savedxidp)
494 {
495 nfsnode_t np;
496 struct componentname cn;
497 int isdot = (dp->d_namlen == 1) && (dp->d_name[0] == '.');
498 int isdotdot = (dp->d_namlen == 2) && (dp->d_name[0] == '.') && (dp->d_name[1] == '.');
499 int should_update_fileid = nvattrp->nva_flags & NFS_FFLAG_FILEID_CONTAINS_XID;
500 uint64_t xid = 0;
501
502 if (isdot || isdotdot) {
503 return;
504 }
505
506 np = NULL;
507 bzero(&cn, sizeof(cn));
508 cn.cn_nameptr = dp->d_name;
509 cn.cn_namelen = dp->d_namlen;
510 cn.cn_nameiop = LOOKUP;
511
512 /* xid might be stashed in nva_fileid is rdirplus is enabled */
513 if (should_update_fileid) {
514 xid = nvattrp->nva_fileid;
515 nvattrp->nva_fileid = dp->d_fileno;
516 }
517 nfs_nget(NFSTOMP(dnp), dnp, &cn, fhp->fh_data, fhp->fh_len, nvattrp, savedxidp, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
518 if (should_update_fileid) {
519 nvattrp->nva_fileid = xid;
520 }
521 if (np) {
522 nfs_node_unlock(np);
523 vnode_put(NFSTOV(np));
524 }
525 }
526
527 /*
528 * Find the slot in the access cache for this UID.
529 * If adding and no existing slot is found, reuse slots in FIFO order.
530 * The index of the next slot to use is kept in the last entry of the n_access array.
531 */
532 int
nfs_node_access_slot(nfsnode_t np,uid_t uid,int add)533 nfs_node_access_slot(nfsnode_t np, uid_t uid, int add)
534 {
535 int slot;
536
537 for (slot = 0; slot < NFS_ACCESS_CACHE_SIZE; slot++) {
538 if (np->n_accessuid[slot] == uid) {
539 break;
540 }
541 }
542 if (slot == NFS_ACCESS_CACHE_SIZE) {
543 if (!add) {
544 return -1;
545 }
546 slot = np->n_access[NFS_ACCESS_CACHE_SIZE];
547 np->n_access[NFS_ACCESS_CACHE_SIZE] = (slot + 1) % NFS_ACCESS_CACHE_SIZE;
548 }
549 return slot;
550 }
551
552 int
nfs3_access_rpc(nfsnode_t np,u_int32_t * access,int rpcflags,vfs_context_t ctx)553 nfs3_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx)
554 {
555 int error = 0, lockerror = ENOENT, status = 0, slot;
556 uint32_t access_result = 0;
557 u_int64_t xid;
558 struct nfsm_chain nmreq, nmrep;
559 struct nfsmount *nmp;
560 struct timeval now;
561 uid_t uid;
562
563 nfsm_chain_null(&nmreq);
564 nfsm_chain_null(&nmrep);
565
566 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(NFS_VER3) + NFSX_UNSIGNED);
567 nfsm_chain_add_fh(error, &nmreq, NFS_VER3, np->n_fhp, np->n_fhsize);
568 nfsm_chain_add_32(error, &nmreq, *access);
569 nfsm_chain_build_done(error, &nmreq);
570 nfsmout_if(error);
571 error = nfs_request2(np, NULL, &nmreq, NFSPROC_ACCESS,
572 vfs_context_thread(ctx), vfs_context_ucred(ctx),
573 NULL, rpcflags, &nmrep, &xid, &status);
574 if ((lockerror = nfs_node_lock(np))) {
575 error = lockerror;
576 }
577 nfsm_chain_postop_attr_update(error, &nmrep, np, &xid);
578 if (!error) {
579 error = status;
580 }
581 nfsm_chain_get_32(error, &nmrep, access_result);
582 nfsmout_if(error);
583
584 /* XXXab do we really need mount here, also why are we doing access cache management here? */
585 nmp = NFSTONMP(np);
586 if (nfs_mount_gone(nmp)) {
587 error = ENXIO;
588 }
589 nfsmout_if(error);
590
591 #if CONFIG_NFS_GSS
592 if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) {
593 uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
594 } else {
595 uid = kauth_cred_getuid(vfs_context_ucred(ctx));
596 }
597 #else
598 uid = kauth_cred_getuid(vfs_context_ucred(ctx));
599 #endif /* CONFIG_NFS_GSS */
600 slot = nfs_node_access_slot(np, uid, 1);
601 np->n_accessuid[slot] = uid;
602 microuptime(&now);
603 np->n_accessstamp[slot] = now.tv_sec;
604 np->n_access[slot] = access_result;
605
606 /*
607 * If we asked for DELETE but didn't get it, the server
608 * may simply not support returning that bit (possible
609 * on UNIX systems). So, we'll assume that it is OK,
610 * and just let any subsequent delete action fail if it
611 * really isn't deletable.
612 */
613 if ((*access & NFS_ACCESS_DELETE) &&
614 !(np->n_access[slot] & NFS_ACCESS_DELETE)) {
615 np->n_access[slot] |= NFS_ACCESS_DELETE;
616 }
617 /* ".zfs" subdirectories may erroneously give a denied answer for add/remove */
618 if (nfs_access_dotzfs && (np->n_flag & NISDOTZFSCHILD)) {
619 np->n_access[slot] |= (NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE);
620 }
621 /* pass back the access returned with this request */
622 *access = np->n_access[slot];
623 nfsmout:
624 if (!lockerror) {
625 nfs_node_unlock(np);
626 }
627 nfsm_chain_cleanup(&nmreq);
628 nfsm_chain_cleanup(&nmrep);
629 return error;
630 }
631
632
633 /*
634 * NFS access vnode op.
635 * For NFS version 2, just return ok. File accesses may fail later.
636 * For NFS version 3+, use the access RPC to check accessibility. If file
637 * permissions are changed on the server, accesses might still fail later.
638 */
639 int
nfs_vnop_access(struct vnop_access_args * ap)640 nfs_vnop_access(
641 struct vnop_access_args /* {
642 * struct vnodeop_desc *a_desc;
643 * vnode_t a_vp;
644 * int a_action;
645 * vfs_context_t a_context;
646 * } */*ap)
647 {
648 vfs_context_t ctx = ap->a_context;
649 vnode_t vp = ap->a_vp;
650 int error = 0, slot, dorpc, rpcflags = 0;
651 u_int32_t access, waccess;
652 nfsnode_t np = VTONFS(vp);
653 struct nfsmount *nmp;
654 int nfsvers;
655 struct timeval now;
656 uid_t uid;
657
658 nmp = VTONMP(vp);
659 if (nfs_mount_gone(nmp)) {
660 return ENXIO;
661 }
662 nfsvers = nmp->nm_vers;
663
664
665 if (nfsvers == NFS_VER2 || NMFLAG(nmp, NOOPAQUE_AUTH)) {
666 if ((ap->a_action & KAUTH_VNODE_WRITE_RIGHTS) &&
667 vfs_isrdonly(vnode_mount(vp))) {
668 return EROFS;
669 }
670 return 0;
671 }
672
673 /*
674 * For NFS v3, do an access rpc, otherwise you are stuck emulating
675 * ufs_access() locally using the vattr. This may not be correct,
676 * since the server may apply other access criteria such as
677 * client uid-->server uid mapping that we do not know about, but
678 * this is better than just returning anything that is lying about
679 * in the cache.
680 */
681
682 /*
683 * Convert KAUTH primitives to NFS access rights.
684 */
685 access = 0;
686 if (vnode_isdir(vp)) {
687 /* directory */
688 if (ap->a_action &
689 (KAUTH_VNODE_LIST_DIRECTORY |
690 KAUTH_VNODE_READ_EXTATTRIBUTES)) {
691 access |= NFS_ACCESS_READ;
692 }
693 if (ap->a_action & KAUTH_VNODE_SEARCH) {
694 access |= NFS_ACCESS_LOOKUP;
695 }
696 if (ap->a_action &
697 (KAUTH_VNODE_ADD_FILE |
698 KAUTH_VNODE_ADD_SUBDIRECTORY)) {
699 access |= NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND;
700 }
701 if (ap->a_action & KAUTH_VNODE_DELETE_CHILD) {
702 access |= NFS_ACCESS_MODIFY;
703 }
704 } else {
705 /* file */
706 if (ap->a_action &
707 (KAUTH_VNODE_READ_DATA |
708 KAUTH_VNODE_READ_EXTATTRIBUTES)) {
709 access |= NFS_ACCESS_READ;
710 }
711 if (ap->a_action & KAUTH_VNODE_WRITE_DATA) {
712 access |= NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND;
713 }
714 if (ap->a_action & KAUTH_VNODE_APPEND_DATA) {
715 access |= NFS_ACCESS_EXTEND;
716 }
717 if (ap->a_action & KAUTH_VNODE_EXECUTE) {
718 access |= NFS_ACCESS_EXECUTE;
719 }
720 }
721 /* common */
722 if (ap->a_action & KAUTH_VNODE_DELETE) {
723 access |= NFS_ACCESS_DELETE;
724 }
725 if (ap->a_action &
726 (KAUTH_VNODE_WRITE_ATTRIBUTES |
727 KAUTH_VNODE_WRITE_EXTATTRIBUTES |
728 KAUTH_VNODE_WRITE_SECURITY)) {
729 access |= NFS_ACCESS_MODIFY;
730 }
731 /* XXX this is pretty dubious */
732 if (ap->a_action & KAUTH_VNODE_CHANGE_OWNER) {
733 access |= NFS_ACCESS_MODIFY;
734 }
735
736 /* if caching, always ask for every right */
737 if (nfs_access_cache_timeout > 0) {
738 waccess = NFS_ACCESS_READ | NFS_ACCESS_MODIFY |
739 NFS_ACCESS_EXTEND | NFS_ACCESS_EXECUTE |
740 NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP;
741 } else {
742 waccess = access;
743 }
744
745 if ((error = nfs_node_lock(np))) {
746 return NFS_MAPERR(error);
747 }
748
749 /*
750 * Does our cached result allow us to give a definite yes to
751 * this request?
752 */
753 #if CONFIG_NFS_GSS
754 if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) {
755 uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
756 } else {
757 uid = kauth_cred_getuid(vfs_context_ucred(ctx));
758 }
759 #else
760 uid = kauth_cred_getuid(vfs_context_ucred(ctx));
761 #endif /* CONFIG_NFS_GSS */
762 slot = nfs_node_access_slot(np, uid, 0);
763 dorpc = 1;
764 if (access == 0) {
765 /* not asking for any rights understood by NFS, so don't bother doing an RPC */
766 /* OSAddAtomic(1, &nfsclntstats.accesscache_hits); */
767 dorpc = 0;
768 waccess = 0;
769 } else if (NACCESSVALID(np, slot)) {
770 microuptime(&now);
771 if (((now.tv_sec < (np->n_accessstamp[slot] + nfs_access_cache_timeout)) &&
772 ((np->n_access[slot] & access) == access)) || nfs_use_cache(nmp)) {
773 /* OSAddAtomic(1, &nfsclntstats.accesscache_hits); */
774 dorpc = 0;
775 waccess = np->n_access[slot];
776 }
777 }
778 nfs_node_unlock(np);
779 if (dorpc) {
780 /* Either a no, or a don't know. Go to the wire. */
781 /* OSAddAtomic(1, &nfsclntstats.accesscache_misses); */
782
783 /*
784 * Allow an access call to timeout if we have it cached
785 * so we won't hang if the server isn't responding.
786 */
787 if (NACCESSVALID(np, slot)) {
788 rpcflags |= R_SOFT;
789 }
790
791 error = nmp->nm_funcs->nf_access_rpc(np, &waccess, rpcflags, ctx);
792
793 /*
794 * If the server didn't respond return the cached access.
795 */
796 if ((error == ETIMEDOUT) && (rpcflags & R_SOFT)) {
797 error = 0;
798 waccess = np->n_access[slot];
799 }
800 }
801 if (!error && ((waccess & access) != access)) {
802 error = EACCES;
803 }
804
805 return NFS_MAPERR(error);
806 }
807
808
809 /*
810 * NFS open vnode op
811 *
812 * Perform various update/invalidation checks and then add the
813 * open to the node. Regular files will have an open file structure
814 * on the node and, for NFSv4, perform an OPEN request on the server.
815 */
816 int
nfs_vnop_open(struct vnop_open_args * ap)817 nfs_vnop_open(
818 struct vnop_open_args /* {
819 * struct vnodeop_desc *a_desc;
820 * vnode_t a_vp;
821 * int a_mode;
822 * vfs_context_t a_context;
823 * } */*ap)
824 {
825 vfs_context_t ctx = ap->a_context;
826 vnode_t vp = ap->a_vp;
827 nfsnode_t np = VTONFS(vp);
828 struct nfsmount *nmp;
829 int error, accessMode, denyMode, opened = 0;
830 struct nfs_open_owner *noop = NULL;
831 struct nfs_open_file *nofp = NULL;
832 enum vtype vtype;
833
834 if (!(ap->a_mode & (FREAD | FWRITE))) {
835 return EINVAL;
836 }
837
838 nmp = VTONMP(vp);
839 if (nfs_mount_gone(nmp)) {
840 return ENXIO;
841 }
842 if (np->n_flag & NREVOKE) {
843 return EIO;
844 }
845
846 vtype = vnode_vtype(vp);
847 if ((vtype != VREG) && (vtype != VDIR) && (vtype != VLNK)) {
848 return EACCES;
849 }
850
851 /* First, check if we need to update/invalidate */
852 if (ISSET(np->n_flag, NUPDATESIZE)) {
853 nfs_data_update_size(np, 0);
854 }
855 if ((error = nfs_node_lock(np))) {
856 return NFS_MAPERR(error);
857 }
858 if (np->n_flag & NNEEDINVALIDATE) {
859 np->n_flag &= ~NNEEDINVALIDATE;
860 if (vtype == VDIR) {
861 nfs_invaldir(np);
862 }
863 nfs_node_unlock(np);
864 nfs_vinvalbuf1(vp, V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
865 if ((error = nfs_node_lock(np))) {
866 return NFS_MAPERR(error);
867 }
868 }
869 if (vtype == VREG) {
870 np->n_lastrahead = -1;
871 }
872 if (np->n_flag & NMODIFIED) {
873 if (vtype == VDIR) {
874 nfs_invaldir(np);
875 }
876 nfs_node_unlock(np);
877 if ((error = nfs_vinvalbuf1(vp, V_SAVE | V_IGNORE_WRITEERR, ctx, 1))) {
878 return NFS_MAPERR(error);
879 }
880 } else {
881 nfs_node_unlock(np);
882 }
883
884 /* nfs_getattr() will check changed and purge caches */
885 if ((error = nfs_getattr(np, NULL, ctx, NGA_CACHED))) {
886 return NFS_MAPERR(error);
887 }
888
889 if (vtype != VREG) {
890 /* Just mark that it was opened */
891 lck_mtx_lock(&np->n_openlock);
892 np->n_openrefcnt++;
893 lck_mtx_unlock(&np->n_openlock);
894 return 0;
895 }
896
897 /* mode contains some combination of: FREAD, FWRITE, O_SHLOCK, O_EXLOCK */
898 accessMode = 0;
899 if (ap->a_mode & FREAD) {
900 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
901 }
902 if (ap->a_mode & FWRITE) {
903 accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
904 }
905 if (ap->a_mode & O_EXLOCK) {
906 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
907 } else if (ap->a_mode & O_SHLOCK) {
908 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
909 } else {
910 denyMode = NFS_OPEN_SHARE_DENY_NONE;
911 }
912 // XXX don't do deny modes just yet (and never do it for !v4)
913 denyMode = NFS_OPEN_SHARE_DENY_NONE;
914
915 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), vfs_context_proc(ctx), 1);
916 if (!noop) {
917 return ENOMEM;
918 }
919
920 restart:
921 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
922 if (error) {
923 nfs_open_owner_rele(noop);
924 return NFS_MAPERR(error);
925 }
926 if (np->n_flag & NREVOKE) {
927 error = EIO;
928 nfs_mount_state_in_use_end(nmp, 0);
929 nfs_open_owner_rele(noop);
930 return NFS_MAPERR(error);
931 }
932
933 error = nfs_open_file_find(np, noop, &nofp, accessMode, denyMode, 1);
934 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
935 NP(np, "nfs_vnop_open: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
936 error = EIO;
937 }
938 #if CONFIG_NFS4
939 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
940 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
941 nofp = NULL;
942 if (!error) {
943 nfs_mount_state_in_use_end(nmp, 0);
944 goto restart;
945 }
946 }
947 #endif
948 if (!error) {
949 error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
950 }
951 if (error) {
952 nofp = NULL;
953 goto out;
954 }
955
956 if (nmp->nm_vers < NFS_VER4) {
957 /*
958 * NFS v2/v3 opens are always allowed - so just add it.
959 */
960 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
961 goto out;
962 }
963
964 /*
965 * If we just created the file and the modes match, then we simply use
966 * the open performed in the create. Otherwise, send the request.
967 */
968 if ((nofp->nof_flags & NFS_OPEN_FILE_CREATE) &&
969 (nofp->nof_creator == current_thread()) &&
970 (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) &&
971 (denyMode == NFS_OPEN_SHARE_DENY_NONE)) {
972 nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
973 nofp->nof_creator = NULL;
974 } else {
975 #if CONFIG_NFS4
976 if (!opened) {
977 error = nfs4_open(np, nofp, accessMode, denyMode, ctx);
978 }
979 #endif
980 if ((error == EACCES) && (nofp->nof_flags & NFS_OPEN_FILE_CREATE) &&
981 (nofp->nof_creator == current_thread())) {
982 /*
983 * Ugh. This can happen if we just created the file with read-only
984 * perms and we're trying to open it for real with different modes
985 * (e.g. write-only or with a deny mode) and the server decides to
986 * not allow the second open because of the read-only perms.
987 * The best we can do is to just use the create's open.
988 * We may have access we don't need or we may not have a requested
989 * deny mode. We may log complaints later, but we'll try to avoid it.
990 */
991 if (denyMode != NFS_OPEN_SHARE_DENY_NONE) {
992 NP(np, "nfs_vnop_open: deny mode foregone on create, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
993 }
994 nofp->nof_creator = NULL;
995 error = 0;
996 }
997 if (error) {
998 goto out;
999 }
1000 opened = 1;
1001 /*
1002 * If we had just created the file, we already had it open.
1003 * If the actual open mode is less than what we grabbed at
1004 * create time, then we'll downgrade the open here.
1005 */
1006 if ((nofp->nof_flags & NFS_OPEN_FILE_CREATE) &&
1007 (nofp->nof_creator == current_thread())) {
1008 error = nfs_close(np, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
1009 if (error) {
1010 NP(np, "nfs_vnop_open: create close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
1011 }
1012 if (!nfs_mount_state_error_should_restart(error)) {
1013 error = 0;
1014 nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
1015 }
1016 }
1017 }
1018
1019 out:
1020 if (nofp) {
1021 nfs_open_file_clear_busy(nofp);
1022 }
1023 if (nfs_mount_state_in_use_end(nmp, error)) {
1024 nofp = NULL;
1025 goto restart;
1026 }
1027 if (error) {
1028 NP(np, "nfs_vnop_open: error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
1029 }
1030 if (noop) {
1031 nfs_open_owner_rele(noop);
1032 }
1033 if (!error && vtype == VREG && (ap->a_mode & FWRITE)) {
1034 lck_mtx_lock(&nmp->nm_lock);
1035 nmp->nm_state &= ~NFSSTA_SQUISHY;
1036 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
1037 if (nmp->nm_curdeadtimeout <= 0) {
1038 nmp->nm_deadto_start = 0;
1039 }
1040 nmp->nm_writers++;
1041 lck_mtx_unlock(&nmp->nm_lock);
1042 }
1043
1044 return NFS_MAPERR(error);
1045 }
1046
1047 static uint32_t
nfs_no_of_open_file_writers(nfsnode_t np)1048 nfs_no_of_open_file_writers(nfsnode_t np)
1049 {
1050 uint32_t writers = 0;
1051 struct nfs_open_file *nofp;
1052
1053 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
1054 writers += nofp->nof_w + nofp->nof_rw + nofp->nof_w_dw + nofp->nof_rw_dw +
1055 nofp->nof_w_drw + nofp->nof_rw_drw + nofp->nof_d_w_dw +
1056 nofp->nof_d_rw_dw + nofp->nof_d_w_drw + nofp->nof_d_rw_drw +
1057 nofp->nof_d_w + nofp->nof_d_rw;
1058 }
1059
1060 return writers;
1061 }
1062
1063 /*
1064 * NFS close vnode op
1065 *
1066 * What an NFS client should do upon close after writing is a debatable issue.
1067 * Most NFS clients push delayed writes to the server upon close, basically for
1068 * two reasons:
1069 * 1 - So that any write errors may be reported back to the client process
1070 * doing the close system call. By far the two most likely errors are
1071 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
1072 * 2 - To put a worst case upper bound on cache inconsistency between
1073 * multiple clients for the file.
1074 * There is also a consistency problem for Version 2 of the protocol w.r.t.
1075 * not being able to tell if other clients are writing a file concurrently,
1076 * since there is no way of knowing if the changed modify time in the reply
1077 * is only due to the write for this client.
1078 * (NFS Version 3 provides weak cache consistency data in the reply that
1079 * should be sufficient to detect and handle this case.)
1080 *
1081 * The current code does the following:
1082 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
1083 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate them.
1084 * for NFS Version 4 - basically the same as NFSv3
1085 */
1086 int
nfs_vnop_close(struct vnop_close_args * ap)1087 nfs_vnop_close(
1088 struct vnop_close_args /* {
1089 * struct vnodeop_desc *a_desc;
1090 * vnode_t a_vp;
1091 * int a_fflag;
1092 * vfs_context_t a_context;
1093 * } */*ap)
1094 {
1095 vfs_context_t ctx = ap->a_context;
1096 vnode_t vp = ap->a_vp;
1097 nfsnode_t np = VTONFS(vp);
1098 struct nfsmount *nmp;
1099 int error = 0, error1, nfsvers;
1100 int fflag = ap->a_fflag;
1101 enum vtype vtype;
1102 int accessMode, denyMode;
1103 struct nfs_open_owner *noop = NULL;
1104 struct nfs_open_file *nofp = NULL;
1105
1106 nmp = VTONMP(vp);
1107 if (!nmp) {
1108 return ENXIO;
1109 }
1110 nfsvers = nmp->nm_vers;
1111 vtype = vnode_vtype(vp);
1112
1113 /* First, check if we need to update/flush/invalidate */
1114 if (ISSET(np->n_flag, NUPDATESIZE)) {
1115 nfs_data_update_size(np, 0);
1116 }
1117 nfs_node_lock_force(np);
1118 if (np->n_flag & NNEEDINVALIDATE) {
1119 np->n_flag &= ~NNEEDINVALIDATE;
1120 nfs_node_unlock(np);
1121 nfs_vinvalbuf1(vp, V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
1122 nfs_node_lock_force(np);
1123 }
1124 if ((vtype == VREG) && (np->n_flag & NMODIFIED) && (fflag & FWRITE)) {
1125 /* we're closing an open for write and the file is modified, so flush it */
1126 nfs_node_unlock(np);
1127 if (nfsvers != NFS_VER2) {
1128 error = nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), 0);
1129 } else {
1130 error = nfs_vinvalbuf1(vp, V_SAVE, ctx, 1);
1131 }
1132 nfs_node_lock_force(np);
1133 NATTRINVALIDATE(np);
1134 }
1135 if (np->n_flag & NWRITEERR) {
1136 np->n_flag &= ~NWRITEERR;
1137 error = np->n_error;
1138 }
1139 nfs_node_unlock(np);
1140
1141 if (vtype != VREG) {
1142 /* Just mark that it was closed */
1143 lck_mtx_lock(&np->n_openlock);
1144 if (np->n_openrefcnt == 0) {
1145 if (fflag & (FREAD | FWRITE)) {
1146 NP(np, "nfs_vnop_close: open reference underrun");
1147 error = EINVAL;
1148 }
1149 } else if (fflag & (FREAD | FWRITE)) {
1150 np->n_openrefcnt--;
1151 } else {
1152 /* No FREAD/FWRITE set - probably the final close */
1153 np->n_openrefcnt = 0;
1154 }
1155 lck_mtx_unlock(&np->n_openlock);
1156 return NFS_MAPERR(error);
1157 }
1158 error1 = error;
1159
1160 /* fflag should contain some combination of: FREAD, FWRITE */
1161 accessMode = 0;
1162 if (fflag & FREAD) {
1163 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
1164 }
1165 if (fflag & FWRITE) {
1166 accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
1167 }
1168 // XXX It would be nice if we still had the O_EXLOCK/O_SHLOCK flags that were on the open
1169 // if (fflag & O_EXLOCK)
1170 // denyMode = NFS_OPEN_SHARE_DENY_BOTH;
1171 // else if (fflag & O_SHLOCK)
1172 // denyMode = NFS_OPEN_SHARE_DENY_WRITE;
1173 // else
1174 // denyMode = NFS_OPEN_SHARE_DENY_NONE;
1175 // XXX don't do deny modes just yet (and never do it for !v4)
1176 denyMode = NFS_OPEN_SHARE_DENY_NONE;
1177
1178 if (!accessMode) {
1179 /*
1180 * No mode given to close?
1181 * Guess this is the final close.
1182 * We should unlock all locks and close all opens.
1183 */
1184 uint32_t writers;
1185 mount_t mp = vnode_mount(vp);
1186 int force = (!mp || vfs_isforce(mp));
1187
1188 writers = nfs_no_of_open_file_writers(np);
1189 nfs_release_open_state_for_node(np, force);
1190 if (writers) {
1191 lck_mtx_lock(&nmp->nm_lock);
1192 if (writers > nmp->nm_writers) {
1193 NP(np, "nfs_vnop_close: number of write opens for mount underrun. Node has %d"
1194 " opens for write. Mount has total of %d opens for write\n",
1195 writers, nmp->nm_writers);
1196 nmp->nm_writers = 0;
1197 } else {
1198 nmp->nm_writers -= writers;
1199 }
1200 lck_mtx_unlock(&nmp->nm_lock);
1201 }
1202
1203 return NFS_MAPERR(error);
1204 } else if (fflag & FWRITE) {
1205 lck_mtx_lock(&nmp->nm_lock);
1206 if (nmp->nm_writers == 0) {
1207 NP(np, "nfs_vnop_close: removing open writer from mount, but mount has no files open for writing");
1208 } else {
1209 nmp->nm_writers--;
1210 }
1211 lck_mtx_unlock(&nmp->nm_lock);
1212 }
1213
1214
1215 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), vfs_context_proc(ctx), 0);
1216 if (!noop) {
1217 // printf("nfs_vnop_close: can't get open owner!\n");
1218 return EIO;
1219 }
1220
1221 restart:
1222 error = nfs_mount_state_in_use_start(nmp, NULL);
1223 if (error) {
1224 nfs_open_owner_rele(noop);
1225 return NFS_MAPERR(error);
1226 }
1227
1228 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
1229 #if CONFIG_NFS4
1230 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
1231 error = nfs4_reopen(nofp, NULL);
1232 nofp = NULL;
1233 if (!error) {
1234 nfs_mount_state_in_use_end(nmp, 0);
1235 goto restart;
1236 }
1237 }
1238 #endif
1239 if (error) {
1240 NP(np, "nfs_vnop_close: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
1241 error = EBADF;
1242 goto out;
1243 }
1244 error = nfs_open_file_set_busy(nofp, NULL);
1245 if (error) {
1246 nofp = NULL;
1247 goto out;
1248 }
1249
1250 error = nfs_close(np, nofp, accessMode, denyMode, ctx);
1251 if (error) {
1252 NP(np, "nfs_vnop_close: close error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
1253 }
1254
1255 out:
1256 if (nofp) {
1257 nfs_open_file_clear_busy(nofp);
1258 }
1259 if (nfs_mount_state_in_use_end(nmp, error)) {
1260 nofp = NULL;
1261 goto restart;
1262 }
1263 if (!error) {
1264 error = error1;
1265 }
1266 if (error) {
1267 NP(np, "nfs_vnop_close: error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
1268 }
1269 if (noop) {
1270 nfs_open_owner_rele(noop);
1271 }
1272 return NFS_MAPERR(error);
1273 }
1274
1275 /*
1276 * nfs_close(): common function that does all the heavy lifting of file closure
1277 *
1278 * Takes an open file structure and a set of access/deny modes and figures out how
1279 * to update the open file structure (and the state on the server) appropriately.
1280 */
1281 int
nfs_close(nfsnode_t np,struct nfs_open_file * nofp,uint32_t accessMode,uint32_t denyMode,__unused vfs_context_t ctx)1282 nfs_close(
1283 nfsnode_t np,
1284 struct nfs_open_file *nofp,
1285 uint32_t accessMode,
1286 uint32_t denyMode,
1287 __unused vfs_context_t ctx)
1288 {
1289 #if CONFIG_NFS4
1290 struct nfs_lock_owner *nlop;
1291 #endif
1292 int error = 0, changed = 0, delegated = 0, closed = 0, downgrade = 0;
1293 uint8_t newAccessMode, newDenyMode;
1294
1295 /* warn if modes don't match current state */
1296 if (((accessMode & nofp->nof_access) != accessMode) || ((denyMode & nofp->nof_deny) != denyMode)) {
1297 NP(np, "nfs_close: mode mismatch %d %d, current %d %d, %d",
1298 accessMode, denyMode, nofp->nof_access, nofp->nof_deny,
1299 kauth_cred_getuid(nofp->nof_owner->noo_cred));
1300 }
1301
1302 /*
1303 * If we're closing a write-only open, we may not have a write-only count
1304 * if we also grabbed read access. So, check the read-write count.
1305 */
1306 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
1307 if ((accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) &&
1308 (nofp->nof_w == 0) && (nofp->nof_d_w == 0) &&
1309 (nofp->nof_rw || nofp->nof_d_rw)) {
1310 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
1311 }
1312 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
1313 if ((accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) &&
1314 (nofp->nof_w_dw == 0) && (nofp->nof_d_w_dw == 0) &&
1315 (nofp->nof_rw_dw || nofp->nof_d_rw_dw)) {
1316 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
1317 }
1318 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
1319 if ((accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) &&
1320 (nofp->nof_w_drw == 0) && (nofp->nof_d_w_drw == 0) &&
1321 (nofp->nof_rw_drw || nofp->nof_d_rw_drw)) {
1322 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
1323 }
1324 }
1325
1326 nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);
1327 if ((newAccessMode != nofp->nof_access) || (newDenyMode != nofp->nof_deny)) {
1328 changed = 1;
1329 } else {
1330 changed = 0;
1331 }
1332
1333 if (NFSTONMP(np)->nm_vers < NFS_VER4) {
1334 /* NFS v2/v3 closes simply need to remove the open. */
1335 goto v3close;
1336 }
1337 #if CONFIG_NFS4
1338 if ((newAccessMode == 0) || (nofp->nof_opencnt == 1)) {
1339 /*
1340 * No more access after this close, so clean up and close it.
1341 * Don't send a close RPC if we're closing a delegated open.
1342 */
1343 nfs_wait_bufs(np);
1344 closed = 1;
1345 if (!delegated && !(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
1346 error = nfs4_close_rpc(np, nofp, vfs_context_thread(ctx), vfs_context_ucred(ctx), 0);
1347 }
1348 if (error == NFSERR_LOCKS_HELD) {
1349 /*
1350 * Hmm... the server says we have locks we need to release first
1351 * Find the lock owner and try to unlock everything.
1352 */
1353 nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), 0, 0);
1354 if (nlop) {
1355 nfs4_unlock_rpc(np, nlop, F_WRLCK, 0, UINT64_MAX,
1356 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
1357 nfs_lock_owner_rele(np, nlop, vfs_context_thread(ctx), vfs_context_ucred(ctx));
1358 }
1359 error = nfs4_close_rpc(np, nofp, vfs_context_thread(ctx), vfs_context_ucred(ctx), 0);
1360 }
1361 } else if (changed) {
1362 /*
1363 * File is still open but with less access, so downgrade the open.
1364 * Don't send a downgrade RPC if we're closing a delegated open.
1365 */
1366 if (!delegated && !(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
1367 downgrade = 1;
1368 /*
1369 * If we have delegated opens, we should probably claim them before sending
1370 * the downgrade because the server may not know the open we are downgrading to.
1371 */
1372 if (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
1373 nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
1374 nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r) {
1375 nfs4_claim_delegated_state_for_open_file(nofp, 0);
1376 }
1377 /* need to remove the open before sending the downgrade */
1378 nfs_open_file_remove_open(nofp, accessMode, denyMode);
1379 error = nfs4_open_downgrade_rpc(np, nofp, ctx);
1380 if (error) { /* Hmm.. that didn't work. Add the open back in. */
1381 nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
1382 }
1383 }
1384 }
1385 #endif
1386 v3close:
1387 if (error) {
1388 NP(np, "nfs_close: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
1389 return error;
1390 }
1391
1392 if (!downgrade) {
1393 nfs_open_file_remove_open(nofp, accessMode, denyMode);
1394 }
1395
1396 if (closed) {
1397 lck_mtx_lock(&nofp->nof_lock);
1398 if (nofp->nof_r || nofp->nof_d_r || nofp->nof_w || nofp->nof_d_w || nofp->nof_d_rw ||
1399 (nofp->nof_rw && !((nofp->nof_flags & NFS_OPEN_FILE_CREATE) && !nofp->nof_creator && (nofp->nof_rw == 1))) ||
1400 nofp->nof_r_dw || nofp->nof_d_r_dw || nofp->nof_w_dw || nofp->nof_d_w_dw ||
1401 nofp->nof_rw_dw || nofp->nof_d_rw_dw || nofp->nof_r_drw || nofp->nof_d_r_drw ||
1402 nofp->nof_w_drw || nofp->nof_d_w_drw || nofp->nof_rw_drw || nofp->nof_d_rw_drw) {
1403 NP(np, "nfs_close: unexpected count: %u.%u %u.%u %u.%u dw %u.%u %u.%u %u.%u drw %u.%u %u.%u %u.%u flags 0x%x, %d",
1404 nofp->nof_r, nofp->nof_d_r, nofp->nof_w, nofp->nof_d_w,
1405 nofp->nof_rw, nofp->nof_d_rw, nofp->nof_r_dw, nofp->nof_d_r_dw,
1406 nofp->nof_w_dw, nofp->nof_d_w_dw, nofp->nof_rw_dw, nofp->nof_d_rw_dw,
1407 nofp->nof_r_drw, nofp->nof_d_r_drw, nofp->nof_w_drw, nofp->nof_d_w_drw,
1408 nofp->nof_rw_drw, nofp->nof_d_rw_drw, nofp->nof_flags,
1409 kauth_cred_getuid(nofp->nof_owner->noo_cred));
1410 }
1411 /* clear out all open info, just to be safe */
1412 nofp->nof_access = nofp->nof_deny = 0;
1413 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
1414 nofp->nof_r = nofp->nof_d_r = 0;
1415 nofp->nof_w = nofp->nof_d_w = 0;
1416 nofp->nof_rw = nofp->nof_d_rw = 0;
1417 nofp->nof_r_dw = nofp->nof_d_r_dw = 0;
1418 nofp->nof_w_dw = nofp->nof_d_w_dw = 0;
1419 nofp->nof_rw_dw = nofp->nof_d_rw_dw = 0;
1420 nofp->nof_r_drw = nofp->nof_d_r_drw = 0;
1421 nofp->nof_w_drw = nofp->nof_d_w_drw = 0;
1422 nofp->nof_rw_drw = nofp->nof_d_rw_drw = 0;
1423 nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
1424 lck_mtx_unlock(&nofp->nof_lock);
1425 /* XXX we may potentially want to clean up idle/unused open file structures */
1426 }
1427 if (nofp->nof_flags & NFS_OPEN_FILE_LOST) {
1428 error = EIO;
1429 NP(np, "nfs_close: LOST%s, %d", !nofp->nof_opencnt ? " (last)" : "",
1430 kauth_cred_getuid(nofp->nof_owner->noo_cred));
1431 }
1432
1433 return error;
1434 }
1435
1436
1437 int
nfs3_getattr_rpc(nfsnode_t np,mount_t mp,u_char * fhp,size_t fhsize,int flags,vfs_context_t ctx,struct nfs_vattr * nvap,u_int64_t * xidp)1438 nfs3_getattr_rpc(
1439 nfsnode_t np,
1440 mount_t mp,
1441 u_char *fhp,
1442 size_t fhsize,
1443 int flags,
1444 vfs_context_t ctx,
1445 struct nfs_vattr *nvap,
1446 u_int64_t *xidp)
1447 {
1448 struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
1449 int error = 0, status = 0, nfsvers, rpcflags = 0;
1450 struct nfsm_chain nmreq, nmrep;
1451
1452 if (nfs_mount_gone(nmp)) {
1453 return ENXIO;
1454 }
1455 nfsvers = nmp->nm_vers;
1456
1457 if (flags & NGA_MONITOR) { /* vnode monitor requests should be soft */
1458 rpcflags = R_RECOVER;
1459 }
1460
1461 if (flags & NGA_SOFT) { /* Return ETIMEDOUT if server not responding */
1462 rpcflags |= R_SOFT;
1463 }
1464
1465 nfsm_chain_null(&nmreq);
1466 nfsm_chain_null(&nmrep);
1467
1468 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(nfsvers));
1469 if (nfsvers != NFS_VER2) {
1470 nfsm_chain_add_32(error, &nmreq, fhsize);
1471 }
1472 nfsm_chain_add_opaque(error, &nmreq, fhp, fhsize);
1473 nfsm_chain_build_done(error, &nmreq);
1474 nfsmout_if(error);
1475 error = nfs_request2(np, mp, &nmreq, NFSPROC_GETATTR,
1476 vfs_context_thread(ctx), vfs_context_ucred(ctx),
1477 NULL, rpcflags, &nmrep, xidp, &status);
1478 if (!error) {
1479 error = status;
1480 }
1481 nfsmout_if(error);
1482 error = nfs_parsefattr(nmp, &nmrep, nfsvers, nvap);
1483 nfsmout:
1484 nfsm_chain_cleanup(&nmreq);
1485 nfsm_chain_cleanup(&nmrep);
1486 return error;
1487 }
1488
1489 /*
1490 * nfs_refresh_fh will attempt to update the file handle for the node.
1491 *
1492 * It only does this for symbolic links and regular files that are not currently opened.
1493 *
1494 * On Success returns 0 and the nodes file handle is updated, or ESTALE on failure.
1495 */
1496 int
nfs_refresh_fh(nfsnode_t np,vfs_context_t ctx)1497 nfs_refresh_fh(nfsnode_t np, vfs_context_t ctx)
1498 {
1499 vnode_t dvp, vp = NFSTOV(np);
1500 nfsnode_t dnp;
1501 const char *v_name = vnode_getname(vp);
1502 char *name;
1503 int namelen, refreshed;
1504 uint32_t fhsize;
1505 int error, wanted = 0;
1506 uint8_t *fhp;
1507 struct timespec ts = {.tv_sec = 2, .tv_nsec = 0};
1508
1509 NFS_VNOP_DBG("vnode is %d\n", vnode_vtype(vp));
1510
1511 dvp = vnode_parent(vp);
1512 if ((vnode_vtype(vp) != VREG && vnode_vtype(vp) != VLNK) ||
1513 v_name == NULL || *v_name == '\0' || dvp == NULL) {
1514 if (v_name != NULL) {
1515 vnode_putname(v_name);
1516 }
1517 return ESTALE;
1518 }
1519 dnp = VTONFS(dvp);
1520
1521 namelen = NFS_STRLEN_INT(v_name);
1522 name = kalloc_data(namelen + 1, Z_WAITOK);
1523 if (name == NULL) {
1524 vnode_putname(v_name);
1525 return ESTALE;
1526 }
1527 bcopy(v_name, name, namelen + 1);
1528 NFS_VNOP_DBG("Trying to refresh %s : %s\n", v_name, name);
1529 vnode_putname(v_name);
1530
1531 /* Allocate the maximum size file handle */
1532 fhp = kalloc_data(NFS4_FHSIZE, Z_WAITOK);
1533 if (fhp == NULL) {
1534 kfree_data(name, namelen + 1);
1535 return ESTALE;
1536 }
1537
1538 if ((error = nfs_node_lock(np))) {
1539 kfree_data(name, namelen + 1);
1540 kfree_data(fhp, NFS4_FHSIZE);
1541 return ESTALE;
1542 }
1543
1544 fhsize = np->n_fhsize;
1545 bcopy(np->n_fhp, fhp, fhsize);
1546 while (ISSET(np->n_flag, NREFRESH)) {
1547 SET(np->n_flag, NREFRESHWANT);
1548 NFS_VNOP_DBG("Waiting for refresh of %s\n", name);
1549 msleep(np, &np->n_lock, PZERO - 1, "nfsrefreshwant", &ts);
1550 if ((error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0))) {
1551 break;
1552 }
1553 }
1554 refreshed = error ? 0 : !NFS_CMPFH(np, fhp, fhsize);
1555 SET(np->n_flag, NREFRESH);
1556 nfs_node_unlock(np);
1557
1558 NFS_VNOP_DBG("error = %d, refreshed = %d\n", error, refreshed);
1559 if (error || refreshed) {
1560 goto nfsmout;
1561 }
1562
1563 /* Check that there are no open references for this file */
1564 lck_mtx_lock(&np->n_openlock);
1565 if (np->n_openrefcnt || !TAILQ_EMPTY(&np->n_opens) || !TAILQ_EMPTY(&np->n_lock_owners)) {
1566 int cnt = 0;
1567 struct nfs_open_file *ofp;
1568
1569 TAILQ_FOREACH(ofp, &np->n_opens, nof_link) {
1570 cnt += ofp->nof_opencnt;
1571 }
1572 if (cnt) {
1573 lck_mtx_unlock(&np->n_openlock);
1574 NFS_VNOP_DBG("Can not refresh file handle for %s with open state\n", name);
1575 NFS_VNOP_DBG("\topenrefcnt = %d, opens = %d lock_owners = %d\n",
1576 np->n_openrefcnt, cnt, !TAILQ_EMPTY(&np->n_lock_owners));
1577 error = ESTALE;
1578 goto nfsmout;
1579 }
1580 }
1581 lck_mtx_unlock(&np->n_openlock);
1582 /*
1583 * Since the FH is currently stale we should not be able to
1584 * establish any open state until the FH is refreshed.
1585 */
1586
1587 error = nfs_node_lock(np);
1588 nfsmout_if(error);
1589 /*
1590 * Symlinks should never need invalidations and are holding
1591 * the one and only nfsbuf in an uncached acquired state
1592 * trying to do a readlink. So we will hang if we invalidate
1593 * in that case. Only in in the VREG case do we need to
1594 * invalidate.
1595 */
1596 if (vnode_vtype(vp) == VREG) {
1597 np->n_flag &= ~NNEEDINVALIDATE;
1598 nfs_node_unlock(np);
1599 error = nfs_vinvalbuf1(vp, V_IGNORE_WRITEERR, ctx, 1);
1600 if (error) {
1601 NFS_VNOP_DBG("nfs_vinvalbuf1 returned %d\n", error);
1602 }
1603 nfsmout_if(error);
1604 } else {
1605 nfs_node_unlock(np);
1606 }
1607
1608 NFS_VNOP_DBG("Looking up %s\n", name);
1609 error = nfs_lookitup(dnp, name, namelen, ctx, &np);
1610 if (error) {
1611 NFS_VNOP_DBG("nfs_lookitup returned %d\n", error);
1612 }
1613
1614 nfsmout:
1615 nfs_node_lock_force(np);
1616 wanted = ISSET(np->n_flag, NREFRESHWANT);
1617 CLR(np->n_flag, NREFRESH | NREFRESHWANT);
1618 nfs_node_unlock(np);
1619 if (wanted) {
1620 wakeup(np);
1621 }
1622
1623 if (error == 0) {
1624 NFS_VNOP_DBG("%s refreshed file handle\n", name);
1625 }
1626
1627 kfree_data(name, namelen + 1);
1628 kfree_data(fhp, NFS4_FHSIZE);
1629
1630 return error ? ESTALE : 0;
1631 }
1632
1633 int
nfs_getattr(nfsnode_t np,struct nfs_vattr * nvap,vfs_context_t ctx,int flags)1634 nfs_getattr(nfsnode_t np, struct nfs_vattr *nvap, vfs_context_t ctx, int flags)
1635 {
1636 int error;
1637
1638 retry:
1639 error = nfs_getattr_internal(np, nvap, ctx, flags);
1640 if (error == ESTALE) {
1641 error = nfs_refresh_fh(np, ctx);
1642 if (!error) {
1643 goto retry;
1644 }
1645 }
1646 return error;
1647 }
1648
1649 int
nfs_getattr_internal(nfsnode_t np,struct nfs_vattr * nvap,vfs_context_t ctx,int flags)1650 nfs_getattr_internal(nfsnode_t np, struct nfs_vattr *nvap, vfs_context_t ctx, int flags)
1651 {
1652 struct nfsmount *nmp;
1653 int error = 0, nfsvers, inprogset = 0, wanted = 0, avoidfloods = 0;
1654 struct nfs_vattr *nvattr = NULL;
1655 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
1656 u_int64_t xid = 0;
1657
1658 FSDBG_TOP(513, np->n_size, np, np->n_vattr.nva_size, np->n_flag);
1659
1660 nmp = NFSTONMP(np);
1661
1662 if (nfs_mount_gone(nmp)) {
1663 return ENXIO;
1664 }
1665 nfsvers = nmp->nm_vers;
1666
1667 if (!nvap) {
1668 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
1669 nvap = nvattr;
1670 }
1671 NVATTR_INIT(nvap);
1672
1673 /* Update local times for special files. */
1674 if (np->n_flag & (NACC | NUPD)) {
1675 nfs_node_lock_force(np);
1676 np->n_flag |= NCHG;
1677 nfs_node_unlock(np);
1678 }
1679 /* Update size, if necessary */
1680 if (ISSET(np->n_flag, NUPDATESIZE)) {
1681 nfs_data_update_size(np, 0);
1682 }
1683
1684 error = nfs_node_lock(np);
1685 nfsmout_if(error);
1686 if (!(flags & (NGA_UNCACHED | NGA_MONITOR)) || ((nfsvers >= NFS_VER4) && (np->n_openflags & N_DELEG_MASK))) {
1687 /*
1688 * Use the cache or wait for any getattr in progress if:
1689 * - it's a cached request, or
1690 * - we have a delegation, or
1691 * - the server isn't responding
1692 */
1693 while (1) {
1694 error = nfs_getattrcache(np, nvap, flags);
1695 if (!error || (error != ENOENT)) {
1696 nfs_node_unlock(np);
1697 goto nfsmout;
1698 }
1699 error = 0;
1700 if (!ISSET(np->n_flag, NGETATTRINPROG)) {
1701 break;
1702 }
1703 if (flags & NGA_MONITOR) {
1704 /* no need to wait if a request is pending */
1705 error = EINPROGRESS;
1706 nfs_node_unlock(np);
1707 goto nfsmout;
1708 }
1709 SET(np->n_flag, NGETATTRWANT);
1710 msleep(np, &np->n_lock, PZERO - 1, "nfsgetattrwant", &ts);
1711 if ((error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0))) {
1712 nfs_node_unlock(np);
1713 goto nfsmout;
1714 }
1715 }
1716 SET(np->n_flag, NGETATTRINPROG);
1717 inprogset = 1;
1718 } else if (!ISSET(np->n_flag, NGETATTRINPROG)) {
1719 SET(np->n_flag, NGETATTRINPROG);
1720 inprogset = 1;
1721 } else if (flags & NGA_MONITOR) {
1722 /* no need to make a request if one is pending */
1723 error = EINPROGRESS;
1724 }
1725 nfs_node_unlock(np);
1726
1727 nmp = NFSTONMP(np);
1728 if (nfs_mount_gone(nmp)) {
1729 error = ENXIO;
1730 }
1731 if (error) {
1732 goto nfsmout;
1733 }
1734
1735 /*
1736 * Return cached attributes if they are valid,
1737 * if the server doesn't respond, and this is
1738 * some softened up style of mount.
1739 */
1740 if (NATTRVALID(np) && nfs_use_cache(nmp)) {
1741 flags |= NGA_SOFT;
1742 }
1743
1744 /*
1745 * We might want to try to get both the attributes and access info by
1746 * making an ACCESS call and seeing if it returns updated attributes.
1747 * But don't bother if we aren't caching access info or if the
1748 * attributes returned wouldn't be cached.
1749 */
1750 if (!(flags & NGA_ACL) && (nfsvers != NFS_VER2) && nfs_access_for_getattr && (nfs_access_cache_timeout > 0)) {
1751 if (nfs_attrcachetimeout(np) > 0) {
1752 /* OSAddAtomic(1, &nfsclntstats.accesscache_misses); */
1753 u_int32_t access = NFS_ACCESS_ALL;
1754 int rpcflags = 0;
1755
1756 /* Return cached attrs if server doesn't respond */
1757 if (flags & NGA_SOFT) {
1758 rpcflags |= R_SOFT;
1759 }
1760
1761 error = nmp->nm_funcs->nf_access_rpc(np, &access, rpcflags, ctx);
1762
1763 if (error == ETIMEDOUT) {
1764 goto returncached;
1765 }
1766
1767 if (error) {
1768 goto nfsmout;
1769 }
1770 nfs_node_lock_force(np);
1771 error = nfs_getattrcache(np, nvap, flags);
1772 nfs_node_unlock(np);
1773 if (!error || (error != ENOENT)) {
1774 goto nfsmout;
1775 }
1776 /* Well, that didn't work... just do a getattr... */
1777 error = 0;
1778 }
1779 }
1780
1781 avoidfloods = 0;
1782
1783 tryagain:
1784 error = nmp->nm_funcs->nf_getattr_rpc(np, NULL, np->n_fhp, np->n_fhsize, flags, ctx, nvap, &xid);
1785 if (!error) {
1786 nfs_node_lock_force(np);
1787 error = nfs_loadattrcache(np, nvap, &xid, 0);
1788 nfs_node_unlock(np);
1789 }
1790
1791 /*
1792 * If the server didn't respond, return cached attributes.
1793 */
1794 returncached:
1795 if ((flags & NGA_SOFT) && (error == ETIMEDOUT)) {
1796 nfs_node_lock_force(np);
1797 error = nfs_getattrcache(np, nvap, flags);
1798 if (!error || (error != ENOENT)) {
1799 nfs_node_unlock(np);
1800 goto nfsmout;
1801 }
1802 nfs_node_unlock(np);
1803 }
1804 nfsmout_if(error);
1805
1806 if (!xid) { /* out-of-order rpc - attributes were dropped */
1807 FSDBG(513, -1, np, np->n_xid >> 32, np->n_xid);
1808 if (avoidfloods++ < 20) {
1809 goto tryagain;
1810 }
1811 /* avoidfloods>1 is bizarre. at 20 pull the plug */
1812 /* just return the last attributes we got */
1813 }
1814 nfsmout:
1815 nfs_node_lock_force(np);
1816 if (inprogset) {
1817 wanted = ISSET(np->n_flag, NGETATTRWANT);
1818 CLR(np->n_flag, (NGETATTRINPROG | NGETATTRWANT));
1819 }
1820 if (!error) {
1821 /* check if the node changed on us */
1822 vnode_t vp = NFSTOV(np);
1823 enum vtype vtype = vnode_vtype(vp);
1824 if ((vtype == VDIR) && NFS_CHANGED_NC(nfsvers, np, nvap)) {
1825 FSDBG(513, -1, np, 0, np);
1826 np->n_flag &= ~NNEGNCENTRIES;
1827 cache_purge(vp);
1828 np->n_ncgen++;
1829 NFS_CHANGED_UPDATE_NC(nfsvers, np, nvap);
1830 NFS_VNOP_DBG("Purge directory %s\n", vnode_getname(vp) ? vnode_getname(vp) : "empty");
1831 }
1832 if (NFS_CHANGED(nfsvers, np, nvap)) {
1833 FSDBG(513, -1, np, -1, np);
1834 if (vtype == VDIR) {
1835 NFS_VNOP_DBG("Invalidate directory %s\n", vnode_getname(vp) ? vnode_getname(vp) : "empty");
1836 nfs_invaldir(np);
1837 }
1838 nfs_node_unlock(np);
1839 if (wanted) {
1840 wakeup(np);
1841 }
1842 error = nfs_vinvalbuf1(vp, V_SAVE, ctx, 1);
1843 FSDBG(513, -1, np, -2, error);
1844 if (!error) {
1845 nfs_node_lock_force(np);
1846 NFS_CHANGED_UPDATE(nfsvers, np, nvap);
1847 nfs_node_unlock(np);
1848 }
1849 } else {
1850 nfs_node_unlock(np);
1851 if (wanted) {
1852 wakeup(np);
1853 }
1854 }
1855 } else {
1856 nfs_node_unlock(np);
1857 if (wanted) {
1858 wakeup(np);
1859 }
1860 }
1861
1862 if (nvattr != NULL) {
1863 NVATTR_CLEANUP(nvap);
1864 zfree(KT_NFS_VATTR, nvattr);
1865 } else if (!(flags & NGA_ACL)) {
1866 /* make sure we don't return an ACL if it wasn't asked for */
1867 NFS_BITMAP_CLR(nvap->nva_bitmap, NFS_FATTR_ACL);
1868 if (nvap->nva_acl) {
1869 kauth_acl_free(nvap->nva_acl);
1870 nvap->nva_acl = NULL;
1871 }
1872 }
1873 FSDBG_BOT(513, np->n_size, error, np->n_vattr.nva_size, np->n_flag);
1874 return error;
1875 }
1876
1877
1878 /*
1879 * NFS getattr call from vfs.
1880 */
1881
1882 /*
1883 * The attributes we support over the wire.
1884 * We also get fsid but the vfs layer gets it out of the mount
1885 * structure after this calling us so there's no need to return it,
1886 * and Finder expects to call getattrlist just looking for the FSID
1887 * with out hanging on a non responsive server.
1888 */
1889 #define NFS3_SUPPORTED_VATTRS \
1890 (VNODE_ATTR_va_rdev | \
1891 VNODE_ATTR_va_nlink | \
1892 VNODE_ATTR_va_data_size | \
1893 VNODE_ATTR_va_data_alloc | \
1894 VNODE_ATTR_va_uid | \
1895 VNODE_ATTR_va_gid | \
1896 VNODE_ATTR_va_mode | \
1897 VNODE_ATTR_va_modify_time | \
1898 VNODE_ATTR_va_change_time | \
1899 VNODE_ATTR_va_access_time | \
1900 VNODE_ATTR_va_fileid | \
1901 VNODE_ATTR_va_type)
1902
1903
1904 int
nfs3_vnop_getattr(struct vnop_getattr_args * ap)1905 nfs3_vnop_getattr(
1906 struct vnop_getattr_args /* {
1907 * struct vnodeop_desc *a_desc;
1908 * vnode_t a_vp;
1909 * struct vnode_attr *a_vap;
1910 * vfs_context_t a_context;
1911 * } */*ap)
1912 {
1913 int error;
1914 nfsnode_t np;
1915 uint64_t supported_attrs;
1916 struct nfs_vattr *nva;
1917 struct vnode_attr *vap = ap->a_vap;
1918 struct nfsmount *nmp;
1919 dev_t rdev;
1920
1921 nmp = VTONMP(ap->a_vp);
1922
1923 /*
1924 * Lets don't go over the wire if we don't support any of the attributes.
1925 * Just fall through at the VFS layer and let it cons up what it needs.
1926 */
1927 /* Return the io size no matter what, since we don't go over the wire for this */
1928 VATTR_RETURN(vap, va_iosize, nfs_iosize);
1929
1930 supported_attrs = NFS3_SUPPORTED_VATTRS;
1931
1932 if ((vap->va_active & supported_attrs) == 0) {
1933 return 0;
1934 }
1935
1936 if (VATTR_IS_ACTIVE(ap->a_vap, va_name)) {
1937 NFS_VNOP_DBG("Getting attrs for %s\n", vnode_getname(ap->a_vp) ? vnode_getname(ap->a_vp) : "empty");
1938 }
1939
1940 /*
1941 * We should not go over the wire if only fileid was requested and has ever been populated.
1942 */
1943 if ((vap->va_active & supported_attrs) == VNODE_ATTR_va_fileid) {
1944 np = VTONFS(ap->a_vp);
1945 if (np->n_attrstamp) {
1946 VATTR_RETURN(vap, va_fileid, np->n_vattr.nva_fileid);
1947 return 0;
1948 }
1949 }
1950
1951 nva = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
1952 error = nfs_getattr(VTONFS(ap->a_vp), nva, ap->a_context, NGA_CACHED);
1953 if (error) {
1954 goto out;
1955 }
1956
1957 /* copy nva to *a_vap */
1958 VATTR_RETURN(vap, va_type, nva->nva_type);
1959 VATTR_RETURN(vap, va_mode, nva->nva_mode);
1960 rdev = makedev(nva->nva_rawdev.specdata1, nva->nva_rawdev.specdata2);
1961 VATTR_RETURN(vap, va_rdev, rdev);
1962 VATTR_RETURN(vap, va_uid, nva->nva_uid);
1963 VATTR_RETURN(vap, va_gid, nva->nva_gid);
1964 VATTR_RETURN(vap, va_nlink, nva->nva_nlink);
1965 VATTR_RETURN(vap, va_fileid, nva->nva_fileid);
1966 VATTR_RETURN(vap, va_data_size, nva->nva_size);
1967 VATTR_RETURN(vap, va_data_alloc, nva->nva_bytes);
1968 vap->va_access_time.tv_sec = nva->nva_timesec[NFSTIME_ACCESS];
1969 vap->va_access_time.tv_nsec = nva->nva_timensec[NFSTIME_ACCESS];
1970 VATTR_SET_SUPPORTED(vap, va_access_time);
1971 vap->va_modify_time.tv_sec = nva->nva_timesec[NFSTIME_MODIFY];
1972 vap->va_modify_time.tv_nsec = nva->nva_timensec[NFSTIME_MODIFY];
1973 VATTR_SET_SUPPORTED(vap, va_modify_time);
1974 vap->va_change_time.tv_sec = nva->nva_timesec[NFSTIME_CHANGE];
1975 vap->va_change_time.tv_nsec = nva->nva_timensec[NFSTIME_CHANGE];
1976 VATTR_SET_SUPPORTED(vap, va_change_time);
1977
1978
1979 // VATTR_RETURN(vap, va_encoding, 0xffff /* kTextEncodingUnknown */);
1980 out:
1981 zfree(KT_NFS_VATTR, nva);
1982 return NFS_MAPERR(error);
1983 }
1984
1985 /*
1986 * NFS setattr call.
1987 */
1988 int
nfs_vnop_setattr(struct vnop_setattr_args * ap)1989 nfs_vnop_setattr(
1990 struct vnop_setattr_args /* {
1991 * struct vnodeop_desc *a_desc;
1992 * vnode_t a_vp;
1993 * struct vnode_attr *a_vap;
1994 * vfs_context_t a_context;
1995 * } */*ap)
1996 {
1997 vfs_context_t ctx = ap->a_context;
1998 vnode_t vp = ap->a_vp;
1999 nfsnode_t np = VTONFS(vp);
2000 struct nfsmount *nmp;
2001 struct vnode_attr *vap = ap->a_vap;
2002 int error = 0;
2003 int biosize, nfsvers, namedattrs;
2004 u_quad_t origsize, vapsize;
2005 struct nfs_dulookup *dul;
2006 nfsnode_t dnp = NULL;
2007 int dul_in_progress = 0;
2008 vnode_t dvp = NULL;
2009 const char *vname = NULL;
2010 #if CONFIG_NFS4
2011 struct nfs_open_owner *noop = NULL;
2012 struct nfs_open_file *nofp = NULL;
2013 #endif
2014 nmp = VTONMP(vp);
2015 if (nfs_mount_gone(nmp)) {
2016 return ENXIO;
2017 }
2018 nfsvers = nmp->nm_vers;
2019 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
2020 biosize = nmp->nm_biosize;
2021
2022 /* Disallow write attempts if the filesystem is mounted read-only. */
2023 if (vnode_vfsisrdonly(vp)) {
2024 return EROFS;
2025 }
2026
2027 origsize = np->n_size;
2028 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
2029 switch (vnode_vtype(vp)) {
2030 case VDIR:
2031 return EISDIR;
2032 case VCHR:
2033 case VBLK:
2034 case VSOCK:
2035 case VFIFO:
2036 if (!VATTR_IS_ACTIVE(vap, va_modify_time) &&
2037 !VATTR_IS_ACTIVE(vap, va_access_time) &&
2038 !VATTR_IS_ACTIVE(vap, va_mode) &&
2039 !VATTR_IS_ACTIVE(vap, va_uid) &&
2040 !VATTR_IS_ACTIVE(vap, va_gid)) {
2041 return 0;
2042 }
2043 VATTR_CLEAR_ACTIVE(vap, va_data_size);
2044 break;
2045 default:
2046 /*
2047 * Disallow write attempts if the filesystem is
2048 * mounted read-only.
2049 */
2050 if (vnode_vfsisrdonly(vp)) {
2051 return EROFS;
2052 }
2053 FSDBG_TOP(512, np->n_size, vap->va_data_size,
2054 np->n_vattr.nva_size, np->n_flag);
2055 /* clear NNEEDINVALIDATE, if set */
2056 if ((error = nfs_node_lock(np))) {
2057 return NFS_MAPERR(error);
2058 }
2059 if (np->n_flag & NNEEDINVALIDATE) {
2060 np->n_flag &= ~NNEEDINVALIDATE;
2061 }
2062 nfs_node_unlock(np);
2063 /* flush everything */
2064 error = nfs_vinvalbuf1(vp, (vap->va_data_size ? V_SAVE : 0), ctx, 1);
2065 if (error) {
2066 NP(np, "nfs_setattr: nfs_vinvalbuf1 %d", error);
2067 FSDBG_BOT(512, np->n_size, vap->va_data_size, np->n_vattr.nva_size, -1);
2068 return NFS_MAPERR(error);
2069 }
2070 #if CONFIG_NFS4
2071 if (nfsvers >= NFS_VER4) {
2072 /* setting file size requires having the file open for write access */
2073 if (np->n_flag & NREVOKE) {
2074 return EIO;
2075 }
2076 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), vfs_context_proc(ctx), 1);
2077 if (!noop) {
2078 return ENOMEM;
2079 }
2080 restart:
2081 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
2082 if (error) {
2083 return NFS_MAPERR(error);
2084 }
2085 if (np->n_flag & NREVOKE) {
2086 nfs_mount_state_in_use_end(nmp, 0);
2087 return EIO;
2088 }
2089 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
2090 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
2091 error = EIO;
2092 }
2093 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
2094 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
2095 nofp = NULL;
2096 if (!error) {
2097 nfs_mount_state_in_use_end(nmp, 0);
2098 goto restart;
2099 }
2100 }
2101 if (!error) {
2102 error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
2103 }
2104 if (error) {
2105 nfs_mount_state_in_use_end(nmp, 0);
2106 nfs_open_owner_rele(noop);
2107 return NFS_MAPERR(error);
2108 }
2109 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE)) {
2110 /* we don't have the file open for write access, so open it */
2111 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, ctx);
2112 if (!error) {
2113 nofp->nof_flags |= NFS_OPEN_FILE_SETATTR;
2114 }
2115 if (nfs_mount_state_error_should_restart(error)) {
2116 nfs_open_file_clear_busy(nofp);
2117 nofp = NULL;
2118 nfs_mount_state_in_use_end(nmp, error);
2119 goto restart;
2120 }
2121 }
2122 }
2123 #endif
2124 nfs_data_lock(np, NFS_DATA_LOCK_EXCLUSIVE);
2125 if (np->n_size > vap->va_data_size) { /* shrinking? */
2126 daddr64_t obn, bn;
2127 int mustwrite;
2128 off_t neweofoff;
2129 struct nfsbuf *bp;
2130 nfsbufpgs pagemask;
2131
2132 obn = (np->n_size - 1) / biosize;
2133 bn = vap->va_data_size / biosize;
2134 for (; obn >= bn; obn--) {
2135 if (!nfs_buf_is_incore(np, obn)) {
2136 continue;
2137 }
2138 error = nfs_buf_get(np, obn, biosize, NULL, NBLK_READ, &bp);
2139 if (error) {
2140 continue;
2141 }
2142 if (obn != bn) {
2143 FSDBG(512, bp, bp->nb_flags, 0, obn);
2144 SET(bp->nb_flags, NB_INVAL);
2145 nfs_buf_release(bp, 1);
2146 continue;
2147 }
2148 mustwrite = 0;
2149 neweofoff = vap->va_data_size - NBOFF(bp);
2150 /* check for any dirty data before the new EOF */
2151 if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < neweofoff)) {
2152 /* clip dirty range to EOF */
2153 if (bp->nb_dirtyend > neweofoff) {
2154 bp->nb_dirtyend = neweofoff;
2155 if (bp->nb_dirtyoff >= bp->nb_dirtyend) {
2156 bp->nb_dirtyoff = bp->nb_dirtyend = 0;
2157 }
2158 }
2159 if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < neweofoff)) {
2160 mustwrite++;
2161 }
2162 }
2163 nfs_buf_pgs_get_page_mask(&pagemask, round_page_64(neweofoff) / PAGE_SIZE);
2164 nfs_buf_pgs_bit_and(&bp->nb_dirty, &pagemask, &bp->nb_dirty);
2165 if (nfs_buf_pgs_is_set(&bp->nb_dirty)) {
2166 mustwrite++;
2167 }
2168 if (!mustwrite) {
2169 FSDBG(512, bp, bp->nb_flags, 0, obn);
2170 SET(bp->nb_flags, NB_INVAL);
2171 nfs_buf_release(bp, 1);
2172 continue;
2173 }
2174 /* gotta write out dirty data before invalidating */
2175 /* (NB_STABLE indicates that data writes should be FILESYNC) */
2176 /* (NB_NOCACHE indicates buffer should be discarded) */
2177 CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL | NB_ASYNC | NB_READ));
2178 SET(bp->nb_flags, NB_STABLE | NB_NOCACHE);
2179 if (!IS_VALID_CRED(bp->nb_wcred)) {
2180 kauth_cred_t cred = vfs_context_ucred(ctx);
2181 kauth_cred_ref(cred);
2182 bp->nb_wcred = cred;
2183 }
2184 error = nfs_buf_write(bp);
2185 // Note: bp has been released
2186 if (error) {
2187 FSDBG(512, bp, 0xd00dee, 0xbad, error);
2188 nfs_node_lock_force(np);
2189 np->n_error = error;
2190 np->n_flag |= NWRITEERR;
2191 /*
2192 * There was a write error and we need to
2193 * invalidate attrs and flush buffers in
2194 * order to sync up with the server.
2195 * (if this write was extending the file,
2196 * we may no longer know the correct size)
2197 */
2198 NATTRINVALIDATE(np);
2199 nfs_node_unlock(np);
2200 nfs_data_unlock(np);
2201 nfs_vinvalbuf1(vp, V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
2202 nfs_data_lock(np, NFS_DATA_LOCK_EXCLUSIVE);
2203 error = 0;
2204 }
2205 }
2206 }
2207 if (vap->va_data_size != np->n_size) {
2208 ubc_setsize(vp, (off_t)vap->va_data_size); /* XXX error? */
2209 }
2210 origsize = np->n_size;
2211 np->n_size = np->n_vattr.nva_size = vap->va_data_size;
2212 nfs_node_lock_force(np);
2213 CLR(np->n_flag, NUPDATESIZE);
2214 nfs_node_unlock(np);
2215 FSDBG(512, np, np->n_size, np->n_vattr.nva_size, 0xf00d0001);
2216 }
2217 } else if (VATTR_IS_ACTIVE(vap, va_modify_time) ||
2218 VATTR_IS_ACTIVE(vap, va_access_time) ||
2219 (vap->va_vaflags & VA_UTIMES_NULL)) {
2220 if ((error = nfs_node_lock(np))) {
2221 #if CONFIG_NFS4
2222 if (nfsvers >= NFS_VER4) {
2223 nfs_mount_state_in_use_end(nmp, 0);
2224 }
2225 #endif
2226 return NFS_MAPERR(error);
2227 }
2228 if ((np->n_flag & NMODIFIED) && (vnode_vtype(vp) == VREG)) {
2229 nfs_node_unlock(np);
2230 error = nfs_vinvalbuf1(vp, V_SAVE, ctx, 1);
2231 if (error == EINTR) {
2232 #if CONFIG_NFS4
2233 if (nfsvers >= NFS_VER4) {
2234 nfs_mount_state_in_use_end(nmp, 0);
2235 }
2236 #endif
2237 return NFS_MAPERR(error);
2238 }
2239 } else {
2240 nfs_node_unlock(np);
2241 }
2242 }
2243
2244 dul = kalloc_type(struct nfs_dulookup, Z_WAITOK);
2245
2246 if ((VATTR_IS_ACTIVE(vap, va_mode) || VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid) ||
2247 VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid)) &&
2248 !(error = nfs_node_lock(np))) {
2249 NACCESSINVALIDATE(np);
2250 nfs_node_unlock(np);
2251 if (!namedattrs) {
2252 dvp = vnode_getparent(vp);
2253 vname = vnode_getname(vp);
2254 dnp = (dvp && vname) ? VTONFS(dvp) : NULL;
2255 if (dnp) {
2256 if (nfs_node_set_busy(dnp, vfs_context_thread(ctx))) {
2257 vnode_put(dvp);
2258 vnode_putname(vname);
2259 } else {
2260 nfs_dulookup_init(dul, dnp, vname, NFS_STRLEN_INT(vname), ctx);
2261 nfs_dulookup_start(dul, dnp, ctx);
2262 dul_in_progress = 1;
2263 }
2264 } else {
2265 if (dvp) {
2266 vnode_put(dvp);
2267 }
2268 if (vname) {
2269 vnode_putname(vname);
2270 }
2271 }
2272 }
2273 }
2274
2275 if (!error) {
2276 error = nmp->nm_funcs->nf_setattr_rpc(np, vap, ctx);
2277 }
2278
2279 if (dul_in_progress) {
2280 nfs_dulookup_finish(dul, dnp, ctx);
2281 nfs_node_clear_busy(dnp);
2282 vnode_put(dvp);
2283 vnode_putname(vname);
2284 }
2285
2286 kfree_type(struct nfs_dulookup, dul);
2287 FSDBG_BOT(512, np->n_size, vap->va_data_size, np->n_vattr.nva_size, error);
2288 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
2289 if (error && (origsize != np->n_size) &&
2290 ((nfsvers < NFS_VER4) || !nfs_mount_state_error_should_restart(error))) {
2291 /* make every effort to resync file size w/ server... */
2292 /* (don't bother if we'll be restarting the operation) */
2293 int err; /* preserve "error" for return */
2294 np->n_size = np->n_vattr.nva_size = origsize;
2295 nfs_node_lock_force(np);
2296 CLR(np->n_flag, NUPDATESIZE);
2297 nfs_node_unlock(np);
2298 FSDBG(512, np, np->n_size, np->n_vattr.nva_size, 0xf00d0002);
2299 ubc_setsize(vp, (off_t)np->n_size); /* XXX check error */
2300 vapsize = vap->va_data_size;
2301 vap->va_data_size = origsize;
2302 err = nmp->nm_funcs->nf_setattr_rpc(np, vap, ctx);
2303 if (err) {
2304 NP(np, "nfs_vnop_setattr: nfs%d_setattr_rpc %d %d", nfsvers, error, err);
2305 }
2306 vap->va_data_size = vapsize;
2307 }
2308 nfs_node_lock_force(np);
2309 /*
2310 * The size was just set. If the size is already marked for update, don't
2311 * trust the newsize (it may have been set while the setattr was in progress).
2312 * Clear the update flag and make sure we fetch new attributes so we are sure
2313 * we have the latest size.
2314 */
2315 if (ISSET(np->n_flag, NUPDATESIZE)) {
2316 CLR(np->n_flag, NUPDATESIZE);
2317 NATTRINVALIDATE(np);
2318 nfs_node_unlock(np);
2319 nfs_getattr(np, NULL, ctx, NGA_UNCACHED);
2320 } else {
2321 nfs_node_unlock(np);
2322 }
2323 nfs_data_unlock(np);
2324 #if CONFIG_NFS4
2325 if (nfsvers >= NFS_VER4) {
2326 if (nofp) {
2327 /* don't close our setattr open if we'll be restarting... */
2328 if (!nfs_mount_state_error_should_restart(error) &&
2329 (nofp->nof_flags & NFS_OPEN_FILE_SETATTR)) {
2330 int err = nfs_close(np, nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, ctx);
2331 if (err) {
2332 NP(np, "nfs_vnop_setattr: close error: %d", err);
2333 }
2334 nofp->nof_flags &= ~NFS_OPEN_FILE_SETATTR;
2335 }
2336 nfs_open_file_clear_busy(nofp);
2337 nofp = NULL;
2338 }
2339 if (nfs_mount_state_in_use_end(nmp, error)) {
2340 goto restart;
2341 }
2342 nfs_open_owner_rele(noop);
2343 }
2344 #endif
2345 }
2346 return NFS_MAPERR(error);
2347 }
2348
2349 /*
2350 * Do an NFS setattr RPC.
2351 */
2352 int
nfs3_setattr_rpc(nfsnode_t np,struct vnode_attr * vap,vfs_context_t ctx)2353 nfs3_setattr_rpc(
2354 nfsnode_t np,
2355 struct vnode_attr *vap,
2356 vfs_context_t ctx)
2357 {
2358 struct nfsmount *nmp = NFSTONMP(np);
2359 int error = 0, lockerror = ENOENT, status = 0, wccpostattr = 0, nfsvers;
2360 u_int64_t xid, nextxid;
2361 struct nfsm_chain nmreq, nmrep;
2362
2363 if (nfs_mount_gone(nmp)) {
2364 return ENXIO;
2365 }
2366 nfsvers = nmp->nm_vers;
2367
2368 VATTR_SET_SUPPORTED(vap, va_mode);
2369 VATTR_SET_SUPPORTED(vap, va_uid);
2370 VATTR_SET_SUPPORTED(vap, va_gid);
2371 VATTR_SET_SUPPORTED(vap, va_data_size);
2372 VATTR_SET_SUPPORTED(vap, va_access_time);
2373 VATTR_SET_SUPPORTED(vap, va_modify_time);
2374
2375
2376 if (VATTR_IS_ACTIVE(vap, va_flags)
2377 ) {
2378 if (vap->va_flags) { /* we don't support setting flags */
2379 if (vap->va_active & ~VNODE_ATTR_va_flags) {
2380 return EINVAL; /* return EINVAL if other attributes also set */
2381 } else {
2382 return ENOTSUP; /* return ENOTSUP for chflags(2) */
2383 }
2384 }
2385 /* no flags set, so we'll just ignore it */
2386 if (!(vap->va_active & ~VNODE_ATTR_va_flags)) {
2387 return 0; /* no (other) attributes to set, so nothing to do */
2388 }
2389 }
2390
2391 nfsm_chain_null(&nmreq);
2392 nfsm_chain_null(&nmrep);
2393
2394 nfsm_chain_build_alloc_init(error, &nmreq,
2395 NFSX_FH(nfsvers) + NFSX_SATTR(nfsvers));
2396 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
2397 if (nfsvers == NFS_VER3) {
2398 if (VATTR_IS_ACTIVE(vap, va_mode)) {
2399 nfsm_chain_add_32(error, &nmreq, TRUE);
2400 nfsm_chain_add_32(error, &nmreq, vap->va_mode);
2401 } else {
2402 nfsm_chain_add_32(error, &nmreq, FALSE);
2403 }
2404 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2405 nfsm_chain_add_32(error, &nmreq, TRUE);
2406 nfsm_chain_add_32(error, &nmreq, vap->va_uid);
2407 } else {
2408 nfsm_chain_add_32(error, &nmreq, FALSE);
2409 }
2410 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2411 nfsm_chain_add_32(error, &nmreq, TRUE);
2412 nfsm_chain_add_32(error, &nmreq, vap->va_gid);
2413 } else {
2414 nfsm_chain_add_32(error, &nmreq, FALSE);
2415 }
2416 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
2417 nfsm_chain_add_32(error, &nmreq, TRUE);
2418 nfsm_chain_add_64(error, &nmreq, vap->va_data_size);
2419 } else {
2420 nfsm_chain_add_32(error, &nmreq, FALSE);
2421 }
2422 if (vap->va_vaflags & VA_UTIMES_NULL) {
2423 nfsm_chain_add_32(error, &nmreq, NFS_TIME_SET_TO_SERVER);
2424 nfsm_chain_add_32(error, &nmreq, NFS_TIME_SET_TO_SERVER);
2425 } else {
2426 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
2427 nfsm_chain_add_32(error, &nmreq, NFS_TIME_SET_TO_CLIENT);
2428 nfsm_chain_add_32(error, &nmreq, vap->va_access_time.tv_sec);
2429 nfsm_chain_add_32(error, &nmreq, vap->va_access_time.tv_nsec);
2430 } else {
2431 nfsm_chain_add_32(error, &nmreq, NFS_TIME_DONT_CHANGE);
2432 }
2433 if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
2434 nfsm_chain_add_32(error, &nmreq, NFS_TIME_SET_TO_CLIENT);
2435 nfsm_chain_add_32(error, &nmreq, vap->va_modify_time.tv_sec);
2436 nfsm_chain_add_32(error, &nmreq, vap->va_modify_time.tv_nsec);
2437 } else {
2438 nfsm_chain_add_32(error, &nmreq, NFS_TIME_DONT_CHANGE);
2439 }
2440 }
2441 nfsm_chain_add_32(error, &nmreq, FALSE);
2442 } else {
2443 nfsm_chain_add_32(error, &nmreq, VATTR_IS_ACTIVE(vap, va_mode) ?
2444 vtonfsv2_mode(vnode_vtype(NFSTOV(np)), vap->va_mode) : -1);
2445 nfsm_chain_add_32(error, &nmreq, VATTR_IS_ACTIVE(vap, va_uid) ?
2446 vap->va_uid : (uint32_t)-1);
2447 nfsm_chain_add_32(error, &nmreq, VATTR_IS_ACTIVE(vap, va_gid) ?
2448 vap->va_gid : (uint32_t)-1);
2449 nfsm_chain_add_32(error, &nmreq, VATTR_IS_ACTIVE(vap, va_data_size) ?
2450 vap->va_data_size : (uint32_t)-1);
2451 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
2452 nfsm_chain_add_32(error, &nmreq, vap->va_access_time.tv_sec);
2453 nfsm_chain_add_32(error, &nmreq, (vap->va_access_time.tv_nsec != -1) ?
2454 ((uint32_t)vap->va_access_time.tv_nsec / 1000) : 0xffffffff);
2455 } else {
2456 nfsm_chain_add_32(error, &nmreq, -1);
2457 nfsm_chain_add_32(error, &nmreq, -1);
2458 }
2459 if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
2460 nfsm_chain_add_32(error, &nmreq, vap->va_modify_time.tv_sec);
2461 nfsm_chain_add_32(error, &nmreq, (vap->va_modify_time.tv_nsec != -1) ?
2462 ((uint32_t)vap->va_modify_time.tv_nsec / 1000) : 0xffffffff);
2463 } else {
2464 nfsm_chain_add_32(error, &nmreq, -1);
2465 nfsm_chain_add_32(error, &nmreq, -1);
2466 }
2467 }
2468 nfsm_chain_build_done(error, &nmreq);
2469 nfsmout_if(error);
2470 error = nfs_request(np, NULL, &nmreq, NFSPROC_SETATTR, ctx, NULL, &nmrep, &xid, &status);
2471 if ((lockerror = nfs_node_lock(np))) {
2472 error = lockerror;
2473 }
2474 if (nfsvers == NFS_VER3) {
2475 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
2476 nfsm_chain_get_wcc_data(error, &nmrep, np, &premtime, &wccpostattr, &xid);
2477 nfsmout_if(error);
2478 /* if file hadn't changed, update cached mtime */
2479 if (nfstimespeccmp(&np->n_mtime, &premtime, ==)) {
2480 NFS_CHANGED_UPDATE(nfsvers, np, &np->n_vattr);
2481 }
2482 /* if directory hadn't changed, update namecache mtime */
2483 if ((vnode_vtype(NFSTOV(np)) == VDIR) &&
2484 nfstimespeccmp(&np->n_ncmtime, &premtime, ==)) {
2485 NFS_CHANGED_UPDATE_NC(nfsvers, np, &np->n_vattr);
2486 }
2487 if (!wccpostattr) {
2488 NATTRINVALIDATE(np);
2489 }
2490 error = status;
2491 } else {
2492 if (!error) {
2493 error = status;
2494 }
2495 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
2496 }
2497 /*
2498 * We just changed the attributes and we want to make sure that we
2499 * see the latest attributes. Get the next XID. If it's not the
2500 * next XID after the SETATTR XID, then it's possible that another
2501 * RPC was in flight at the same time and it might put stale attributes
2502 * in the cache. In that case, we invalidate the attributes and set
2503 * the attribute cache XID to guarantee that newer attributes will
2504 * get loaded next.
2505 */
2506 nextxid = 0;
2507 nfs_get_xid(&nextxid);
2508 if (nextxid != (xid + 1)) {
2509 np->n_xid = nextxid;
2510 NATTRINVALIDATE(np);
2511 }
2512 nfsmout:
2513 if (!lockerror) {
2514 nfs_node_unlock(np);
2515 }
2516 nfsm_chain_cleanup(&nmreq);
2517 nfsm_chain_cleanup(&nmrep);
2518 return error;
2519 }
2520
2521 /*
2522 * NFS lookup call, one step at a time...
2523 * First look in cache
2524 * If not found, unlock the directory nfsnode and do the RPC
2525 */
2526 int
nfs_vnop_lookup(struct vnop_lookup_args * ap)2527 nfs_vnop_lookup(
2528 struct vnop_lookup_args /* {
2529 * struct vnodeop_desc *a_desc;
2530 * vnode_t a_dvp;
2531 * vnode_t *a_vpp;
2532 * struct componentname *a_cnp;
2533 * vfs_context_t a_context;
2534 * } */*ap)
2535 {
2536 vfs_context_t ctx = ap->a_context;
2537 struct componentname *cnp = ap->a_cnp;
2538 vnode_t dvp = ap->a_dvp;
2539 vnode_t *vpp = ap->a_vpp;
2540 int flags = cnp->cn_flags;
2541 vnode_t newvp;
2542 nfsnode_t dnp, np;
2543 struct nfsmount *nmp;
2544 mount_t mp;
2545 int nfsvers, error, busyerror = ENOENT, isdot, isdotdot, negnamecache;
2546 u_int64_t xid = 0;
2547 struct nfs_vattr *nvattr;
2548 int ngflags, skipdu = 0;
2549 struct vnop_access_args naa;
2550 fhandle_t *fh;
2551 struct nfsreq *req;
2552
2553 *vpp = NULLVP;
2554
2555 dnp = VTONFS(dvp);
2556
2557 fh = zalloc(nfs_fhandle_zone);
2558 req = zalloc_flags(nfs_req_zone, Z_WAITOK);
2559 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
2560 NVATTR_INIT(nvattr);
2561
2562 mp = vnode_mount(dvp);
2563 nmp = VFSTONFS(mp);
2564 if (nfs_mount_gone(nmp)) {
2565 error = ENXIO;
2566 goto error_return;
2567 }
2568 nfsvers = nmp->nm_vers;
2569 negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
2570
2571 if ((error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx)))) {
2572 goto error_return;
2573 }
2574 /* nfs_getattr() will check changed and purge caches */
2575 if ((error = nfs_getattr(dnp, NULL, ctx, NGA_CACHED))) {
2576 goto error_return;
2577 }
2578
2579 error = cache_lookup(dvp, vpp, cnp);
2580 switch (error) {
2581 case ENOENT:
2582 /* negative cache entry */
2583 goto error_return;
2584 case 0:
2585 /* cache miss */
2586 if ((nfsvers > NFS_VER2) && NMFLAG(nmp, RDIRPLUS)) {
2587 /* if rdirplus, try dir buf cache lookup */
2588 error = nfs_dir_buf_cache_lookup(dnp, &np, cnp, ctx, 0, &skipdu);
2589 if (!error && np) {
2590 /* dir buf cache hit */
2591 *vpp = NFSTOV(np);
2592 error = -1;
2593 } else if (skipdu) {
2594 /* Skip lookup for du files */
2595 error = ENOENT;
2596 goto error_return;
2597 }
2598 }
2599 if (error != -1) { /* cache miss */
2600 break;
2601 }
2602 OS_FALLTHROUGH;
2603 case -1:
2604 /* cache hit, not really an error */
2605 OSAddAtomic64(1, &nfsclntstats.lookupcache_hits);
2606
2607 nfs_node_clear_busy(dnp);
2608 busyerror = ENOENT;
2609
2610 /* check for directory access */
2611 naa.a_desc = &vnop_access_desc;
2612 naa.a_vp = dvp;
2613 naa.a_action = KAUTH_VNODE_SEARCH;
2614 naa.a_context = ctx;
2615
2616 /* compute actual success/failure based on accessibility */
2617 error = nfs_vnop_access(&naa);
2618 OS_FALLTHROUGH;
2619 default:
2620 /* unexpected error from cache_lookup */
2621 goto error_return;
2622 }
2623
2624 /* skip lookup, if we know who we are: "." or ".." */
2625 isdot = isdotdot = 0;
2626 if (cnp->cn_nameptr[0] == '.') {
2627 if (cnp->cn_namelen == 1) {
2628 isdot = 1;
2629 }
2630 if ((cnp->cn_namelen == 2) && (cnp->cn_nameptr[1] == '.')) {
2631 isdotdot = 1;
2632 }
2633 }
2634 if (isdotdot || isdot) {
2635 fh->fh_len = 0;
2636 goto found;
2637 }
2638 #if CONFIG_NFS4
2639 if ((nfsvers >= NFS_VER4) && (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER)) {
2640 /* we should never be looking things up in a trigger directory, return nothing */
2641 error = ENOENT;
2642 goto error_return;
2643 }
2644 #endif
2645
2646 /* do we know this name is too long? */
2647 nmp = VTONMP(dvp);
2648 if (nfs_mount_gone(nmp)) {
2649 error = ENXIO;
2650 goto error_return;
2651 }
2652 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXNAME) &&
2653 (cnp->cn_namelen > nmp->nm_fsattr.nfsa_maxname)) {
2654 error = ENAMETOOLONG;
2655 goto error_return;
2656 }
2657
2658 error = 0;
2659 newvp = NULLVP;
2660
2661 OSAddAtomic64(1, &nfsclntstats.lookupcache_misses);
2662
2663 error = nmp->nm_funcs->nf_lookup_rpc_async(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &req);
2664 nfsmout_if(error);
2665 error = nmp->nm_funcs->nf_lookup_rpc_async_finish(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, req, &xid, fh, nvattr);
2666 nfsmout_if(error);
2667
2668 /* is the file handle the same as this directory's file handle? */
2669 isdot = NFS_CMPFH(dnp, fh->fh_data, fh->fh_len);
2670
2671 found:
2672 if (flags & ISLASTCN) {
2673 switch (cnp->cn_nameiop) {
2674 case DELETE:
2675 cnp->cn_flags &= ~MAKEENTRY;
2676 break;
2677 case RENAME:
2678 cnp->cn_flags &= ~MAKEENTRY;
2679 if (isdot) {
2680 error = EISDIR;
2681 goto error_return;
2682 }
2683 break;
2684 }
2685 }
2686
2687 if (isdotdot) {
2688 newvp = vnode_getparent(dvp);
2689 if (!newvp) {
2690 error = ENOENT;
2691 goto error_return;
2692 }
2693 } else if (isdot) {
2694 error = vnode_get(dvp);
2695 if (error) {
2696 goto error_return;
2697 }
2698 newvp = dvp;
2699 nfs_node_lock_force(dnp);
2700 if (fh->fh_len && (dnp->n_xid <= xid)) {
2701 nfs_loadattrcache(dnp, nvattr, &xid, 0);
2702 }
2703 nfs_node_unlock(dnp);
2704 } else {
2705 ngflags = (cnp->cn_flags & MAKEENTRY) ? NG_MAKEENTRY : 0;
2706 error = nfs_nget(mp, dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, ngflags, &np);
2707 if (error) {
2708 goto error_return;
2709 }
2710 newvp = NFSTOV(np);
2711 nfs_node_unlock(np);
2712 }
2713 *vpp = newvp;
2714
2715 nfsmout:
2716 if (error) {
2717 if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME)) &&
2718 (flags & ISLASTCN) && (error == ENOENT)) {
2719 if (vnode_mount(dvp) && vnode_vfsisrdonly(dvp)) {
2720 error = EROFS;
2721 } else {
2722 error = EJUSTRETURN;
2723 }
2724 }
2725 }
2726 if ((error == ENOENT) && (cnp->cn_flags & MAKEENTRY) &&
2727 (cnp->cn_nameiop != CREATE) && negnamecache) {
2728 /* add a negative entry in the name cache */
2729 nfs_node_lock_force(dnp);
2730 cache_enter(dvp, NULL, cnp);
2731 dnp->n_flag |= NNEGNCENTRIES;
2732 nfs_node_unlock(dnp);
2733 }
2734 error_return:
2735 NVATTR_CLEANUP(nvattr);
2736 NFS_ZFREE(nfs_fhandle_zone, fh);
2737 NFS_ZFREE(nfs_req_zone, req);
2738 zfree(KT_NFS_VATTR, nvattr);
2739 if (!busyerror) {
2740 nfs_node_clear_busy(dnp);
2741 }
2742 if (error && *vpp) {
2743 vnode_put(*vpp);
2744 *vpp = NULLVP;
2745 }
2746 return NFS_MAPERR(error);
2747 }
2748
2749 int nfs_readlink_nocache = DEFAULT_READLINK_NOCACHE;
2750
2751 /*
2752 * NFS readlink call
2753 */
2754 int
nfs_vnop_readlink(struct vnop_readlink_args * ap)2755 nfs_vnop_readlink(
2756 struct vnop_readlink_args /* {
2757 * struct vnodeop_desc *a_desc;
2758 * vnode_t a_vp;
2759 * struct uio *a_uio;
2760 * vfs_context_t a_context;
2761 * } */*ap)
2762 {
2763 vfs_context_t ctx = ap->a_context;
2764 nfsnode_t np = VTONFS(ap->a_vp);
2765 struct nfsmount *nmp;
2766 int error = 0, nfsvers;
2767 size_t buflen;
2768 uio_t uio = ap->a_uio;
2769 struct nfsbuf *bp = NULL;
2770 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };
2771 long timeo = 0;
2772
2773 if (vnode_vtype(ap->a_vp) != VLNK) {
2774 return EPERM;
2775 }
2776
2777 if (uio_resid(uio) == 0) {
2778 return 0;
2779 }
2780 if (uio_offset(uio) < 0) {
2781 return EINVAL;
2782 }
2783
2784 nmp = VTONMP(ap->a_vp);
2785 if (nfs_mount_gone(nmp)) {
2786 return ENXIO;
2787 }
2788 nfsvers = nmp->nm_vers;
2789
2790
2791 /* nfs_getattr() will check changed and purge caches */
2792 if ((error = nfs_getattr(np, NULL, ctx, nfs_readlink_nocache ? NGA_UNCACHED : NGA_CACHED))) {
2793 FSDBG(531, np, 0xd1e0001, 0, error);
2794 return NFS_MAPERR(error);
2795 }
2796
2797 if (nfs_readlink_nocache) {
2798 timeo = nfs_attrcachetimeout(np);
2799 nanouptime(&ts);
2800 }
2801
2802 retry:
2803 OSAddAtomic64(1, &nfsclntstats.biocache_readlinks);
2804 error = nfs_buf_get(np, 0, NFS_MAXPATHLEN, vfs_context_thread(ctx), NBLK_META, &bp);
2805 if (error) {
2806 FSDBG(531, np, 0xd1e0002, 0, error);
2807 return NFS_MAPERR(error);
2808 }
2809
2810 if (nfs_readlink_nocache) {
2811 NFS_VNOP_DBG("timeo = %ld ts.tv_sec = %ld need refresh = %d cached = %d\n", timeo, ts.tv_sec,
2812 (np->n_rltim.tv_sec + timeo) < ts.tv_sec || nfs_readlink_nocache > 1,
2813 ISSET(bp->nb_flags, NB_CACHE) == NB_CACHE);
2814 /* n_rltim is synchronized by the associated nfs buf */
2815 if (ISSET(bp->nb_flags, NB_CACHE) && ((nfs_readlink_nocache > 1) || ((np->n_rltim.tv_sec + timeo) < ts.tv_sec))) {
2816 SET(bp->nb_flags, NB_INVAL);
2817 nfs_buf_release(bp, 0);
2818 goto retry;
2819 }
2820 }
2821 if (!ISSET(bp->nb_flags, NB_CACHE)) {
2822 readagain:
2823 OSAddAtomic64(1, &nfsclntstats.readlink_bios);
2824 buflen = bp->nb_bufsize;
2825 error = nmp->nm_funcs->nf_readlink_rpc(np, bp->nb_data, &buflen, ctx);
2826 if (error) {
2827 if (error == ESTALE) {
2828 NFS_VNOP_DBG("Stale FH from readlink rpc\n");
2829 error = nfs_refresh_fh(np, ctx);
2830 if (error == 0) {
2831 goto readagain;
2832 }
2833 }
2834 SET(bp->nb_flags, NB_ERROR);
2835 bp->nb_error = error;
2836 NFS_VNOP_DBG("readlink failed %d\n", error);
2837 } else {
2838 bp->nb_validoff = 0;
2839 bp->nb_validend = buflen;
2840 np->n_rltim = ts;
2841 NFS_VNOP_DBG("readlink of %.*s\n", (int32_t)bp->nb_validend, (char *)bp->nb_data);
2842 }
2843 } else {
2844 NFS_VNOP_DBG("got cached link of %.*s\n", (int32_t)bp->nb_validend, (char *)bp->nb_data);
2845 }
2846
2847 if (!error && (bp->nb_validend > 0)) {
2848 int validend32 = bp->nb_validend > INT_MAX ? INT_MAX : (int)bp->nb_validend;
2849 error = uiomove(bp->nb_data, validend32, uio);
2850 if (!error && bp->nb_validend > validend32) {
2851 error = uiomove(bp->nb_data + validend32, (int)(bp->nb_validend - validend32), uio);
2852 }
2853 }
2854 FSDBG(531, np, bp->nb_validend, 0, error);
2855 nfs_buf_release(bp, 1);
2856 return NFS_MAPERR(error);
2857 }
2858
2859 /*
2860 * Do a readlink RPC.
2861 */
2862 int
nfs3_readlink_rpc(nfsnode_t np,char * buf,size_t * buflenp,vfs_context_t ctx)2863 nfs3_readlink_rpc(nfsnode_t np, char *buf, size_t *buflenp, vfs_context_t ctx)
2864 {
2865 struct nfsmount *nmp;
2866 int error = 0, lockerror = ENOENT, nfsvers, status;
2867 size_t len;
2868 u_int64_t xid;
2869 struct nfsm_chain nmreq, nmrep;
2870
2871 nmp = NFSTONMP(np);
2872 if (nfs_mount_gone(nmp)) {
2873 return ENXIO;
2874 }
2875 nfsvers = nmp->nm_vers;
2876 nfsm_chain_null(&nmreq);
2877 nfsm_chain_null(&nmrep);
2878
2879 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(nfsvers));
2880 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
2881 nfsm_chain_build_done(error, &nmreq);
2882 nfsmout_if(error);
2883 error = nfs_request(np, NULL, &nmreq, NFSPROC_READLINK, ctx, NULL, &nmrep, &xid, &status);
2884 if ((lockerror = nfs_node_lock(np))) {
2885 error = lockerror;
2886 }
2887 if (nfsvers == NFS_VER3) {
2888 nfsm_chain_postop_attr_update(error, &nmrep, np, &xid);
2889 }
2890 if (!error) {
2891 error = status;
2892 }
2893 nfsm_chain_get_32(error, &nmrep, len);
2894 nfsmout_if(error);
2895 if ((nfsvers == NFS_VER2) && (len > *buflenp)) {
2896 error = EBADRPC;
2897 goto nfsmout;
2898 }
2899 if (len >= *buflenp) {
2900 if (np->n_size && (np->n_size < *buflenp)) {
2901 len = (size_t)np->n_size;
2902 } else {
2903 len = *buflenp - 1;
2904 }
2905 }
2906 nfsm_chain_get_opaque(error, &nmrep, len, buf);
2907 if (!error) {
2908 *buflenp = len;
2909 }
2910 nfsmout:
2911 if (!lockerror) {
2912 nfs_node_unlock(np);
2913 }
2914 nfsm_chain_cleanup(&nmreq);
2915 nfsm_chain_cleanup(&nmrep);
2916 return error;
2917 }
2918
2919 /*
2920 * NFS read RPC call
2921 * Ditto above
2922 */
2923 int
nfs_read_rpc(nfsnode_t np,uio_t uio,vfs_context_t ctx)2924 nfs_read_rpc(nfsnode_t np, uio_t uio, vfs_context_t ctx)
2925 {
2926 struct nfsmount *nmp;
2927 int error = 0, nfsvers, eof = 0;
2928 size_t nmrsize, len, retlen;
2929 user_ssize_t tsiz;
2930 off_t txoffset;
2931 struct nfsreq *req;
2932 #if CONFIG_NFS4
2933 uint32_t stategenid = 0, restart = 0;
2934 #endif
2935 FSDBG_TOP(536, np, uio_offset(uio), uio_resid(uio), 0);
2936 nmp = NFSTONMP(np);
2937 if (nfs_mount_gone(nmp)) {
2938 return ENXIO;
2939 }
2940 nfsvers = nmp->nm_vers;
2941 nmrsize = nmp->nm_rsize;
2942
2943 txoffset = uio_offset(uio);
2944 tsiz = uio_resid(uio);
2945 if ((nfsvers == NFS_VER2) && ((uint64_t)(txoffset + tsiz) > 0xffffffffULL)) {
2946 FSDBG_BOT(536, np, uio_offset(uio), uio_resid(uio), EFBIG);
2947 return EFBIG;
2948 }
2949
2950 req = zalloc_flags(nfs_req_zone, Z_WAITOK);
2951 while (tsiz > 0) {
2952 len = retlen = (tsiz > (user_ssize_t)nmrsize) ? nmrsize : (size_t)tsiz;
2953 FSDBG(536, np, txoffset, len, 0);
2954 if (np->n_flag & NREVOKE) {
2955 error = EIO;
2956 break;
2957 }
2958 #if CONFIG_NFS4
2959 if (nmp->nm_vers >= NFS_VER4) {
2960 stategenid = nmp->nm_stategenid;
2961 }
2962 #endif
2963 error = nmp->nm_funcs->nf_read_rpc_async(np, txoffset, len,
2964 vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, &req);
2965 if (!error) {
2966 error = nmp->nm_funcs->nf_read_rpc_async_finish(np, req, uio, &retlen, &eof);
2967 }
2968 #if CONFIG_NFS4
2969 if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) &&
2970 (++restart <= nfs_mount_state_max_restarts(nmp))) { /* guard against no progress */
2971 lck_mtx_lock(&nmp->nm_lock);
2972 if ((error != NFSERR_GRACE) && (stategenid == nmp->nm_stategenid)) {
2973 NP(np, "nfs_read_rpc: error %d, initiating recovery", error);
2974 nfs_need_recover(nmp, error);
2975 }
2976 lck_mtx_unlock(&nmp->nm_lock);
2977 if (np->n_flag & NREVOKE) {
2978 error = EIO;
2979 } else {
2980 if (error == NFSERR_GRACE) {
2981 tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
2982 }
2983 if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
2984 continue;
2985 }
2986 }
2987 }
2988 #endif
2989 if (error) {
2990 break;
2991 }
2992 txoffset += retlen;
2993 tsiz -= retlen;
2994 if (nfsvers != NFS_VER2) {
2995 if (eof || (retlen == 0)) {
2996 tsiz = 0;
2997 }
2998 } else if (retlen < len) {
2999 tsiz = 0;
3000 }
3001 }
3002
3003 NFS_ZFREE(nfs_req_zone, req);
3004 FSDBG_BOT(536, np, eof, uio_resid(uio), error);
3005 return error;
3006 }
3007
3008 int
nfs3_read_rpc_async(nfsnode_t np,off_t offset,size_t len,thread_t thd,kauth_cred_t cred,struct nfsreq_cbinfo * cb,struct nfsreq ** reqp)3009 nfs3_read_rpc_async(
3010 nfsnode_t np,
3011 off_t offset,
3012 size_t len,
3013 thread_t thd,
3014 kauth_cred_t cred,
3015 struct nfsreq_cbinfo *cb,
3016 struct nfsreq **reqp)
3017 {
3018 struct nfsmount *nmp;
3019 int error = 0, nfsvers;
3020 struct nfsm_chain nmreq;
3021
3022 nmp = NFSTONMP(np);
3023 if (nfs_mount_gone(nmp)) {
3024 return ENXIO;
3025 }
3026 nfsvers = nmp->nm_vers;
3027
3028 nfsm_chain_null(&nmreq);
3029 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(nfsvers) + 3 * NFSX_UNSIGNED);
3030 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
3031 if (nfsvers == NFS_VER3) {
3032 nfsm_chain_add_64(error, &nmreq, offset);
3033 nfsm_chain_add_32(error, &nmreq, len);
3034 } else {
3035 nfsm_chain_add_32(error, &nmreq, offset);
3036 nfsm_chain_add_32(error, &nmreq, len);
3037 nfsm_chain_add_32(error, &nmreq, 0);
3038 }
3039 nfsm_chain_build_done(error, &nmreq);
3040 nfsmout_if(error);
3041 error = nfs_request_async(np, NULL, &nmreq, NFSPROC_READ, thd, cred, NULL, 0, cb, reqp);
3042 nfsmout:
3043 nfsm_chain_cleanup(&nmreq);
3044 return error;
3045 }
3046
3047 int
nfs3_read_rpc_async_finish(nfsnode_t np,struct nfsreq * req,uio_t uio,size_t * lenp,int * eofp)3048 nfs3_read_rpc_async_finish(
3049 nfsnode_t np,
3050 struct nfsreq *req,
3051 uio_t uio,
3052 size_t *lenp,
3053 int *eofp)
3054 {
3055 int error = 0, lockerror, nfsvers, status = 0, eof = 0;
3056 uint32_t retlen = 0;
3057 uint64_t xid;
3058 struct nfsmount *nmp;
3059 struct nfsm_chain nmrep;
3060
3061 nmp = NFSTONMP(np);
3062 if (nfs_mount_gone(nmp)) {
3063 nfs_request_async_cancel(req);
3064 return ENXIO;
3065 }
3066 nfsvers = nmp->nm_vers;
3067
3068 nfsm_chain_null(&nmrep);
3069
3070 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
3071 if (error == EINPROGRESS) { /* async request restarted */
3072 return error;
3073 }
3074
3075 if ((lockerror = nfs_node_lock(np))) {
3076 error = lockerror;
3077 }
3078 if (nfsvers == NFS_VER3) {
3079 nfsm_chain_postop_attr_update(error, &nmrep, np, &xid);
3080 }
3081 if (!error) {
3082 error = status;
3083 }
3084 if (nfsvers == NFS_VER3) {
3085 nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED);
3086 nfsm_chain_get_32(error, &nmrep, eof);
3087 } else {
3088 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
3089 }
3090 if (!lockerror) {
3091 nfs_node_unlock(np);
3092 }
3093 nfsm_chain_get_32(error, &nmrep, retlen);
3094 if ((nfsvers == NFS_VER2) && (retlen > *lenp)) {
3095 error = EBADRPC;
3096 }
3097 nfsmout_if(error);
3098 error = nfsm_chain_get_uio(&nmrep, MIN(retlen, *lenp), uio);
3099 if (eofp) {
3100 if (nfsvers == NFS_VER3) {
3101 if (!eof && !retlen) {
3102 eof = 1;
3103 }
3104 } else if (retlen < *lenp) {
3105 eof = 1;
3106 }
3107 *eofp = eof;
3108 }
3109 *lenp = MIN(retlen, *lenp);
3110 nfsmout:
3111 nfsm_chain_cleanup(&nmrep);
3112 return error;
3113 }
3114
3115 /*
3116 * NFS write call
3117 */
3118 int
nfs_vnop_write(struct vnop_write_args * ap)3119 nfs_vnop_write(
3120 struct vnop_write_args /* {
3121 * struct vnodeop_desc *a_desc;
3122 * vnode_t a_vp;
3123 * struct uio *a_uio;
3124 * int a_ioflag;
3125 * vfs_context_t a_context;
3126 * } */*ap)
3127 {
3128 vfs_context_t ctx = ap->a_context;
3129 uio_t uio = ap->a_uio;
3130 vnode_t vp = ap->a_vp;
3131 nfsnode_t np = VTONFS(vp);
3132 int ioflag = ap->a_ioflag;
3133 struct nfsbuf *bp;
3134 struct nfsmount *nmp = VTONMP(vp);
3135 daddr64_t lbn;
3136 uint32_t biosize;
3137 int error = 0;
3138 off_t n, on;
3139 int n32;
3140 off_t boff, start, end;
3141 uio_t auio;
3142 thread_t thd;
3143 kauth_cred_t cred;
3144
3145 FSDBG_TOP(515, np, uio_offset(uio), uio_resid(uio), ioflag);
3146
3147 if (vnode_vtype(vp) != VREG) {
3148 FSDBG_BOT(515, np, uio_offset(uio), uio_resid(uio), EIO);
3149 return EIO;
3150 }
3151
3152 thd = vfs_context_thread(ctx);
3153 cred = vfs_context_ucred(ctx);
3154
3155 nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
3156
3157 if ((error = nfs_node_lock(np))) {
3158 nfs_data_unlock(np);
3159 FSDBG_BOT(515, np, uio_offset(uio), uio_resid(uio), error);
3160 return NFS_MAPERR(error);
3161 }
3162 np->n_wrbusy++;
3163
3164 if (np->n_flag & NWRITEERR) {
3165 error = np->n_error;
3166 np->n_flag &= ~NWRITEERR;
3167 }
3168 if (np->n_flag & NNEEDINVALIDATE) {
3169 np->n_flag &= ~NNEEDINVALIDATE;
3170 nfs_node_unlock(np);
3171 nfs_data_unlock(np);
3172 nfs_vinvalbuf1(vp, V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
3173 nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
3174 } else {
3175 nfs_node_unlock(np);
3176 }
3177 if (error) {
3178 goto out;
3179 }
3180
3181 biosize = nmp->nm_biosize;
3182
3183 if (ioflag & (IO_APPEND | IO_SYNC)) {
3184 nfs_node_lock_force(np);
3185 if (np->n_flag & NMODIFIED) {
3186 NATTRINVALIDATE(np);
3187 nfs_node_unlock(np);
3188 nfs_data_unlock(np);
3189 error = nfs_vinvalbuf1(vp, V_SAVE, ctx, 1);
3190 nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
3191 if (error) {
3192 FSDBG(515, np, uio_offset(uio), 0x10bad01, error);
3193 goto out;
3194 }
3195 } else {
3196 nfs_node_unlock(np);
3197 }
3198 if (ioflag & IO_APPEND) {
3199 nfs_data_unlock(np);
3200 /* nfs_getattr() will check changed and purge caches */
3201 error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED);
3202 /* we'll be extending the file, so take the data lock exclusive */
3203 nfs_data_lock(np, NFS_DATA_LOCK_EXCLUSIVE);
3204 if (error) {
3205 FSDBG(515, np, uio_offset(uio), 0x10bad02, error);
3206 goto out;
3207 }
3208 uio_setoffset(uio, np->n_size);
3209 }
3210 }
3211 if (uio_offset(uio) < 0) {
3212 error = EINVAL;
3213 FSDBG_BOT(515, np, uio_offset(uio), 0xbad0ff, error);
3214 goto out;
3215 }
3216 if (uio_resid(uio) == 0) {
3217 goto out;
3218 }
3219
3220 if (((uio_offset(uio) + uio_resid(uio)) > (off_t)np->n_size) && !(ioflag & IO_APPEND)) {
3221 /*
3222 * It looks like we'll be extending the file, so take the data lock exclusive.
3223 */
3224 nfs_data_unlock(np);
3225 nfs_data_lock(np, NFS_DATA_LOCK_EXCLUSIVE);
3226
3227 /*
3228 * Also, if the write begins after the previous EOF buffer, make sure to zero
3229 * and validate the new bytes in that buffer.
3230 */
3231 struct nfsbuf *eofbp = NULL;
3232 daddr64_t eofbn = np->n_size / biosize;
3233 uint32_t eofoff = np->n_size % biosize;
3234 lbn = uio_offset(uio) / biosize;
3235
3236 if (eofoff && (eofbn < lbn)) {
3237 if ((error = nfs_buf_get(np, eofbn, biosize, thd, NBLK_WRITE | NBLK_ONLYVALID, &eofbp))) {
3238 goto out;
3239 }
3240 np->n_size += (biosize - eofoff);
3241 nfs_node_lock_force(np);
3242 CLR(np->n_flag, NUPDATESIZE);
3243 np->n_flag |= NMODIFIED;
3244 nfs_node_unlock(np);
3245 FSDBG(516, np, np->n_size, np->n_vattr.nva_size, 0xf00d0001);
3246 ubc_setsize(vp, (off_t)np->n_size); /* XXX errors */
3247 if (eofbp) {
3248 /*
3249 * For the old last page, don't zero bytes if there
3250 * are invalid bytes in that page (i.e. the page isn't
3251 * currently valid).
3252 * For pages after the old last page, zero them and
3253 * mark them as valid.
3254 */
3255 char *d;
3256 int i;
3257 if (ioflag & IO_NOCACHE) {
3258 SET(eofbp->nb_flags, NB_NOCACHE);
3259 }
3260 NFS_BUF_MAP(eofbp);
3261 FSDBG(516, eofbp, eofoff, biosize - eofoff, 0xe0fff01e);
3262 d = eofbp->nb_data;
3263 i = eofoff / PAGE_SIZE;
3264 while (eofoff < biosize) {
3265 int poff = eofoff & PAGE_MASK;
3266 if (!poff || NBPGVALID(eofbp, i)) {
3267 bzero(d + eofoff, PAGE_SIZE - poff);
3268 NBPGVALID_SET(eofbp, i);
3269 }
3270 eofoff += PAGE_SIZE - poff;
3271 i++;
3272 }
3273 nfs_buf_release(eofbp, 1);
3274 }
3275 }
3276 }
3277
3278 do {
3279 OSAddAtomic64(1, &nfsclntstats.biocache_writes);
3280 lbn = uio_offset(uio) / biosize;
3281 on = uio_offset(uio) % biosize;
3282 n = biosize - on;
3283 if (uio_resid(uio) < n) {
3284 n = uio_resid(uio);
3285 }
3286 again:
3287 /*
3288 * Get a cache block for writing. The range to be written is
3289 * (off..off+n) within the block. We ensure that the block
3290 * either has no dirty region or that the given range is
3291 * contiguous with the existing dirty region.
3292 */
3293 error = nfs_buf_get(np, lbn, biosize, thd, NBLK_WRITE, &bp);
3294 if (error) {
3295 goto out;
3296 }
3297 /* map the block because we know we're going to write to it */
3298 NFS_BUF_MAP(bp);
3299
3300 if (ioflag & IO_NOCACHE) {
3301 SET(bp->nb_flags, NB_NOCACHE);
3302 }
3303
3304 if (!IS_VALID_CRED(bp->nb_wcred)) {
3305 kauth_cred_ref(cred);
3306 bp->nb_wcred = cred;
3307 }
3308
3309 /*
3310 * If there's already a dirty range AND dirty pages in this block we
3311 * need to send a commit AND write the dirty pages before continuing.
3312 *
3313 * If there's already a dirty range OR dirty pages in this block
3314 * and the new write range is not contiguous with the existing range,
3315 * then force the buffer to be written out now.
3316 * (We used to just extend the dirty range to cover the valid,
3317 * but unwritten, data in between also. But writing ranges
3318 * of data that weren't actually written by an application
3319 * risks overwriting some other client's data with stale data
3320 * that's just masquerading as new written data.)
3321 */
3322 if (bp->nb_dirtyend > 0) {
3323 if (on > bp->nb_dirtyend || (on + n) < bp->nb_dirtyoff || nfs_buf_pgs_is_set(&bp->nb_dirty)) {
3324 FSDBG(515, np, uio_offset(uio), bp, 0xd15c001);
3325 /* write/commit buffer "synchronously" */
3326 /* (NB_STABLE indicates that data writes should be FILESYNC) */
3327 CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL));
3328 SET(bp->nb_flags, (NB_ASYNC | NB_STABLE));
3329 error = nfs_buf_write(bp);
3330 if (error) {
3331 goto out;
3332 }
3333 goto again;
3334 }
3335 } else if (nfs_buf_pgs_is_set(&bp->nb_dirty)) {
3336 off_t firstpg = 0, lastpg = 0;
3337 nfsbufpgs pagemask, pagemaskand;
3338 /* calculate write range pagemask */
3339 if (n > 0) {
3340 firstpg = on / PAGE_SIZE;
3341 lastpg = (on + n - 1) / PAGE_SIZE;
3342 nfs_buf_pgs_set_pages_between(&pagemask, firstpg, lastpg + 1);
3343 } else {
3344 NBPGS_ERASE(&pagemask);
3345 }
3346 /* check if there are dirty pages outside the write range */
3347 nfs_buf_pgs_bit_not(&pagemask);
3348 nfs_buf_pgs_bit_and(&bp->nb_dirty, &pagemask, &pagemaskand);
3349 if (nfs_buf_pgs_is_set(&pagemaskand)) {
3350 FSDBG(515, np, uio_offset(uio), bp, 0xd15c002);
3351 /* write/commit buffer "synchronously" */
3352 /* (NB_STABLE indicates that data writes should be FILESYNC) */
3353 CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL));
3354 SET(bp->nb_flags, (NB_ASYNC | NB_STABLE));
3355 error = nfs_buf_write(bp);
3356 if (error) {
3357 goto out;
3358 }
3359 goto again;
3360 }
3361 /* if the first or last pages are already dirty */
3362 /* make sure that the dirty range encompasses those pages */
3363 if (NBPGDIRTY(bp, firstpg) || NBPGDIRTY(bp, lastpg)) {
3364 FSDBG(515, np, uio_offset(uio), bp, 0xd15c003);
3365 bp->nb_dirtyoff = MIN(on, firstpg * PAGE_SIZE);
3366 if (NBPGDIRTY(bp, lastpg)) {
3367 bp->nb_dirtyend = (lastpg + 1) * PAGE_SIZE;
3368 /* clip to EOF */
3369 if (NBOFF(bp) + bp->nb_dirtyend > (off_t)np->n_size) {
3370 bp->nb_dirtyend = np->n_size - NBOFF(bp);
3371 if (bp->nb_dirtyoff >= bp->nb_dirtyend) {
3372 bp->nb_dirtyoff = bp->nb_dirtyend = 0;
3373 }
3374 }
3375 } else {
3376 bp->nb_dirtyend = on + n;
3377 }
3378 }
3379 }
3380
3381 /*
3382 * Are we extending the size of the file with this write?
3383 * If so, update file size now that we have the block.
3384 * If there was a partial buf at the old eof, validate
3385 * and zero the new bytes.
3386 */
3387 if ((uio_offset(uio) + n) > (off_t)np->n_size) {
3388 daddr64_t eofbn = np->n_size / biosize;
3389 int neweofoff = (uio_offset(uio) + n) % biosize;
3390
3391 FSDBG(515, 0xb1ffa000, uio_offset(uio) + n, eofoff, neweofoff);
3392
3393 /* if we're extending within the same last block */
3394 /* and the block is flagged as being cached... */
3395 if ((lbn == eofbn) && ISSET(bp->nb_flags, NB_CACHE)) {
3396 /* ...check that all pages in buffer are valid */
3397 int endpg = ((neweofoff ? neweofoff : biosize) - 1) / PAGE_SIZE;
3398 nfsbufpgs pagemask, pagemaskand;
3399 /* pagemask only has to extend to last page being written to */
3400 nfs_buf_pgs_get_page_mask(&pagemask, endpg + 1);
3401 FSDBG(515, 0xb1ffa001, bp->nb_valid, pagemask, 0);
3402 nfs_buf_pgs_bit_and(&bp->nb_valid, &pagemask, &pagemaskand);
3403 if (!NBPGS_IS_EQUAL(&pagemaskand, &pagemask)) {
3404 /* zerofill any hole */
3405 if (on > bp->nb_validend) {
3406 for (off_t i = bp->nb_validend / PAGE_SIZE; i <= (on - 1) / PAGE_SIZE; i++) {
3407 NBPGVALID_SET(bp, i);
3408 }
3409 NFS_BUF_MAP(bp);
3410 FSDBG(516, bp, bp->nb_validend, on - bp->nb_validend, 0xf01e);
3411 NFS_BZERO((char *)bp->nb_data + bp->nb_validend, on - bp->nb_validend);
3412 }
3413 /* zerofill any trailing data in the last page */
3414 if (neweofoff) {
3415 NFS_BUF_MAP(bp);
3416 FSDBG(516, bp, neweofoff, PAGE_SIZE - (neweofoff & PAGE_MASK), 0xe0f);
3417 bzero((char *)bp->nb_data + neweofoff,
3418 PAGE_SIZE - (neweofoff & PAGE_MASK));
3419 }
3420 }
3421 }
3422 np->n_size = uio_offset(uio) + n;
3423 nfs_node_lock_force(np);
3424 CLR(np->n_flag, NUPDATESIZE);
3425 np->n_flag |= NMODIFIED;
3426 nfs_node_unlock(np);
3427 FSDBG(516, np, np->n_size, np->n_vattr.nva_size, 0xf00d0001);
3428 ubc_setsize(vp, (off_t)np->n_size); /* XXX errors */
3429 }
3430 /*
3431 * If dirtyend exceeds file size, chop it down. This should
3432 * not occur unless there is a race.
3433 */
3434 if (NBOFF(bp) + bp->nb_dirtyend > (off_t)np->n_size) {
3435 bp->nb_dirtyend = np->n_size - NBOFF(bp);
3436 if (bp->nb_dirtyoff >= bp->nb_dirtyend) {
3437 bp->nb_dirtyoff = bp->nb_dirtyend = 0;
3438 }
3439 }
3440 /*
3441 * UBC doesn't handle partial pages, so we need to make sure
3442 * that any pages left in the page cache are completely valid.
3443 *
3444 * Writes that are smaller than a block are delayed if they
3445 * don't extend to the end of the block.
3446 *
3447 * If the block isn't (completely) cached, we may need to read
3448 * in some parts of pages that aren't covered by the write.
3449 * If the write offset (on) isn't page aligned, we'll need to
3450 * read the start of the first page being written to. Likewise,
3451 * if the offset of the end of the write (on+n) isn't page aligned,
3452 * we'll need to read the end of the last page being written to.
3453 *
3454 * Notes:
3455 * We don't want to read anything we're just going to write over.
3456 * We don't want to read anything we're just going drop when the
3457 * I/O is complete (i.e. don't do reads for NOCACHE requests).
3458 * We don't want to issue multiple I/Os if we don't have to
3459 * (because they're synchronous rpcs).
3460 * We don't want to read anything we already have modified in the
3461 * page cache.
3462 */
3463 if (!ISSET(bp->nb_flags, NB_CACHE) && (n < biosize)) {
3464 off_t firstpgoff, lastpgoff, firstpg, lastpg, dirtypg;
3465 start = end = -1;
3466 firstpg = on / PAGE_SIZE;
3467 firstpgoff = on & PAGE_MASK;
3468 lastpg = (on + n - 1) / PAGE_SIZE;
3469 lastpgoff = (on + n) & PAGE_MASK;
3470 if (firstpgoff && !NBPGVALID(bp, firstpg)) {
3471 /* need to read start of first page */
3472 start = firstpg * PAGE_SIZE;
3473 end = start + firstpgoff;
3474 }
3475 if (lastpgoff && !NBPGVALID(bp, lastpg)) {
3476 /* need to read end of last page */
3477 if (start < 0) {
3478 start = (lastpg * PAGE_SIZE) + lastpgoff;
3479 }
3480 end = (lastpg + 1) * PAGE_SIZE;
3481 }
3482 if (ISSET(bp->nb_flags, NB_NOCACHE)) {
3483 /*
3484 * For nocache writes, if there is any partial page at the
3485 * start or end of the write range, then we do the write
3486 * synchronously to make sure that we can drop the data
3487 * from the cache as soon as the WRITE finishes. Normally,
3488 * we would do an unstable write and not drop the data until
3489 * it was committed. But doing that here would risk allowing
3490 * invalid data to be read from the cache between the WRITE
3491 * and the COMMIT.
3492 * (NB_STABLE indicates that data writes should be FILESYNC)
3493 */
3494 if (end > start) {
3495 SET(bp->nb_flags, NB_STABLE);
3496 }
3497 goto skipread;
3498 }
3499 if (end > start) {
3500 /* need to read the data in range: start...end-1 */
3501
3502 /* first, check for dirty pages in between */
3503 /* if there are, we'll have to do two reads because */
3504 /* we don't want to overwrite the dirty pages. */
3505 for (dirtypg = start / PAGE_SIZE; dirtypg <= (end - 1) / PAGE_SIZE; dirtypg++) {
3506 if (NBPGDIRTY(bp, dirtypg)) {
3507 break;
3508 }
3509 }
3510
3511 /* if start is at beginning of page, try */
3512 /* to get any preceeding pages as well. */
3513 if (!(start & PAGE_MASK)) {
3514 /* stop at next dirty/valid page or start of block */
3515 for (; start > 0; start -= PAGE_SIZE) {
3516 if (NBPGVALID(bp, ((start - 1) / PAGE_SIZE))) {
3517 break;
3518 }
3519 }
3520 }
3521
3522 NFS_BUF_MAP(bp);
3523 /* setup uio for read(s) */
3524 boff = NBOFF(bp);
3525 auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
3526
3527 if (dirtypg <= (end - 1) / PAGE_SIZE) {
3528 /* there's a dirty page in the way, so just do two reads */
3529 /* we'll read the preceding data here */
3530 uio_reset(auio, boff + start, UIO_SYSSPACE, UIO_READ);
3531 NFS_UIO_ADDIOV(auio, CAST_USER_ADDR_T(bp->nb_data + start), on - start);
3532 error = nfs_read_rpc(np, auio, ctx);
3533 if (error) {
3534 /* Free allocated uio buffer */
3535 uio_free(auio);
3536 /* couldn't read the data, so treat buffer as synchronous NOCACHE */
3537 SET(bp->nb_flags, (NB_NOCACHE | NB_STABLE));
3538 goto skipread;
3539 }
3540 if (uio_resid(auio) > 0) {
3541 FSDBG(516, bp, (caddr_t)uio_curriovbase(auio) - bp->nb_data, uio_resid(auio), 0xd00dee01);
3542 bzero(CAST_DOWN(caddr_t, uio_curriovbase(auio)), uio_resid(auio));
3543 }
3544 if (!error) {
3545 /* update validoff/validend if necessary */
3546 if ((bp->nb_validoff < 0) || (bp->nb_validoff > start)) {
3547 bp->nb_validoff = start;
3548 }
3549 if ((bp->nb_validend < 0) || (bp->nb_validend < on)) {
3550 bp->nb_validend = on;
3551 }
3552 if ((off_t)np->n_size > boff + bp->nb_validend) {
3553 bp->nb_validend = MIN(np->n_size - (boff + start), biosize);
3554 }
3555 /* validate any pages before the write offset */
3556 for (; start < on / PAGE_SIZE; start += PAGE_SIZE) {
3557 NBPGVALID_SET(bp, start / PAGE_SIZE);
3558 }
3559 }
3560 /* adjust start to read any trailing data */
3561 start = on + n;
3562 }
3563
3564 /* if end is at end of page, try to */
3565 /* get any following pages as well. */
3566 if (!(end & PAGE_MASK)) {
3567 /* stop at next valid page or end of block */
3568 for (; end < biosize; end += PAGE_SIZE) {
3569 if (NBPGVALID(bp, end / PAGE_SIZE)) {
3570 break;
3571 }
3572 }
3573 }
3574
3575 if (((boff + start) >= (off_t)np->n_size) ||
3576 ((start >= on) && ((boff + on + n) >= (off_t)np->n_size))) {
3577 /*
3578 * Either this entire read is beyond the current EOF
3579 * or the range that we won't be modifying (on+n...end)
3580 * is all beyond the current EOF.
3581 * No need to make a trip across the network to
3582 * read nothing. So, just zero the buffer instead.
3583 */
3584 FSDBG(516, bp, start, end - start, 0xd00dee00);
3585 NFS_BZERO(bp->nb_data + start, end - start);
3586 error = 0;
3587 } else {
3588 /* now we'll read the (rest of the) data */
3589 uio_reset(auio, boff + start, UIO_SYSSPACE, UIO_READ);
3590 NFS_UIO_ADDIOV(auio, CAST_USER_ADDR_T(bp->nb_data + start), end - start);
3591 error = nfs_read_rpc(np, auio, ctx);
3592 if (error) {
3593 /* couldn't read the data, so treat buffer as synchronous NOCACHE */
3594 SET(bp->nb_flags, (NB_NOCACHE | NB_STABLE));
3595 /* Free allocated uio buffer */
3596 uio_free(auio);
3597 goto skipread;
3598 }
3599 if (uio_resid(auio) > 0) {
3600 FSDBG(516, bp, (caddr_t)uio_curriovbase(auio) - bp->nb_data, uio_resid(auio), 0xd00dee02);
3601 bzero(CAST_DOWN(caddr_t, uio_curriovbase(auio)), uio_resid(auio));
3602 }
3603 }
3604 if (!error) {
3605 /* update validoff/validend if necessary */
3606 if ((bp->nb_validoff < 0) || (bp->nb_validoff > start)) {
3607 bp->nb_validoff = start;
3608 }
3609 if ((bp->nb_validend < 0) || (bp->nb_validend < end)) {
3610 bp->nb_validend = end;
3611 }
3612 if ((off_t)np->n_size > boff + bp->nb_validend) {
3613 bp->nb_validend = MIN(np->n_size - (boff + start), biosize);
3614 }
3615 /* validate any pages before the write offset's page */
3616 for (; start < (off_t)trunc_page_64(on); start += PAGE_SIZE) {
3617 NBPGVALID_SET(bp, start / PAGE_SIZE);
3618 }
3619 /* validate any pages after the range of pages being written to */
3620 for (; (end - 1) > (off_t)round_page_64(on + n - 1); end -= PAGE_SIZE) {
3621 NBPGVALID_SET(bp, (end - 1) / PAGE_SIZE);
3622 }
3623 }
3624 /* Free allocated uio buffer */
3625 uio_free(auio);
3626 /* Note: pages being written to will be validated when written */
3627 }
3628 }
3629 skipread:
3630
3631 if (ISSET(bp->nb_flags, NB_ERROR)) {
3632 error = bp->nb_error;
3633 nfs_buf_release(bp, 1);
3634 goto out;
3635 }
3636
3637 nfs_node_lock_force(np);
3638 np->n_flag |= NMODIFIED;
3639 nfs_node_unlock(np);
3640
3641 NFS_BUF_MAP(bp);
3642 if (n < 0) {
3643 error = EINVAL;
3644 } else {
3645 n32 = n > INT_MAX ? INT_MAX : (int)n;
3646 error = uiomove(bp->nb_data + on, n32, uio);
3647 if (!error && n > n32) {
3648 error = uiomove(bp->nb_data + on + n32, (int)(n - n32), uio);
3649 }
3650 }
3651 if (error) {
3652 SET(bp->nb_flags, NB_ERROR);
3653 nfs_buf_release(bp, 1);
3654 goto out;
3655 }
3656
3657 /* validate any pages written to */
3658 start = on & ~PAGE_MASK;
3659 for (; start < on + n; start += PAGE_SIZE) {
3660 NBPGVALID_SET(bp, start / PAGE_SIZE);
3661 /*
3662 * This may seem a little weird, but we don't actually set the
3663 * dirty bits for writes. This is because we keep the dirty range
3664 * in the nb_dirtyoff/nb_dirtyend fields. Also, particularly for
3665 * delayed writes, when we give the pages back to the VM we don't
3666 * want to keep them marked dirty, because when we later write the
3667 * buffer we won't be able to tell which pages were written dirty
3668 * and which pages were mmapped and dirtied.
3669 */
3670 }
3671 if (bp->nb_dirtyend > 0) {
3672 bp->nb_dirtyoff = MIN(on, bp->nb_dirtyoff);
3673 bp->nb_dirtyend = MAX((on + n), bp->nb_dirtyend);
3674 } else {
3675 bp->nb_dirtyoff = on;
3676 bp->nb_dirtyend = on + n;
3677 }
3678 if (bp->nb_validend <= 0 || bp->nb_validend < bp->nb_dirtyoff ||
3679 bp->nb_validoff > bp->nb_dirtyend) {
3680 bp->nb_validoff = bp->nb_dirtyoff;
3681 bp->nb_validend = bp->nb_dirtyend;
3682 } else {
3683 bp->nb_validoff = MIN(bp->nb_validoff, bp->nb_dirtyoff);
3684 bp->nb_validend = MAX(bp->nb_validend, bp->nb_dirtyend);
3685 }
3686 if (!ISSET(bp->nb_flags, NB_CACHE)) {
3687 nfs_buf_normalize_valid_range(np, bp);
3688 }
3689
3690 /*
3691 * Since this block is being modified, it must be written
3692 * again and not just committed.
3693 */
3694 if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
3695 nfs_node_lock_force(np);
3696 if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
3697 np->n_needcommitcnt--;
3698 CHECK_NEEDCOMMITCNT(np);
3699 }
3700 CLR(bp->nb_flags, NB_NEEDCOMMIT);
3701 nfs_node_unlock(np);
3702 }
3703
3704 if (ioflag & IO_SYNC) {
3705 error = nfs_buf_write(bp);
3706 if (error) {
3707 goto out;
3708 }
3709 if (np->n_needcommitcnt >= NFS_A_LOT_OF_NEEDCOMMITS) {
3710 nfs_flushcommits(np, 1);
3711 }
3712 } else if (((n + on) == biosize) || (ioflag & IO_APPEND) ||
3713 (ioflag & IO_NOCACHE) || ISSET(bp->nb_flags, NB_NOCACHE)) {
3714 SET(bp->nb_flags, NB_ASYNC);
3715 error = nfs_buf_write(bp);
3716 if (error) {
3717 goto out;
3718 }
3719 } else {
3720 /* If the block wasn't already delayed: charge for the write */
3721 if (!ISSET(bp->nb_flags, NB_DELWRI)) {
3722 proc_t p = vfs_context_proc(ctx);
3723 if (p && p->p_stats) {
3724 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock);
3725 }
3726 }
3727 nfs_buf_write_delayed(bp);
3728 }
3729
3730 } while (uio_resid(uio) > 0 && n > 0);
3731
3732 out:
3733 nfs_node_lock_force(np);
3734 np->n_wrbusy--;
3735 if ((ioflag & IO_SYNC) && !np->n_wrbusy && !np->n_numoutput) {
3736 np->n_flag &= ~NMODIFIED;
3737 }
3738 nfs_node_unlock(np);
3739 nfs_data_unlock(np);
3740 FSDBG_BOT(515, np, uio_offset(uio), uio_resid(uio), error);
3741 return NFS_MAPERR(error);
3742 }
3743
3744
3745 /*
3746 * NFS write call
3747 */
3748 int
nfs_write_rpc(nfsnode_t np,uio_t uio,vfs_context_t ctx,int * iomodep,uint64_t * wverfp)3749 nfs_write_rpc(
3750 nfsnode_t np,
3751 uio_t uio,
3752 vfs_context_t ctx,
3753 int *iomodep,
3754 uint64_t *wverfp)
3755 {
3756 return nfs_write_rpc2(np, uio, vfs_context_thread(ctx), vfs_context_ucred(ctx), iomodep, wverfp);
3757 }
3758
3759 int
nfs_write_rpc2(nfsnode_t np,uio_t uio,thread_t thd,kauth_cred_t cred,int * iomodep,uint64_t * wverfp)3760 nfs_write_rpc2(
3761 nfsnode_t np,
3762 uio_t uio,
3763 thread_t thd,
3764 kauth_cred_t cred,
3765 int *iomodep,
3766 uint64_t *wverfp)
3767 {
3768 struct nfsmount *nmp;
3769 int error = 0, nfsvers;
3770 int wverfset, commit = 0, committed;
3771 uint64_t wverf = 0, wverf2 = 0;
3772 size_t nmwsize, totalsize, tsiz, len, rlen = 0;
3773 struct nfsreq *req;
3774 #if CONFIG_NFS4
3775 uint32_t stategenid = 0, restart = 0;
3776 #endif
3777 uint32_t vrestart = 0;
3778 uio_t uio_write = NULL;
3779
3780 #if DIAGNOSTIC
3781 /* XXX limitation based on need to back up uio on short write */
3782 if (uio_iovcnt(uio) != 1) {
3783 panic("nfs3_write_rpc: iovcnt > 1");
3784 }
3785 #endif
3786 FSDBG_TOP(537, np, uio_offset(uio), uio_resid(uio), *iomodep);
3787 nmp = NFSTONMP(np);
3788 if (nfs_mount_gone(nmp)) {
3789 return ENXIO;
3790 }
3791 nfsvers = nmp->nm_vers;
3792 nmwsize = nmp->nm_wsize;
3793
3794 wverfset = 0;
3795 committed = NFS_WRITE_FILESYNC;
3796
3797 totalsize = tsiz = uio_resid(uio);
3798 if ((nfsvers == NFS_VER2) && ((uint64_t)(uio_offset(uio) + tsiz) > 0xffffffffULL)) {
3799 FSDBG_BOT(537, np, uio_offset(uio), uio_resid(uio), EFBIG);
3800 return EFBIG;
3801 }
3802
3803 uio_write = uio_duplicate(uio);
3804 if (uio_write == NULL) {
3805 return EIO;
3806 }
3807
3808 req = zalloc_flags(nfs_req_zone, Z_WAITOK);
3809 while (tsiz > 0) {
3810 len = (tsiz > nmwsize) ? nmwsize : tsiz;
3811 FSDBG(537, np, uio_offset(uio_write), len, 0);
3812 if (np->n_flag & NREVOKE) {
3813 error = EIO;
3814 break;
3815 }
3816 #if CONFIG_NFS4
3817 if (nmp->nm_vers >= NFS_VER4) {
3818 stategenid = nmp->nm_stategenid;
3819 }
3820 #endif
3821 error = nmp->nm_funcs->nf_write_rpc_async(np, uio_write, len, thd, cred, *iomodep, NULL, &req);
3822 if (!error) {
3823 error = nmp->nm_funcs->nf_write_rpc_async_finish(np, req, &commit, &rlen, &wverf2);
3824 }
3825 nmp = NFSTONMP(np);
3826 if (nfs_mount_gone(nmp)) {
3827 error = ENXIO;
3828 }
3829 #if CONFIG_NFS4
3830 if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) &&
3831 (++restart <= nfs_mount_state_max_restarts(nmp))) { /* guard against no progress */
3832 lck_mtx_lock(&nmp->nm_lock);
3833 if ((error != NFSERR_GRACE) && (stategenid == nmp->nm_stategenid)) {
3834 NP(np, "nfs_write_rpc: error %d, initiating recovery", error);
3835 nfs_need_recover(nmp, error);
3836 }
3837 lck_mtx_unlock(&nmp->nm_lock);
3838 if (np->n_flag & NREVOKE) {
3839 error = EIO;
3840 } else {
3841 if (error == NFSERR_GRACE) {
3842 tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
3843 }
3844 if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
3845 continue;
3846 }
3847 }
3848 }
3849 #endif
3850 if (error) {
3851 break;
3852 }
3853 if (nfsvers == NFS_VER2) {
3854 tsiz -= len;
3855 continue;
3856 }
3857
3858 /* check for a short write */
3859 if (rlen < len) {
3860 /* Reset the uio_write to reflect the actual transfer */
3861 uio_free(uio_write);
3862 uio_write = uio_duplicate(uio);
3863 if (uio_write == NULL) {
3864 error = EIO;
3865 break;
3866 }
3867 uio_update(uio_write, totalsize - (tsiz - rlen));
3868 len = rlen;
3869 }
3870
3871 /* return lowest commit level returned */
3872 if (commit < committed) {
3873 committed = commit;
3874 }
3875
3876 tsiz -= len;
3877
3878 /* check write verifier */
3879 if (!wverfset) {
3880 wverf = wverf2;
3881 wverfset = 1;
3882 } else if (wverf != wverf2) {
3883 /* verifier changed, so we need to restart all the writes */
3884 if (++vrestart > 100) {
3885 /* give up after too many restarts */
3886 error = EIO;
3887 break;
3888 }
3889 /* Reset the uio_write back to the start */
3890 uio_free(uio_write);
3891 uio_write = uio_duplicate(uio);
3892 if (uio_write == NULL) {
3893 error = EIO;
3894 break;
3895 }
3896 committed = NFS_WRITE_FILESYNC;
3897 wverfset = 0;
3898 tsiz = totalsize;
3899 }
3900 }
3901
3902 /* update the uio to reflect the total transfer */
3903 uio_update(uio, totalsize - tsiz);
3904
3905 if (uio_write) {
3906 uio_free(uio_write);
3907 }
3908 if (wverfset && wverfp) {
3909 *wverfp = wverf;
3910 }
3911 *iomodep = committed;
3912 if (error) {
3913 uio_setresid(uio, tsiz);
3914 }
3915 NFS_ZFREE(nfs_req_zone, req);
3916 FSDBG_BOT(537, np, committed, uio_resid(uio), error);
3917 return error;
3918 }
3919
3920 int
nfs3_write_rpc_async(nfsnode_t np,uio_t uio,size_t len,thread_t thd,kauth_cred_t cred,int iomode,struct nfsreq_cbinfo * cb,struct nfsreq ** reqp)3921 nfs3_write_rpc_async(
3922 nfsnode_t np,
3923 uio_t uio,
3924 size_t len,
3925 thread_t thd,
3926 kauth_cred_t cred,
3927 int iomode,
3928 struct nfsreq_cbinfo *cb,
3929 struct nfsreq **reqp)
3930 {
3931 struct nfsmount *nmp;
3932 mount_t mp;
3933 int error = 0, nfsvers;
3934 struct nfsm_chain nmreq;
3935
3936 nmp = NFSTONMP(np);
3937 if (nfs_mount_gone(nmp)) {
3938 return ENXIO;
3939 }
3940 nfsvers = nmp->nm_vers;
3941
3942 /* for async mounts, don't bother sending sync write requests */
3943 if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async &&
3944 ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
3945 iomode = NFS_WRITE_UNSTABLE;
3946 }
3947
3948 nfsm_chain_null(&nmreq);
3949 nfsm_chain_build_alloc_init(error, &nmreq,
3950 NFSX_FH(nfsvers) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
3951 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
3952 if (nfsvers == NFS_VER3) {
3953 nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
3954 nfsm_chain_add_32(error, &nmreq, len);
3955 nfsm_chain_add_32(error, &nmreq, iomode);
3956 } else {
3957 nfsm_chain_add_32(error, &nmreq, 0);
3958 nfsm_chain_add_32(error, &nmreq, uio_offset(uio));
3959 nfsm_chain_add_32(error, &nmreq, 0);
3960 }
3961 nfsm_chain_add_32(error, &nmreq, len);
3962 nfsmout_if(error);
3963 error = nfsm_chain_add_uio(&nmreq, uio, len);
3964 nfsm_chain_build_done(error, &nmreq);
3965 nfsmout_if(error);
3966 error = nfs_request_async(np, NULL, &nmreq, NFSPROC_WRITE, thd, cred, NULL, 0, cb, reqp);
3967 nfsmout:
3968 nfsm_chain_cleanup(&nmreq);
3969 return error;
3970 }
3971
3972 int
nfs3_write_rpc_async_finish(nfsnode_t np,struct nfsreq * req,int * iomodep,size_t * rlenp,uint64_t * wverfp)3973 nfs3_write_rpc_async_finish(
3974 nfsnode_t np,
3975 struct nfsreq *req,
3976 int *iomodep,
3977 size_t *rlenp,
3978 uint64_t *wverfp)
3979 {
3980 struct nfsmount *nmp;
3981 int error = 0, lockerror = ENOENT, nfsvers, status;
3982 int updatemtime = 0, wccpostattr = 0, rlen, committed = NFS_WRITE_FILESYNC;
3983 u_int64_t xid, wverf;
3984 mount_t mp;
3985 struct nfsm_chain nmrep;
3986
3987 nmp = NFSTONMP(np);
3988 if (nfs_mount_gone(nmp)) {
3989 nfs_request_async_cancel(req);
3990 return ENXIO;
3991 }
3992 nfsvers = nmp->nm_vers;
3993
3994 nfsm_chain_null(&nmrep);
3995
3996 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
3997 if (error == EINPROGRESS) { /* async request restarted */
3998 return error;
3999 }
4000 nmp = NFSTONMP(np);
4001 if (nfs_mount_gone(nmp)) {
4002 error = ENXIO;
4003 }
4004 if (!error && (lockerror = nfs_node_lock(np))) {
4005 error = lockerror;
4006 }
4007 if (nfsvers == NFS_VER3) {
4008 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
4009 nfsm_chain_get_wcc_data(error, &nmrep, np, &premtime, &wccpostattr, &xid);
4010 if (nfstimespeccmp(&np->n_mtime, &premtime, ==)) {
4011 updatemtime = 1;
4012 }
4013 if (!error) {
4014 error = status;
4015 }
4016 nfsm_chain_get_32(error, &nmrep, rlen);
4017 nfsmout_if(error);
4018 *rlenp = rlen;
4019 if (rlen <= 0) {
4020 error = NFSERR_IO;
4021 }
4022 nfsm_chain_get_32(error, &nmrep, committed);
4023 nfsm_chain_get_64(error, &nmrep, wverf);
4024 nfsmout_if(error);
4025 if (wverfp) {
4026 *wverfp = wverf;
4027 }
4028 lck_mtx_lock(&nmp->nm_lock);
4029 if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
4030 nmp->nm_verf = wverf;
4031 nmp->nm_state |= NFSSTA_HASWRITEVERF;
4032 } else if (nmp->nm_verf != wverf) {
4033 nmp->nm_verf = wverf;
4034 }
4035 lck_mtx_unlock(&nmp->nm_lock);
4036 } else {
4037 if (!error) {
4038 error = status;
4039 }
4040 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
4041 nfsmout_if(error);
4042 }
4043 if (updatemtime) {
4044 NFS_CHANGED_UPDATE(nfsvers, np, &np->n_vattr);
4045 }
4046 nfsmout:
4047 if (!lockerror) {
4048 nfs_node_unlock(np);
4049 }
4050 nfsm_chain_cleanup(&nmrep);
4051 if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
4052 ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
4053 committed = NFS_WRITE_FILESYNC;
4054 }
4055 *iomodep = committed;
4056 return error;
4057 }
4058
4059 /*
4060 * NFS mknod vnode op
4061 *
4062 * For NFS v2 this is a kludge. Use a create RPC but with the IFMT bits of the
4063 * mode set to specify the file type and the size field for rdev.
4064 */
4065 int
nfs3_vnop_mknod(struct vnop_mknod_args * ap)4066 nfs3_vnop_mknod(
4067 struct vnop_mknod_args /* {
4068 * struct vnodeop_desc *a_desc;
4069 * vnode_t a_dvp;
4070 * vnode_t *a_vpp;
4071 * struct componentname *a_cnp;
4072 * struct vnode_attr *a_vap;
4073 * vfs_context_t a_context;
4074 * } */*ap)
4075 {
4076 vnode_t dvp = ap->a_dvp;
4077 vnode_t *vpp = ap->a_vpp;
4078 struct componentname *cnp = ap->a_cnp;
4079 struct vnode_attr *vap = ap->a_vap;
4080 vfs_context_t ctx = ap->a_context;
4081 vnode_t newvp = NULL;
4082 nfsnode_t np = NULL;
4083 struct nfsmount *nmp;
4084 nfsnode_t dnp = VTONFS(dvp);
4085 struct nfs_vattr *nvattr;
4086 fhandle_t *fh;
4087 int error = 0, lockerror = ENOENT, busyerror = ENOENT, status = 0, wccpostattr = 0;
4088 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
4089 u_int32_t rdev;
4090 u_int64_t xid = 0, dxid;
4091 int nfsvers, gotuid, gotgid;
4092 struct nfsm_chain nmreq, nmrep;
4093 struct nfsreq *req;
4094
4095 nmp = VTONMP(dvp);
4096 if (nfs_mount_gone(nmp)) {
4097 return ENXIO;
4098 }
4099 nfsvers = nmp->nm_vers;
4100
4101 if (!VATTR_IS_ACTIVE(vap, va_type)) {
4102 return EINVAL;
4103 }
4104 if (vap->va_type == VCHR || vap->va_type == VBLK) {
4105 if (!VATTR_IS_ACTIVE(vap, va_rdev)) {
4106 return EINVAL;
4107 }
4108 rdev = vap->va_rdev;
4109 } else if (vap->va_type == VFIFO || vap->va_type == VSOCK) {
4110 rdev = 0xffffffff;
4111 } else {
4112 return ENOTSUP;
4113 }
4114 if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) {
4115 return ENAMETOOLONG;
4116 }
4117
4118 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
4119
4120 VATTR_SET_SUPPORTED(vap, va_mode);
4121 VATTR_SET_SUPPORTED(vap, va_uid);
4122 VATTR_SET_SUPPORTED(vap, va_gid);
4123 VATTR_SET_SUPPORTED(vap, va_data_size);
4124 VATTR_SET_SUPPORTED(vap, va_access_time);
4125 VATTR_SET_SUPPORTED(vap, va_modify_time);
4126 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
4127 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
4128
4129 nfsm_chain_null(&nmreq);
4130 nfsm_chain_null(&nmrep);
4131
4132 fh = zalloc(nfs_fhandle_zone);
4133 req = zalloc_flags(nfs_req_zone, Z_WAITOK);
4134 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
4135
4136 nfsm_chain_build_alloc_init(error, &nmreq,
4137 NFSX_FH(nfsvers) + 4 * NFSX_UNSIGNED +
4138 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(nfsvers));
4139 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
4140 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
4141 if (nfsvers == NFS_VER3) {
4142 nfsm_chain_add_32(error, &nmreq, vtonfs_type(vap->va_type, nfsvers));
4143 nfsm_chain_add_v3sattr(nmp, error, &nmreq, vap);
4144 if (vap->va_type == VCHR || vap->va_type == VBLK) {
4145 nfsm_chain_add_32(error, &nmreq, major(vap->va_rdev));
4146 nfsm_chain_add_32(error, &nmreq, minor(vap->va_rdev));
4147 }
4148 } else {
4149 nfsm_chain_add_v2sattr(error, &nmreq, vap, rdev);
4150 }
4151 nfsm_chain_build_done(error, &nmreq);
4152 if (!error) {
4153 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
4154 }
4155 nfsmout_if(error);
4156
4157 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_MKNOD,
4158 vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req);
4159 if (!error) {
4160 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
4161 }
4162
4163 if ((lockerror = nfs_node_lock(dnp))) {
4164 error = lockerror;
4165 }
4166 /* XXX no EEXIST kludge here? */
4167 dxid = xid;
4168 if (!error && !status) {
4169 if (dnp->n_flag & NNEGNCENTRIES) {
4170 dnp->n_flag &= ~NNEGNCENTRIES;
4171 cache_purge_negatives(dvp);
4172 }
4173 error = nfsm_chain_get_fh_attr(nmp, &nmrep, dnp, ctx, nfsvers, &xid, fh, nvattr);
4174 }
4175 if (nfsvers == NFS_VER3) {
4176 nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &dxid);
4177 }
4178 if (!error) {
4179 error = status;
4180 }
4181 nfsmout:
4182 nfsm_chain_cleanup(&nmreq);
4183 nfsm_chain_cleanup(&nmrep);
4184
4185 if (!lockerror) {
4186 dnp->n_flag |= NMODIFIED;
4187 /* if directory hadn't changed, update namecache mtime */
4188 if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) {
4189 NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr);
4190 }
4191 nfs_node_unlock(dnp);
4192 /* nfs_getattr() will check changed and purge caches */
4193 nfs_getattr(dnp, NULL, ctx, wccpostattr ? NGA_CACHED : NGA_UNCACHED);
4194 }
4195
4196 if (!error && fh->fh_len) {
4197 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &np);
4198 }
4199 if (!error && !np) {
4200 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
4201 }
4202 if (!error && np) {
4203 newvp = NFSTOV(np);
4204 }
4205 if (!busyerror) {
4206 nfs_node_clear_busy(dnp);
4207 }
4208
4209 if (!error && (gotuid || gotgid) &&
4210 (!newvp || nfs_getattrcache(np, nvattr, 0) ||
4211 (gotuid && (nvattr->nva_uid != vap->va_uid)) ||
4212 (gotgid && (nvattr->nva_gid != vap->va_gid)))) {
4213 /* clear ID bits if server didn't use them (or we can't tell) */
4214 VATTR_CLEAR_SUPPORTED(vap, va_uid);
4215 VATTR_CLEAR_SUPPORTED(vap, va_gid);
4216 }
4217 if (error) {
4218 if (newvp) {
4219 nfs_node_unlock(np);
4220 vnode_put(newvp);
4221 }
4222 } else {
4223 *vpp = newvp;
4224 nfs_node_unlock(np);
4225 }
4226 NFS_ZFREE(nfs_fhandle_zone, fh);
4227 NFS_ZFREE(nfs_req_zone, req);
4228 zfree(KT_NFS_VATTR, nvattr);
4229 return NFS_MAPERR(error);
4230 }
4231
4232 /*
4233 * NFS file create call
4234 */
4235 int
nfs3_vnop_create(struct vnop_create_args * ap)4236 nfs3_vnop_create(
4237 struct vnop_create_args /* {
4238 * struct vnodeop_desc *a_desc;
4239 * vnode_t a_dvp;
4240 * vnode_t *a_vpp;
4241 * struct componentname *a_cnp;
4242 * struct vnode_attr *a_vap;
4243 * vfs_context_t a_context;
4244 * } */*ap)
4245 {
4246 vfs_context_t ctx = ap->a_context;
4247 vnode_t dvp = ap->a_dvp;
4248 struct vnode_attr *vap = ap->a_vap;
4249 struct componentname *cnp = ap->a_cnp;
4250 struct nfs_vattr *nvattr;
4251 fhandle_t *fh;
4252 nfsnode_t np = NULL;
4253 struct nfsmount *nmp;
4254 nfsnode_t dnp = VTONFS(dvp);
4255 vnode_t newvp = NULL;
4256 int error = 0, lockerror = ENOENT, busyerror = ENOENT, status = 0, wccpostattr = 0, fmode = 0;
4257 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
4258 int nfsvers, gotuid, gotgid;
4259 u_int64_t xid = 0, dxid;
4260 struct nfsm_chain nmreq, nmrep;
4261 struct nfsreq *req;
4262 struct nfs_dulookup *dul;
4263 int dul_in_progress = 0;
4264 int namedattrs;
4265
4266 nmp = VTONMP(dvp);
4267 if (nfs_mount_gone(nmp)) {
4268 return ENXIO;
4269 }
4270 nfsvers = nmp->nm_vers;
4271 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
4272
4273 if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) {
4274 return ENAMETOOLONG;
4275 }
4276
4277 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
4278
4279 VATTR_SET_SUPPORTED(vap, va_mode);
4280 VATTR_SET_SUPPORTED(vap, va_uid);
4281 VATTR_SET_SUPPORTED(vap, va_gid);
4282 VATTR_SET_SUPPORTED(vap, va_data_size);
4283 VATTR_SET_SUPPORTED(vap, va_access_time);
4284 VATTR_SET_SUPPORTED(vap, va_modify_time);
4285 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
4286 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
4287
4288 if ((vap->va_vaflags & VA_EXCLUSIVE)
4289 ) {
4290 fmode |= O_EXCL;
4291 if (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time)) {
4292 vap->va_vaflags |= VA_UTIMES_NULL;
4293 }
4294 }
4295
4296 fh = zalloc(nfs_fhandle_zone);
4297 req = zalloc_flags(nfs_req_zone, Z_WAITOK);
4298 dul = kalloc_type(struct nfs_dulookup, Z_WAITOK);
4299 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
4300
4301 again:
4302 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
4303 if (!namedattrs) {
4304 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
4305 }
4306
4307 nfsm_chain_null(&nmreq);
4308 nfsm_chain_null(&nmrep);
4309
4310 nfsm_chain_build_alloc_init(error, &nmreq,
4311 NFSX_FH(nfsvers) + 2 * NFSX_UNSIGNED +
4312 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(nfsvers));
4313 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
4314 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
4315 if (nfsvers == NFS_VER3) {
4316 if (fmode & O_EXCL) {
4317 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
4318 error = nfsm_chaim_add_exclusive_create_verifier(error, &nmreq, nmp);
4319 } else {
4320 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
4321 nfsm_chain_add_v3sattr(nmp, error, &nmreq, vap);
4322 }
4323 } else {
4324 nfsm_chain_add_v2sattr(error, &nmreq, vap, 0);
4325 }
4326 nfsm_chain_build_done(error, &nmreq);
4327 nfsmout_if(error);
4328
4329 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_CREATE,
4330 vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req);
4331 if (!error) {
4332 if (!namedattrs) {
4333 nfs_dulookup_start(dul, dnp, ctx);
4334 dul_in_progress = 1;
4335 }
4336 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
4337 }
4338
4339 if ((lockerror = nfs_node_lock(dnp))) {
4340 error = lockerror;
4341 }
4342 dxid = xid;
4343 if (!error && !status) {
4344 if (dnp->n_flag & NNEGNCENTRIES) {
4345 dnp->n_flag &= ~NNEGNCENTRIES;
4346 cache_purge_negatives(dvp);
4347 }
4348 error = nfsm_chain_get_fh_attr(nmp, &nmrep, dnp, ctx, nfsvers, &xid, fh, nvattr);
4349 }
4350 if (nfsvers == NFS_VER3) {
4351 nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &dxid);
4352 }
4353 if (!error) {
4354 error = status;
4355 }
4356 nfsmout:
4357 nfsm_chain_cleanup(&nmreq);
4358 nfsm_chain_cleanup(&nmrep);
4359
4360 if (!lockerror) {
4361 dnp->n_flag |= NMODIFIED;
4362 /* if directory hadn't changed, update namecache mtime */
4363 if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) {
4364 NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr);
4365 }
4366 nfs_node_unlock(dnp);
4367 /* nfs_getattr() will check changed and purge caches */
4368 nfs_getattr(dnp, NULL, ctx, wccpostattr ? NGA_CACHED : NGA_UNCACHED);
4369 }
4370
4371 if (!error && fh->fh_len) {
4372 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &np);
4373 }
4374 if (!error && !np) {
4375 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
4376 }
4377 if (!error && np) {
4378 newvp = NFSTOV(np);
4379 }
4380
4381 if (dul_in_progress) {
4382 nfs_dulookup_finish(dul, dnp, ctx);
4383 }
4384 if (!busyerror) {
4385 nfs_node_clear_busy(dnp);
4386 }
4387
4388 if (error) {
4389 if ((nfsvers == NFS_VER3) && (fmode & O_EXCL) && (error == NFSERR_NOTSUPP)) {
4390 fmode &= ~O_EXCL;
4391 goto again;
4392 }
4393 if (newvp) {
4394 nfs_node_unlock(np);
4395 vnode_put(newvp);
4396 }
4397 } else if ((nfsvers == NFS_VER3) && (fmode & O_EXCL)) {
4398 nfs_node_unlock(np);
4399 error = nfs3_setattr_rpc(np, vap, ctx);
4400 if (error && (gotuid || gotgid)) {
4401 /* it's possible the server didn't like our attempt to set IDs. */
4402 /* so, let's try it again without those */
4403 VATTR_CLEAR_ACTIVE(vap, va_uid);
4404 VATTR_CLEAR_ACTIVE(vap, va_gid);
4405 error = nfs3_setattr_rpc(np, vap, ctx);
4406 }
4407 if (error) {
4408 vnode_put(newvp);
4409 } else {
4410 nfs_node_lock_force(np);
4411 }
4412 }
4413 if (!error) {
4414 *ap->a_vpp = newvp;
4415 }
4416 if (!error && (gotuid || gotgid) &&
4417 (!newvp || nfs_getattrcache(np, nvattr, 0) ||
4418 (gotuid && (nvattr->nva_uid != vap->va_uid)) ||
4419 (gotgid && (nvattr->nva_gid != vap->va_gid)))) {
4420 /* clear ID bits if server didn't use them (or we can't tell) */
4421 VATTR_CLEAR_SUPPORTED(vap, va_uid);
4422 VATTR_CLEAR_SUPPORTED(vap, va_gid);
4423 }
4424 if (!error) {
4425 nfs_node_unlock(np);
4426 }
4427 NFS_ZFREE(nfs_fhandle_zone, fh);
4428 NFS_ZFREE(nfs_req_zone, req);
4429 kfree_type(struct nfs_dulookup, dul);
4430 zfree(KT_NFS_VATTR, nvattr);
4431 return NFS_MAPERR(error);
4432 }
4433
4434 /*
4435 * NFS file remove call
4436 * To try and make NFS semantics closer to UFS semantics, a file that has
4437 * other processes using the vnode is renamed instead of removed and then
4438 * removed later on the last close.
4439 * - If vnode_isinuse()
4440 * If a rename is not already in the works
4441 * call nfs_sillyrename() to set it up
4442 * else
4443 * do the remove RPC
4444 */
4445 int
nfs_vnop_remove(struct vnop_remove_args * ap)4446 nfs_vnop_remove(
4447 struct vnop_remove_args /* {
4448 * struct vnodeop_desc *a_desc;
4449 * vnode_t a_dvp;
4450 * vnode_t a_vp;
4451 * struct componentname *a_cnp;
4452 * int a_flags;
4453 * vfs_context_t a_context;
4454 * } */*ap)
4455 {
4456 vfs_context_t ctx = ap->a_context;
4457 vnode_t vp = ap->a_vp;
4458 vnode_t dvp = ap->a_dvp;
4459 struct componentname *cnp = ap->a_cnp;
4460 nfsnode_t dnp = VTONFS(dvp);
4461 nfsnode_t np = VTONFS(vp);
4462 int error = 0, nfsvers, namedattrs, inuse, gotattr = 0, flushed = 0, setsize = 0;
4463 struct nfs_vattr *nvattr;
4464 struct nfsmount *nmp;
4465 struct nfs_dulookup *dul;
4466
4467 /* XXX prevent removing a sillyrenamed file? */
4468
4469 nmp = NFSTONMP(dnp);
4470 if (nfs_mount_gone(nmp)) {
4471 return ENXIO;
4472 }
4473
4474 if (vnode_isdir(vp)) {
4475 return EPERM;
4476 }
4477
4478 nfsvers = nmp->nm_vers;
4479 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
4480 dul = kalloc_type(struct nfs_dulookup, Z_WAITOK);
4481 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
4482
4483 again_relock:
4484 error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx));
4485 if (error) {
4486 goto out_free;
4487 }
4488
4489 /* lock the node while we remove the file */
4490 lck_mtx_lock(&nfs_node_hash_mutex);
4491 while (np->n_hflag & NHLOCKED) {
4492 np->n_hflag |= NHLOCKWANT;
4493 msleep(np, &nfs_node_hash_mutex, PINOD, "nfs_remove", NULL);
4494 }
4495 np->n_hflag |= NHLOCKED;
4496 lck_mtx_unlock(&nfs_node_hash_mutex);
4497
4498 if (!namedattrs) {
4499 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
4500 }
4501
4502 again:
4503 inuse = vnode_isinuse(vp, 0);
4504 if ((ap->a_flags & VNODE_REMOVE_NODELETEBUSY) && inuse) {
4505 /* Caller requested Carbon delete semantics, but file is busy */
4506 error = EBUSY;
4507 goto out;
4508 }
4509 if (inuse && !gotattr) {
4510 if (nfs_getattr(np, nvattr, ctx, NGA_CACHED)) {
4511 nvattr->nva_nlink = 1;
4512 }
4513 gotattr = 1;
4514 goto again;
4515 }
4516 if (!inuse || (np->n_sillyrename && (nvattr->nva_nlink > 1))) {
4517 if (!inuse && !flushed) { /* flush all the buffers first */
4518 /* unlock the node */
4519 lck_mtx_lock(&nfs_node_hash_mutex);
4520 np->n_hflag &= ~NHLOCKED;
4521 if (np->n_hflag & NHLOCKWANT) {
4522 np->n_hflag &= ~NHLOCKWANT;
4523 wakeup(np);
4524 }
4525 lck_mtx_unlock(&nfs_node_hash_mutex);
4526 nfs_node_clear_busy2(dnp, np);
4527 error = nfs_vinvalbuf1(vp, V_SAVE, ctx, 1);
4528 FSDBG(260, np, np->n_size, np->n_vattr.nva_size, 0xf00d0011);
4529 flushed = 1;
4530 if (error == EINTR) {
4531 nfs_node_lock_force(np);
4532 NATTRINVALIDATE(np);
4533 nfs_node_unlock(np);
4534 goto out_free;
4535 }
4536 if (!namedattrs) {
4537 nfs_dulookup_finish(dul, dnp, ctx);
4538 }
4539 goto again_relock;
4540 }
4541 #if CONFIG_NFS4
4542 if ((nmp->nm_vers >= NFS_VER4) && (np->n_openflags & N_DELEG_MASK)) {
4543 nfs4_delegation_return(np, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
4544 }
4545 #endif
4546 /*
4547 * Purge the name cache so that the chance of a lookup for
4548 * the name succeeding while the remove is in progress is
4549 * minimized.
4550 */
4551 nfs_name_cache_purge(dnp, np, cnp, ctx);
4552
4553 if (!namedattrs) {
4554 nfs_dulookup_start(dul, dnp, ctx);
4555 }
4556
4557 /* Do the rpc */
4558 error = nmp->nm_funcs->nf_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
4559 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4560
4561 /*
4562 * Kludge City: If the first reply to the remove rpc is lost..
4563 * the reply to the retransmitted request will be ENOENT
4564 * since the file was in fact removed
4565 * Therefore, we cheat and return success.
4566 */
4567 if (error == ENOENT) {
4568 error = 0;
4569 }
4570
4571 if (!error && !inuse && !np->n_sillyrename) {
4572 /*
4573 * removal succeeded, it's not in use, and not silly renamed so
4574 * remove nfsnode from hash now so we can't accidentally find it
4575 * again if another object gets created with the same filehandle
4576 * before this vnode gets reclaimed
4577 */
4578 lck_mtx_lock(&nfs_node_hash_mutex);
4579 if (np->n_hflag & NHHASHED) {
4580 LIST_REMOVE(np, n_hash);
4581 np->n_hflag &= ~NHHASHED;
4582 FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
4583 }
4584 lck_mtx_unlock(&nfs_node_hash_mutex);
4585 /* clear flags now: won't get nfs_vnop_inactive for recycled vnode */
4586 /* clear all flags other than these */
4587 nfs_node_lock_force(np);
4588 np->n_flag &= (NMODIFIED);
4589 NATTRINVALIDATE(np);
4590 nfs_node_unlock(np);
4591 vnode_recycle(vp);
4592 setsize = 1;
4593 } else {
4594 nfs_node_lock_force(np);
4595 NATTRINVALIDATE(np);
4596 nfs_node_unlock(np);
4597 }
4598 } else if (!np->n_sillyrename) {
4599 if (!namedattrs) {
4600 nfs_dulookup_start(dul, dnp, ctx);
4601 }
4602 error = nfs_sillyrename(dnp, np, cnp, ctx);
4603 nfs_node_lock_force(np);
4604 NATTRINVALIDATE(np);
4605 nfs_node_unlock(np);
4606 } else {
4607 nfs_node_lock_force(np);
4608 NATTRINVALIDATE(np);
4609 nfs_node_unlock(np);
4610 if (!namedattrs) {
4611 nfs_dulookup_start(dul, dnp, ctx);
4612 }
4613 }
4614
4615 /* nfs_getattr() will check changed and purge caches */
4616 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
4617 if (!namedattrs) {
4618 nfs_dulookup_finish(dul, dnp, ctx);
4619 }
4620 out:
4621 /* unlock the node */
4622 lck_mtx_lock(&nfs_node_hash_mutex);
4623 np->n_hflag &= ~NHLOCKED;
4624 if (np->n_hflag & NHLOCKWANT) {
4625 np->n_hflag &= ~NHLOCKWANT;
4626 wakeup(np);
4627 }
4628 lck_mtx_unlock(&nfs_node_hash_mutex);
4629 nfs_node_clear_busy2(dnp, np);
4630 if (setsize) {
4631 ubc_setsize(vp, 0);
4632 }
4633 out_free:
4634 kfree_type(struct nfs_dulookup, dul);
4635 zfree(KT_NFS_VATTR, nvattr);
4636 return NFS_MAPERR(error);
4637 }
4638
4639 /*
4640 * NFS silly-renamed file removal function called from nfs_vnop_inactive
4641 */
4642 int
nfs_removeit(struct nfs_sillyrename * nsp)4643 nfs_removeit(struct nfs_sillyrename *nsp)
4644 {
4645 struct nfsmount *nmp = NFSTONMP(nsp->nsr_dnp);
4646 if (nfs_mount_gone(nmp)) {
4647 return ENXIO;
4648 }
4649 return nmp->nm_funcs->nf_remove_rpc(nsp->nsr_dnp, nsp->nsr_name, nsp->nsr_namlen, NULL, nsp->nsr_cred);
4650 }
4651
4652 /*
4653 * NFS remove rpc, called from nfs_remove() and nfs_removeit().
4654 */
4655 int
nfs3_remove_rpc(nfsnode_t dnp,char * name,int namelen,thread_t thd,kauth_cred_t cred)4656 nfs3_remove_rpc(
4657 nfsnode_t dnp,
4658 char *name,
4659 int namelen,
4660 thread_t thd,
4661 kauth_cred_t cred)
4662 {
4663 int error = 0, lockerror = ENOENT, status = 0, wccpostattr = 0;
4664 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
4665 struct nfsmount *nmp;
4666 int nfsvers;
4667 u_int64_t xid;
4668 struct nfsm_chain nmreq, nmrep;
4669
4670 nmp = NFSTONMP(dnp);
4671 if (nfs_mount_gone(nmp)) {
4672 return ENXIO;
4673 }
4674 nfsvers = nmp->nm_vers;
4675 if ((nfsvers == NFS_VER2) && (namelen > NFS_MAXNAMLEN)) {
4676 return ENAMETOOLONG;
4677 }
4678
4679 nfsm_chain_null(&nmreq);
4680 nfsm_chain_null(&nmrep);
4681
4682 nfsm_chain_build_alloc_init(error, &nmreq,
4683 NFSX_FH(nfsvers) + NFSX_UNSIGNED + nfsm_rndup(namelen));
4684 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
4685 nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
4686 nfsm_chain_build_done(error, &nmreq);
4687 nfsmout_if(error);
4688
4689 error = nfs_request2(dnp, NULL, &nmreq, NFSPROC_REMOVE, thd, cred, NULL, 0, &nmrep, &xid, &status);
4690
4691 if ((lockerror = nfs_node_lock(dnp))) {
4692 error = lockerror;
4693 }
4694 if (nfsvers == NFS_VER3) {
4695 nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &xid);
4696 }
4697 nfsmout_if(error);
4698 dnp->n_flag |= NMODIFIED;
4699 /* if directory hadn't changed, update namecache mtime */
4700 if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) {
4701 NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr);
4702 }
4703 if (!wccpostattr) {
4704 NATTRINVALIDATE(dnp);
4705 }
4706 if (!error) {
4707 error = status;
4708 }
4709 nfsmout:
4710 if (!lockerror) {
4711 nfs_node_unlock(dnp);
4712 }
4713 nfsm_chain_cleanup(&nmreq);
4714 nfsm_chain_cleanup(&nmrep);
4715 return error;
4716 }
4717
4718 /*
4719 * NFS file rename call
4720 */
4721 int
nfs_vnop_rename(struct vnop_rename_args * ap)4722 nfs_vnop_rename(
4723 struct vnop_rename_args /* {
4724 * struct vnodeop_desc *a_desc;
4725 * vnode_t a_fdvp;
4726 * vnode_t a_fvp;
4727 * struct componentname *a_fcnp;
4728 * vnode_t a_tdvp;
4729 * vnode_t a_tvp;
4730 * struct componentname *a_tcnp;
4731 * vfs_context_t a_context;
4732 * } */*ap)
4733 {
4734 vfs_context_t ctx = ap->a_context;
4735 vnode_t fdvp = ap->a_fdvp;
4736 vnode_t fvp = ap->a_fvp;
4737 vnode_t tdvp = ap->a_tdvp;
4738 vnode_t tvp = ap->a_tvp;
4739 nfsnode_t fdnp, fnp, tdnp, tnp;
4740 struct componentname *tcnp = ap->a_tcnp;
4741 struct componentname *fcnp = ap->a_fcnp;
4742 int error, nfsvers, inuse = 0, tvprecycle = 0, locked = 0;
4743 mount_t fmp, tdmp, tmp;
4744 struct nfs_vattr *nvattr;
4745 struct nfsmount *nmp;
4746
4747 fdnp = VTONFS(fdvp);
4748 fnp = VTONFS(fvp);
4749 tdnp = VTONFS(tdvp);
4750 tnp = tvp ? VTONFS(tvp) : NULL;
4751
4752 nmp = NFSTONMP(fdnp);
4753 if (nfs_mount_gone(nmp)) {
4754 return ENXIO;
4755 }
4756 nfsvers = nmp->nm_vers;
4757
4758 error = nfs_node_set_busy4(fdnp, fnp, tdnp, tnp, vfs_context_thread(ctx));
4759 if (error) {
4760 return NFS_MAPERR(error);
4761 }
4762
4763 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
4764
4765 if (tvp && (tvp != fvp)) {
4766 /* lock the node while we rename over the existing file */
4767 lck_mtx_lock(&nfs_node_hash_mutex);
4768 while (tnp->n_hflag & NHLOCKED) {
4769 tnp->n_hflag |= NHLOCKWANT;
4770 msleep(tnp, &nfs_node_hash_mutex, PINOD, "nfs_rename", NULL);
4771 }
4772 tnp->n_hflag |= NHLOCKED;
4773 lck_mtx_unlock(&nfs_node_hash_mutex);
4774 locked = 1;
4775 }
4776
4777 /* Check for cross-device rename */
4778 fmp = vnode_mount(fvp);
4779 tmp = tvp ? vnode_mount(tvp) : NULL;
4780 tdmp = vnode_mount(tdvp);
4781 if ((fmp != tdmp) || (tvp && (fmp != tmp))) {
4782 error = EXDEV;
4783 goto out;
4784 }
4785
4786 /* XXX prevent renaming from/over a sillyrenamed file? */
4787
4788 /*
4789 * If the tvp exists and is in use, sillyrename it before doing the
4790 * rename of the new file over it.
4791 * XXX Can't sillyrename a directory.
4792 * Don't sillyrename if source and target are same vnode (hard
4793 * links or case-variants)
4794 */
4795 if (tvp && (tvp != fvp)) {
4796 inuse = vnode_isinuse(tvp, 0);
4797 }
4798 if (inuse && !tnp->n_sillyrename && (vnode_vtype(tvp) != VDIR)) {
4799 error = nfs_sillyrename(tdnp, tnp, tcnp, ctx);
4800 if (error) {
4801 /* sillyrename failed. Instead of pressing on, return error */
4802 goto out; /* should not be ENOENT. */
4803 } else {
4804 /* sillyrename succeeded.*/
4805 tvp = NULL;
4806 }
4807 }
4808 #if CONFIG_NFS4
4809 else if (tvp && (nmp->nm_vers >= NFS_VER4) && (tnp->n_openflags & N_DELEG_MASK)) {
4810 nfs4_delegation_return(tnp, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
4811 }
4812 #endif
4813 error = nmp->nm_funcs->nf_rename_rpc(fdnp, fcnp->cn_nameptr, fcnp->cn_namelen,
4814 tdnp, tcnp->cn_nameptr, tcnp->cn_namelen, ctx);
4815
4816 /*
4817 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
4818 */
4819 if (error == ENOENT) {
4820 error = 0;
4821 }
4822
4823 if (tvp && (tvp != fvp) && !tnp->n_sillyrename) {
4824 nfs_node_lock_force(tnp);
4825 tvprecycle = (!error && !vnode_isinuse(tvp, 0) && (vnode_iocount(tvp) == 1) &&
4826 (nfs_getattrcache(tnp, nvattr, 0) || (nvattr->nva_nlink == 1)));
4827 nfs_node_unlock(tnp);
4828 lck_mtx_lock(&nfs_node_hash_mutex);
4829 if (tvprecycle && (tnp->n_hflag & NHHASHED)) {
4830 /*
4831 * remove nfsnode from hash now so we can't accidentally find it
4832 * again if another object gets created with the same filehandle
4833 * before this vnode gets reclaimed
4834 */
4835 LIST_REMOVE(tnp, n_hash);
4836 tnp->n_hflag &= ~NHHASHED;
4837 FSDBG(266, 0, tnp, tnp->n_flag, 0xb1eb1e);
4838 }
4839 lck_mtx_unlock(&nfs_node_hash_mutex);
4840 }
4841
4842 /* purge the old name cache entries and enter the new one */
4843 nfs_name_cache_purge(fdnp, fnp, fcnp, ctx);
4844 if (tvp) {
4845 nfs_name_cache_purge(tdnp, tnp, tcnp, ctx);
4846 if (tvprecycle) {
4847 /* clear flags now: won't get nfs_vnop_inactive for recycled vnode */
4848 /* clear all flags other than these */
4849 nfs_node_lock_force(tnp);
4850 tnp->n_flag &= (NMODIFIED);
4851 nfs_node_unlock(tnp);
4852 vnode_recycle(tvp);
4853 }
4854 }
4855 if (!error) {
4856 nfs_node_lock_force(tdnp);
4857 if (tdnp->n_flag & NNEGNCENTRIES) {
4858 tdnp->n_flag &= ~NNEGNCENTRIES;
4859 cache_purge_negatives(tdvp);
4860 }
4861 nfs_node_unlock(tdnp);
4862 nfs_node_lock_force(fnp);
4863 cache_enter(tdvp, fvp, tcnp);
4864 if (tdvp != fdvp) { /* update parent pointer */
4865 if (fnp->n_parent && !vnode_get(fnp->n_parent)) {
4866 /* remove ref from old parent */
4867 vnode_rele(fnp->n_parent);
4868 vnode_put(fnp->n_parent);
4869 }
4870 fnp->n_parent = tdvp;
4871 if (tdvp && !vnode_get(tdvp)) {
4872 /* add ref to new parent */
4873 vnode_ref(tdvp);
4874 vnode_put(tdvp);
4875 } else {
4876 fnp->n_parent = NULL;
4877 }
4878 }
4879 nfs_node_unlock(fnp);
4880 }
4881 out:
4882 /* nfs_getattr() will check changed and purge caches */
4883 nfs_getattr(fdnp, NULL, ctx, NGA_CACHED);
4884 nfs_getattr(tdnp, NULL, ctx, NGA_CACHED);
4885 if (locked) {
4886 /* unlock node */
4887 lck_mtx_lock(&nfs_node_hash_mutex);
4888 tnp->n_hflag &= ~NHLOCKED;
4889 if (tnp->n_hflag & NHLOCKWANT) {
4890 tnp->n_hflag &= ~NHLOCKWANT;
4891 wakeup(tnp);
4892 }
4893 lck_mtx_unlock(&nfs_node_hash_mutex);
4894 }
4895 nfs_node_clear_busy4(fdnp, fnp, tdnp, tnp);
4896 zfree(KT_NFS_VATTR, nvattr);
4897 return NFS_MAPERR(error);
4898 }
4899
4900 /*
4901 * Do an NFS rename rpc. Called from nfs_vnop_rename() and nfs_sillyrename().
4902 */
4903 int
nfs3_rename_rpc(nfsnode_t fdnp,char * fnameptr,int fnamelen,nfsnode_t tdnp,char * tnameptr,int tnamelen,vfs_context_t ctx)4904 nfs3_rename_rpc(
4905 nfsnode_t fdnp,
4906 char *fnameptr,
4907 int fnamelen,
4908 nfsnode_t tdnp,
4909 char *tnameptr,
4910 int tnamelen,
4911 vfs_context_t ctx)
4912 {
4913 int error = 0, lockerror = ENOENT, status = 0, fwccpostattr = 0, twccpostattr = 0;
4914 struct timespec fpremtime = { .tv_sec = 0, .tv_nsec = 0 }, tpremtime = { .tv_sec = 0, .tv_nsec = 0 };
4915 struct nfsmount *nmp;
4916 int nfsvers;
4917 u_int64_t xid, txid;
4918 struct nfsm_chain nmreq, nmrep;
4919
4920 nmp = NFSTONMP(fdnp);
4921 if (nfs_mount_gone(nmp)) {
4922 return ENXIO;
4923 }
4924 nfsvers = nmp->nm_vers;
4925 if ((nfsvers == NFS_VER2) &&
4926 ((fnamelen > NFS_MAXNAMLEN) || (tnamelen > NFS_MAXNAMLEN))) {
4927 return ENAMETOOLONG;
4928 }
4929
4930 nfsm_chain_null(&nmreq);
4931 nfsm_chain_null(&nmrep);
4932
4933 nfsm_chain_build_alloc_init(error, &nmreq,
4934 (NFSX_FH(nfsvers) + NFSX_UNSIGNED) * 2 +
4935 nfsm_rndup(fnamelen) + nfsm_rndup(tnamelen));
4936 nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
4937 nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp);
4938 nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
4939 nfsm_chain_add_name(error, &nmreq, tnameptr, tnamelen, nmp);
4940 nfsm_chain_build_done(error, &nmreq);
4941 nfsmout_if(error);
4942
4943 error = nfs_request(fdnp, NULL, &nmreq, NFSPROC_RENAME, ctx, NULL, &nmrep, &xid, &status);
4944
4945 if ((lockerror = nfs_node_lock2(fdnp, tdnp))) {
4946 error = lockerror;
4947 }
4948 if (nfsvers == NFS_VER3) {
4949 txid = xid;
4950 nfsm_chain_get_wcc_data(error, &nmrep, fdnp, &fpremtime, &fwccpostattr, &xid);
4951 nfsm_chain_get_wcc_data(error, &nmrep, tdnp, &tpremtime, &twccpostattr, &txid);
4952 }
4953 if (!error) {
4954 error = status;
4955 }
4956 nfsmout:
4957 nfsm_chain_cleanup(&nmreq);
4958 nfsm_chain_cleanup(&nmrep);
4959 if (!lockerror) {
4960 fdnp->n_flag |= NMODIFIED;
4961 /* if directory hadn't changed, update namecache mtime */
4962 if (nfstimespeccmp(&fdnp->n_ncmtime, &fpremtime, ==)) {
4963 NFS_CHANGED_UPDATE_NC(nfsvers, fdnp, &fdnp->n_vattr);
4964 }
4965 if (!fwccpostattr) {
4966 NATTRINVALIDATE(fdnp);
4967 }
4968 tdnp->n_flag |= NMODIFIED;
4969 /* if directory hadn't changed, update namecache mtime */
4970 if (nfstimespeccmp(&tdnp->n_ncmtime, &tpremtime, ==)) {
4971 NFS_CHANGED_UPDATE_NC(nfsvers, tdnp, &tdnp->n_vattr);
4972 }
4973 if (!twccpostattr) {
4974 NATTRINVALIDATE(tdnp);
4975 }
4976 nfs_node_unlock2(fdnp, tdnp);
4977 }
4978 return error;
4979 }
4980
4981 /*
4982 * NFS hard link create call
4983 */
4984 int
nfs3_vnop_link(struct vnop_link_args * ap)4985 nfs3_vnop_link(
4986 struct vnop_link_args /* {
4987 * struct vnodeop_desc *a_desc;
4988 * vnode_t a_vp;
4989 * vnode_t a_tdvp;
4990 * struct componentname *a_cnp;
4991 * vfs_context_t a_context;
4992 * } */*ap)
4993 {
4994 vfs_context_t ctx = ap->a_context;
4995 vnode_t vp = ap->a_vp;
4996 vnode_t tdvp = ap->a_tdvp;
4997 struct componentname *cnp = ap->a_cnp;
4998 int error = 0, lockerror = ENOENT, status = 0, wccpostattr = 0, attrflag = 0;
4999 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
5000 struct nfsmount *nmp;
5001 nfsnode_t np = VTONFS(vp);
5002 nfsnode_t tdnp = VTONFS(tdvp);
5003 int nfsvers;
5004 u_int64_t xid, txid;
5005 struct nfsm_chain nmreq, nmrep;
5006
5007 if (vnode_mount(vp) != vnode_mount(tdvp)) {
5008 return EXDEV;
5009 }
5010
5011 nmp = VTONMP(vp);
5012 if (nfs_mount_gone(nmp)) {
5013 return ENXIO;
5014 }
5015 nfsvers = nmp->nm_vers;
5016 if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) {
5017 return ENAMETOOLONG;
5018 }
5019
5020 /*
5021 * Push all writes to the server, so that the attribute cache
5022 * doesn't get "out of sync" with the server.
5023 * XXX There should be a better way!
5024 */
5025 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
5026
5027 error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx));
5028 if (error) {
5029 return NFS_MAPERR(error);
5030 }
5031
5032 nfsm_chain_null(&nmreq);
5033 nfsm_chain_null(&nmrep);
5034
5035 nfsm_chain_build_alloc_init(error, &nmreq,
5036 NFSX_FH(nfsvers) * 2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
5037 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5038 nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
5039 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
5040 nfsm_chain_build_done(error, &nmreq);
5041 nfsmout_if(error);
5042 error = nfs_request(np, NULL, &nmreq, NFSPROC_LINK, ctx, NULL, &nmrep, &xid, &status);
5043
5044 if ((lockerror = nfs_node_lock2(tdnp, np))) {
5045 error = lockerror;
5046 goto nfsmout;
5047 }
5048 if (nfsvers == NFS_VER3) {
5049 txid = xid;
5050 nfsm_chain_postop_attr_update_flag(error, &nmrep, np, attrflag, &xid);
5051 nfsm_chain_get_wcc_data(error, &nmrep, tdnp, &premtime, &wccpostattr, &txid);
5052 }
5053 if (!error) {
5054 error = status;
5055 }
5056 nfsmout:
5057 nfsm_chain_cleanup(&nmreq);
5058 nfsm_chain_cleanup(&nmrep);
5059 if (!lockerror) {
5060 if (!attrflag) {
5061 NATTRINVALIDATE(np);
5062 }
5063 tdnp->n_flag |= NMODIFIED;
5064 /* if directory hadn't changed, update namecache mtime */
5065 if (nfstimespeccmp(&tdnp->n_ncmtime, &premtime, ==)) {
5066 NFS_CHANGED_UPDATE_NC(nfsvers, tdnp, &tdnp->n_vattr);
5067 }
5068 if (!wccpostattr) {
5069 NATTRINVALIDATE(tdnp);
5070 }
5071 if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
5072 tdnp->n_flag &= ~NNEGNCENTRIES;
5073 cache_purge_negatives(tdvp);
5074 }
5075 nfs_node_unlock2(tdnp, np);
5076 }
5077 nfs_node_clear_busy2(tdnp, np);
5078 /*
5079 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
5080 */
5081 if (error == EEXIST) {
5082 error = 0;
5083 }
5084 return NFS_MAPERR(error);
5085 }
5086
5087 /*
5088 * NFS symbolic link create call
5089 */
5090 int
nfs3_vnop_symlink(struct vnop_symlink_args * ap)5091 nfs3_vnop_symlink(
5092 struct vnop_symlink_args /* {
5093 * struct vnodeop_desc *a_desc;
5094 * vnode_t a_dvp;
5095 * vnode_t *a_vpp;
5096 * struct componentname *a_cnp;
5097 * struct vnode_attr *a_vap;
5098 * char *a_target;
5099 * vfs_context_t a_context;
5100 * } */*ap)
5101 {
5102 vfs_context_t ctx = ap->a_context;
5103 vnode_t dvp = ap->a_dvp;
5104 struct vnode_attr *vap = ap->a_vap;
5105 struct componentname *cnp = ap->a_cnp;
5106 struct nfs_vattr *nvattr;
5107 fhandle_t *fh;
5108 int error = 0, lockerror = ENOENT, busyerror = ENOENT, status = 0, wccpostattr = 0;
5109 size_t slen;
5110 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
5111 vnode_t newvp = NULL;
5112 int nfsvers, gotuid, gotgid;
5113 u_int64_t xid = 0, dxid;
5114 nfsnode_t np = NULL;
5115 nfsnode_t dnp = VTONFS(dvp);
5116 struct nfsmount *nmp;
5117 struct nfsm_chain nmreq, nmrep;
5118 struct nfsreq *req;
5119 struct nfs_dulookup *dul;
5120 int namedattrs;
5121 int dul_in_progress = 0;
5122
5123 nmp = VTONMP(dvp);
5124 if (nfs_mount_gone(nmp)) {
5125 return ENXIO;
5126 }
5127 nfsvers = nmp->nm_vers;
5128 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
5129
5130 slen = strlen(ap->a_target);
5131 if ((nfsvers == NFS_VER2) &&
5132 ((cnp->cn_namelen > NFS_MAXNAMLEN) || (slen > NFS_MAXPATHLEN))) {
5133 return ENAMETOOLONG;
5134 }
5135
5136 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
5137
5138 VATTR_SET_SUPPORTED(vap, va_mode);
5139 VATTR_SET_SUPPORTED(vap, va_uid);
5140 VATTR_SET_SUPPORTED(vap, va_gid);
5141 VATTR_SET_SUPPORTED(vap, va_data_size);
5142 VATTR_SET_SUPPORTED(vap, va_access_time);
5143 VATTR_SET_SUPPORTED(vap, va_modify_time);
5144 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
5145 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
5146
5147 fh = zalloc(nfs_fhandle_zone);
5148 req = zalloc_flags(nfs_req_zone, Z_WAITOK);
5149 dul = kalloc_type(struct nfs_dulookup, Z_WAITOK);
5150 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
5151
5152 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
5153 if (!namedattrs) {
5154 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
5155 }
5156
5157 nfsm_chain_null(&nmreq);
5158 nfsm_chain_null(&nmrep);
5159
5160 nfsm_chain_build_alloc_init(error, &nmreq,
5161 NFSX_FH(nfsvers) + 2 * NFSX_UNSIGNED +
5162 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(nfsvers));
5163 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
5164 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
5165 if (nfsvers == NFS_VER3) {
5166 nfsm_chain_add_v3sattr(nmp, error, &nmreq, vap);
5167 }
5168 nfsm_chain_add_name(error, &nmreq, ap->a_target, slen, nmp);
5169 if (nfsvers == NFS_VER2) {
5170 nfsm_chain_add_v2sattr(error, &nmreq, vap, -1);
5171 }
5172 nfsm_chain_build_done(error, &nmreq);
5173 nfsmout_if(error);
5174
5175 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_SYMLINK,
5176 vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req);
5177 if (!error) {
5178 if (!namedattrs) {
5179 nfs_dulookup_start(dul, dnp, ctx);
5180 dul_in_progress = 1;
5181 }
5182 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
5183 }
5184
5185 if ((lockerror = nfs_node_lock(dnp))) {
5186 error = lockerror;
5187 }
5188 dxid = xid;
5189 if (!error && !status) {
5190 if (dnp->n_flag & NNEGNCENTRIES) {
5191 dnp->n_flag &= ~NNEGNCENTRIES;
5192 cache_purge_negatives(dvp);
5193 }
5194 if (nfsvers == NFS_VER3) {
5195 error = nfsm_chain_get_fh_attr(nmp, &nmrep, dnp, ctx, nfsvers, &xid, fh, nvattr);
5196 } else {
5197 fh->fh_len = 0;
5198 }
5199 }
5200 if (nfsvers == NFS_VER3) {
5201 nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &dxid);
5202 }
5203 if (!error) {
5204 error = status;
5205 }
5206 nfsmout:
5207 nfsm_chain_cleanup(&nmreq);
5208 nfsm_chain_cleanup(&nmrep);
5209
5210 if (!lockerror) {
5211 dnp->n_flag |= NMODIFIED;
5212 /* if directory hadn't changed, update namecache mtime */
5213 if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) {
5214 NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr);
5215 }
5216 nfs_node_unlock(dnp);
5217 /* nfs_getattr() will check changed and purge caches */
5218 nfs_getattr(dnp, NULL, ctx, wccpostattr ? NGA_CACHED : NGA_UNCACHED);
5219 }
5220
5221 if (!error && fh->fh_len) {
5222 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &np);
5223 }
5224 if (!error && np) {
5225 newvp = NFSTOV(np);
5226 }
5227
5228 if (dul_in_progress) {
5229 nfs_dulookup_finish(dul, dnp, ctx);
5230 }
5231
5232 /*
5233 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
5234 * if we can succeed in looking up the symlink.
5235 */
5236 if ((error == EEXIST) || (!error && !newvp)) {
5237 if (newvp) {
5238 nfs_node_unlock(np);
5239 vnode_put(newvp);
5240 newvp = NULL;
5241 }
5242 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
5243 if (!error) {
5244 newvp = NFSTOV(np);
5245 if (vnode_vtype(newvp) != VLNK) {
5246 error = EEXIST;
5247 }
5248 }
5249 }
5250 if (!busyerror) {
5251 nfs_node_clear_busy(dnp);
5252 }
5253 if (!error && (gotuid || gotgid) &&
5254 (!newvp || nfs_getattrcache(np, nvattr, 0) ||
5255 (gotuid && (nvattr->nva_uid != vap->va_uid)) ||
5256 (gotgid && (nvattr->nva_gid != vap->va_gid)))) {
5257 /* clear ID bits if server didn't use them (or we can't tell) */
5258 VATTR_CLEAR_SUPPORTED(vap, va_uid);
5259 VATTR_CLEAR_SUPPORTED(vap, va_gid);
5260 }
5261 if (error) {
5262 if (newvp) {
5263 nfs_node_unlock(np);
5264 vnode_put(newvp);
5265 }
5266 } else {
5267 nfs_node_unlock(np);
5268 *ap->a_vpp = newvp;
5269 }
5270 NFS_ZFREE(nfs_fhandle_zone, fh);
5271 NFS_ZFREE(nfs_req_zone, req);
5272 kfree_type(struct nfs_dulookup, dul);
5273 zfree(KT_NFS_VATTR, nvattr);
5274 return NFS_MAPERR(error);
5275 }
5276
5277 /*
5278 * NFS make dir call
5279 */
5280 int
nfs3_vnop_mkdir(struct vnop_mkdir_args * ap)5281 nfs3_vnop_mkdir(
5282 struct vnop_mkdir_args /* {
5283 * struct vnodeop_desc *a_desc;
5284 * vnode_t a_dvp;
5285 * vnode_t *a_vpp;
5286 * struct componentname *a_cnp;
5287 * struct vnode_attr *a_vap;
5288 * vfs_context_t a_context;
5289 * } */*ap)
5290 {
5291 vfs_context_t ctx = ap->a_context;
5292 vnode_t dvp = ap->a_dvp;
5293 struct vnode_attr *vap = ap->a_vap;
5294 struct componentname *cnp = ap->a_cnp;
5295 struct nfs_vattr *nvattr;
5296 nfsnode_t np = NULL;
5297 struct nfsmount *nmp;
5298 nfsnode_t dnp = VTONFS(dvp);
5299 vnode_t newvp = NULL;
5300 int error = 0, lockerror = ENOENT, busyerror = ENOENT, status = 0, wccpostattr = 0;
5301 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
5302 int nfsvers, gotuid, gotgid;
5303 u_int64_t xid = 0, dxid;
5304 fhandle_t *fh;
5305 struct nfsm_chain nmreq, nmrep;
5306 struct nfsreq *req;
5307 struct nfs_dulookup *dul;
5308 int namedattrs;
5309 int dul_in_progress = 0;
5310
5311 nmp = VTONMP(dvp);
5312 if (nfs_mount_gone(nmp)) {
5313 return ENXIO;
5314 }
5315 nfsvers = nmp->nm_vers;
5316 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
5317
5318 if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) {
5319 return ENAMETOOLONG;
5320 }
5321
5322 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
5323
5324 VATTR_SET_SUPPORTED(vap, va_mode);
5325 VATTR_SET_SUPPORTED(vap, va_uid);
5326 VATTR_SET_SUPPORTED(vap, va_gid);
5327 VATTR_SET_SUPPORTED(vap, va_data_size);
5328 VATTR_SET_SUPPORTED(vap, va_access_time);
5329 VATTR_SET_SUPPORTED(vap, va_modify_time);
5330 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
5331 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
5332
5333 fh = zalloc(nfs_fhandle_zone);
5334 req = zalloc_flags(nfs_req_zone, Z_WAITOK);
5335 dul = kalloc_type(struct nfs_dulookup, Z_WAITOK);
5336 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
5337
5338 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
5339 if (!namedattrs) {
5340 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
5341 }
5342
5343 nfsm_chain_null(&nmreq);
5344 nfsm_chain_null(&nmrep);
5345
5346 nfsm_chain_build_alloc_init(error, &nmreq,
5347 NFSX_FH(nfsvers) + NFSX_UNSIGNED +
5348 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(nfsvers));
5349 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
5350 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
5351 if (nfsvers == NFS_VER3) {
5352 nfsm_chain_add_v3sattr(nmp, error, &nmreq, vap);
5353 } else {
5354 nfsm_chain_add_v2sattr(error, &nmreq, vap, -1);
5355 }
5356 nfsm_chain_build_done(error, &nmreq);
5357 nfsmout_if(error);
5358
5359 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_MKDIR,
5360 vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req);
5361 if (!error) {
5362 if (!namedattrs) {
5363 nfs_dulookup_start(dul, dnp, ctx);
5364 dul_in_progress = 1;
5365 }
5366 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
5367 }
5368
5369 if ((lockerror = nfs_node_lock(dnp))) {
5370 error = lockerror;
5371 }
5372 dxid = xid;
5373 if (!error && !status) {
5374 if (dnp->n_flag & NNEGNCENTRIES) {
5375 dnp->n_flag &= ~NNEGNCENTRIES;
5376 cache_purge_negatives(dvp);
5377 }
5378 error = nfsm_chain_get_fh_attr(nmp, &nmrep, dnp, ctx, nfsvers, &xid, fh, nvattr);
5379 }
5380 if (nfsvers == NFS_VER3) {
5381 nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &dxid);
5382 }
5383 if (!error) {
5384 error = status;
5385 }
5386 nfsmout:
5387 nfsm_chain_cleanup(&nmreq);
5388 nfsm_chain_cleanup(&nmrep);
5389
5390 if (!lockerror) {
5391 dnp->n_flag |= NMODIFIED;
5392 /* if directory hadn't changed, update namecache mtime */
5393 if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) {
5394 NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr);
5395 }
5396 nfs_node_unlock(dnp);
5397 /* nfs_getattr() will check changed and purge caches */
5398 nfs_getattr(dnp, NULL, ctx, wccpostattr ? NGA_CACHED : NGA_UNCACHED);
5399 }
5400
5401 if (!error && fh->fh_len) {
5402 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &np);
5403 }
5404 if (!error && np) {
5405 newvp = NFSTOV(np);
5406 }
5407
5408 if (dul_in_progress) {
5409 nfs_dulookup_finish(dul, dnp, ctx);
5410 }
5411
5412 /*
5413 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
5414 * if we can succeed in looking up the directory.
5415 */
5416 if ((error == EEXIST) || (!error && !newvp)) {
5417 if (newvp) {
5418 nfs_node_unlock(np);
5419 vnode_put(newvp);
5420 newvp = NULL;
5421 }
5422 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
5423 if (!error) {
5424 newvp = NFSTOV(np);
5425 if (vnode_vtype(newvp) != VDIR) {
5426 error = EEXIST;
5427 }
5428 }
5429 }
5430 if (!busyerror) {
5431 nfs_node_clear_busy(dnp);
5432 }
5433 if (!error && (gotuid || gotgid) &&
5434 (!newvp || nfs_getattrcache(np, nvattr, 0) ||
5435 (gotuid && (nvattr->nva_uid != vap->va_uid)) ||
5436 (gotgid && (nvattr->nva_gid != vap->va_gid)))) {
5437 /* clear ID bits if server didn't use them (or we can't tell) */
5438 VATTR_CLEAR_SUPPORTED(vap, va_uid);
5439 VATTR_CLEAR_SUPPORTED(vap, va_gid);
5440 }
5441 if (error) {
5442 if (newvp) {
5443 nfs_node_unlock(np);
5444 vnode_put(newvp);
5445 }
5446 } else {
5447 nfs_node_unlock(np);
5448 *ap->a_vpp = newvp;
5449 }
5450 NFS_ZFREE(nfs_fhandle_zone, fh);
5451 NFS_ZFREE(nfs_req_zone, req);
5452 kfree_type(struct nfs_dulookup, dul);
5453 zfree(KT_NFS_VATTR, nvattr);
5454 return NFS_MAPERR(error);
5455 }
5456
5457 /*
5458 * NFS remove directory call
5459 */
5460 int
nfs3_vnop_rmdir(struct vnop_rmdir_args * ap)5461 nfs3_vnop_rmdir(
5462 struct vnop_rmdir_args /* {
5463 * struct vnodeop_desc *a_desc;
5464 * vnode_t a_dvp;
5465 * vnode_t a_vp;
5466 * struct componentname *a_cnp;
5467 * vfs_context_t a_context;
5468 * } */*ap)
5469 {
5470 vfs_context_t ctx = ap->a_context;
5471 vnode_t vp = ap->a_vp;
5472 vnode_t dvp = ap->a_dvp;
5473 struct componentname *cnp = ap->a_cnp;
5474 int error = 0, lockerror = ENOENT, status = 0, wccpostattr = 0;
5475 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
5476 struct nfsmount *nmp;
5477 nfsnode_t np = VTONFS(vp);
5478 nfsnode_t dnp = VTONFS(dvp);
5479 int nfsvers;
5480 u_int64_t xid;
5481 struct nfsm_chain nmreq, nmrep;
5482 struct nfsreq *req;
5483 struct nfs_dulookup *dul;
5484 int namedattrs;
5485 int dul_in_progress = 0;
5486
5487 nmp = VTONMP(vp);
5488 if (nfs_mount_gone(nmp)) {
5489 return ENXIO;
5490 }
5491 nfsvers = nmp->nm_vers;
5492 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
5493
5494 if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) {
5495 return ENAMETOOLONG;
5496 }
5497
5498 if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)))) {
5499 return NFS_MAPERR(error);
5500 }
5501
5502 req = zalloc_flags(nfs_req_zone, Z_WAITOK);
5503 dul = kalloc_type(struct nfs_dulookup, Z_WAITOK);
5504
5505 if (!namedattrs) {
5506 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
5507 }
5508
5509 nfsm_chain_null(&nmreq);
5510 nfsm_chain_null(&nmrep);
5511
5512 nfsm_chain_build_alloc_init(error, &nmreq,
5513 NFSX_FH(nfsvers) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
5514 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
5515 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
5516 nfsm_chain_build_done(error, &nmreq);
5517 nfsmout_if(error);
5518
5519 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_RMDIR,
5520 vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req);
5521 if (!error) {
5522 if (!namedattrs) {
5523 nfs_dulookup_start(dul, dnp, ctx);
5524 dul_in_progress = 1;
5525 }
5526 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
5527 }
5528
5529 if ((lockerror = nfs_node_lock(dnp))) {
5530 error = lockerror;
5531 }
5532 if (nfsvers == NFS_VER3) {
5533 nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &xid);
5534 }
5535 if (!error) {
5536 error = status;
5537 }
5538 nfsmout:
5539 nfsm_chain_cleanup(&nmreq);
5540 nfsm_chain_cleanup(&nmrep);
5541
5542 if (!lockerror) {
5543 dnp->n_flag |= NMODIFIED;
5544 /* if directory hadn't changed, update namecache mtime */
5545 if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) {
5546 NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr);
5547 }
5548 nfs_node_unlock(dnp);
5549 nfs_name_cache_purge(dnp, np, cnp, ctx);
5550 /* nfs_getattr() will check changed and purge caches */
5551 nfs_getattr(dnp, NULL, ctx, wccpostattr ? NGA_CACHED : NGA_UNCACHED);
5552 }
5553 if (dul_in_progress) {
5554 nfs_dulookup_finish(dul, dnp, ctx);
5555 }
5556 nfs_node_clear_busy2(dnp, np);
5557
5558 /*
5559 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
5560 */
5561 if (error == ENOENT) {
5562 error = 0;
5563 }
5564 if (!error) {
5565 /*
5566 * remove nfsnode from hash now so we can't accidentally find it
5567 * again if another object gets created with the same filehandle
5568 * before this vnode gets reclaimed
5569 */
5570 lck_mtx_lock(&nfs_node_hash_mutex);
5571 if (np->n_hflag & NHHASHED) {
5572 LIST_REMOVE(np, n_hash);
5573 np->n_hflag &= ~NHHASHED;
5574 FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
5575 }
5576 lck_mtx_unlock(&nfs_node_hash_mutex);
5577 }
5578 NFS_ZFREE(nfs_req_zone, req);
5579 kfree_type(struct nfs_dulookup, dul);
5580 return NFS_MAPERR(error);
5581 }
5582
5583 /*
5584 * NFS readdir call
5585 *
5586 * The incoming "offset" is a directory cookie indicating where in the
5587 * directory entries should be read from. A zero cookie means start at
5588 * the beginning of the directory. Any other cookie will be a cookie
5589 * returned from the server.
5590 *
5591 * Using that cookie, determine which buffer (and where in that buffer)
5592 * to start returning entries from. Buffer logical block numbers are
5593 * the cookies they start at. If a buffer is found that is not full,
5594 * call into the bio/RPC code to fill it. The RPC code will probably
5595 * fill several buffers (dropping the first, requiring a re-get).
5596 *
5597 * When done copying entries to the buffer, set the offset to the current
5598 * entry's cookie and enter that cookie in the cookie cache.
5599 *
5600 * Note: because the getdirentries(2) API returns a long-typed offset,
5601 * the incoming offset is a potentially truncated cookie (ptc).
5602 * The cookie matching code is aware of this and will fall back to
5603 * matching only 32 bits of the cookie.
5604 */
5605 int
nfs_vnop_readdir(struct vnop_readdir_args * ap)5606 nfs_vnop_readdir(
5607 struct vnop_readdir_args /* {
5608 * struct vnodeop_desc *a_desc;
5609 * vnode_t a_vp;
5610 * struct uio *a_uio;
5611 * int a_flags;
5612 * int *a_eofflag;
5613 * int *a_numdirent;
5614 * vfs_context_t a_context;
5615 * } */*ap)
5616 {
5617 vfs_context_t ctx = ap->a_context;
5618 vnode_t dvp = ap->a_vp;
5619 nfsnode_t dnp = VTONFS(dvp);
5620 struct nfsmount *nmp;
5621 uio_t uio = ap->a_uio;
5622 int error, nfsvers, extended, numdirent, bigcookies, ptc, done;
5623 long attrcachetimeout;
5624 uint16_t i, iptc, rlen, nlen;
5625 uint64_t cookie, nextcookie, lbn = 0;
5626 struct nfsbuf *bp = NULL;
5627 struct nfs_dir_buf_header *ndbhp;
5628 struct direntry *dp, *dpptc;
5629 struct dirent dent;
5630 char *cp = NULL;
5631 struct timeval now;
5632 thread_t thd;
5633
5634 nmp = VTONMP(dvp);
5635 if (nfs_mount_gone(nmp)) {
5636 return ENXIO;
5637 }
5638 nfsvers = nmp->nm_vers;
5639 bigcookies = (nmp->nm_state & NFSSTA_BIGCOOKIES);
5640 extended = (ap->a_flags & VNODE_READDIR_EXTENDED);
5641
5642 if (vnode_vtype(dvp) != VDIR) {
5643 return EPERM;
5644 }
5645
5646 if (ap->a_eofflag) {
5647 *ap->a_eofflag = 0;
5648 }
5649
5650 if (uio_resid(uio) == 0) {
5651 return 0;
5652 }
5653 #if CONFIG_NFS4
5654 if ((nfsvers >= NFS_VER4) && (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER)) {
5655 /* trigger directories should never be read, return nothing */
5656 return 0;
5657 }
5658 #endif
5659 thd = vfs_context_thread(ctx);
5660 numdirent = done = 0;
5661 nextcookie = uio_offset(uio);
5662 ptc = bigcookies && NFS_DIR_COOKIE_POTENTIALLY_TRUNCATED(nextcookie);
5663
5664 if ((error = nfs_node_lock(dnp))) {
5665 goto out;
5666 }
5667
5668 if (dnp->n_flag & NNEEDINVALIDATE) {
5669 dnp->n_flag &= ~NNEEDINVALIDATE;
5670 nfs_invaldir(dnp);
5671 nfs_node_unlock(dnp);
5672 error = nfs_vinvalbuf1(dvp, 0, ctx, 1);
5673 if (!error) {
5674 error = nfs_node_lock(dnp);
5675 }
5676 if (error) {
5677 goto out;
5678 }
5679 }
5680
5681 if (dnp->n_rdirplusstamp_eof && dnp->n_rdirplusstamp_sof) {
5682 attrcachetimeout = nfs_attrcachetimeout(dnp);
5683 microuptime(&now);
5684 if (attrcachetimeout && (now.tv_sec - dnp->n_rdirplusstamp_sof > attrcachetimeout - 1)) {
5685 dnp->n_rdirplusstamp_eof = dnp->n_rdirplusstamp_sof = 0;
5686 nfs_invaldir(dnp);
5687 nfs_node_unlock(dnp);
5688 error = nfs_vinvalbuf1(dvp, 0, ctx, 1);
5689 if (!error) {
5690 error = nfs_node_lock(dnp);
5691 }
5692 if (error) {
5693 goto out;
5694 }
5695 }
5696 }
5697
5698 /*
5699 * check for need to invalidate when (re)starting at beginning
5700 */
5701 if (!nextcookie) {
5702 if (dnp->n_flag & NMODIFIED) {
5703 nfs_invaldir(dnp);
5704 nfs_node_unlock(dnp);
5705 if ((error = nfs_vinvalbuf1(dvp, 0, ctx, 1))) {
5706 goto out;
5707 }
5708 } else {
5709 nfs_node_unlock(dnp);
5710 }
5711 /* nfs_getattr() will check changed and purge caches */
5712 if ((error = nfs_getattr(dnp, NULL, ctx, NGA_CACHED))) {
5713 goto out;
5714 }
5715 } else {
5716 nfs_node_unlock(dnp);
5717 }
5718
5719 error = nfs_dir_cookie_to_lbn(dnp, nextcookie, &ptc, &lbn);
5720 if (error) {
5721 if (error < 0) { /* just hit EOF cookie */
5722 done = 1;
5723 error = 0;
5724 }
5725 if (ap->a_eofflag) {
5726 *ap->a_eofflag = 1;
5727 }
5728 }
5729
5730 while (!error && !done) {
5731 OSAddAtomic64(1, &nfsclntstats.biocache_readdirs);
5732 cookie = nextcookie;
5733 getbuffer:
5734 error = nfs_buf_get(dnp, lbn, NFS_DIRBLKSIZ, thd, NBLK_READ, &bp);
5735 if (error) {
5736 goto out;
5737 }
5738 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
5739 if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
5740 if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
5741 ndbhp->ndbh_flags = 0;
5742 ndbhp->ndbh_count = 0;
5743 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
5744 ndbhp->ndbh_ncgen = dnp->n_ncgen;
5745 }
5746 error = nfs_buf_readdir(bp, ctx);
5747 if (error == NFSERR_DIRBUFDROPPED) {
5748 goto getbuffer;
5749 }
5750 if (error) {
5751 nfs_buf_release(bp, 1);
5752 }
5753 if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
5754 if (!nfs_node_lock(dnp)) {
5755 nfs_invaldir(dnp);
5756 nfs_node_unlock(dnp);
5757 }
5758 nfs_vinvalbuf1(dvp, 0, ctx, 1);
5759 if (error == NFSERR_BAD_COOKIE) {
5760 error = ENOENT;
5761 }
5762 }
5763 if (error) {
5764 goto out;
5765 }
5766 }
5767
5768 /* find next entry to return */
5769 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
5770 i = 0;
5771 if ((lbn != cookie) && !(ptc && NFS_DIR_COOKIE_SAME32(lbn, cookie))) {
5772 dpptc = NULL;
5773 iptc = 0;
5774 for (; (i < ndbhp->ndbh_count) && (cookie != dp->d_seekoff); i++) {
5775 if (ptc && !dpptc && NFS_DIR_COOKIE_SAME32(cookie, dp->d_seekoff)) {
5776 iptc = i;
5777 dpptc = dp;
5778 }
5779 nextcookie = dp->d_seekoff;
5780 dp = NFS_DIRENTRY_NEXT(dp);
5781 }
5782 if ((i == ndbhp->ndbh_count) && dpptc) {
5783 i = iptc;
5784 dp = dpptc;
5785 }
5786 if (i < ndbhp->ndbh_count) {
5787 nextcookie = dp->d_seekoff;
5788 dp = NFS_DIRENTRY_NEXT(dp);
5789 i++;
5790 }
5791 }
5792 ptc = 0; /* only have to deal with ptc on first cookie */
5793
5794 /* return as many entries as we can */
5795 for (; i < ndbhp->ndbh_count; i++) {
5796 if (extended) {
5797 rlen = dp->d_reclen;
5798 cp = (char*)dp;
5799 } else {
5800 if (!cp) {
5801 cp = (char*)&dent;
5802 bzero(cp, sizeof(dent));
5803 }
5804 if (dp->d_namlen > (sizeof(dent.d_name) - 1)) {
5805 nlen = sizeof(dent.d_name) - 1;
5806 } else {
5807 nlen = dp->d_namlen;
5808 }
5809 rlen = NFS_DIRENT_LEN(nlen);
5810 dent.d_reclen = rlen;
5811 dent.d_ino = (ino_t)dp->d_ino;
5812 dent.d_type = dp->d_type;
5813 dent.d_namlen = (uint8_t)nlen;
5814 strlcpy(dent.d_name, dp->d_name, nlen + 1);
5815 }
5816 /* check that the record fits */
5817 if (rlen > uio_resid(uio)) {
5818 done = 1;
5819 break;
5820 }
5821 if ((error = uiomove(cp, rlen, uio))) {
5822 break;
5823 }
5824 numdirent++;
5825 nextcookie = dp->d_seekoff;
5826 dp = NFS_DIRENTRY_NEXT(dp);
5827 }
5828
5829 if (i == ndbhp->ndbh_count) {
5830 /* hit end of buffer, move to next buffer */
5831 lbn = nextcookie;
5832 /* if we also hit EOF, we're done */
5833 if (ISSET(ndbhp->ndbh_flags, NDB_EOF)) {
5834 done = 1;
5835 if (ap->a_eofflag) {
5836 *ap->a_eofflag = 1;
5837 }
5838 }
5839 }
5840 if (!error) {
5841 uio_setoffset(uio, nextcookie);
5842 }
5843 if (!error && !done && (nextcookie == cookie)) {
5844 printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
5845 error = EIO;
5846 }
5847 nfs_buf_release(bp, 1);
5848 }
5849
5850 if (!error) {
5851 nfs_dir_cookie_cache(dnp, nextcookie, lbn);
5852 }
5853
5854 if (ap->a_numdirent) {
5855 *ap->a_numdirent = numdirent;
5856 }
5857 out:
5858 return NFS_MAPERR(error);
5859 }
5860
5861
5862 /*
5863 * Invalidate cached directory information, except for the actual directory
5864 * blocks (which are invalidated separately).
5865 */
5866 static void
nfs_invaldir_cookies(nfsnode_t dnp)5867 nfs_invaldir_cookies(nfsnode_t dnp)
5868 {
5869 if (vnode_vtype(NFSTOV(dnp)) != VDIR) {
5870 return;
5871 }
5872 dnp->n_eofcookie = 0;
5873 dnp->n_cookieverf = 0;
5874 if (!dnp->n_cookiecache) {
5875 return;
5876 }
5877 dnp->n_cookiecache->free = 0;
5878 dnp->n_cookiecache->mru = -1;
5879 memset(dnp->n_cookiecache->next, -1, NFSNUMCOOKIES);
5880 }
5881
5882 void
nfs_invaldir(nfsnode_t dnp)5883 nfs_invaldir(nfsnode_t dnp)
5884 {
5885
5886 nfs_invaldir_cookies(dnp);
5887 }
5888
5889 /*
5890 * calculate how much space is available for additional directory entries.
5891 */
5892 uint64_t
nfs_dir_buf_freespace(struct nfsbuf * bp,int rdirplus)5893 nfs_dir_buf_freespace(struct nfsbuf *bp, int rdirplus)
5894 {
5895 struct nfs_dir_buf_header *ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
5896 uint64_t space;
5897
5898 if (!ndbhp) {
5899 return 0;
5900 }
5901 space = bp->nb_bufsize - ndbhp->ndbh_entry_end;
5902 if (rdirplus) {
5903 space -= ndbhp->ndbh_count * sizeof(struct nfs_vattr);
5904 }
5905 return space;
5906 }
5907
5908 /*
5909 * add/update a cookie->lbn entry in the directory cookie cache
5910 */
5911 void
nfs_dir_cookie_cache(nfsnode_t dnp,uint64_t cookie,uint64_t lbn)5912 nfs_dir_cookie_cache(nfsnode_t dnp, uint64_t cookie, uint64_t lbn)
5913 {
5914 struct nfsdmap *ndcc;
5915 int8_t i, prev;
5916
5917 if (!cookie) {
5918 return;
5919 }
5920
5921 if (nfs_node_lock(dnp)) {
5922 return;
5923 }
5924
5925 if (cookie == dnp->n_eofcookie) { /* EOF cookie */
5926 nfs_node_unlock(dnp);
5927 return;
5928 }
5929
5930 ndcc = dnp->n_cookiecache;
5931 if (!ndcc) {
5932 /* allocate the cookie cache structure */
5933 ndcc = dnp->n_cookiecache = zalloc(ZV_NFSDIROFF);
5934 ndcc->free = 0;
5935 ndcc->mru = -1;
5936 memset(ndcc->next, -1, NFSNUMCOOKIES);
5937 }
5938
5939 /*
5940 * Search the list for this cookie.
5941 * Keep track of previous and last entries.
5942 */
5943 prev = -1;
5944 i = ndcc->mru;
5945 while ((i != -1) && (cookie != ndcc->cookies[i].key)) {
5946 if (ndcc->next[i] == -1) { /* stop on last entry so we can reuse */
5947 break;
5948 }
5949 prev = i;
5950 i = ndcc->next[i];
5951 }
5952 if ((i != -1) && (cookie == ndcc->cookies[i].key)) {
5953 /* found it, remove from list */
5954 if (prev != -1) {
5955 ndcc->next[prev] = ndcc->next[i];
5956 } else {
5957 ndcc->mru = ndcc->next[i];
5958 }
5959 } else {
5960 /* not found, use next free entry or reuse last entry */
5961 if (ndcc->free != NFSNUMCOOKIES) {
5962 i = ndcc->free++;
5963 } else {
5964 ndcc->next[prev] = -1;
5965 }
5966 ndcc->cookies[i].key = cookie;
5967 ndcc->cookies[i].lbn = lbn;
5968 }
5969 /* insert cookie at head of MRU list */
5970 ndcc->next[i] = ndcc->mru;
5971 ndcc->mru = i;
5972 nfs_node_unlock(dnp);
5973 }
5974
5975 /*
5976 * Try to map the given directory cookie to a directory buffer (return lbn).
5977 * If we have a possibly truncated cookie (ptc), check for 32-bit matches too.
5978 */
5979 int
nfs_dir_cookie_to_lbn(nfsnode_t dnp,uint64_t cookie,int * ptc,uint64_t * lbnp)5980 nfs_dir_cookie_to_lbn(nfsnode_t dnp, uint64_t cookie, int *ptc, uint64_t *lbnp)
5981 {
5982 struct nfsdmap *ndcc = dnp->n_cookiecache;
5983 int8_t eofptc, found;
5984 int i, iptc;
5985 struct nfsmount *nmp;
5986 struct nfsbuf *bp, *lastbp;
5987 struct nfsbuflists blist;
5988 struct direntry *dp, *dpptc;
5989 struct nfs_dir_buf_header *ndbhp;
5990
5991 if (!cookie) { /* initial cookie */
5992 *lbnp = 0;
5993 *ptc = 0;
5994 return 0;
5995 }
5996
5997 if (nfs_node_lock(dnp)) {
5998 return ENOENT;
5999 }
6000
6001 if (cookie == dnp->n_eofcookie) { /* EOF cookie */
6002 nfs_node_unlock(dnp);
6003 OSAddAtomic64(1, &nfsclntstats.direofcache_hits);
6004 *ptc = 0;
6005 return -1;
6006 }
6007 /* note if cookie is a 32-bit match with the EOF cookie */
6008 eofptc = *ptc ? NFS_DIR_COOKIE_SAME32(cookie, dnp->n_eofcookie) : 0;
6009 iptc = -1;
6010
6011 /* search the list for the cookie */
6012 for (i = ndcc ? ndcc->mru : -1; i >= 0; i = ndcc->next[i]) {
6013 if (ndcc->cookies[i].key == cookie) {
6014 /* found a match for this cookie */
6015 *lbnp = ndcc->cookies[i].lbn;
6016 nfs_node_unlock(dnp);
6017 OSAddAtomic64(1, &nfsclntstats.direofcache_hits);
6018 *ptc = 0;
6019 return 0;
6020 }
6021 /* check for 32-bit match */
6022 if (*ptc && (iptc == -1) && NFS_DIR_COOKIE_SAME32(ndcc->cookies[i].key, cookie)) {
6023 iptc = i;
6024 }
6025 }
6026 /* exact match not found */
6027 if (eofptc) {
6028 /* but 32-bit match hit the EOF cookie */
6029 nfs_node_unlock(dnp);
6030 OSAddAtomic64(1, &nfsclntstats.direofcache_hits);
6031 return -1;
6032 }
6033 if (iptc >= 0) {
6034 /* but 32-bit match got a hit */
6035 *lbnp = ndcc->cookies[iptc].lbn;
6036 nfs_node_unlock(dnp);
6037 OSAddAtomic64(1, &nfsclntstats.direofcache_hits);
6038 return 0;
6039 }
6040 nfs_node_unlock(dnp);
6041
6042 /*
6043 * No match found in the cookie cache... hmm...
6044 * Let's search the directory's buffers for the cookie.
6045 */
6046 nmp = NFSTONMP(dnp);
6047 if (nfs_mount_gone(nmp)) {
6048 return ENXIO;
6049 }
6050 dpptc = NULL;
6051 found = 0;
6052
6053 lck_mtx_lock(&nfs_buf_mutex);
6054 /*
6055 * Scan the list of buffers, keeping them in order.
6056 * Note that itercomplete inserts each of the remaining buffers
6057 * into the head of list (thus reversing the elements). So, we
6058 * make sure to iterate through all buffers, inserting them after
6059 * each other, to keep them in order.
6060 * Also note: the LIST_INSERT_AFTER(lastbp) is only safe because
6061 * we don't drop nfs_buf_mutex.
6062 */
6063 if (!nfs_buf_iterprepare(dnp, &blist, NBI_CLEAN)) {
6064 lastbp = NULL;
6065 while ((bp = LIST_FIRST(&blist))) {
6066 LIST_REMOVE(bp, nb_vnbufs);
6067 if (!lastbp) {
6068 LIST_INSERT_HEAD(&dnp->n_cleanblkhd, bp, nb_vnbufs);
6069 } else {
6070 LIST_INSERT_AFTER(lastbp, bp, nb_vnbufs);
6071 }
6072 lastbp = bp;
6073 if (found) {
6074 continue;
6075 }
6076 nfs_buf_refget(bp);
6077 if (nfs_buf_acquire(bp, NBAC_NOWAIT, 0, 0)) {
6078 /* just skip this buffer */
6079 nfs_buf_refrele(bp);
6080 continue;
6081 }
6082 nfs_buf_refrele(bp);
6083
6084 /* scan the buffer for the cookie */
6085 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
6086 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
6087 dpptc = NULL;
6088 for (i = 0; (i < ndbhp->ndbh_count) && (cookie != dp->d_seekoff); i++) {
6089 if (*ptc && !dpptc && NFS_DIR_COOKIE_SAME32(cookie, dp->d_seekoff)) {
6090 dpptc = dp;
6091 iptc = i;
6092 }
6093 dp = NFS_DIRENTRY_NEXT(dp);
6094 }
6095 if ((i == ndbhp->ndbh_count) && dpptc) {
6096 /* found only a PTC match */
6097 dp = dpptc;
6098 i = iptc;
6099 } else if (i < ndbhp->ndbh_count) {
6100 *ptc = 0;
6101 }
6102 if (i < (ndbhp->ndbh_count - 1)) {
6103 /* next entry is *in* this buffer: return this block */
6104 *lbnp = bp->nb_lblkno;
6105 found = 1;
6106 } else if (i == (ndbhp->ndbh_count - 1)) {
6107 /* next entry refers to *next* buffer: return next block */
6108 *lbnp = dp->d_seekoff;
6109 found = 1;
6110 }
6111 nfs_buf_drop(bp);
6112 }
6113 nfs_buf_itercomplete(dnp, &blist, NBI_CLEAN);
6114 }
6115 lck_mtx_unlock(&nfs_buf_mutex);
6116 if (found) {
6117 OSAddAtomic64(1, &nfsclntstats.direofcache_hits);
6118 return 0;
6119 }
6120
6121 /* still not found... oh well, just start a new block */
6122 *lbnp = cookie;
6123 OSAddAtomic64(1, &nfsclntstats.direofcache_misses);
6124 return 0;
6125 }
6126
6127 /*
6128 * scan a directory buffer for the given name
6129 * Returns: ESRCH if not found, ENOENT if found invalid, 0 if found
6130 * Note: should only be called with RDIRPLUS directory buffers
6131 */
6132
6133 #define NDBS_PURGE 1
6134 #define NDBS_UPDATE 2
6135
6136 int
nfs_dir_buf_search(struct nfsbuf * bp,struct componentname * cnp,fhandle_t * fhp,struct nfs_vattr * nvap,uint64_t * xidp,time_t * attrstampp,daddr64_t * nextlbnp,int flags)6137 nfs_dir_buf_search(
6138 struct nfsbuf *bp,
6139 struct componentname *cnp,
6140 fhandle_t *fhp,
6141 struct nfs_vattr *nvap,
6142 uint64_t *xidp,
6143 time_t *attrstampp,
6144 daddr64_t *nextlbnp,
6145 int flags)
6146 {
6147 struct direntry *dp;
6148 struct nfs_dir_buf_header *ndbhp;
6149 struct nfs_vattr *nvattrp;
6150 daddr64_t nextlbn = 0;
6151 int i, error = ESRCH;
6152 uint32_t fhlen;
6153
6154 /* scan the buffer for the name */
6155 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
6156 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
6157 for (i = 0; i < ndbhp->ndbh_count; i++) {
6158 nextlbn = dp->d_seekoff;
6159 if ((cnp->cn_namelen == dp->d_namlen) && !strcmp(cnp->cn_nameptr, dp->d_name)) {
6160 fhlen = (uint8_t)dp->d_name[dp->d_namlen + 1];
6161 nvattrp = NFS_DIR_BUF_NVATTR(bp, i);
6162 if ((ndbhp->ndbh_ncgen != bp->nb_np->n_ncgen) || (fhlen == 0) ||
6163 (nvattrp->nva_type == VNON) || (nvattrp->nva_fileid == 0)) {
6164 /* entry is not valid */
6165 error = ENOENT;
6166 break;
6167 }
6168 if (flags == NDBS_PURGE) {
6169 dp->d_fileno = 0;
6170 bzero(nvattrp, sizeof(*nvattrp));
6171 error = ENOENT;
6172 break;
6173 }
6174 if (flags == NDBS_UPDATE) {
6175 /* update direntry's attrs if fh matches */
6176 if ((fhp->fh_len == fhlen) && !bcmp(&dp->d_name[dp->d_namlen + 2], fhp->fh_data, fhlen)) {
6177 bcopy(nvap, nvattrp, sizeof(*nvap));
6178 dp->d_fileno = nvattrp->nva_fileid;
6179 nvattrp->nva_fileid = *xidp;
6180 nvap->nva_flags |= NFS_FFLAG_FILEID_CONTAINS_XID;
6181 *(time_t*)(&dp->d_name[dp->d_namlen + 2 + fhp->fh_len]) = *attrstampp;
6182 }
6183 error = 0;
6184 break;
6185 }
6186 /* copy out fh, attrs, attrstamp, and xid */
6187 fhp->fh_len = fhlen;
6188 bcopy(&dp->d_name[dp->d_namlen + 2], fhp->fh_data, MAX(fhp->fh_len, (int)sizeof(fhp->fh_data)));
6189 *attrstampp = *(time_t*)(&dp->d_name[dp->d_namlen + 2 + fhp->fh_len]);
6190 bcopy(nvattrp, nvap, sizeof(*nvap));
6191 *xidp = nvap->nva_fileid;
6192 nvap->nva_fileid = dp->d_fileno;
6193 nvap->nva_flags &= ~NFS_FFLAG_FILEID_CONTAINS_XID;
6194 error = 0;
6195 break;
6196 }
6197 dp = NFS_DIRENTRY_NEXT(dp);
6198 }
6199 if (nextlbnp) {
6200 *nextlbnp = nextlbn;
6201 }
6202 return error;
6203 }
6204
6205 /*
6206 * Look up a name in a directory's buffers.
6207 * Note: should only be called with RDIRPLUS directory buffers
6208 */
6209 int
nfs_dir_buf_cache_lookup(nfsnode_t dnp,nfsnode_t * npp,struct componentname * cnp,vfs_context_t ctx,int purge,int * skipdu)6210 nfs_dir_buf_cache_lookup(nfsnode_t dnp, nfsnode_t *npp, struct componentname *cnp, vfs_context_t ctx, int purge, int *skipdu)
6211 {
6212 nfsnode_t newnp;
6213 struct nfsmount *nmp;
6214 int error = 0, i, found = 0, count = 0;
6215 u_int64_t xid;
6216 struct nfs_vattr *nvattr;
6217 fhandle_t *fh;
6218 time_t attrstamp = 0;
6219 thread_t thd = vfs_context_thread(ctx);
6220 struct nfsbuf *bp, *lastbp, *foundbp;
6221 struct nfsbuflists blist;
6222 daddr64_t lbn, nextlbn;
6223 int dotunder = (cnp->cn_namelen > 2) && (cnp->cn_nameptr[0] == '.') && (cnp->cn_nameptr[1] == '_');
6224 int isdot = (cnp->cn_namelen == 1) && (cnp->cn_nameptr[0] == '.');
6225 int isdotdot = (cnp->cn_namelen == 2) && (cnp->cn_nameptr[0] == '.') && (cnp->cn_nameptr[1] == '.');
6226 int eof = 0, sof = 0, skipped = 0;
6227
6228 nmp = NFSTONMP(dnp);
6229 if (nfs_mount_gone(nmp)) {
6230 return ENXIO;
6231 }
6232 if (!purge) {
6233 *npp = NULL;
6234 }
6235
6236 if (isdot || isdotdot) {
6237 return 0;
6238 }
6239
6240 fh = zalloc(nfs_fhandle_zone);
6241 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
6242
6243 /* first check most recent buffer (and next one too) */
6244 lbn = dnp->n_lastdbl;
6245 for (i = 0; i < 2; i++) {
6246 if ((error = nfs_buf_get(dnp, lbn, NFS_DIRBLKSIZ, thd, NBLK_READ | NBLK_ONLYVALID, &bp))) {
6247 goto out;
6248 }
6249 if (!bp) {
6250 skipped = 1;
6251 break;
6252 }
6253 count++;
6254 nfs_dir_buf_cache_lookup_boundaries(bp, &sof, &eof);
6255 error = nfs_dir_buf_search(bp, cnp, fh, nvattr, &xid, &attrstamp, &nextlbn, purge ? NDBS_PURGE : 0);
6256 nfs_buf_release(bp, 0);
6257 if (error == ESRCH) {
6258 error = 0;
6259 } else {
6260 found = 1;
6261 break;
6262 }
6263 lbn = nextlbn;
6264 }
6265
6266 lck_mtx_lock(&nfs_buf_mutex);
6267 if (found) {
6268 dnp->n_lastdbl = lbn;
6269 goto done;
6270 }
6271
6272 /* If we detect that we fetched full directory listing we should avoid sending lookups for ._ files */
6273 if (dotunder && !found && !error && eof && sof && !skipped && skipdu) {
6274 *skipdu = 1;
6275 }
6276
6277 /*
6278 * Scan the list of buffers, keeping them in order.
6279 * Note that itercomplete inserts each of the remaining buffers
6280 * into the head of list (thus reversing the elements). So, we
6281 * make sure to iterate through all buffers, inserting them after
6282 * each other, to keep them in order.
6283 * Also note: the LIST_INSERT_AFTER(lastbp) is only safe because
6284 * we don't drop nfs_buf_mutex.
6285 */
6286 eof = sof = skipped = 0;
6287 if (!nfs_buf_iterprepare(dnp, &blist, NBI_CLEAN)) {
6288 lastbp = foundbp = NULL;
6289 while ((bp = LIST_FIRST(&blist))) {
6290 LIST_REMOVE(bp, nb_vnbufs);
6291 if (!lastbp) {
6292 LIST_INSERT_HEAD(&dnp->n_cleanblkhd, bp, nb_vnbufs);
6293 } else {
6294 LIST_INSERT_AFTER(lastbp, bp, nb_vnbufs);
6295 }
6296 lastbp = bp;
6297 if (error || found) {
6298 skipped = 1;
6299 continue;
6300 }
6301 if (!purge && dotunder && (count > 100)) { /* don't waste too much time looking for ._ files */
6302 skipped = 1;
6303 continue;
6304 }
6305 nfs_buf_refget(bp);
6306 lbn = bp->nb_lblkno;
6307 if (nfs_buf_acquire(bp, NBAC_NOWAIT, 0, 0)) {
6308 /* just skip this buffer */
6309 nfs_buf_refrele(bp);
6310 skipped = 1;
6311 continue;
6312 }
6313 nfs_buf_refrele(bp);
6314 count++;
6315 nfs_dir_buf_cache_lookup_boundaries(bp, &sof, &eof);
6316 error = nfs_dir_buf_search(bp, cnp, fh, nvattr, &xid, &attrstamp, NULL, purge ? NDBS_PURGE : 0);
6317 if (error == ESRCH) {
6318 error = 0;
6319 } else {
6320 found = 1;
6321 foundbp = bp;
6322 }
6323 nfs_buf_drop(bp);
6324 }
6325 if (found) {
6326 LIST_REMOVE(foundbp, nb_vnbufs);
6327 LIST_INSERT_HEAD(&dnp->n_cleanblkhd, foundbp, nb_vnbufs);
6328 dnp->n_lastdbl = foundbp->nb_lblkno;
6329 }
6330 nfs_buf_itercomplete(dnp, &blist, NBI_CLEAN);
6331 }
6332
6333 /* If we detect that we fetched full directory listing we should avoid sending lookups for ._ files */
6334 if (dotunder && !found && !error && eof && sof && !skipped && skipdu) {
6335 *skipdu = 1;
6336 }
6337
6338 done:
6339 lck_mtx_unlock(&nfs_buf_mutex);
6340
6341 if (!error && found && !purge) {
6342 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data,
6343 fh->fh_len, nvattr, &xid, dnp->n_auth, NG_MAKEENTRY,
6344 &newnp);
6345 if (error) {
6346 goto out;
6347 }
6348 newnp->n_attrstamp = attrstamp;
6349 *npp = newnp;
6350 nfs_node_unlock(newnp);
6351 /* check if the dir buffer's attrs are out of date */
6352 if (!nfs_getattr(newnp, nvattr, ctx, NGA_CACHED) &&
6353 (newnp->n_attrstamp != attrstamp)) {
6354 /* they are, so update them */
6355 error = nfs_buf_get(dnp, lbn, NFS_DIRBLKSIZ, thd, NBLK_READ | NBLK_ONLYVALID, &bp);
6356 if (!error && bp) {
6357 attrstamp = newnp->n_attrstamp;
6358 xid = newnp->n_xid;
6359 nfs_dir_buf_search(bp, cnp, fh, nvattr, &xid, &attrstamp, NULL, NDBS_UPDATE);
6360 nfs_buf_release(bp, 0);
6361 }
6362 error = 0;
6363 }
6364 }
6365
6366 out:
6367 NFS_ZFREE(nfs_fhandle_zone, fh);
6368 zfree(KT_NFS_VATTR, nvattr);
6369 return error;
6370 }
6371
6372 /*
6373 * Purge name cache entries for the given node.
6374 * For RDIRPLUS, also invalidate the entry in the directory's buffers.
6375 */
6376 void
nfs_name_cache_purge(nfsnode_t dnp,nfsnode_t np,struct componentname * cnp,vfs_context_t ctx)6377 nfs_name_cache_purge(nfsnode_t dnp, nfsnode_t np, struct componentname *cnp, vfs_context_t ctx)
6378 {
6379 struct nfsmount *nmp = NFSTONMP(dnp);
6380
6381 cache_purge(NFSTOV(np));
6382 if (nmp && (nmp->nm_vers > NFS_VER2) && NMFLAG(nmp, RDIRPLUS)) {
6383 nfs_dir_buf_cache_lookup(dnp, NULL, cnp, ctx, 1, NULL);
6384 }
6385 }
6386
6387 /*
6388 * NFS V3 readdir (plus) RPC.
6389 */
6390 int
nfs3_readdir_rpc(nfsnode_t dnp,struct nfsbuf * bp,vfs_context_t ctx)6391 nfs3_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
6392 {
6393 struct nfsmount *nmp;
6394 int error = 0, lockerror, nfsvers, rdirplus, bigcookies;
6395 int i, status = 0, attrflag, fhflag, more_entries = 1, eof, bp_dropped = 0;
6396 uint32_t nmreaddirsize, nmrsize;
6397 uint32_t namlen, skiplen, fhlen, xlen, attrlen;
6398 uint64_t cookie, lastcookie, xid, savedxid, fileno, space_free, space_needed;
6399 struct nfsm_chain nmreq, nmrep, nmrepsave;
6400 fhandle_t *fh;
6401 struct nfs_vattr *nvattrp;
6402 struct nfs_dir_buf_header *ndbhp;
6403 struct direntry *dp;
6404 char *padstart;
6405 struct timeval now;
6406 uint16_t reclen;
6407 size_t padlen;
6408
6409 nmp = NFSTONMP(dnp);
6410 if (nfs_mount_gone(nmp)) {
6411 return ENXIO;
6412 }
6413 nfsvers = nmp->nm_vers;
6414 nmreaddirsize = nmp->nm_readdirsize;
6415 nmrsize = nmp->nm_rsize;
6416 bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
6417 fh = zalloc(nfs_fhandle_zone);
6418 resend:
6419 rdirplus = ((nfsvers > NFS_VER2) && NMFLAG(nmp, RDIRPLUS)) ? 1 : 0;
6420
6421 if ((lockerror = nfs_node_lock(dnp))) {
6422 NFS_ZFREE(nfs_fhandle_zone, fh);
6423 return lockerror;
6424 }
6425
6426 /* determine cookie to use, and move dp to the right offset */
6427 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
6428 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
6429 if (ndbhp->ndbh_count) {
6430 for (i = 0; i < ndbhp->ndbh_count - 1; i++) {
6431 dp = NFS_DIRENTRY_NEXT(dp);
6432 }
6433 cookie = dp->d_seekoff;
6434 dp = NFS_DIRENTRY_NEXT(dp);
6435 } else {
6436 cookie = bp->nb_lblkno;
6437 /* increment with every buffer read */
6438 OSAddAtomic64(1, &nfsclntstats.readdir_bios);
6439 }
6440 lastcookie = cookie;
6441
6442 /*
6443 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
6444 * the buffer is full (or we hit EOF). Then put the remainder of the
6445 * results in the next buffer(s).
6446 */
6447 nfsm_chain_null(&nmreq);
6448 nfsm_chain_null(&nmrep);
6449 while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {
6450 nfsm_chain_build_alloc_init(error, &nmreq,
6451 NFSX_FH(nfsvers) + NFSX_READDIR(nfsvers) + NFSX_UNSIGNED);
6452 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
6453 if (nfsvers == NFS_VER3) {
6454 /* opaque values don't need swapping, but as long */
6455 /* as we are consistent about it, it should be ok */
6456 nfsm_chain_add_64(error, &nmreq, cookie);
6457 nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
6458 } else {
6459 nfsm_chain_add_32(error, &nmreq, cookie);
6460 }
6461 nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
6462 if (rdirplus) {
6463 nfsm_chain_add_32(error, &nmreq, nmrsize);
6464 }
6465 nfsm_chain_build_done(error, &nmreq);
6466 nfs_node_unlock(dnp);
6467 lockerror = ENOENT;
6468 nfsmout_if(error);
6469
6470 error = nfs_request(dnp, NULL, &nmreq,
6471 rdirplus ? NFSPROC_READDIRPLUS : NFSPROC_READDIR,
6472 ctx, NULL, &nmrep, &xid, &status);
6473
6474 if ((lockerror = nfs_node_lock(dnp))) {
6475 error = lockerror;
6476 }
6477
6478 savedxid = xid;
6479 if (nfsvers == NFS_VER3) {
6480 nfsm_chain_postop_attr_update(error, &nmrep, dnp, &xid);
6481 }
6482 if (!error) {
6483 error = status;
6484 }
6485 if (nfsvers == NFS_VER3) {
6486 nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
6487 }
6488 nfsm_chain_get_32(error, &nmrep, more_entries);
6489
6490 if (!lockerror) {
6491 nfs_node_unlock(dnp);
6492 lockerror = ENOENT;
6493 }
6494 if (error == NFSERR_NOTSUPP) {
6495 /* oops... it doesn't look like readdirplus is supported */
6496 lck_mtx_lock(&nmp->nm_lock);
6497 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_RDIRPLUS);
6498 lck_mtx_unlock(&nmp->nm_lock);
6499 nfsm_chain_cleanup(&nmreq);
6500 nfsm_chain_cleanup(&nmrep);
6501 goto resend;
6502 }
6503 nfsmout_if(error);
6504
6505 if (rdirplus) {
6506 microuptime(&now);
6507 if (lastcookie == 0) {
6508 dnp->n_rdirplusstamp_sof = now.tv_sec;
6509 dnp->n_rdirplusstamp_eof = 0;
6510 }
6511 }
6512
6513 /* loop through the entries packing them into the buffer */
6514 while (more_entries) {
6515 if (nfsvers == NFS_VER3) {
6516 nfsm_chain_get_64(error, &nmrep, fileno);
6517 } else {
6518 nfsm_chain_get_32(error, &nmrep, fileno);
6519 }
6520 nfsm_chain_get_32(error, &nmrep, namlen);
6521 nfsmout_if(error);
6522 /* just truncate names that don't fit in direntry.d_name */
6523 if (namlen <= 0) {
6524 error = EBADRPC;
6525 goto nfsmout;
6526 }
6527 if (namlen > (sizeof(dp->d_name) - 1)) {
6528 skiplen = namlen - sizeof(dp->d_name) + 1;
6529 namlen = sizeof(dp->d_name) - 1;
6530 } else {
6531 skiplen = 0;
6532 }
6533 /* guess that fh size will be same as parent */
6534 fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
6535 xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
6536 attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
6537 reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
6538 space_needed = reclen + attrlen;
6539 space_free = nfs_dir_buf_freespace(bp, rdirplus);
6540 if (space_needed > space_free) {
6541 /*
6542 * We still have entries to pack, but we've
6543 * run out of room in the current buffer.
6544 * So we need to move to the next buffer.
6545 * The block# for the next buffer is the
6546 * last cookie in the current buffer.
6547 */
6548 nextbuffer:
6549 ndbhp->ndbh_flags |= NDB_FULL;
6550 nfs_buf_release(bp, 0);
6551 bp_dropped = 1;
6552 bp = NULL;
6553 error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
6554 nfsmout_if(error);
6555 /* initialize buffer */
6556 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
6557 ndbhp->ndbh_flags = 0;
6558 ndbhp->ndbh_count = 0;
6559 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
6560 ndbhp->ndbh_ncgen = dnp->n_ncgen;
6561 space_free = nfs_dir_buf_freespace(bp, rdirplus);
6562 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
6563 /* increment with every buffer read */
6564 OSAddAtomic64(1, &nfsclntstats.readdir_bios);
6565 }
6566 nmrepsave = nmrep;
6567 dp->d_fileno = fileno;
6568 dp->d_namlen = (uint16_t)namlen;
6569 dp->d_reclen = reclen;
6570 dp->d_type = DT_UNKNOWN;
6571 nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
6572 nfsmout_if(error);
6573 dp->d_name[namlen] = '\0';
6574 if (skiplen) {
6575 nfsm_chain_adv(error, &nmrep,
6576 nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
6577 }
6578 if (nfsvers == NFS_VER3) {
6579 nfsm_chain_get_64(error, &nmrep, cookie);
6580 } else {
6581 nfsm_chain_get_32(error, &nmrep, cookie);
6582 }
6583 nfsmout_if(error);
6584 dp->d_seekoff = cookie;
6585 if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
6586 /* we've got a big cookie, make sure flag is set */
6587 lck_mtx_lock(&nmp->nm_lock);
6588 nmp->nm_state |= NFSSTA_BIGCOOKIES;
6589 lck_mtx_unlock(&nmp->nm_lock);
6590 bigcookies = 1;
6591 }
6592 if (rdirplus) {
6593 nvattrp = NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count);
6594 /* check for attributes */
6595 nfsm_chain_get_32(error, &nmrep, attrflag);
6596 nfsmout_if(error);
6597 if (attrflag) {
6598 /* grab attributes */
6599 error = nfs_parsefattr(nmp, &nmrep, NFS_VER3, nvattrp);
6600 nfsmout_if(error);
6601 dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
6602 /* fileid is already in d_fileno, so stash xid in attrs */
6603 nvattrp->nva_fileid = savedxid;
6604 nvattrp->nva_flags |= NFS_FFLAG_FILEID_CONTAINS_XID;
6605 } else {
6606 /* mark the attributes invalid */
6607 bzero(nvattrp, sizeof(struct nfs_vattr));
6608 }
6609 /* check for file handle */
6610 nfsm_chain_get_32(error, &nmrep, fhflag);
6611 nfsmout_if(error);
6612 if (fhflag) {
6613 nfsm_chain_get_fh(error, &nmrep, NFS_VER3, fh);
6614 nfsmout_if(error);
6615 fhlen = fh->fh_len + 1;
6616 xlen = fhlen + sizeof(time_t);
6617 reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
6618 space_needed = reclen + attrlen;
6619 if (space_needed > space_free) {
6620 /* didn't actually have the room... move on to next buffer */
6621 nmrep = nmrepsave;
6622 goto nextbuffer;
6623 }
6624 /* pack the file handle into the record */
6625 dp->d_name[dp->d_namlen + 1] = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfsm_chain_get_fh() */
6626 bcopy(fh->fh_data, &dp->d_name[dp->d_namlen + 2], fh->fh_len);
6627 } else {
6628 /* mark the file handle invalid */
6629 fh->fh_len = 0;
6630 fhlen = fh->fh_len + 1;
6631 xlen = fhlen + sizeof(time_t);
6632 reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
6633 bzero(&dp->d_name[dp->d_namlen + 1], fhlen);
6634 }
6635 *(time_t*)(&dp->d_name[dp->d_namlen + 1 + fhlen]) = now.tv_sec;
6636 dp->d_reclen = reclen;
6637 nfs_rdirplus_update_node_attrs(dnp, dp, fh, nvattrp, &savedxid);
6638 }
6639 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
6640 ndbhp->ndbh_count++;
6641 lastcookie = cookie;
6642 /* advance to next direntry in buffer */
6643 dp = NFS_DIRENTRY_NEXT(dp);
6644 ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
6645 /* zero out the pad bytes */
6646 padlen = (char*)dp - padstart;
6647 if (padlen > 0) {
6648 bzero(padstart, padlen);
6649 }
6650 /* check for more entries */
6651 nfsm_chain_get_32(error, &nmrep, more_entries);
6652 nfsmout_if(error);
6653 }
6654 /* Finally, get the eof boolean */
6655 nfsm_chain_get_32(error, &nmrep, eof);
6656 nfsmout_if(error);
6657 if (eof) {
6658 ndbhp->ndbh_flags |= (NDB_FULL | NDB_EOF);
6659 nfs_node_lock_force(dnp);
6660 dnp->n_eofcookie = lastcookie;
6661 if (rdirplus) {
6662 dnp->n_rdirplusstamp_eof = now.tv_sec;
6663 }
6664 nfs_node_unlock(dnp);
6665 } else {
6666 more_entries = 1;
6667 }
6668 if (bp_dropped) {
6669 nfs_buf_release(bp, 0);
6670 bp = NULL;
6671 break;
6672 }
6673 if ((lockerror = nfs_node_lock(dnp))) {
6674 error = lockerror;
6675 }
6676 nfsmout_if(error);
6677 nfsm_chain_cleanup(&nmrep);
6678 nfsm_chain_null(&nmreq);
6679 }
6680 nfsmout:
6681 if (bp_dropped && bp) {
6682 nfs_buf_release(bp, 0);
6683 }
6684 if (!lockerror) {
6685 nfs_node_unlock(dnp);
6686 }
6687 nfsm_chain_cleanup(&nmreq);
6688 nfsm_chain_cleanup(&nmrep);
6689 NFS_ZFREE(nfs_fhandle_zone, fh);
6690 return bp_dropped ? NFSERR_DIRBUFDROPPED : error;
6691 }
6692
6693 /*
6694 * Silly rename. To make the NFS filesystem that is stateless look a little
6695 * more like the "ufs" a remove of an active vnode is translated to a rename
6696 * to a funny looking filename that is removed by nfs_vnop_inactive on the
6697 * nfsnode. There is the potential for another process on a different client
6698 * to create the same funny name between when the lookitup() fails and the
6699 * rename() completes, but...
6700 */
6701
6702 /* format of "random" silly names - includes a number and pid */
6703 /* (note: shouldn't exceed size of nfs_sillyrename.nsr_name) */
6704 #define NFS_SILLYNAME_FORMAT ".nfs.%08x.%04x"
6705 /* starting from zero isn't silly enough */
6706 static uint32_t nfs_sillyrename_number = 0x20051025;
6707
6708 int
nfs_sillyrename(nfsnode_t dnp,nfsnode_t np,struct componentname * cnp,vfs_context_t ctx)6709 nfs_sillyrename(
6710 nfsnode_t dnp,
6711 nfsnode_t np,
6712 struct componentname *cnp,
6713 vfs_context_t ctx)
6714 {
6715 struct nfs_sillyrename *nsp;
6716 int error;
6717 pid_t pid;
6718 kauth_cred_t cred;
6719 uint32_t num;
6720 struct nfsmount *nmp;
6721
6722 nmp = NFSTONMP(dnp);
6723 if (nfs_mount_gone(nmp)) {
6724 return ENXIO;
6725 }
6726
6727 nfs_name_cache_purge(dnp, np, cnp, ctx);
6728
6729 nsp = kalloc_type(struct nfs_sillyrename, Z_WAITOK | Z_NOFAIL);
6730 cred = vfs_context_ucred(ctx);
6731 kauth_cred_ref(cred);
6732 nsp->nsr_cred = cred;
6733 nsp->nsr_dnp = dnp;
6734 error = vnode_ref(NFSTOV(dnp));
6735 if (error) {
6736 goto bad_norele;
6737 }
6738
6739 /* Fudge together a funny name */
6740 pid = vfs_context_pid(ctx);
6741 num = OSAddAtomic(1, &nfs_sillyrename_number);
6742 nsp->nsr_namlen = snprintf(nsp->nsr_name, sizeof(nsp->nsr_name),
6743 NFS_SILLYNAME_FORMAT, num, (pid & 0xffff));
6744 if (nsp->nsr_namlen >= (int)sizeof(nsp->nsr_name)) {
6745 nsp->nsr_namlen = sizeof(nsp->nsr_name) - 1;
6746 }
6747
6748 /* Try lookitups until we get one that isn't there */
6749 while (nfs_lookitup(dnp, nsp->nsr_name, nsp->nsr_namlen, ctx, NULL) == 0) {
6750 num = OSAddAtomic(1, &nfs_sillyrename_number);
6751 nsp->nsr_namlen = snprintf(nsp->nsr_name, sizeof(nsp->nsr_name),
6752 NFS_SILLYNAME_FORMAT, num, (pid & 0xffff));
6753 if (nsp->nsr_namlen >= (int)sizeof(nsp->nsr_name)) {
6754 nsp->nsr_namlen = sizeof(nsp->nsr_name) - 1;
6755 }
6756 }
6757
6758 /* now, do the rename */
6759 error = nmp->nm_funcs->nf_rename_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
6760 dnp, nsp->nsr_name, nsp->nsr_namlen, ctx);
6761
6762 /* Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. */
6763 if (error == ENOENT) {
6764 error = 0;
6765 }
6766 if (!error) {
6767 nfs_node_lock_force(dnp);
6768 if (dnp->n_flag & NNEGNCENTRIES) {
6769 dnp->n_flag &= ~NNEGNCENTRIES;
6770 cache_purge_negatives(NFSTOV(dnp));
6771 }
6772 nfs_node_unlock(dnp);
6773 }
6774 FSDBG(267, dnp, np, num, error);
6775 if (error) {
6776 goto bad;
6777 }
6778 error = nfs_lookitup(dnp, nsp->nsr_name, nsp->nsr_namlen, ctx, &np);
6779 nfs_node_lock_force(np);
6780 np->n_sillyrename = nsp;
6781 nfs_node_unlock(np);
6782 return 0;
6783 bad:
6784 vnode_rele(NFSTOV(dnp));
6785 bad_norele:
6786 nsp->nsr_cred = NOCRED;
6787 kauth_cred_unref(&cred);
6788 kfree_type(struct nfs_sillyrename, nsp);
6789 return error;
6790 }
6791
6792 int
nfs3_lookup_rpc_async(nfsnode_t dnp,char * name,int namelen,vfs_context_t ctx,struct nfsreq ** reqp)6793 nfs3_lookup_rpc_async(
6794 nfsnode_t dnp,
6795 char *name,
6796 int namelen,
6797 vfs_context_t ctx,
6798 struct nfsreq **reqp)
6799 {
6800 struct nfsmount *nmp;
6801 struct nfsm_chain nmreq;
6802 int error = 0, nfsvers;
6803
6804 nmp = NFSTONMP(dnp);
6805 if (nfs_mount_gone(nmp)) {
6806 return ENXIO;
6807 }
6808 nfsvers = nmp->nm_vers;
6809
6810 nfsm_chain_null(&nmreq);
6811
6812 nfsm_chain_build_alloc_init(error, &nmreq,
6813 NFSX_FH(nfsvers) + NFSX_UNSIGNED + nfsm_rndup(namelen));
6814 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
6815 nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
6816 nfsm_chain_build_done(error, &nmreq);
6817 nfsmout_if(error);
6818 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_LOOKUP,
6819 vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, reqp);
6820 nfsmout:
6821 nfsm_chain_cleanup(&nmreq);
6822 return error;
6823 }
6824
6825 int
nfs3_lookup_rpc_async_finish(nfsnode_t dnp,__unused char * name,__unused int namelen,vfs_context_t ctx,struct nfsreq * req,u_int64_t * xidp,fhandle_t * fhp,struct nfs_vattr * nvap)6826 nfs3_lookup_rpc_async_finish(
6827 nfsnode_t dnp,
6828 __unused char *name,
6829 __unused int namelen,
6830 vfs_context_t ctx,
6831 struct nfsreq *req,
6832 u_int64_t *xidp,
6833 fhandle_t *fhp,
6834 struct nfs_vattr *nvap)
6835 {
6836 int error = 0, lockerror = ENOENT, status = 0, nfsvers, attrflag;
6837 u_int64_t xid;
6838 struct nfsmount *nmp;
6839 struct nfsm_chain nmrep;
6840
6841 nmp = NFSTONMP(dnp);
6842 if (nmp == NULL) {
6843 return ENXIO;
6844 }
6845 nfsvers = nmp->nm_vers;
6846
6847 nfsm_chain_null(&nmrep);
6848
6849 error = nfs_request_async_finish(req, &nmrep, xidp, &status);
6850
6851 if ((lockerror = nfs_node_lock(dnp))) {
6852 error = lockerror;
6853 }
6854 xid = *xidp;
6855 if (error || status) {
6856 if (nfsvers == NFS_VER3) {
6857 nfsm_chain_postop_attr_update(error, &nmrep, dnp, &xid);
6858 }
6859 if (!error) {
6860 error = status;
6861 }
6862 goto nfsmout;
6863 }
6864
6865 nfsmout_if(error || !fhp || !nvap);
6866
6867 /* get the file handle */
6868 nfsm_chain_get_fh(error, &nmrep, nfsvers, fhp);
6869
6870 /* get the attributes */
6871 if (nfsvers == NFS_VER3) {
6872 nfsm_chain_postop_attr_get(nmp, error, &nmrep, attrflag, nvap);
6873 nfsm_chain_postop_attr_update(error, &nmrep, dnp, &xid);
6874 if (!error && !attrflag) {
6875 error = nfs3_getattr_rpc(NULL, NFSTOMP(dnp), fhp->fh_data, fhp->fh_len, 0, ctx, nvap, xidp);
6876 }
6877 } else {
6878 error = nfs_parsefattr(nmp, &nmrep, nfsvers, nvap);
6879 }
6880 nfsmout:
6881 if (!lockerror) {
6882 nfs_node_unlock(dnp);
6883 }
6884 nfsm_chain_cleanup(&nmrep);
6885 return error;
6886 }
6887
6888 /*
6889 * Look up a file name and optionally either update the file handle or
6890 * allocate an nfsnode, depending on the value of npp.
6891 * npp == NULL --> just do the lookup
6892 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
6893 * handled too
6894 * *npp != NULL --> update the file handle in the vnode
6895 */
6896 int
nfs_lookitup(nfsnode_t dnp,char * name,int namelen,vfs_context_t ctx,nfsnode_t * npp)6897 nfs_lookitup(
6898 nfsnode_t dnp,
6899 char *name,
6900 int namelen,
6901 vfs_context_t ctx,
6902 nfsnode_t *npp)
6903 {
6904 int error = 0;
6905 nfsnode_t np, newnp = NULL;
6906 u_int64_t xid;
6907 fhandle_t *fh;
6908 struct nfsmount *nmp;
6909 struct nfs_vattr *nvattr;
6910 struct nfsreq *req;
6911
6912 nmp = NFSTONMP(dnp);
6913 if (nfs_mount_gone(nmp)) {
6914 return ENXIO;
6915 }
6916
6917 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXNAME) &&
6918 (namelen > nmp->nm_fsattr.nfsa_maxname)) {
6919 return ENAMETOOLONG;
6920 }
6921
6922 fh = zalloc(nfs_fhandle_zone);
6923 req = zalloc_flags(nfs_req_zone, Z_WAITOK);
6924 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
6925 NVATTR_INIT(nvattr);
6926
6927 /* check for lookup of "." */
6928 if ((name[0] == '.') && (namelen == 1)) {
6929 /* skip lookup, we know who we are */
6930 fh->fh_len = 0;
6931 newnp = dnp;
6932 goto nfsmout;
6933 }
6934
6935 error = nmp->nm_funcs->nf_lookup_rpc_async(dnp, name, namelen, ctx, &req);
6936 nfsmout_if(error);
6937 error = nmp->nm_funcs->nf_lookup_rpc_async_finish(dnp, name, namelen, ctx, req, &xid, fh, nvattr);
6938 nfsmout_if(!npp || error);
6939
6940 if (*npp) {
6941 np = *npp;
6942 if (fh->fh_len != np->n_fhsize) {
6943 u_char *oldbuf = (np->n_fhsize > NFS_SMALLFH) ? np->n_fhp : NULL;
6944 if (fh->fh_len > NFS_SMALLFH) {
6945 np->n_fhp = kalloc_data(fh->fh_len, Z_WAITOK);
6946 if (!np->n_fhp) {
6947 np->n_fhp = oldbuf;
6948 error = ENOMEM;
6949 goto nfsmout;
6950 }
6951 } else {
6952 np->n_fhp = &np->n_fh[0];
6953 }
6954 if (oldbuf) {
6955 kfree_data(oldbuf, np->n_fhsize);
6956 }
6957 }
6958 bcopy(fh->fh_data, np->n_fhp, fh->fh_len);
6959 np->n_fhsize = fh->fh_len;
6960 nfs_node_lock_force(np);
6961 error = nfs_loadattrcache(np, nvattr, &xid, 0);
6962 nfs_node_unlock(np);
6963 nfsmout_if(error);
6964 newnp = np;
6965 } else if (NFS_CMPFH(dnp, fh->fh_data, fh->fh_len)) {
6966 nfs_node_lock_force(dnp);
6967 if (dnp->n_xid <= xid) {
6968 error = nfs_loadattrcache(dnp, nvattr, &xid, 0);
6969 }
6970 nfs_node_unlock(dnp);
6971 nfsmout_if(error);
6972 newnp = dnp;
6973 } else {
6974 struct componentname cn, *cnp = &cn;
6975 bzero(cnp, sizeof(*cnp));
6976 cnp->cn_nameptr = name;
6977 cnp->cn_namelen = namelen;
6978 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len,
6979 nvattr, &xid, req->r_auth, NG_MAKEENTRY, &np);
6980 nfsmout_if(error);
6981 newnp = np;
6982 }
6983
6984 nfsmout:
6985 if (npp && !*npp && !error) {
6986 *npp = newnp;
6987 }
6988 NVATTR_CLEANUP(nvattr);
6989 NFS_ZFREE(nfs_fhandle_zone, fh);
6990 NFS_ZFREE(nfs_req_zone, req);
6991 zfree(KT_NFS_VATTR, nvattr);
6992 return error;
6993 }
6994
6995 /*
6996 * set up and initialize a "._" file lookup structure used for
6997 * performing async lookups.
6998 */
6999 void
nfs_dulookup_init(struct nfs_dulookup * dulp,nfsnode_t dnp,const char * name,int namelen,vfs_context_t ctx)7000 nfs_dulookup_init(struct nfs_dulookup *dulp, nfsnode_t dnp, const char *name, int namelen, vfs_context_t ctx)
7001 {
7002 int error, du_namelen;
7003 vnode_t du_vp;
7004 struct nfsmount *nmp = NFSTONMP(dnp);
7005
7006 /* check for ._ file in name cache */
7007 dulp->du_flags = 0;
7008 bzero(&dulp->du_cn, sizeof(dulp->du_cn));
7009 du_namelen = namelen + 2;
7010 if (!nmp || NMFLAG(nmp, NONEGNAMECACHE)) {
7011 return;
7012 }
7013 if ((namelen >= 2) && (name[0] == '.') && (name[1] == '_')) {
7014 return;
7015 }
7016 if (du_namelen >= (int)sizeof(dulp->du_smallname)) {
7017 dulp->du_cn.cn_nameptr = kalloc_data(du_namelen + 1, Z_WAITOK);
7018 } else {
7019 dulp->du_cn.cn_nameptr = dulp->du_smallname;
7020 }
7021 if (!dulp->du_cn.cn_nameptr) {
7022 return;
7023 }
7024 dulp->du_cn.cn_namelen = du_namelen;
7025 snprintf(dulp->du_cn.cn_nameptr, du_namelen + 1, "._%s", name);
7026 dulp->du_cn.cn_nameptr[du_namelen] = '\0';
7027 dulp->du_cn.cn_nameiop = LOOKUP;
7028 dulp->du_cn.cn_flags = MAKEENTRY;
7029
7030 error = cache_lookup(NFSTOV(dnp), &du_vp, &dulp->du_cn);
7031 if (error == -1) {
7032 vnode_put(du_vp);
7033 } else if (!error) {
7034 nmp = NFSTONMP(dnp);
7035 if (nmp && (nmp->nm_vers > NFS_VER2) && NMFLAG(nmp, RDIRPLUS)) {
7036 /* if rdirplus, try dir buf cache lookup */
7037 nfsnode_t du_np = NULL;
7038 if (!nfs_dir_buf_cache_lookup(dnp, &du_np, &dulp->du_cn, ctx, 0, NULL) && du_np) {
7039 /* dir buf cache hit */
7040 du_vp = NFSTOV(du_np);
7041 vnode_put(du_vp);
7042 error = -1;
7043 }
7044 }
7045 if (!error) {
7046 dulp->du_flags |= NFS_DULOOKUP_DOIT;
7047 }
7048 }
7049 }
7050
7051 /*
7052 * start an async "._" file lookup request
7053 */
7054 void
nfs_dulookup_start(struct nfs_dulookup * dulp,nfsnode_t dnp,vfs_context_t ctx)7055 nfs_dulookup_start(struct nfs_dulookup *dulp, nfsnode_t dnp, vfs_context_t ctx)
7056 {
7057 struct nfsmount *nmp = NFSTONMP(dnp);
7058 struct nfsreq *req = &dulp->du_req;
7059
7060 if (!nmp || !(dulp->du_flags & NFS_DULOOKUP_DOIT) || (dulp->du_flags & NFS_DULOOKUP_INPROG)) {
7061 return;
7062 }
7063 if (!nmp->nm_funcs->nf_lookup_rpc_async(dnp, dulp->du_cn.cn_nameptr,
7064 dulp->du_cn.cn_namelen, ctx, &req)) {
7065 dulp->du_flags |= NFS_DULOOKUP_INPROG;
7066 }
7067 }
7068
7069 /*
7070 * finish an async "._" file lookup request and clean up the structure
7071 */
7072 void
nfs_dulookup_finish(struct nfs_dulookup * dulp,nfsnode_t dnp,vfs_context_t ctx)7073 nfs_dulookup_finish(struct nfs_dulookup *dulp, nfsnode_t dnp, vfs_context_t ctx)
7074 {
7075 struct nfsmount *nmp = NFSTONMP(dnp);
7076 int error;
7077 nfsnode_t du_np;
7078 u_int64_t xid;
7079 fhandle_t *fh;
7080 struct nfs_vattr *nvattr;
7081
7082 if (!nmp || !(dulp->du_flags & NFS_DULOOKUP_INPROG)) {
7083 goto out;
7084 }
7085
7086 fh = zalloc(nfs_fhandle_zone);
7087 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
7088 NVATTR_INIT(nvattr);
7089 error = nmp->nm_funcs->nf_lookup_rpc_async_finish(dnp, dulp->du_cn.cn_nameptr,
7090 dulp->du_cn.cn_namelen, ctx, &dulp->du_req, &xid, fh, nvattr);
7091 dulp->du_flags &= ~NFS_DULOOKUP_INPROG;
7092 if (error == ENOENT) {
7093 /* add a negative entry in the name cache */
7094 nfs_node_lock_force(dnp);
7095 cache_enter(NFSTOV(dnp), NULL, &dulp->du_cn);
7096 dnp->n_flag |= NNEGNCENTRIES;
7097 nfs_node_unlock(dnp);
7098 } else if (!error) {
7099 error = nfs_nget(NFSTOMP(dnp), dnp, &dulp->du_cn, fh->fh_data, fh->fh_len,
7100 nvattr, &xid, dulp->du_req.r_auth, NG_MAKEENTRY, &du_np);
7101 if (!error) {
7102 nfs_node_unlock(du_np);
7103 vnode_put(NFSTOV(du_np));
7104 }
7105 }
7106 NVATTR_CLEANUP(nvattr);
7107 NFS_ZFREE(nfs_fhandle_zone, fh);
7108 zfree(KT_NFS_VATTR, nvattr);
7109 out:
7110 if (dulp->du_flags & NFS_DULOOKUP_INPROG) {
7111 nfs_request_async_cancel(&dulp->du_req);
7112 }
7113 if (dulp->du_cn.cn_nameptr && (dulp->du_cn.cn_nameptr != dulp->du_smallname)) {
7114 kfree_data(dulp->du_cn.cn_nameptr, dulp->du_cn.cn_namelen + 1);
7115 }
7116 }
7117
7118
7119 /*
7120 * NFS Version 3 commit RPC
7121 */
7122 int
nfs3_commit_rpc(nfsnode_t np,uint64_t offset,uint64_t count,kauth_cred_t cred,uint64_t wverf)7123 nfs3_commit_rpc(
7124 nfsnode_t np,
7125 uint64_t offset,
7126 uint64_t count,
7127 kauth_cred_t cred,
7128 uint64_t wverf)
7129 {
7130 struct nfsmount *nmp;
7131 int error = 0, lockerror, status = 0, wccpostattr = 0, nfsvers;
7132 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
7133 u_int64_t xid, newwverf;
7134 uint32_t count32;
7135 struct nfsm_chain nmreq, nmrep;
7136
7137 nmp = NFSTONMP(np);
7138 FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
7139 if (nfs_mount_gone(nmp)) {
7140 return ENXIO;
7141 }
7142 if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
7143 return 0;
7144 }
7145 nfsvers = nmp->nm_vers;
7146 count32 = count > UINT32_MAX ? 0 : (uint32_t)count;
7147
7148 nfsm_chain_null(&nmreq);
7149 nfsm_chain_null(&nmrep);
7150
7151 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(NFS_VER3));
7152 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
7153 nfsm_chain_add_64(error, &nmreq, offset);
7154 nfsm_chain_add_32(error, &nmreq, count32);
7155 nfsm_chain_build_done(error, &nmreq);
7156 nfsmout_if(error);
7157 error = nfs_request2(np, NULL, &nmreq, NFSPROC_COMMIT,
7158 current_thread(), cred, NULL, 0, &nmrep, &xid, &status);
7159 if ((lockerror = nfs_node_lock(np))) {
7160 error = lockerror;
7161 }
7162 /* can we do anything useful with the wcc info? */
7163 nfsm_chain_get_wcc_data(error, &nmrep, np, &premtime, &wccpostattr, &xid);
7164 if (!lockerror) {
7165 nfs_node_unlock(np);
7166 }
7167 if (!error) {
7168 error = status;
7169 }
7170 nfsm_chain_get_64(error, &nmrep, newwverf);
7171 nfsmout_if(error);
7172 lck_mtx_lock(&nmp->nm_lock);
7173 if (nmp->nm_verf != newwverf) {
7174 nmp->nm_verf = newwverf;
7175 }
7176 if (wverf != newwverf) {
7177 error = NFSERR_STALEWRITEVERF;
7178 }
7179 lck_mtx_unlock(&nmp->nm_lock);
7180 nfsmout:
7181 nfsm_chain_cleanup(&nmreq);
7182 nfsm_chain_cleanup(&nmrep);
7183 return error;
7184 }
7185
7186
7187 int
nfs_vnop_blockmap(__unused struct vnop_blockmap_args * ap)7188 nfs_vnop_blockmap(
7189 __unused struct vnop_blockmap_args /* {
7190 * struct vnodeop_desc *a_desc;
7191 * vnode_t a_vp;
7192 * off_t a_foffset;
7193 * size_t a_size;
7194 * daddr64_t *a_bpn;
7195 * size_t *a_run;
7196 * void *a_poff;
7197 * int a_flags;
7198 * } */*ap)
7199 {
7200 return ENOTSUP;
7201 }
7202
7203
7204 /*
7205 * fsync vnode op. Just call nfs_flush().
7206 */
7207 /* ARGSUSED */
7208 int
nfs_vnop_fsync(struct vnop_fsync_args * ap)7209 nfs_vnop_fsync(
7210 struct vnop_fsync_args /* {
7211 * struct vnodeop_desc *a_desc;
7212 * vnode_t a_vp;
7213 * int a_waitfor;
7214 * vfs_context_t a_context;
7215 * } */*ap)
7216 {
7217 int error = nfs_flush(VTONFS(ap->a_vp), ap->a_waitfor, vfs_context_thread(ap->a_context), 0);
7218 return NFS_MAPERR(error);
7219 }
7220
7221
7222 /*
7223 * Do an NFS pathconf RPC.
7224 */
7225 int
nfs3_pathconf_rpc(nfsnode_t np,struct nfs_fsattr * nfsap,vfs_context_t ctx)7226 nfs3_pathconf_rpc(
7227 nfsnode_t np,
7228 struct nfs_fsattr *nfsap,
7229 vfs_context_t ctx)
7230 {
7231 u_int64_t xid;
7232 int error = 0, lockerror, status = 0, nfsvers;
7233 struct nfsm_chain nmreq, nmrep;
7234 struct nfsmount *nmp = NFSTONMP(np);
7235 uint32_t val = 0;
7236
7237 if (nfs_mount_gone(nmp)) {
7238 return ENXIO;
7239 }
7240 nfsvers = nmp->nm_vers;
7241
7242 nfsm_chain_null(&nmreq);
7243 nfsm_chain_null(&nmrep);
7244
7245 /* fetch pathconf info from server */
7246 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(NFS_VER3));
7247 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
7248 nfsm_chain_build_done(error, &nmreq);
7249 nfsmout_if(error);
7250 error = nfs_request(np, NULL, &nmreq, NFSPROC_PATHCONF, ctx, NULL, &nmrep, &xid, &status);
7251 if ((lockerror = nfs_node_lock(np))) {
7252 error = lockerror;
7253 }
7254 nfsm_chain_postop_attr_update(error, &nmrep, np, &xid);
7255 if (!lockerror) {
7256 nfs_node_unlock(np);
7257 }
7258 if (!error) {
7259 error = status;
7260 }
7261 nfsm_chain_get_32(error, &nmrep, nfsap->nfsa_maxlink);
7262 nfsm_chain_get_32(error, &nmrep, nfsap->nfsa_maxname);
7263 nfsap->nfsa_flags &= ~(NFS_FSFLAG_NO_TRUNC | NFS_FSFLAG_CHOWN_RESTRICTED | NFS_FSFLAG_CASE_INSENSITIVE | NFS_FSFLAG_CASE_PRESERVING);
7264 nfsm_chain_get_32(error, &nmrep, val);
7265 if (val) {
7266 nfsap->nfsa_flags |= NFS_FSFLAG_NO_TRUNC;
7267 }
7268 nfsm_chain_get_32(error, &nmrep, val);
7269 if (val) {
7270 nfsap->nfsa_flags |= NFS_FSFLAG_CHOWN_RESTRICTED;
7271 }
7272 nfsm_chain_get_32(error, &nmrep, val);
7273 if (val) {
7274 nfsap->nfsa_flags |= NFS_FSFLAG_CASE_INSENSITIVE;
7275 }
7276 nfsm_chain_get_32(error, &nmrep, val);
7277 if (val) {
7278 nfsap->nfsa_flags |= NFS_FSFLAG_CASE_PRESERVING;
7279 }
7280 NFS_BITMAP_SET(nfsap->nfsa_bitmap, NFS_FATTR_MAXLINK);
7281 NFS_BITMAP_SET(nfsap->nfsa_bitmap, NFS_FATTR_MAXNAME);
7282 NFS_BITMAP_SET(nfsap->nfsa_bitmap, NFS_FATTR_NO_TRUNC);
7283 NFS_BITMAP_SET(nfsap->nfsa_bitmap, NFS_FATTR_CHOWN_RESTRICTED);
7284 NFS_BITMAP_SET(nfsap->nfsa_bitmap, NFS_FATTR_CASE_INSENSITIVE);
7285 NFS_BITMAP_SET(nfsap->nfsa_bitmap, NFS_FATTR_CASE_PRESERVING);
7286 nfsmout:
7287 nfsm_chain_cleanup(&nmreq);
7288 nfsm_chain_cleanup(&nmrep);
7289 return error;
7290 }
7291
7292 /* save pathconf info for NFSv3 mount */
7293 void
nfs3_pathconf_cache(struct nfsmount * nmp,struct nfs_fsattr * nfsap)7294 nfs3_pathconf_cache(struct nfsmount *nmp, struct nfs_fsattr *nfsap)
7295 {
7296 nmp->nm_fsattr.nfsa_maxlink = nfsap->nfsa_maxlink;
7297 nmp->nm_fsattr.nfsa_maxname = nfsap->nfsa_maxname;
7298 nmp->nm_fsattr.nfsa_flags &= ~(NFS_FSFLAG_NO_TRUNC | NFS_FSFLAG_CHOWN_RESTRICTED | NFS_FSFLAG_CASE_INSENSITIVE | NFS_FSFLAG_CASE_PRESERVING);
7299 nmp->nm_fsattr.nfsa_flags |= nfsap->nfsa_flags & NFS_FSFLAG_NO_TRUNC;
7300 nmp->nm_fsattr.nfsa_flags |= nfsap->nfsa_flags & NFS_FSFLAG_CHOWN_RESTRICTED;
7301 nmp->nm_fsattr.nfsa_flags |= nfsap->nfsa_flags & NFS_FSFLAG_CASE_INSENSITIVE;
7302 nmp->nm_fsattr.nfsa_flags |= nfsap->nfsa_flags & NFS_FSFLAG_CASE_PRESERVING;
7303 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXLINK);
7304 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXNAME);
7305 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_NO_TRUNC);
7306 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CHOWN_RESTRICTED);
7307 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CASE_INSENSITIVE);
7308 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CASE_PRESERVING);
7309 nmp->nm_state |= NFSSTA_GOTPATHCONF;
7310 }
7311
7312 static uint
nfs_pathconf_maxfile_bits(uint64_t maxFileSize)7313 nfs_pathconf_maxfile_bits(uint64_t maxFileSize)
7314 {
7315 uint nbits;
7316
7317 nbits = 1;
7318 if (maxFileSize & 0xffffffff00000000ULL) {
7319 nbits += 32;
7320 maxFileSize >>= 32;
7321 }
7322 if (maxFileSize & 0xffff0000) {
7323 nbits += 16;
7324 maxFileSize >>= 16;
7325 }
7326 if (maxFileSize & 0xff00) {
7327 nbits += 8;
7328 maxFileSize >>= 8;
7329 }
7330 if (maxFileSize & 0xf0) {
7331 nbits += 4;
7332 maxFileSize >>= 4;
7333 }
7334 if (maxFileSize & 0xc) {
7335 nbits += 2;
7336 maxFileSize >>= 2;
7337 }
7338 if (maxFileSize & 0x2) {
7339 nbits += 1;
7340 }
7341
7342 return nbits;
7343 }
7344
7345 /*
7346 * Return POSIX pathconf information applicable to nfs.
7347 *
7348 * The NFS V2 protocol doesn't support this, so just return EINVAL
7349 * for V2.
7350 */
7351 /* ARGSUSED */
7352 int
nfs_vnop_pathconf(struct vnop_pathconf_args * ap)7353 nfs_vnop_pathconf(
7354 struct vnop_pathconf_args /* {
7355 * struct vnodeop_desc *a_desc;
7356 * vnode_t a_vp;
7357 * int a_name;
7358 * int32_t *a_retval;
7359 * vfs_context_t a_context;
7360 * } */*ap)
7361 {
7362 vnode_t vp = ap->a_vp;
7363 nfsnode_t np = VTONFS(vp);
7364 struct nfsmount *nmp;
7365 struct nfs_fsattr nfsa, *nfsap;
7366 int error = 0;
7367
7368 nmp = VTONMP(vp);
7369 if (nfs_mount_gone(nmp)) {
7370 return ENXIO;
7371 }
7372
7373 switch (ap->a_name) {
7374 case _PC_LINK_MAX:
7375 case _PC_NAME_MAX:
7376 case _PC_CHOWN_RESTRICTED:
7377 case _PC_NO_TRUNC:
7378 case _PC_CASE_SENSITIVE:
7379 case _PC_CASE_PRESERVING:
7380 break;
7381 case _PC_FILESIZEBITS:
7382 if (nmp->nm_vers == NFS_VER2) {
7383 *ap->a_retval = 32;
7384 } else if (!NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXFILESIZE)) {
7385 *ap->a_retval = 64;
7386 } else {
7387 *ap->a_retval = nfs_pathconf_maxfile_bits(nmp->nm_fsattr.nfsa_maxfilesize);
7388 }
7389 return 0;
7390 case _PC_XATTR_SIZE_BITS:
7391 /* Do we support xattrs natively? */
7392 if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR) {
7393 /* same as file size bits if named attrs supported */
7394 *ap->a_retval = nfs_pathconf_maxfile_bits(nmp->nm_fsattr.nfsa_maxfilesize);
7395 return 0;
7396 }
7397 /* No... so just return an error */
7398 return EINVAL;
7399 default:
7400 /* don't bother contacting the server if we know the answer */
7401 return EINVAL;
7402 }
7403
7404 if (nmp->nm_vers == NFS_VER2) {
7405 return EINVAL;
7406 }
7407
7408 lck_mtx_lock(&nmp->nm_lock);
7409 if (nmp->nm_vers == NFS_VER3) {
7410 if (!(nmp->nm_state & NFSSTA_GOTPATHCONF) || (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_HOMOGENEOUS) && nmp->nm_dnp != np)) {
7411 /* no pathconf info cached OR we were asked for non-root pathconf and filesystem does not support FSF_HOMOGENEOUS */
7412 lck_mtx_unlock(&nmp->nm_lock);
7413 NFS_CLEAR_ATTRIBUTES(nfsa.nfsa_bitmap);
7414 error = nfs3_pathconf_rpc(np, &nfsa, ap->a_context);
7415 if (error) {
7416 return NFS_MAPERR(error);
7417 }
7418 nmp = VTONMP(vp);
7419 if (nfs_mount_gone(nmp)) {
7420 return ENXIO;
7421 }
7422 lck_mtx_lock(&nmp->nm_lock);
7423 if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_HOMOGENEOUS) {
7424 /* all files have the same pathconf info, */
7425 /* so cache a copy of the results */
7426 nfs3_pathconf_cache(nmp, &nfsa);
7427 }
7428 nfsap = &nfsa;
7429 } else {
7430 nfsap = &nmp->nm_fsattr;
7431 }
7432 }
7433 #if CONFIG_NFS4
7434 else if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_HOMOGENEOUS)) {
7435 /* no pathconf info cached */
7436 lck_mtx_unlock(&nmp->nm_lock);
7437 NFS_CLEAR_ATTRIBUTES(nfsa.nfsa_bitmap);
7438 error = nfs4_pathconf_rpc(np, &nfsa, ap->a_context);
7439 if (error) {
7440 return NFS_MAPERR(error);
7441 }
7442 nmp = VTONMP(vp);
7443 if (nfs_mount_gone(nmp)) {
7444 return ENXIO;
7445 }
7446 lck_mtx_lock(&nmp->nm_lock);
7447 nfsap = &nfsa;
7448 }
7449 #endif
7450 else {
7451 nfsap = &nmp->nm_fsattr;
7452 }
7453 switch (ap->a_name) {
7454 case _PC_LINK_MAX:
7455 if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_MAXLINK)) {
7456 *ap->a_retval = nfsap->nfsa_maxlink;
7457 #if CONFIG_NFS4
7458 } else if ((nmp->nm_vers == NFS_VER4) && NFS_BITMAP_ISSET(np->n_vattr.nva_bitmap, NFS_FATTR_MAXLINK)) {
7459 *ap->a_retval = np->n_vattr.nva_maxlink;
7460 #endif
7461 } else {
7462 error = EINVAL;
7463 }
7464 break;
7465 case _PC_NAME_MAX:
7466 if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_MAXNAME)) {
7467 *ap->a_retval = nfsap->nfsa_maxname;
7468 } else {
7469 error = EINVAL;
7470 }
7471 break;
7472 case _PC_CHOWN_RESTRICTED:
7473 if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_CHOWN_RESTRICTED)) {
7474 *ap->a_retval = (nfsap->nfsa_flags & NFS_FSFLAG_CHOWN_RESTRICTED) ? 200112 /* _POSIX_CHOWN_RESTRICTED */ : 0;
7475 } else {
7476 error = EINVAL;
7477 }
7478 break;
7479 case _PC_NO_TRUNC:
7480 if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_NO_TRUNC)) {
7481 *ap->a_retval = (nfsap->nfsa_flags & NFS_FSFLAG_NO_TRUNC) ? 200112 /* _POSIX_NO_TRUNC */ : 0;
7482 } else {
7483 error = EINVAL;
7484 }
7485 break;
7486 case _PC_CASE_SENSITIVE:
7487 if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_CASE_INSENSITIVE)) {
7488 *ap->a_retval = (nfsap->nfsa_flags & NFS_FSFLAG_CASE_INSENSITIVE) ? 0 : 1;
7489 } else {
7490 error = EINVAL;
7491 }
7492 break;
7493 case _PC_CASE_PRESERVING:
7494 if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_CASE_PRESERVING)) {
7495 *ap->a_retval = (nfsap->nfsa_flags & NFS_FSFLAG_CASE_PRESERVING) ? 1 : 0;
7496 } else {
7497 error = EINVAL;
7498 }
7499 break;
7500 default:
7501 error = EINVAL;
7502 }
7503
7504 lck_mtx_unlock(&nmp->nm_lock);
7505
7506 return NFS_MAPERR(error);
7507 }
7508
7509 /*
7510 * Read wrapper for special devices.
7511 */
7512 int
nfsspec_vnop_read(struct vnop_read_args * ap)7513 nfsspec_vnop_read(
7514 struct vnop_read_args /* {
7515 * struct vnodeop_desc *a_desc;
7516 * vnode_t a_vp;
7517 * struct uio *a_uio;
7518 * int a_ioflag;
7519 * vfs_context_t a_context;
7520 * } */*ap)
7521 {
7522 nfsnode_t np = VTONFS(ap->a_vp);
7523 struct timespec now;
7524 int error;
7525
7526 /*
7527 * Set access flag.
7528 */
7529 if ((error = nfs_node_lock(np))) {
7530 return NFS_MAPERR(error);
7531 }
7532 np->n_flag |= NACC;
7533 nanotime(&now);
7534 np->n_atim.tv_sec = now.tv_sec;
7535 np->n_atim.tv_nsec = now.tv_nsec;
7536 nfs_node_unlock(np);
7537 return spec_read(ap);
7538 }
7539
7540 /*
7541 * Write wrapper for special devices.
7542 */
7543 int
nfsspec_vnop_write(struct vnop_write_args * ap)7544 nfsspec_vnop_write(
7545 struct vnop_write_args /* {
7546 * struct vnodeop_desc *a_desc;
7547 * vnode_t a_vp;
7548 * struct uio *a_uio;
7549 * int a_ioflag;
7550 * vfs_context_t a_context;
7551 * } */*ap)
7552 {
7553 nfsnode_t np = VTONFS(ap->a_vp);
7554 struct timespec now;
7555 int error;
7556
7557 /*
7558 * Set update flag.
7559 */
7560 if ((error = nfs_node_lock(np))) {
7561 return NFS_MAPERR(error);
7562 }
7563 np->n_flag |= NUPD;
7564 nanotime(&now);
7565 np->n_mtim.tv_sec = now.tv_sec;
7566 np->n_mtim.tv_nsec = now.tv_nsec;
7567 nfs_node_unlock(np);
7568 return spec_write(ap);
7569 }
7570
7571 /*
7572 * Close wrapper for special devices.
7573 *
7574 * Update the times on the nfsnode then do device close.
7575 */
7576 int
nfsspec_vnop_close(struct vnop_close_args * ap)7577 nfsspec_vnop_close(
7578 struct vnop_close_args /* {
7579 * struct vnodeop_desc *a_desc;
7580 * vnode_t a_vp;
7581 * int a_fflag;
7582 * vfs_context_t a_context;
7583 * } */*ap)
7584 {
7585 vnode_t vp = ap->a_vp;
7586 nfsnode_t np = VTONFS(vp);
7587 struct vnode_attr vattr;
7588 mount_t mp;
7589 int error;
7590
7591 if ((error = nfs_node_lock(np))) {
7592 return NFS_MAPERR(error);
7593 }
7594 if (np->n_flag & (NACC | NUPD)) {
7595 np->n_flag |= NCHG;
7596 if (!vnode_isinuse(vp, 0) && (mp = vnode_mount(vp)) && !vfs_isrdonly(mp)) {
7597 VATTR_INIT(&vattr);
7598 if (np->n_flag & NACC) {
7599 vattr.va_access_time = np->n_atim;
7600 VATTR_SET_ACTIVE(&vattr, va_access_time);
7601 }
7602 if (np->n_flag & NUPD) {
7603 vattr.va_modify_time = np->n_mtim;
7604 VATTR_SET_ACTIVE(&vattr, va_modify_time);
7605 }
7606 nfs_node_unlock(np);
7607 vnode_setattr(vp, &vattr, ap->a_context);
7608 } else {
7609 nfs_node_unlock(np);
7610 }
7611 } else {
7612 nfs_node_unlock(np);
7613 }
7614 return spec_close(ap);
7615 }
7616
7617 #if FIFO
7618
7619 /*
7620 * Read wrapper for fifos.
7621 */
7622 int
nfsfifo_vnop_read(struct vnop_read_args * ap)7623 nfsfifo_vnop_read(
7624 struct vnop_read_args /* {
7625 * struct vnodeop_desc *a_desc;
7626 * vnode_t a_vp;
7627 * struct uio *a_uio;
7628 * int a_ioflag;
7629 * vfs_context_t a_context;
7630 * } */*ap)
7631 {
7632 nfsnode_t np = VTONFS(ap->a_vp);
7633 struct timespec now;
7634 int error;
7635
7636 /*
7637 * Set access flag.
7638 */
7639 if ((error = nfs_node_lock(np))) {
7640 return NFS_MAPERR(error);
7641 }
7642 np->n_flag |= NACC;
7643 nanotime(&now);
7644 np->n_atim.tv_sec = now.tv_sec;
7645 np->n_atim.tv_nsec = now.tv_nsec;
7646 nfs_node_unlock(np);
7647 return fifo_read(ap);
7648 }
7649
7650 /*
7651 * Write wrapper for fifos.
7652 */
7653 int
nfsfifo_vnop_write(struct vnop_write_args * ap)7654 nfsfifo_vnop_write(
7655 struct vnop_write_args /* {
7656 * struct vnodeop_desc *a_desc;
7657 * vnode_t a_vp;
7658 * struct uio *a_uio;
7659 * int a_ioflag;
7660 * vfs_context_t a_context;
7661 * } */*ap)
7662 {
7663 nfsnode_t np = VTONFS(ap->a_vp);
7664 struct timespec now;
7665 int error;
7666
7667 /*
7668 * Set update flag.
7669 */
7670 if ((error = nfs_node_lock(np))) {
7671 return NFS_MAPERR(error);
7672 }
7673 np->n_flag |= NUPD;
7674 nanotime(&now);
7675 np->n_mtim.tv_sec = now.tv_sec;
7676 np->n_mtim.tv_nsec = now.tv_nsec;
7677 nfs_node_unlock(np);
7678 return fifo_write(ap);
7679 }
7680
7681 /*
7682 * Close wrapper for fifos.
7683 *
7684 * Update the times on the nfsnode then do fifo close.
7685 */
7686 int
nfsfifo_vnop_close(struct vnop_close_args * ap)7687 nfsfifo_vnop_close(
7688 struct vnop_close_args /* {
7689 * struct vnodeop_desc *a_desc;
7690 * vnode_t a_vp;
7691 * int a_fflag;
7692 * vfs_context_t a_context;
7693 * } */*ap)
7694 {
7695 vnode_t vp = ap->a_vp;
7696 nfsnode_t np = VTONFS(vp);
7697 struct vnode_attr vattr;
7698 struct timespec now;
7699 mount_t mp;
7700 int error;
7701
7702 if ((error = nfs_node_lock(np))) {
7703 return NFS_MAPERR(error);
7704 }
7705 if (np->n_flag & (NACC | NUPD)) {
7706 nanotime(&now);
7707 if (np->n_flag & NACC) {
7708 np->n_atim.tv_sec = now.tv_sec;
7709 np->n_atim.tv_nsec = now.tv_nsec;
7710 }
7711 if (np->n_flag & NUPD) {
7712 np->n_mtim.tv_sec = now.tv_sec;
7713 np->n_mtim.tv_nsec = now.tv_nsec;
7714 }
7715 np->n_flag |= NCHG;
7716 if (!vnode_isinuse(vp, 1) && (mp = vnode_mount(vp)) && !vfs_isrdonly(mp)) {
7717 VATTR_INIT(&vattr);
7718 if (np->n_flag & NACC) {
7719 vattr.va_access_time = np->n_atim;
7720 VATTR_SET_ACTIVE(&vattr, va_access_time);
7721 }
7722 if (np->n_flag & NUPD) {
7723 vattr.va_modify_time = np->n_mtim;
7724 VATTR_SET_ACTIVE(&vattr, va_modify_time);
7725 }
7726 nfs_node_unlock(np);
7727 vnode_setattr(vp, &vattr, ap->a_context);
7728 } else {
7729 nfs_node_unlock(np);
7730 }
7731 } else {
7732 nfs_node_unlock(np);
7733 }
7734 return fifo_close(ap);
7735 }
7736 #endif /* FIFO */
7737
7738 /*ARGSUSED*/
7739 int
nfs_vnop_ioctl(struct vnop_ioctl_args * ap)7740 nfs_vnop_ioctl(
7741 struct vnop_ioctl_args /* {
7742 * struct vnodeop_desc *a_desc;
7743 * vnode_t a_vp;
7744 * u_int32_t a_command;
7745 * caddr_t a_data;
7746 * int a_fflag;
7747 * vfs_context_t a_context;
7748 * } */*ap)
7749 {
7750 vfs_context_t ctx = ap->a_context;
7751 vnode_t vp = ap->a_vp;
7752 struct nfsmount *mp = VTONMP(vp);
7753 int error = ENOTTY;
7754 #if CONFIG_NFS_GSS
7755 struct user_nfs_gss_principal gprinc = {};
7756 size_t len;
7757 #endif
7758
7759 if (mp == NULL) {
7760 return ENXIO;
7761 }
7762 switch (ap->a_command) {
7763 case F_FULLFSYNC:
7764 if (vnode_vfsisrdonly(vp)) {
7765 return EROFS;
7766 }
7767 error = nfs_flush(VTONFS(vp), MNT_WAIT, vfs_context_thread(ctx), 0);
7768 break;
7769 #if CONFIG_NFS_GSS
7770 case NFS_IOC_DESTROY_CRED:
7771 if (!auth_is_kerberized(mp->nm_auth)) {
7772 return ENOTSUP;
7773 }
7774 if ((error = nfs_gss_clnt_ctx_remove(mp, vfs_context_ucred(ctx))) == ENOENT) {
7775 error = 0;
7776 }
7777 break;
7778 case NFS_IOC_SET_CRED:
7779 case NFS_IOC_SET_CRED64:
7780 if (!auth_is_kerberized(mp->nm_auth)) {
7781 return ENOTSUP;
7782 }
7783 if ((ap->a_command == NFS_IOC_SET_CRED && vfs_context_is64bit(ctx)) ||
7784 (ap->a_command == NFS_IOC_SET_CRED64 && !vfs_context_is64bit(ctx))) {
7785 return EINVAL;
7786 }
7787 if (vfs_context_is64bit(ctx)) {
7788 gprinc = *(struct user_nfs_gss_principal *)ap->a_data;
7789 } else {
7790 struct nfs_gss_principal *tp;
7791 tp = (struct nfs_gss_principal *)ap->a_data;
7792 gprinc.princlen = tp->princlen;
7793 gprinc.nametype = tp->nametype;
7794 gprinc.principal = CAST_USER_ADDR_T(tp->principal);
7795 }
7796 NFSCLNT_DBG(NFSCLNT_FAC_GSS, 7, "Enter NFS_FSCTL_SET_CRED (64-bit=%d): principal length %zu name type %d usr pointer 0x%llx\n", vfs_context_is64bit(ctx), gprinc.princlen, gprinc.nametype, gprinc.principal);
7797 if (gprinc.princlen > MAXPATHLEN) {
7798 return EINVAL;
7799 }
7800 uint8_t *p;
7801 p = kalloc_data(gprinc.princlen + 1, Z_WAITOK | Z_ZERO);
7802 if (p == NULL) {
7803 return ENOMEM;
7804 }
7805 assert((user_addr_t)gprinc.principal == gprinc.principal);
7806 error = copyin((user_addr_t)gprinc.principal, p, gprinc.princlen);
7807 if (error) {
7808 NFSCLNT_DBG(NFSCLNT_FAC_GSS, 7, "NFS_FSCTL_SET_CRED could not copy in princiapl data of len %zu: %d\n",
7809 gprinc.princlen, error);
7810 kfree_data(p, gprinc.princlen + 1);
7811 return NFS_MAPERR(error);
7812 }
7813 NFSCLNT_DBG(NFSCLNT_FAC_GSS, 7, "Seting credential to principal %s\n", p);
7814 error = nfs_gss_clnt_ctx_set_principal(mp, ctx, p, gprinc.princlen, gprinc.nametype);
7815 NFSCLNT_DBG(NFSCLNT_FAC_GSS, 7, "Seting credential to principal %s returned %d\n", p, error);
7816 kfree_data(p, gprinc.princlen + 1);
7817 break;
7818 case NFS_IOC_GET_CRED:
7819 case NFS_IOC_GET_CRED64:
7820 if (!auth_is_kerberized(mp->nm_auth)) {
7821 return ENOTSUP;
7822 }
7823 if ((ap->a_command == NFS_IOC_GET_CRED && vfs_context_is64bit(ctx)) ||
7824 (ap->a_command == NFS_IOC_GET_CRED64 && !vfs_context_is64bit(ctx))) {
7825 return EINVAL;
7826 }
7827 error = nfs_gss_clnt_ctx_get_principal(mp, ctx, &gprinc);
7828 if (error) {
7829 break;
7830 }
7831 if (vfs_context_is64bit(ctx)) {
7832 struct user_nfs_gss_principal *upp = (struct user_nfs_gss_principal *)ap->a_data;
7833 len = upp->princlen;
7834 if (gprinc.princlen < len) {
7835 len = gprinc.princlen;
7836 }
7837 upp->princlen = gprinc.princlen;
7838 upp->nametype = gprinc.nametype;
7839 upp->flags = gprinc.flags;
7840 if (gprinc.principal) {
7841 assert((user_addr_t)upp->principal == upp->principal);
7842 error = copyout((void *)gprinc.principal, (user_addr_t)upp->principal, len);
7843 } else {
7844 upp->principal = USER_ADDR_NULL;
7845 }
7846 } else {
7847 struct nfs_gss_principal *u32pp = (struct nfs_gss_principal *)ap->a_data;
7848 len = u32pp->princlen;
7849 if (gprinc.princlen < len) {
7850 len = gprinc.princlen;
7851 }
7852 u32pp->princlen = gprinc.princlen;
7853 u32pp->nametype = gprinc.nametype;
7854 u32pp->flags = gprinc.flags;
7855 if (gprinc.principal) {
7856 error = copyout((void *)gprinc.principal, u32pp->principal, len);
7857 } else {
7858 u32pp->principal = (user32_addr_t)0;
7859 }
7860 }
7861 if (error) {
7862 NFSCLNT_DBG(NFSCLNT_FAC_GSS, 7, "NFS_FSCTL_GET_CRED could not copy out princiapl data of len %zu: %d\n",
7863 gprinc.princlen, error);
7864 }
7865 if (gprinc.principal) {
7866 void *ptr = (void *)gprinc.principal;
7867 gprinc.principal = 0;
7868 kfree_data(ptr, gprinc.princlen);
7869 }
7870 #endif /* CONFIG_NFS_GSS */
7871 }
7872
7873 return NFS_MAPERR(error);
7874 }
7875
7876 /*ARGSUSED*/
7877 int
nfs_vnop_select(__unused struct vnop_select_args * ap)7878 nfs_vnop_select(
7879 __unused struct vnop_select_args /* {
7880 * struct vnodeop_desc *a_desc;
7881 * vnode_t a_vp;
7882 * int a_which;
7883 * int a_fflags;
7884 * void *a_wql;
7885 * vfs_context_t a_context;
7886 * } */*ap)
7887 {
7888 /*
7889 * We were once bogusly seltrue() which returns 1. Is this right?
7890 */
7891 return 1;
7892 }
7893
7894 /*
7895 * vnode OP for pagein using UPL
7896 *
7897 * No buffer I/O, just RPCs straight into the mapped pages.
7898 */
7899 int
nfs_vnop_pagein(struct vnop_pagein_args * ap)7900 nfs_vnop_pagein(
7901 struct vnop_pagein_args /* {
7902 * struct vnodeop_desc *a_desc;
7903 * vnode_t a_vp;
7904 * upl_t a_pl;
7905 * vm_offset_t a_pl_offset;
7906 * off_t a_f_offset;
7907 * size_t a_size;
7908 * int a_flags;
7909 * vfs_context_t a_context;
7910 * } */*ap)
7911 {
7912 vnode_t vp = ap->a_vp;
7913 upl_t pl = ap->a_pl;
7914 upl_size_t size = (upl_size_t)ap->a_size;
7915 off_t f_offset = ap->a_f_offset;
7916 upl_offset_t pl_offset = ap->a_pl_offset;
7917 int flags = ap->a_flags;
7918 thread_t thd;
7919 kauth_cred_t cred;
7920 nfsnode_t np = VTONFS(vp);
7921 size_t nmrsize, iosize, txsize, rxsize, retsize;
7922 off_t txoffset;
7923 struct nfsmount *nmp;
7924 int error = 0, eof = 0;
7925 vm_offset_t ioaddr, rxaddr;
7926 uio_t uio;
7927 int nofreeupl = flags & UPL_NOCOMMIT;
7928 upl_page_info_t *plinfo;
7929 #define MAXPAGINGREQS 16 /* max outstanding RPCs for pagein/pageout */
7930 struct nfsreq *req[MAXPAGINGREQS];
7931 int nextsend, nextwait;
7932 #if CONFIG_NFS4
7933 uint32_t stategenid = 0;
7934 #endif
7935 uint32_t restart = 0;
7936 kern_return_t kret;
7937
7938 FSDBG(322, np, f_offset, size, flags);
7939 if (pl == (upl_t)NULL) {
7940 panic("nfs_pagein: no upl");
7941 }
7942
7943 if (size <= 0) {
7944 printf("nfs_pagein: invalid size %u", size);
7945 if (!nofreeupl) {
7946 (void) ubc_upl_abort_range(pl, pl_offset, size, 0);
7947 }
7948 return EINVAL;
7949 }
7950 if (f_offset < 0 || f_offset >= (off_t)np->n_size || (f_offset & PAGE_MASK_64)) {
7951 if (!nofreeupl) {
7952 ubc_upl_abort_range(pl, pl_offset, size,
7953 UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
7954 }
7955 return EINVAL;
7956 }
7957
7958 thd = vfs_context_thread(ap->a_context);
7959 cred = ubc_getcred(vp);
7960 if (!IS_VALID_CRED(cred)) {
7961 cred = vfs_context_ucred(ap->a_context);
7962 }
7963
7964 nmp = VTONMP(vp);
7965 if (nfs_mount_gone(nmp)) {
7966 if (!nofreeupl) {
7967 ubc_upl_abort_range(pl, pl_offset, size,
7968 UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
7969 }
7970 return ENXIO;
7971 }
7972 nmrsize = nmp->nm_rsize;
7973 uio = uio_create(1, f_offset, UIO_SYSSPACE, UIO_READ);
7974
7975 plinfo = ubc_upl_pageinfo(pl);
7976 kret = ubc_upl_map(pl, &ioaddr);
7977 if (kret != KERN_SUCCESS) {
7978 panic("nfs_vnop_pagein: ubc_upl_map() failed with (%d)", kret);
7979 }
7980 ioaddr += pl_offset;
7981
7982 tryagain:
7983 #if CONFIG_NFS4
7984 if (nmp->nm_vers >= NFS_VER4) {
7985 stategenid = nmp->nm_stategenid;
7986 }
7987 #endif
7988 txsize = rxsize = size;
7989 txoffset = f_offset;
7990 rxaddr = ioaddr;
7991
7992 bzero(req, sizeof(req));
7993 nextsend = nextwait = 0;
7994 do {
7995 if (np->n_flag & NREVOKE) {
7996 error = EIO;
7997 break;
7998 }
7999 /* send requests while we need to and have available slots */
8000 while ((txsize > 0) && (req[nextsend] == NULL)) {
8001 iosize = MIN(nmrsize, txsize);
8002 if ((error = nmp->nm_funcs->nf_read_rpc_async(np, txoffset, iosize, thd, cred, NULL, &req[nextsend]))) {
8003 req[nextsend] = NULL;
8004 break;
8005 }
8006 txoffset += iosize;
8007 txsize -= iosize;
8008 nextsend = (nextsend + 1) % MAXPAGINGREQS;
8009 }
8010 /* wait while we need to and break out if more requests to send */
8011 while ((rxsize > 0) && req[nextwait]) {
8012 iosize = retsize = MIN(nmrsize, rxsize);
8013 uio_reset(uio, uio_offset(uio), UIO_SYSSPACE, UIO_READ);
8014 uio_addiov(uio, CAST_USER_ADDR_T(rxaddr), iosize);
8015 FSDBG(322, uio_offset(uio), uio_resid(uio), rxaddr, rxsize);
8016 #if UPL_DEBUG
8017 upl_ubc_alias_set(pl, (uintptr_t) current_thread(), (uintptr_t) 2);
8018 #endif /* UPL_DEBUG */
8019 OSAddAtomic64(1, &nfsclntstats.pageins);
8020 error = nmp->nm_funcs->nf_read_rpc_async_finish(np, req[nextwait], uio, &retsize, &eof);
8021 req[nextwait] = NULL;
8022 nextwait = (nextwait + 1) % MAXPAGINGREQS;
8023 partial_read:
8024 #if CONFIG_NFS4
8025 if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error)) {
8026 lck_mtx_lock(&nmp->nm_lock);
8027 if ((error != NFSERR_GRACE) && (stategenid == nmp->nm_stategenid)) {
8028 NP(np, "nfs_vnop_pagein: error %d, initiating recovery", error);
8029 nfs_need_recover(nmp, error);
8030 }
8031 lck_mtx_unlock(&nmp->nm_lock);
8032 restart++;
8033 goto cancel;
8034 }
8035 #endif
8036 if (error) {
8037 FSDBG(322, uio_offset(uio), uio_resid(uio), error, -1);
8038 break;
8039 }
8040
8041 if ((nmp->nm_vers != NFS_VER2 && (eof || retsize == 0)) || ((nmp->nm_vers == NFS_VER2) && (retsize < iosize))) {
8042 /* EOF was reached, Just zero fill the rest of the valid area. */
8043 size_t zcnt = iosize - retsize;
8044 bzero((char *)rxaddr + retsize, zcnt);
8045 FSDBG(324, uio_offset(uio), retsize, zcnt, rxaddr);
8046 uio_update(uio, zcnt);
8047 retsize += zcnt;
8048 }
8049
8050 rxaddr += retsize;
8051 rxsize -= retsize;
8052 iosize -= retsize;
8053
8054 if (iosize) {
8055 /* Handle partial reads */
8056 struct nfsreq *treq = NULL;
8057 off_t ttxoffset = f_offset + rxaddr - ioaddr;
8058 retsize = iosize;
8059
8060 error = nmp->nm_funcs->nf_read_rpc_async(np, ttxoffset, iosize, thd, cred, NULL, &treq);
8061 if (error) {
8062 break;
8063 }
8064 error = nmp->nm_funcs->nf_read_rpc_async_finish(np, treq, uio, &retsize, &eof);
8065 goto partial_read;
8066 }
8067
8068 if (txsize) {
8069 break;
8070 }
8071 }
8072 } while (!error && (txsize || rxsize));
8073
8074 restart = 0;
8075
8076 if (error) {
8077 #if CONFIG_NFS4
8078 cancel:
8079 #endif
8080 /* cancel any outstanding requests */
8081 while (req[nextwait]) {
8082 nfs_request_async_cancel(req[nextwait]);
8083 req[nextwait] = NULL;
8084 nextwait = (nextwait + 1) % MAXPAGINGREQS;
8085 }
8086 if (np->n_flag & NREVOKE) {
8087 error = EIO;
8088 } else if (restart) {
8089 if (restart <= nfs_mount_state_max_restarts(nmp)) { /* guard against no progress */
8090 if (error == NFSERR_GRACE) {
8091 tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
8092 }
8093 if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
8094 goto tryagain;
8095 }
8096 } else {
8097 NP(np, "nfs_pagein: too many restarts, aborting");
8098 }
8099 }
8100 }
8101
8102 /* Free allocated uio buffer */
8103 uio_free(uio);
8104 ubc_upl_unmap(pl);
8105
8106 if (!nofreeupl) {
8107 if (error) {
8108 /*
8109 * See comment in vnode_pagein() on handling EAGAIN, even though UPL_NOCOMMIT flag
8110 * is not set, we will not abort this upl, since VM subsystem will handle it.
8111 */
8112 if (error != EAGAIN && error != EPERM) {
8113 ubc_upl_abort_range(pl, pl_offset, size,
8114 UPL_ABORT_ERROR |
8115 UPL_ABORT_FREE_ON_EMPTY);
8116 }
8117 } else {
8118 ubc_upl_commit_range(pl, pl_offset, size,
8119 UPL_COMMIT_CLEAR_DIRTY |
8120 UPL_COMMIT_FREE_ON_EMPTY);
8121 }
8122 }
8123 return NFS_MAPERR(error);
8124 }
8125
8126
8127 /*
8128 * the following are needed only by nfs_pageout to know how to handle errors
8129 * see nfs_pageout comments on explanation of actions.
8130 * the errors here are copied from errno.h and errors returned by servers
8131 * are expected to match the same numbers here. If not, our actions maybe
8132 * erroneous.
8133 */
8134 char nfs_pageouterrorhandler(int);
8135 enum actiontype {NOACTION, DUMP, DUMPANDLOG, RETRY, SEVER};
8136 #define NFS_ELAST 88
8137 static u_char errorcount[NFS_ELAST + 1]; /* better be zeros when initialized */
8138 static const char errortooutcome[NFS_ELAST + 1] = {
8139 NOACTION,
8140 DUMP, /* EPERM 1 Operation not permitted */
8141 DUMP, /* ENOENT 2 No such file or directory */
8142 DUMPANDLOG, /* ESRCH 3 No such process */
8143 RETRY, /* EINTR 4 Interrupted system call */
8144 DUMP, /* EIO 5 Input/output error */
8145 DUMP, /* ENXIO 6 Device not configured */
8146 DUMPANDLOG, /* E2BIG 7 Argument list too long */
8147 DUMPANDLOG, /* ENOEXEC 8 Exec format error */
8148 DUMPANDLOG, /* EBADF 9 Bad file descriptor */
8149 DUMPANDLOG, /* ECHILD 10 No child processes */
8150 DUMPANDLOG, /* EDEADLK 11 Resource deadlock avoided - was EAGAIN */
8151 RETRY, /* ENOMEM 12 Cannot allocate memory */
8152 DUMP, /* EACCES 13 Permission denied */
8153 DUMPANDLOG, /* EFAULT 14 Bad address */
8154 DUMPANDLOG, /* ENOTBLK 15 POSIX - Block device required */
8155 RETRY, /* EBUSY 16 Device busy */
8156 DUMP, /* EEXIST 17 File exists */
8157 DUMP, /* EXDEV 18 Cross-device link */
8158 DUMP, /* ENODEV 19 Operation not supported by device */
8159 DUMP, /* ENOTDIR 20 Not a directory */
8160 DUMP, /* EISDIR 21 Is a directory */
8161 DUMP, /* EINVAL 22 Invalid argument */
8162 DUMPANDLOG, /* ENFILE 23 Too many open files in system */
8163 DUMPANDLOG, /* EMFILE 24 Too many open files */
8164 DUMPANDLOG, /* ENOTTY 25 Inappropriate ioctl for device */
8165 DUMPANDLOG, /* ETXTBSY 26 Text file busy - POSIX */
8166 DUMP, /* EFBIG 27 File too large */
8167 DUMP, /* ENOSPC 28 No space left on device */
8168 DUMPANDLOG, /* ESPIPE 29 Illegal seek */
8169 DUMP, /* EROFS 30 Read-only file system */
8170 DUMP, /* EMLINK 31 Too many links */
8171 RETRY, /* EPIPE 32 Broken pipe */
8172 /* math software */
8173 DUMPANDLOG, /* EDOM 33 Numerical argument out of domain */
8174 DUMPANDLOG, /* ERANGE 34 Result too large */
8175 RETRY, /* EAGAIN/EWOULDBLOCK 35 Resource temporarily unavailable */
8176 DUMPANDLOG, /* EINPROGRESS 36 Operation now in progress */
8177 DUMPANDLOG, /* EALREADY 37 Operation already in progress */
8178 /* ipc/network software -- argument errors */
8179 DUMPANDLOG, /* ENOTSOC 38 Socket operation on non-socket */
8180 DUMPANDLOG, /* EDESTADDRREQ 39 Destination address required */
8181 DUMPANDLOG, /* EMSGSIZE 40 Message too long */
8182 DUMPANDLOG, /* EPROTOTYPE 41 Protocol wrong type for socket */
8183 DUMPANDLOG, /* ENOPROTOOPT 42 Protocol not available */
8184 DUMPANDLOG, /* EPROTONOSUPPORT 43 Protocol not supported */
8185 DUMPANDLOG, /* ESOCKTNOSUPPORT 44 Socket type not supported */
8186 DUMPANDLOG, /* ENOTSUP 45 Operation not supported */
8187 DUMPANDLOG, /* EPFNOSUPPORT 46 Protocol family not supported */
8188 DUMPANDLOG, /* EAFNOSUPPORT 47 Address family not supported by protocol family */
8189 DUMPANDLOG, /* EADDRINUSE 48 Address already in use */
8190 DUMPANDLOG, /* EADDRNOTAVAIL 49 Can't assign requested address */
8191 /* ipc/network software -- operational errors */
8192 RETRY, /* ENETDOWN 50 Network is down */
8193 RETRY, /* ENETUNREACH 51 Network is unreachable */
8194 RETRY, /* ENETRESET 52 Network dropped connection on reset */
8195 RETRY, /* ECONNABORTED 53 Software caused connection abort */
8196 RETRY, /* ECONNRESET 54 Connection reset by peer */
8197 RETRY, /* ENOBUFS 55 No buffer space available */
8198 RETRY, /* EISCONN 56 Socket is already connected */
8199 RETRY, /* ENOTCONN 57 Socket is not connected */
8200 RETRY, /* ESHUTDOWN 58 Can't send after socket shutdown */
8201 RETRY, /* ETOOMANYREFS 59 Too many references: can't splice */
8202 RETRY, /* ETIMEDOUT 60 Operation timed out */
8203 RETRY, /* ECONNREFUSED 61 Connection refused */
8204
8205 DUMPANDLOG, /* ELOOP 62 Too many levels of symbolic links */
8206 DUMP, /* ENAMETOOLONG 63 File name too long */
8207 RETRY, /* EHOSTDOWN 64 Host is down */
8208 RETRY, /* EHOSTUNREACH 65 No route to host */
8209 DUMP, /* ENOTEMPTY 66 Directory not empty */
8210 /* quotas & mush */
8211 DUMPANDLOG, /* PROCLIM 67 Too many processes */
8212 DUMPANDLOG, /* EUSERS 68 Too many users */
8213 DUMPANDLOG, /* EDQUOT 69 Disc quota exceeded */
8214 /* Network File System */
8215 DUMP, /* ESTALE 70 Stale NFS file handle */
8216 DUMP, /* EREMOTE 71 Too many levels of remote in path */
8217 DUMPANDLOG, /* EBADRPC 72 RPC struct is bad */
8218 DUMPANDLOG, /* ERPCMISMATCH 73 RPC version wrong */
8219 DUMPANDLOG, /* EPROGUNAVAIL 74 RPC prog. not avail */
8220 DUMPANDLOG, /* EPROGMISMATCH 75 Program version wrong */
8221 DUMPANDLOG, /* EPROCUNAVAIL 76 Bad procedure for program */
8222
8223 DUMPANDLOG, /* ENOLCK 77 No locks available */
8224 DUMPANDLOG, /* ENOSYS 78 Function not implemented */
8225 DUMPANDLOG, /* EFTYPE 79 Inappropriate file type or format */
8226 DUMPANDLOG, /* EAUTH 80 Authentication error */
8227 DUMPANDLOG, /* ENEEDAUTH 81 Need authenticator */
8228 /* Intelligent device errors */
8229 DUMPANDLOG, /* EPWROFF 82 Device power is off */
8230 DUMPANDLOG, /* EDEVERR 83 Device error, e.g. paper out */
8231 DUMPANDLOG, /* EOVERFLOW 84 Value too large to be stored in data type */
8232 /* Program loading errors */
8233 DUMPANDLOG, /* EBADEXEC 85 Bad executable */
8234 DUMPANDLOG, /* EBADARCH 86 Bad CPU type in executable */
8235 DUMPANDLOG, /* ESHLIBVERS 87 Shared library version mismatch */
8236 DUMPANDLOG, /* EBADMACHO 88 Malformed Macho file */
8237 };
8238
8239 char
nfs_pageouterrorhandler(int error)8240 nfs_pageouterrorhandler(int error)
8241 {
8242 if (error > NFS_ELAST) {
8243 return DUMP;
8244 } else {
8245 return errortooutcome[error];
8246 }
8247 }
8248
8249
8250 /*
8251 * vnode OP for pageout using UPL
8252 *
8253 * No buffer I/O, just RPCs straight from the mapped pages.
8254 * File size changes are not permitted in pageout.
8255 */
8256 int
nfs_vnop_pageout(struct vnop_pageout_args * ap)8257 nfs_vnop_pageout(
8258 struct vnop_pageout_args /* {
8259 * struct vnodeop_desc *a_desc;
8260 * vnode_t a_vp;
8261 * upl_t a_pl;
8262 * vm_offset_t a_pl_offset;
8263 * off_t a_f_offset;
8264 * size_t a_size;
8265 * int a_flags;
8266 * vfs_context_t a_context;
8267 * } */*ap)
8268 {
8269 vnode_t vp = ap->a_vp;
8270 upl_t pl = ap->a_pl;
8271 upl_size_t size = (upl_size_t)ap->a_size;
8272 off_t f_offset = ap->a_f_offset;
8273 upl_offset_t pl_offset = ap->a_pl_offset;
8274 upl_offset_t pgsize;
8275 int flags = ap->a_flags;
8276 nfsnode_t np = VTONFS(vp);
8277 thread_t thd;
8278 kauth_cred_t cred;
8279 struct nfsbuf *bp;
8280 struct nfsmount *nmp = VTONMP(vp);
8281 daddr64_t lbn;
8282 int error = 0, iomode;
8283 off_t off, txoffset, rxoffset;
8284 vm_offset_t ioaddr, txaddr, rxaddr;
8285 uio_t auio;
8286 int nofreeupl = flags & UPL_NOCOMMIT;
8287 size_t nmwsize, biosize, iosize, remsize;
8288 struct nfsreq *req[MAXPAGINGREQS];
8289 int nextsend, nextwait, wverfset, commit;
8290 uint64_t wverf, wverf2, xsize, txsize, rxsize;
8291 #if CONFIG_NFS4
8292 uint32_t stategenid = 0;
8293 #endif
8294 uint32_t vrestart = 0, restart = 0, vrestarts = 0, restarts = 0;
8295 kern_return_t kret;
8296
8297 FSDBG(323, f_offset, size, pl, pl_offset);
8298
8299 if (pl == (upl_t)NULL) {
8300 panic("nfs_pageout: no upl");
8301 }
8302
8303 if (size <= 0) {
8304 printf("nfs_pageout: invalid size %u", size);
8305 if (!nofreeupl) {
8306 ubc_upl_abort_range(pl, pl_offset, size, 0);
8307 }
8308 return EINVAL;
8309 }
8310
8311 if (!nmp) {
8312 if (!nofreeupl) {
8313 ubc_upl_abort(pl, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
8314 }
8315 return ENXIO;
8316 }
8317 biosize = nmp->nm_biosize;
8318 nmwsize = nmp->nm_wsize;
8319
8320 nfs_data_lock_noupdate(np, NFS_DATA_LOCK_SHARED);
8321
8322 /*
8323 * Check to see whether the buffer is incore.
8324 * If incore and not busy, invalidate it from the cache.
8325 */
8326 for (iosize = 0; iosize < size; iosize += xsize) {
8327 off = f_offset + iosize;
8328 /* need make sure we do things on block boundaries */
8329 xsize = biosize - (off % biosize);
8330 if (off + (off_t)xsize > f_offset + (off_t)size) {
8331 xsize = f_offset + size - off;
8332 }
8333 lbn = (daddr64_t)(off / biosize);
8334 lck_mtx_lock(&nfs_buf_mutex);
8335 if ((bp = nfs_buf_incore(np, lbn))) {
8336 FSDBG(323, off, bp, bp->nb_lflags, bp->nb_flags);
8337 if (nfs_buf_acquire(bp, NBAC_NOWAIT, 0, 0)) {
8338 lck_mtx_unlock(&nfs_buf_mutex);
8339 nfs_data_unlock_noupdate(np);
8340 /* no panic. just tell vm we are busy */
8341 if (!nofreeupl) {
8342 ubc_upl_abort_range(pl, pl_offset, size, 0);
8343 }
8344 return EBUSY;
8345 }
8346 if (bp->nb_dirtyend > 0) {
8347 /*
8348 * if there's a dirty range in the buffer, check
8349 * to see if it extends beyond the pageout region
8350 *
8351 * if the dirty region lies completely within the
8352 * pageout region, we just invalidate the buffer
8353 * because it's all being written out now anyway.
8354 *
8355 * if any of the dirty region lies outside the
8356 * pageout region, we'll try to clip the dirty
8357 * region to eliminate the portion that's being
8358 * paged out. If that's not possible, because
8359 * the dirty region extends before and after the
8360 * pageout region, then we'll just return EBUSY.
8361 */
8362 off_t boff, start, end;
8363 boff = NBOFF(bp);
8364 start = off;
8365 end = off + xsize;
8366 /* clip end to EOF */
8367 if (end > (off_t)np->n_size) {
8368 end = np->n_size;
8369 }
8370 start -= boff;
8371 end -= boff;
8372 if ((bp->nb_dirtyoff < start) &&
8373 (bp->nb_dirtyend > end)) {
8374 /*
8375 * not gonna be able to clip the dirty region
8376 *
8377 * But before returning the bad news, move the
8378 * buffer to the start of the delwri list and
8379 * give the list a push to try to flush the
8380 * buffer out.
8381 */
8382 FSDBG(323, np, bp, 0xd00deebc, EBUSY);
8383 nfs_buf_remfree(bp);
8384 TAILQ_INSERT_HEAD(&nfsbufdelwri, bp, nb_free);
8385 nfsbufdelwricnt++;
8386 nfs_buf_drop(bp);
8387 nfs_buf_delwri_push(1);
8388 lck_mtx_unlock(&nfs_buf_mutex);
8389 nfs_data_unlock_noupdate(np);
8390 if (!nofreeupl) {
8391 ubc_upl_abort_range(pl, pl_offset, size, 0);
8392 }
8393 return EBUSY;
8394 }
8395 if ((bp->nb_dirtyoff < start) ||
8396 (bp->nb_dirtyend > end)) {
8397 /* clip dirty region, if necessary */
8398 if (bp->nb_dirtyoff < start) {
8399 bp->nb_dirtyend = MIN(bp->nb_dirtyend, start);
8400 }
8401 if (bp->nb_dirtyend > end) {
8402 bp->nb_dirtyoff = MAX(bp->nb_dirtyoff, end);
8403 }
8404 FSDBG(323, bp, bp->nb_dirtyoff, bp->nb_dirtyend, 0xd00dee00);
8405 /* we're leaving this block dirty */
8406 nfs_buf_drop(bp);
8407 lck_mtx_unlock(&nfs_buf_mutex);
8408 continue;
8409 }
8410 }
8411 nfs_buf_remfree(bp);
8412 lck_mtx_unlock(&nfs_buf_mutex);
8413 SET(bp->nb_flags, NB_INVAL);
8414 nfs_node_lock_force(np);
8415 if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
8416 CLR(bp->nb_flags, NB_NEEDCOMMIT);
8417 np->n_needcommitcnt--;
8418 CHECK_NEEDCOMMITCNT(np);
8419 }
8420 nfs_node_unlock(np);
8421 nfs_buf_release(bp, 1);
8422 } else {
8423 lck_mtx_unlock(&nfs_buf_mutex);
8424 }
8425 }
8426
8427 thd = vfs_context_thread(ap->a_context);
8428 cred = ubc_getcred(vp);
8429 if (!IS_VALID_CRED(cred)) {
8430 cred = vfs_context_ucred(ap->a_context);
8431 }
8432
8433 nfs_node_lock_force(np);
8434 if (np->n_flag & NWRITEERR) {
8435 error = np->n_error;
8436 nfs_node_unlock(np);
8437 nfs_data_unlock_noupdate(np);
8438 if (!nofreeupl) {
8439 ubc_upl_abort_range(pl, pl_offset, size,
8440 UPL_ABORT_FREE_ON_EMPTY);
8441 }
8442 return NFS_MAPERR(error);
8443 }
8444 nfs_node_unlock(np);
8445
8446 if (f_offset < 0 || f_offset >= (off_t)np->n_size ||
8447 f_offset & PAGE_MASK_64 || size & PAGE_MASK_64) {
8448 nfs_data_unlock_noupdate(np);
8449 if (!nofreeupl) {
8450 ubc_upl_abort_range(pl, pl_offset, size,
8451 UPL_ABORT_FREE_ON_EMPTY);
8452 }
8453 return EINVAL;
8454 }
8455
8456 kret = ubc_upl_map(pl, &ioaddr);
8457 if (kret != KERN_SUCCESS) {
8458 panic("nfs_vnop_pageout: ubc_upl_map() failed with (%d)", kret);
8459 }
8460 ioaddr += pl_offset;
8461
8462 if ((u_quad_t)f_offset + size > np->n_size) {
8463 xsize = np->n_size - f_offset;
8464 } else {
8465 xsize = size;
8466 }
8467
8468 pgsize = (upl_offset_t)round_page_64(xsize);
8469 if ((size > pgsize) && !nofreeupl) {
8470 ubc_upl_abort_range(pl, pl_offset + pgsize, size - pgsize,
8471 UPL_ABORT_FREE_ON_EMPTY);
8472 }
8473
8474 /*
8475 * check for partial page and clear the
8476 * contents past end of the file before
8477 * releasing it in the VM page cache
8478 */
8479 if ((u_quad_t)f_offset < np->n_size && (u_quad_t)f_offset + size > np->n_size) {
8480 uint64_t io = np->n_size - f_offset;
8481 NFS_BZERO((caddr_t)(ioaddr + io), size - io);
8482 FSDBG(321, np->n_size, f_offset, f_offset + io, size - io);
8483 }
8484 nfs_data_unlock_noupdate(np);
8485
8486 auio = uio_create(1, 0, UIO_SYSSPACE, UIO_WRITE);
8487
8488 tryagain:
8489 #if CONFIG_NFS4
8490 if (nmp->nm_vers >= NFS_VER4) {
8491 stategenid = nmp->nm_stategenid;
8492 }
8493 #endif
8494 wverf = wverf2 = wverfset = 0;
8495 txsize = rxsize = xsize;
8496 txoffset = rxoffset = f_offset;
8497 txaddr = rxaddr = ioaddr;
8498 commit = NFS_WRITE_FILESYNC;
8499
8500 bzero(req, sizeof(req));
8501 nextsend = nextwait = 0;
8502 do {
8503 if (np->n_flag & NREVOKE) {
8504 error = EIO;
8505 break;
8506 }
8507 /* send requests while we need to and have available slots */
8508 while ((txsize > 0) && (req[nextsend] == NULL)) {
8509 iosize = (size_t)MIN(nmwsize, txsize);
8510 uio_reset(auio, txoffset, UIO_SYSSPACE, UIO_WRITE);
8511 uio_addiov(auio, CAST_USER_ADDR_T(txaddr), iosize);
8512 FSDBG(323, uio_offset(auio), iosize, txaddr, txsize);
8513 OSAddAtomic64(1, &nfsclntstats.pageouts);
8514 nfs_node_lock_force(np);
8515 np->n_numoutput++;
8516 nfs_node_unlock(np);
8517 vnode_startwrite(vp);
8518 iomode = NFS_WRITE_UNSTABLE;
8519 if ((error = nmp->nm_funcs->nf_write_rpc_async(np, auio, iosize, thd, cred, iomode, NULL, &req[nextsend]))) {
8520 req[nextsend] = NULL;
8521 vnode_writedone(vp);
8522 nfs_node_lock_force(np);
8523 np->n_numoutput--;
8524 nfs_node_unlock(np);
8525 break;
8526 }
8527 txaddr += iosize;
8528 txoffset += iosize;
8529 txsize -= iosize;
8530 nextsend = (nextsend + 1) % MAXPAGINGREQS;
8531 }
8532 /* wait while we need to and break out if more requests to send */
8533 while ((rxsize > 0) && req[nextwait]) {
8534 iosize = remsize = (size_t)MIN(nmwsize, rxsize);
8535 error = nmp->nm_funcs->nf_write_rpc_async_finish(np, req[nextwait], &iomode, &iosize, &wverf2);
8536 req[nextwait] = NULL;
8537 nextwait = (nextwait + 1) % MAXPAGINGREQS;
8538 vnode_writedone(vp);
8539 nfs_node_lock_force(np);
8540 np->n_numoutput--;
8541 nfs_node_unlock(np);
8542 #if CONFIG_NFS4
8543 if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error)) {
8544 lck_mtx_lock(&nmp->nm_lock);
8545 if ((error != NFSERR_GRACE) && (stategenid == nmp->nm_stategenid)) {
8546 NP(np, "nfs_vnop_pageout: error %d, initiating recovery", error);
8547 nfs_need_recover(nmp, error);
8548 }
8549 lck_mtx_unlock(&nmp->nm_lock);
8550 restart = 1;
8551 goto cancel;
8552 }
8553 #endif
8554 if (error) {
8555 FSDBG(323, rxoffset, rxsize, error, -1);
8556 break;
8557 }
8558 if (!wverfset) {
8559 wverf = wverf2;
8560 wverfset = 1;
8561 } else if (wverf != wverf2) {
8562 /* verifier changed, so we need to restart all the writes */
8563 vrestart = 1;
8564 goto cancel;
8565 }
8566 /* Retain the lowest commitment level returned. */
8567 if (iomode < commit) {
8568 commit = iomode;
8569 }
8570 rxaddr += iosize;
8571 rxoffset += iosize;
8572 rxsize -= iosize;
8573 remsize -= iosize;
8574 if (remsize > 0) {
8575 /* need to try sending the remainder */
8576 iosize = remsize;
8577 uio_reset(auio, rxoffset, UIO_SYSSPACE, UIO_WRITE);
8578 uio_addiov(auio, CAST_USER_ADDR_T(rxaddr), remsize);
8579 iomode = NFS_WRITE_UNSTABLE;
8580 error = nfs_write_rpc2(np, auio, thd, cred, &iomode, &wverf2);
8581 #if CONFIG_NFS4
8582 if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error)) {
8583 NP(np, "nfs_vnop_pageout: restart: error %d", error);
8584 lck_mtx_lock(&nmp->nm_lock);
8585 if ((error != NFSERR_GRACE) && (stategenid == nmp->nm_stategenid)) {
8586 NP(np, "nfs_vnop_pageout: error %d, initiating recovery", error);
8587 nfs_need_recover(nmp, error);
8588 }
8589 lck_mtx_unlock(&nmp->nm_lock);
8590 restart = 1;
8591 goto cancel;
8592 }
8593 #endif
8594 if (error) {
8595 FSDBG(323, rxoffset, rxsize, error, -1);
8596 break;
8597 }
8598 if (wverf != wverf2) {
8599 /* verifier changed, so we need to restart all the writes */
8600 vrestart = 1;
8601 goto cancel;
8602 }
8603 if (iomode < commit) {
8604 commit = iomode;
8605 }
8606 rxaddr += iosize;
8607 rxoffset += iosize;
8608 rxsize -= iosize;
8609 }
8610 if (txsize) {
8611 break;
8612 }
8613 }
8614 } while (!error && (txsize || rxsize));
8615
8616 vrestart = 0;
8617
8618 if (!error && (commit != NFS_WRITE_FILESYNC)) {
8619 error = nmp->nm_funcs->nf_commit_rpc(np, f_offset, xsize, cred, wverf);
8620 if (error == NFSERR_STALEWRITEVERF) {
8621 vrestart = 1;
8622 error = EIO;
8623 }
8624 }
8625
8626 if (error) {
8627 cancel:
8628 /* cancel any outstanding requests */
8629 while (req[nextwait]) {
8630 nfs_request_async_cancel(req[nextwait]);
8631 req[nextwait] = NULL;
8632 nextwait = (nextwait + 1) % MAXPAGINGREQS;
8633 vnode_writedone(vp);
8634 nfs_node_lock_force(np);
8635 np->n_numoutput--;
8636 nfs_node_unlock(np);
8637 }
8638 if (np->n_flag & NREVOKE) {
8639 error = EIO;
8640 } else {
8641 if (vrestart) {
8642 if (++vrestarts <= 100) { /* guard against no progress */
8643 goto tryagain;
8644 }
8645 NP(np, "nfs_pageout: too many restarts, aborting");
8646 FSDBG(323, f_offset, xsize, ERESTART, -1);
8647 }
8648 if (restart) {
8649 if (restarts <= nfs_mount_state_max_restarts(nmp)) { /* guard against no progress */
8650 if (error == NFSERR_GRACE) {
8651 tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
8652 }
8653 if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
8654 goto tryagain;
8655 }
8656 } else {
8657 NP(np, "nfs_pageout: too many restarts, aborting");
8658 FSDBG(323, f_offset, xsize, ERESTART, -1);
8659 }
8660 }
8661 }
8662 }
8663
8664 /* Free allocated uio buffer */
8665 uio_free(auio);
8666 ubc_upl_unmap(pl);
8667
8668 /*
8669 * We've had several different solutions on what to do when the pageout
8670 * gets an error. If we don't handle it, and return an error to the
8671 * caller, vm, it will retry . This can end in endless looping
8672 * between vm and here doing retries of the same page. Doing a dump
8673 * back to vm, will get it out of vm's knowledge and we lose whatever
8674 * data existed. This is risky, but in some cases necessary. For
8675 * example, the initial fix here was to do that for ESTALE. In that case
8676 * the server is telling us that the file is no longer the same. We
8677 * would not want to keep paging out to that. We also saw some 151
8678 * errors from Auspex server and NFSv3 can return errors higher than
8679 * ELAST. Those along with NFS known server errors we will "dump" from
8680 * vm. Errors we don't expect to occur, we dump and log for further
8681 * analysis. Errors that could be transient, networking ones,
8682 * we let vm "retry". Lastly, errors that we retry, but may have potential
8683 * to storm the network, we "retrywithsleep". "sever" will be used in
8684 * in the future to dump all pages of object for cases like ESTALE.
8685 * All this is the basis for the states returned and first guesses on
8686 * error handling. Tweaking expected as more statistics are gathered.
8687 * Note, in the long run we may need another more robust solution to
8688 * have some kind of persistant store when the vm cannot dump nor keep
8689 * retrying as a solution, but this would be a file architectural change
8690 */
8691 if (!nofreeupl) { /* otherwise stacked file system has to handle this */
8692 if (error) {
8693 int abortflags = 0;
8694 char action = nfs_pageouterrorhandler(error);
8695
8696 switch (action) {
8697 case DUMP:
8698 abortflags = UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY;
8699 break;
8700 case DUMPANDLOG:
8701 abortflags = UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY;
8702 if (error <= NFS_ELAST) {
8703 if ((errorcount[error] % 100) == 0) {
8704 NP(np, "nfs_pageout: unexpected error %d. dumping vm page", error);
8705 }
8706 errorcount[error]++;
8707 }
8708 break;
8709 case RETRY:
8710 abortflags = UPL_ABORT_FREE_ON_EMPTY;
8711 break;
8712 case SEVER: /* not implemented */
8713 default:
8714 NP(np, "nfs_pageout: action %d not expected", action);
8715 break;
8716 }
8717
8718 ubc_upl_abort_range(pl, pl_offset, pgsize, abortflags);
8719 /* return error in all cases above */
8720 } else {
8721 ubc_upl_commit_range(pl, pl_offset, pgsize,
8722 UPL_COMMIT_CLEAR_DIRTY |
8723 UPL_COMMIT_FREE_ON_EMPTY);
8724 }
8725 }
8726 return NFS_MAPERR(error);
8727 }
8728
8729 /* Blktooff derives file offset given a logical block number */
8730 int
nfs_vnop_blktooff(struct vnop_blktooff_args * ap)8731 nfs_vnop_blktooff(
8732 struct vnop_blktooff_args /* {
8733 * struct vnodeop_desc *a_desc;
8734 * vnode_t a_vp;
8735 * daddr64_t a_lblkno;
8736 * off_t *a_offset;
8737 * } */*ap)
8738 {
8739 int biosize;
8740 vnode_t vp = ap->a_vp;
8741 struct nfsmount *nmp = VTONMP(vp);
8742
8743 if (nfs_mount_gone(nmp)) {
8744 return ENXIO;
8745 }
8746 biosize = nmp->nm_biosize;
8747
8748 *ap->a_offset = (off_t)(ap->a_lblkno * biosize);
8749
8750 return 0;
8751 }
8752
8753 int
nfs_vnop_offtoblk(struct vnop_offtoblk_args * ap)8754 nfs_vnop_offtoblk(
8755 struct vnop_offtoblk_args /* {
8756 * struct vnodeop_desc *a_desc;
8757 * vnode_t a_vp;
8758 * off_t a_offset;
8759 * daddr64_t *a_lblkno;
8760 * } */*ap)
8761 {
8762 int biosize;
8763 vnode_t vp = ap->a_vp;
8764 struct nfsmount *nmp = VTONMP(vp);
8765
8766 if (nfs_mount_gone(nmp)) {
8767 return ENXIO;
8768 }
8769 biosize = nmp->nm_biosize;
8770
8771 *ap->a_lblkno = (daddr64_t)(ap->a_offset / biosize);
8772
8773 return 0;
8774 }
8775
8776 /*
8777 * vnode change monitoring
8778 */
8779 int
nfs_vnop_monitor(struct vnop_monitor_args * ap)8780 nfs_vnop_monitor(
8781 struct vnop_monitor_args /* {
8782 * struct vnodeop_desc *a_desc;
8783 * vnode_t a_vp;
8784 * uint32_t a_events;
8785 * uint32_t a_flags;
8786 * void *a_handle;
8787 * vfs_context_t a_context;
8788 * } */*ap)
8789 {
8790 nfsnode_t np = VTONFS(ap->a_vp);
8791 struct nfsmount *nmp = VTONMP(ap->a_vp);
8792 int error = 0;
8793
8794 if (nfs_mount_gone(nmp)) {
8795 return ENXIO;
8796 }
8797
8798 /* make sure that the vnode's monitoring status is up to date */
8799 lck_mtx_lock(&nmp->nm_lock);
8800 if (vnode_ismonitored(ap->a_vp)) {
8801 /* This vnode is currently being monitored, make sure we're tracking it. */
8802 if (np->n_monlink.le_next == NFSNOLIST) {
8803 LIST_INSERT_HEAD(&nmp->nm_monlist, np, n_monlink);
8804 nfs_mount_sock_thread_wake(nmp);
8805 }
8806 } else {
8807 /* This vnode is no longer being monitored, make sure we're not tracking it. */
8808 /* Wait for any in-progress getattr to complete first. */
8809 while (np->n_mflag & NMMONSCANINPROG) {
8810 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
8811 np->n_mflag |= NMMONSCANWANT;
8812 msleep(&np->n_mflag, &nmp->nm_lock, PZERO - 1, "nfswaitmonscan", &ts);
8813 }
8814 if (np->n_monlink.le_next != NFSNOLIST) {
8815 LIST_REMOVE(np, n_monlink);
8816 np->n_monlink.le_next = NFSNOLIST;
8817 }
8818 }
8819 lck_mtx_unlock(&nmp->nm_lock);
8820
8821 return NFS_MAPERR(error);
8822 }
8823
8824 /*
8825 * Send a vnode notification for the given events.
8826 */
8827 void
nfs_vnode_notify(nfsnode_t np,uint32_t events)8828 nfs_vnode_notify(nfsnode_t np, uint32_t events)
8829 {
8830 struct nfsmount *nmp = NFSTONMP(np);
8831 struct nfs_vattr *nvattr;
8832 struct vnode_attr vattr, *vap = NULL;
8833 struct timeval now;
8834
8835 microuptime(&now);
8836 if ((np->n_evtstamp == now.tv_sec) || !nmp) {
8837 /* delay sending this notify */
8838 np->n_events |= events;
8839 return;
8840 }
8841 events |= np->n_events;
8842 np->n_events = 0;
8843 np->n_evtstamp = now.tv_sec;
8844 nvattr = zalloc_flags(KT_NFS_VATTR, Z_WAITOK);
8845
8846 vfs_get_notify_attributes(&vattr);
8847 if (!nfs_getattrcache(np, nvattr, 0)) {
8848 vap = &vattr;
8849 VATTR_INIT(vap);
8850
8851 VATTR_RETURN(vap, va_fsid, vfs_statfs(nmp->nm_mountp)->f_fsid.val[0]);
8852 VATTR_RETURN(vap, va_fileid, nvattr->nva_fileid);
8853 VATTR_RETURN(vap, va_mode, nvattr->nva_mode);
8854 VATTR_RETURN(vap, va_uid, nvattr->nva_uid);
8855 VATTR_RETURN(vap, va_gid, nvattr->nva_gid);
8856 VATTR_RETURN(vap, va_nlink, nvattr->nva_nlink);
8857 }
8858 vnode_notify(NFSTOV(np), events, vap);
8859 zfree(KT_NFS_VATTR, nvattr);
8860 }
8861
8862 #endif /* CONFIG_NFS_CLIENT */
8863