Bug Summary

File: nfs/nfs_vnops.c
Warning: line 1628, column 3
Assigned value is garbage or undefined

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name nfs_vnops.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/nfs/nfs_vnops.c
1/* $OpenBSD: nfs_vnops.c,v 1.188 2021/12/12 09:14:59 visa Exp $ */
2/* $NetBSD: nfs_vnops.c,v 1.62.4.1 1996/07/08 20:26:52 jtc Exp $ */
3
4/*
5 * Copyright (c) 1989, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * Rick Macklem at The University of Guelph.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
36 */
37
38
39/*
40 * vnode op calls for Sun NFS version 2 and 3
41 */
42
43#include <sys/param.h>
44#include <sys/kernel.h>
45#include <sys/systm.h>
46#include <sys/resourcevar.h>
47#include <sys/poll.h>
48#include <sys/proc.h>
49#include <sys/mount.h>
50#include <sys/buf.h>
51#include <sys/malloc.h>
52#include <sys/pool.h>
53#include <sys/mbuf.h>
54#include <sys/conf.h>
55#include <sys/namei.h>
56#include <sys/vnode.h>
57#include <sys/lock.h>
58#include <sys/dirent.h>
59#include <sys/fcntl.h>
60#include <sys/lockf.h>
61#include <sys/queue.h>
62#include <sys/specdev.h>
63#include <sys/unistd.h>
64
65#include <miscfs/fifofs/fifo.h>
66
67#include <nfs/rpcv2.h>
68#include <nfs/nfsproto.h>
69#include <nfs/nfs.h>
70#include <nfs/nfsnode.h>
71#include <nfs/nfsmount.h>
72#include <nfs/xdr_subs.h>
73#include <nfs/nfsm_subs.h>
74#include <nfs/nfs_var.h>
75
76#include <uvm/uvm_extern.h>
77
78#include <netinet/in.h>
79
80int nfs_access(void *);
81int nfs_advlock(void *);
82int nfs_bmap(void *);
83int nfs_bwrite(void *);
84int nfs_close(void *);
85int nfs_commit(struct vnode *, u_quad_t, int, struct proc *);
86int nfs_create(void *);
87int nfs_flush(struct vnode *, struct ucred *, int, struct proc *, int);
88int nfs_fsync(void *);
89int nfs_getattr(void *);
90int nfs_getreq(struct nfsrv_descript *, struct nfsd *, int);
91int nfs_islocked(void *);
92int nfs_link(void *);
93int nfs_lock(void *);
94int nfs_lookitup(struct vnode *, char *, int, struct ucred *, struct proc *,
95 struct nfsnode **);
96int nfs_lookup(void *);
97int nfs_mkdir(void *);
98int nfs_mknod(void *);
99int nfs_mknodrpc(struct vnode *, struct vnode **, struct componentname *,
100 struct vattr *);
101int nfs_null(struct vnode *, struct ucred *, struct proc *);
102int nfs_open(void *);
103int nfs_pathconf(void *);
104int nfs_poll(void *);
105int nfs_print(void *);
106int nfs_read(void *);
107int nfs_readdir(void *);
108int nfs_readdirplusrpc(struct vnode *, struct uio *, struct ucred *, int *,
109 struct proc *);
110int nfs_readdirrpc(struct vnode *, struct uio *, struct ucred *, int *);
111int nfs_remove(void *);
112int nfs_removerpc(struct vnode *, char *, int, struct ucred *, struct proc *);
113int nfs_rename(void *);
114int nfs_renameit(struct vnode *, struct componentname *, struct sillyrename *);
115int nfs_renamerpc(struct vnode *, char *, int, struct vnode *, char *, int,
116 struct ucred *, struct proc *);
117int nfs_rmdir(void *);
118int nfs_setattr(void *);
119int nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *,
120 struct proc *);
121int nfs_sillyrename(struct vnode *, struct vnode *,
122 struct componentname *);
123int nfs_strategy(void *);
124int nfs_symlink(void *);
125int nfs_unlock(void *);
126
127void nfs_cache_enter(struct vnode *, struct vnode *, struct componentname *);
128
129int nfsfifo_close(void *);
130int nfsfifo_read(void *);
131int nfsfifo_reclaim(void *);
132int nfsfifo_write(void *);
133
134int nfsspec_access(void *);
135int nfsspec_close(void *);
136int nfsspec_read(void *);
137int nfsspec_write(void *);
138
139/* Global vfs data structures for nfs. */
140const struct vops nfs_vops = {
141 .vop_lookup = nfs_lookup,
142 .vop_create = nfs_create,
143 .vop_mknod = nfs_mknod,
144 .vop_open = nfs_open,
145 .vop_close = nfs_close,
146 .vop_access = nfs_access,
147 .vop_getattr = nfs_getattr,
148 .vop_setattr = nfs_setattr,
149 .vop_read = nfs_read,
150 .vop_write = nfs_write,
151 .vop_ioctl = nfs_ioctl((int (*)(void *))enoioctl),
152 .vop_poll = nfs_poll,
153 .vop_kqfilter = nfs_kqfilter,
154 .vop_revoke = vop_generic_revoke,
155 .vop_fsync = nfs_fsync,
156 .vop_remove = nfs_remove,
157 .vop_link = nfs_link,
158 .vop_rename = nfs_rename,
159 .vop_mkdir = nfs_mkdir,
160 .vop_rmdir = nfs_rmdir,
161 .vop_symlink = nfs_symlink,
162 .vop_readdir = nfs_readdir,
163 .vop_readlink = nfs_readlink,
164 .vop_abortop = vop_generic_abortop,
165 .vop_inactive = nfs_inactive,
166 .vop_reclaim = nfs_reclaim,
167 .vop_lock = nfs_lock,
168 .vop_unlock = nfs_unlock,
169 .vop_bmap = nfs_bmap,
170 .vop_strategy = nfs_strategy,
171 .vop_print = nfs_print,
172 .vop_islocked = nfs_islocked,
173 .vop_pathconf = nfs_pathconf,
174 .vop_advlock = nfs_advlock,
175 .vop_bwrite = nfs_bwrite
176};
177
178/* Special device vnode ops. */
179const struct vops nfs_specvops = {
180 .vop_close = nfsspec_close,
181 .vop_access = nfsspec_access,
182 .vop_getattr = nfs_getattr,
183 .vop_setattr = nfs_setattr,
184 .vop_read = nfsspec_read,
185 .vop_write = nfsspec_write,
186 .vop_fsync = nfs_fsync,
187 .vop_inactive = nfs_inactive,
188 .vop_reclaim = nfs_reclaim,
189 .vop_lock = nfs_lock,
190 .vop_unlock = nfs_unlock,
191 .vop_print = nfs_print,
192 .vop_islocked = nfs_islocked,
193
194 /* XXX: Keep in sync with spec_vops. */
195 .vop_lookup = vop_generic_lookup,
196 .vop_create = vop_generic_badop,
197 .vop_mknod = vop_generic_badop,
198 .vop_open = spec_open,
199 .vop_ioctl = spec_ioctl,
200 .vop_poll = spec_poll,
201 .vop_kqfilter = spec_kqfilter,
202 .vop_revoke = vop_generic_revoke,
203 .vop_remove = vop_generic_badop,
204 .vop_link = vop_generic_badop,
205 .vop_rename = vop_generic_badop,
206 .vop_mkdir = vop_generic_badop,
207 .vop_rmdir = vop_generic_badop,
208 .vop_symlink = vop_generic_badop,
209 .vop_readdir = vop_generic_badop,
210 .vop_readlink = vop_generic_badop,
211 .vop_abortop = vop_generic_badop,
212 .vop_bmap = vop_generic_bmap,
213 .vop_strategy = spec_strategy,
214 .vop_pathconf = spec_pathconf,
215 .vop_advlock = spec_advlock,
216 .vop_bwrite = vop_generic_bwrite,
217};
218
219#ifdef FIFO1
220const struct vops nfs_fifovops = {
221 .vop_close = nfsfifo_close,
222 .vop_access = nfsspec_access,
223 .vop_getattr = nfs_getattr,
224 .vop_setattr = nfs_setattr,
225 .vop_read = nfsfifo_read,
226 .vop_write = nfsfifo_write,
227 .vop_fsync = nfs_fsync,
228 .vop_inactive = nfs_inactive,
229 .vop_reclaim = nfsfifo_reclaim,
230 .vop_lock = nfs_lock,
231 .vop_unlock = nfs_unlock,
232 .vop_print = nfs_print,
233 .vop_islocked = nfs_islocked,
234 .vop_bwrite = vop_generic_bwrite,
235
236 /* XXX: Keep in sync with fifo_vops. */
237 .vop_lookup = vop_generic_lookup,
238 .vop_create = vop_generic_badop,
239 .vop_mknod = vop_generic_badop,
240 .vop_open = fifo_open,
241 .vop_ioctl = fifo_ioctl,
242 .vop_poll = fifo_poll,
243 .vop_kqfilter = fifo_kqfilter,
244 .vop_revoke = vop_generic_revoke,
245 .vop_remove = vop_generic_badop,
246 .vop_link = vop_generic_badop,
247 .vop_rename = vop_generic_badop,
248 .vop_mkdir = vop_generic_badop,
249 .vop_rmdir = vop_generic_badop,
250 .vop_symlink = vop_generic_badop,
251 .vop_readdir = vop_generic_badop,
252 .vop_readlink = vop_generic_badop,
253 .vop_abortop = vop_generic_badop,
254 .vop_bmap = vop_generic_bmap,
255 .vop_strategy = vop_generic_badop,
256 .vop_pathconf = fifo_pathconf,
257 .vop_advlock = fifo_advlock,
258};
259#endif /* FIFO */
260
261/*
262 * Global variables
263 */
264extern u_int32_t nfs_true, nfs_false;
265extern u_int32_t nfs_xdrneg1;
266extern struct nfsstats nfsstats;
267extern nfstype nfsv3_type[9];
268int nfs_numasync = 0;
269
270void
271nfs_cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
272{
273 struct nfsnode *np;
274
275 if (vp != NULL((void *)0)) {
276 np = VTONFS(vp)((struct nfsnode *)(vp)->v_data);
277 np->n_ctime = np->n_vattr.va_ctime.tv_sec;
278 } else {
279 np = VTONFS(dvp)((struct nfsnode *)(dvp)->v_data);
280 if (!np->n_ctime)
281 np->n_ctime = np->n_vattr.va_mtime.tv_sec;
282 }
283
284 cache_enter(dvp, vp, cnp);
285}
286
287/*
288 * nfs null call from vfs.
289 */
290int
291nfs_null(struct vnode *vp, struct ucred *cred, struct proc *procp)
292{
293 struct nfsm_info info;
294 int error = 0;
295
296 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(0);
297 error = nfs_request(vp, NFSPROC_NULL0, &info);
298 m_freem(info.nmi_mrep);
299 return (error);
300}
301
302/*
303 * nfs access vnode op.
304 * For nfs version 2, just return ok. File accesses may fail later.
305 * For nfs version 3, use the access rpc to check accessibility. If file modes
306 * are changed on the server, accesses might still fail later.
307 */
308int
309nfs_access(void *v)
310{
311 struct vop_access_args *ap = v;
312 struct vnode *vp = ap->a_vp;
313 u_int32_t *tl;
314 int32_t t1;
315 caddr_t cp2;
316 int error = 0, attrflag;
317 u_int32_t mode, rmode;
318 int v3 = NFS_ISV3(vp);
319 int cachevalid;
320 struct nfsm_info info;
321
322 struct nfsnode *np = VTONFS(vp)((struct nfsnode *)(vp)->v_data);
323
324 /*
325 * Disallow write attempts on filesystems mounted read-only;
326 * unless the file is a socket, fifo, or a block or character
327 * device resident on the filesystem.
328 */
329 if ((ap->a_mode & VWRITE00200) && (vp->v_mount->mnt_flag & MNT_RDONLY0x00000001)) {
330 switch (vp->v_type) {
331 case VREG:
332 case VDIR:
333 case VLNK:
334 return (EROFS30);
335 default:
336 break;
337 }
338 }
339
340 /*
341 * Check access cache first. If a request has been made for this uid
342 * shortly before, use the cached result.
343 */
344 cachevalid = (np->n_accstamp != -1 &&
345 (gettime() - np->n_accstamp) < nfs_attrtimeo(np) &&
346 np->n_accuid == ap->a_cred->cr_uid);
347
348 if (cachevalid) {
349 if (!np->n_accerror) {
350 if ((np->n_accmode & ap->a_mode) == ap->a_mode)
351 return (np->n_accerror);
352 } else if ((np->n_accmode & ap->a_mode) == np->n_accmode)
353 return (np->n_accerror);
354 }
355
356 /*
357 * For nfs v3, do an access rpc, otherwise you are stuck emulating
358 * ufs_access() locally using the vattr. This may not be correct,
359 * since the server may apply other access criteria such as
360 * client uid-->server uid mapping that we do not know about, but
361 * this is better than just returning anything that is lying about
362 * in the cache.
363 */
364 if (v3) {
365 nfsstats.rpccnt[NFSPROC_ACCESS4]++;
366 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(v3)((v3) ? (64 + 4) : 32) + NFSX_UNSIGNED4);
367 nfsm_fhtom(&info, vp, v3);
368 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED4);
369 if (ap->a_mode & VREAD00400)
370 mode = NFSV3ACCESS_READ0x01;
371 else
372 mode = 0;
373 if (vp->v_type == VDIR) {
374 if (ap->a_mode & VWRITE00200)
375 mode |= (NFSV3ACCESS_MODIFY0x04 | NFSV3ACCESS_EXTEND0x08 |
376 NFSV3ACCESS_DELETE0x10);
377 if (ap->a_mode & VEXEC00100)
378 mode |= NFSV3ACCESS_LOOKUP0x02;
379 } else {
380 if (ap->a_mode & VWRITE00200)
381 mode |= (NFSV3ACCESS_MODIFY0x04 | NFSV3ACCESS_EXTEND0x08);
382 if (ap->a_mode & VEXEC00100)
383 mode |= NFSV3ACCESS_EXECUTE0x20;
384 }
385 *tl = txdr_unsigned(mode);
386
387 info.nmi_procp = ap->a_p;
388 info.nmi_cred = ap->a_cred;
389 error = nfs_request(vp, NFSPROC_ACCESS4, &info);
390
391 nfsm_postop_attr(vp, attrflag);
392 if (error) {
393 m_freem(info.nmi_mrep);
394 goto nfsmout;
395 }
396
397 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
398 rmode = fxdr_unsigned(u_int32_t, *tl);
399 /*
400 * The NFS V3 spec does not clarify whether or not
401 * the returned access bits can be a superset of
402 * the ones requested, so...
403 */
404 if ((rmode & mode) != mode)
405 error = EACCES13;
406
407 m_freem(info.nmi_mrep);
408 } else
409 return (nfsspec_access(ap));
410
411
412 /*
413 * If we got the same result as for a previous, different request, OR
414 * it in. Don't update the timestamp in that case.
415 */
416 if (!error || error == EACCES13) {
417 if (cachevalid && np->n_accstamp != -1 &&
418 error == np->n_accerror) {
419 if (!error)
420 np->n_accmode |= ap->a_mode;
421 else {
422 if ((np->n_accmode & ap->a_mode) == ap->a_mode)
423 np->n_accmode = ap->a_mode;
424 }
425 } else {
426 np->n_accstamp = gettime();
427 np->n_accuid = ap->a_cred->cr_uid;
428 np->n_accmode = ap->a_mode;
429 np->n_accerror = error;
430 }
431 }
432nfsmout:
433 return (error);
434}
435
436/*
437 * nfs open vnode op
438 * Check to see if the type is ok
439 * and that deletion is not in progress.
440 * For paged in text files, you will need to flush the page cache
441 * if consistency is lost.
442 */
443int
444nfs_open(void *v)
445{
446 struct vop_open_args *ap = v;
447 struct vnode *vp = ap->a_vp;
448 struct nfsnode *np = VTONFS(vp)((struct nfsnode *)(vp)->v_data);
449 struct vattr vattr;
450 int error;
451
452 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
453#ifdef DIAGNOSTIC1
454 printf("open eacces vtyp=%d\n",vp->v_type);
455#endif
456 return (EACCES13);
457 }
458
459 /*
460 * Initialize read and write creds here, for swapfiles
461 * and other paths that don't set the creds themselves.
462 */
463
464 if (ap->a_mode & FREAD0x0001) {
465 if (np->n_rcred) {
466 crfree(np->n_rcred);
467 }
468 np->n_rcred = ap->a_cred;
469 crhold(np->n_rcred);
470 }
471 if (ap->a_mode & FWRITE0x0002) {
472 if (np->n_wcred) {
473 crfree(np->n_wcred);
474 }
475 np->n_wcred = ap->a_cred;
476 crhold(np->n_wcred);
477 }
478
479 if (np->n_flag & NMODIFIED0x0004) {
480 error = nfs_vinvalbuf(vp, V_SAVE0x0001, ap->a_cred, ap->a_p);
481 if (error == EINTR4)
482 return (error);
483 uvm_vnp_uncache(vp);
484 NFS_INVALIDATE_ATTRCACHE(np)((np)->n_attrstamp = 0);
485 if (vp->v_type == VDIR)
486 np->n_direofoffsetn_un2.nd_direof = 0;
487 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
488 if (error)
489 return (error);
490 np->n_mtime = vattr.va_mtime;
491 } else {
492 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
493 if (error)
494 return (error);
495 if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) {
496 if (vp->v_type == VDIR)
497 np->n_direofoffsetn_un2.nd_direof = 0;
498 error = nfs_vinvalbuf(vp, V_SAVE0x0001, ap->a_cred, ap->a_p);
499 if (error == EINTR4)
500 return (error);
501 uvm_vnp_uncache(vp);
502 np->n_mtime = vattr.va_mtime;
503 }
504 }
505 /* For open/close consistency. */
506 NFS_INVALIDATE_ATTRCACHE(np)((np)->n_attrstamp = 0);
507 return (0);
508}
509
510/*
511 * nfs close vnode op
512 * What an NFS client should do upon close after writing is a debatable issue.
513 * Most NFS clients push delayed writes to the server upon close, basically for
514 * two reasons:
515 * 1 - So that any write errors may be reported back to the client process
516 * doing the close system call. By far the two most likely errors are
517 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
518 * 2 - To put a worst case upper bound on cache inconsistency between
519 * multiple clients for the file.
520 * There is also a consistency problem for Version 2 of the protocol w.r.t.
521 * not being able to tell if other clients are writing a file concurrently,
522 * since there is no way of knowing if the changed modify time in the reply
523 * is only due to the write for this client.
524 * (NFS Version 3 provides weak cache consistency data in the reply that
525 * should be sufficient to detect and handle this case.)
526 *
527 * The current code does the following:
528 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
529 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
530 * or commit them (this satisfies 1 and 2 except for the
531 * case where the server crashes after this close but
532 * before the commit RPC, which is felt to be "good
533 * enough". Changing the last argument to nfs_flush() to
534 * a 1 would force a commit operation, if it is felt a
535 * commit is necessary now.
536 */
537int
538nfs_close(void *v)
539{
540 struct vop_close_args *ap = v;
541 struct vnode *vp = ap->a_vp;
542 struct nfsnode *np = VTONFS(vp)((struct nfsnode *)(vp)->v_data);
543 int error = 0;
544
545 if (vp->v_type == VREG) {
546 if (np->n_flag & NMODIFIED0x0004) {
547 if (NFS_ISV3(vp)) {
548 error = nfs_flush(vp, ap->a_cred, MNT_WAIT1, ap->a_p, 0);
549 np->n_flag &= ~NMODIFIED0x0004;
550 } else
551 error = nfs_vinvalbuf(vp, V_SAVE0x0001, ap->a_cred, ap->a_p);
552 NFS_INVALIDATE_ATTRCACHE(np)((np)->n_attrstamp = 0);
553 }
554 if (np->n_flag & NWRITEERR0x0008) {
555 np->n_flag &= ~NWRITEERR0x0008;
556 error = np->n_error;
557 }
558 }
559 return (error);
560}
561
562/*
563 * nfs getattr call from vfs.
564 */
565int
566nfs_getattr(void *v)
567{
568 struct vop_getattr_args *ap = v;
569 struct vnode *vp = ap->a_vp;
570 struct nfsnode *np = VTONFS(vp)((struct nfsnode *)(vp)->v_data);
571 struct nfsm_info info;
572 int32_t t1;
573 int error = 0;
574
575 info.nmi_v3 = NFS_ISV3(vp);
576
577 /*
578 * Update local times for special files.
579 */
580 if (np->n_flag & (NACC0x0100 | NUPD0x0200))
581 np->n_flag |= NCHG0x0400;
582 /*
583 * First look in the cache.
584 */
585 if (nfs_getattrcache(vp, ap->a_vap) == 0)
586 return (0);
587
588 nfsstats.rpccnt[NFSPROC_GETATTR1]++;
589 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32));
590 nfsm_fhtom(&info, vp, info.nmi_v3);
591 info.nmi_procp = ap->a_p;
592 info.nmi_cred = ap->a_cred;
593 error = nfs_request(vp, NFSPROC_GETATTR1, &info);
594 if (!error)
595 nfsm_loadattr(vp, ap->a_vap);
596 m_freem(info.nmi_mrep);
597nfsmout:
598 return (error);
599}
600
601/*
602 * nfs setattr call.
603 */
604int
605nfs_setattr(void *v)
606{
607 struct vop_setattr_args *ap = v;
608 struct vnode *vp = ap->a_vp;
609 struct nfsnode *np = VTONFS(vp)((struct nfsnode *)(vp)->v_data);
610 struct vattr *vap = ap->a_vap;
611 int hint = NOTE_ATTRIB0x0008;
612 int error = 0;
613 u_quad_t tsize = 0;
614
615 /*
616 * Setting of flags is not supported.
617 */
618 if (vap->va_flags != VNOVAL(-1))
619 return (EOPNOTSUPP45);
620
621 /*
622 * Disallow write attempts if the filesystem is mounted read-only.
623 */
624 if ((vap->va_uid != (uid_t)VNOVAL(-1) ||
625 vap->va_gid != (gid_t)VNOVAL(-1) ||
626 vap->va_atime.tv_nsec != VNOVAL(-1) ||
627 vap->va_mtime.tv_nsec != VNOVAL(-1) ||
628 vap->va_mode != (mode_t)VNOVAL(-1)) &&
629 (vp->v_mount->mnt_flag & MNT_RDONLY0x00000001))
630 return (EROFS30);
631 if (vap->va_size != VNOVAL(-1)) {
632 switch (vp->v_type) {
633 case VDIR:
634 return (EISDIR21);
635 case VCHR:
636 case VBLK:
637 case VSOCK:
638 case VFIFO:
639 if (vap->va_mtime.tv_nsec == VNOVAL(-1) &&
640 vap->va_atime.tv_nsec == VNOVAL(-1) &&
641 vap->va_mode == (mode_t)VNOVAL(-1) &&
642 vap->va_uid == (uid_t)VNOVAL(-1) &&
643 vap->va_gid == (gid_t)VNOVAL(-1))
644 return (0);
645 vap->va_size = VNOVAL(-1);
646 break;
647 default:
648 /*
649 * Disallow write attempts if the filesystem is
650 * mounted read-only.
651 */
652 if (vp->v_mount->mnt_flag & MNT_RDONLY0x00000001)
653 return (EROFS30);
654 if (vap->va_size == 0)
655 error = nfs_vinvalbuf(vp, 0,
656 ap->a_cred, ap->a_p);
657 else
658 error = nfs_vinvalbuf(vp, V_SAVE0x0001,
659 ap->a_cred, ap->a_p);
660 if (error)
661 return (error);
662 tsize = np->n_size;
663 np->n_size = np->n_vattr.va_size = vap->va_size;
664 uvm_vnp_setsize(vp, np->n_size);
665 };
666 } else if ((vap->va_mtime.tv_nsec != VNOVAL(-1) ||
667 vap->va_atime.tv_nsec != VNOVAL(-1)) &&
668 vp->v_type == VREG &&
669 (error = nfs_vinvalbuf(vp, V_SAVE0x0001, ap->a_cred,
670 ap->a_p)) == EINTR4)
671 return (error);
672 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p);
673 if (error && vap->va_size != VNOVAL(-1)) {
674 np->n_size = np->n_vattr.va_size = tsize;
675 uvm_vnp_setsize(vp, np->n_size);
676 }
677
678 if (vap->va_size != VNOVAL(-1) && vap->va_size < tsize)
679 hint |= NOTE_TRUNCATE0x0080;
680
681 VN_KNOTE(vp, hint); /* XXX setattrrpc? */
682
683 return (error);
684}
685
686/*
687 * Do an nfs setattr rpc.
688 */
689int
690nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred,
691 struct proc *procp)
692{
693 struct nfsv2_sattr *sp;
694 struct nfsm_info info;
695 int32_t t1;
696 caddr_t cp2;
697 u_int32_t *tl;
698 int error = 0, wccflag = NFSV3_WCCRATTR0;
699 int v3 = NFS_ISV3(vp);
700
701 info.nmi_v3 = NFS_ISV3(vp);
702
703 nfsstats.rpccnt[NFSPROC_SETATTR2]++;
704 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(v3)((v3) ? (64 + 4) : 32) + NFSX_SATTR(v3)((v3) ? 60 : 32));
705 nfsm_fhtom(&info, vp, v3);
706
707 if (info.nmi_v3) {
708 nfsm_v3attrbuild(&info.nmi_mb, vap, 1);
709 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED4);
710 *tl = nfs_false;
711 } else {
712 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR32);
713 if (vap->va_mode == (mode_t)VNOVAL(-1))
714 sp->sa_mode = nfs_xdrneg1;
715 else
716 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
717 if (vap->va_uid == (uid_t)VNOVAL(-1))
718 sp->sa_uid = nfs_xdrneg1;
719 else
720 sp->sa_uid = txdr_unsigned(vap->va_uid);
721 if (vap->va_gid == (gid_t)VNOVAL(-1))
722 sp->sa_gid = nfs_xdrneg1;
723 else
724 sp->sa_gid = txdr_unsigned(vap->va_gid);
725 sp->sa_size = txdr_unsigned(vap->va_size);
726 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
727 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
728 }
729
730 info.nmi_procp = procp;
731 info.nmi_cred = cred;
732 error = nfs_request(vp, NFSPROC_SETATTR2, &info);
733
734 if (info.nmi_v3)
735 nfsm_wcc_data(vp, wccflag);
736 else if (error == 0)
737 nfsm_loadattr(vp, NULL);
738
739 m_freem(info.nmi_mrep);
740nfsmout:
741 return (error);
742}
743
744/*
745 * nfs lookup call, one step at a time...
746 * First look in cache
747 * If not found, unlock the directory nfsnode and do the rpc
748 */
749int
750nfs_lookup(void *v)
751{
752 struct vop_lookup_args *ap = v;
753 struct componentname *cnp = ap->a_cnp;
754 struct vnode *dvp = ap->a_dvp;
755 struct vnode **vpp = ap->a_vpp;
756 struct nfsm_info info;
757 int flags;
758 struct vnode *newvp;
759 u_int32_t *tl;
760 int32_t t1;
761 struct nfsmount *nmp;
762 caddr_t cp2;
763 long len;
764 nfsfh_t *fhp;
765 struct nfsnode *np;
766 int lockparent, wantparent, error = 0, attrflag, fhsize;
767
768 info.nmi_v3 = NFS_ISV3(dvp);
769
770 cnp->cn_flags &= ~PDIRUNLOCK0x200000;
771 flags = cnp->cn_flags;
772
773 *vpp = NULLVP((struct vnode *)((void *)0));
774 newvp = NULLVP((struct vnode *)((void *)0));
775 if ((flags & ISLASTCN0x008000) && (dvp->v_mount->mnt_flag & MNT_RDONLY0x00000001) &&
776 (cnp->cn_nameiop == DELETE2 || cnp->cn_nameiop == RENAME3))
777 return (EROFS30);
778 if (dvp->v_type != VDIR)
779 return (ENOTDIR20);
780 lockparent = flags & LOCKPARENT0x0008;
781 wantparent = flags & (LOCKPARENT0x0008|WANTPARENT0x0010);
782 nmp = VFSTONFS(dvp->v_mount)((struct nfsmount *)((dvp->v_mount)->mnt_data));
783 np = VTONFS(dvp)((struct nfsnode *)(dvp)->v_data);
784
785 /*
786 * Before tediously performing a linear scan of the directory,
787 * check the name cache to see if the directory/name pair
788 * we are looking for is known already.
789 * If the directory/name pair is found in the name cache,
790 * we have to ensure the directory has not changed from
791 * the time the cache entry has been created. If it has,
792 * the cache entry has to be ignored.
793 */
794 if ((error = cache_lookup(dvp, vpp, cnp)) >= 0) {
795 struct vattr vattr;
796 int err2;
797
798 if (error && error != ENOENT2) {
799 *vpp = NULLVP((struct vnode *)((void *)0));
800 return (error);
801 }
802
803 if (cnp->cn_flags & PDIRUNLOCK0x200000) {
804 err2 = vn_lock(dvp, LK_EXCLUSIVE0x0001UL | LK_RETRY0x2000UL);
805 if (err2 != 0) {
806 *vpp = NULLVP((struct vnode *)((void *)0));
807 return (err2);
808 }
809 cnp->cn_flags &= ~PDIRUNLOCK0x200000;
810 }
811
812 err2 = VOP_ACCESS(dvp, VEXEC00100, cnp->cn_cred, cnp->cn_proc);
813 if (err2 != 0) {
814 if (error == 0) {
815 if (*vpp != dvp)
816 vput(*vpp);
817 else
818 vrele(*vpp);
819 }
820 *vpp = NULLVP((struct vnode *)((void *)0));
821 return (err2);
822 }
823
824 if (error == ENOENT2) {
825 if (!VOP_GETATTR(dvp, &vattr, cnp->cn_cred,
826 cnp->cn_proc) && vattr.va_mtime.tv_sec ==
827 VTONFS(dvp)((struct nfsnode *)(dvp)->v_data)->n_ctime)
828 return (ENOENT2);
829 cache_purge(dvp);
830 np->n_ctime = 0;
831 goto dorpc;
832 }
833
834 newvp = *vpp;
835 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, cnp->cn_proc)
836 && vattr.va_ctime.tv_sec == VTONFS(newvp)((struct nfsnode *)(newvp)->v_data)->n_ctime)
837 {
838 nfsstats.lookupcache_hits++;
839 if (cnp->cn_nameiop != LOOKUP0 && (flags & ISLASTCN0x008000))
840 cnp->cn_flags |= SAVENAME0x000800;
841 if ((!lockparent || !(flags & ISLASTCN0x008000)) &&
842 newvp != dvp) {
843 VOP_UNLOCK(dvp);
844 cnp->cn_flags |= PDIRUNLOCK0x200000;
845 }
846 return (0);
847 }
848 cache_purge(newvp);
849 if (newvp != dvp)
850 vput(newvp);
851 else
852 vrele(newvp);
853 *vpp = NULLVP((struct vnode *)((void *)0));
854 }
855dorpc:
856 error = 0;
857 newvp = NULLVP((struct vnode *)((void *)0));
858 nfsstats.lookupcache_misses++;
859 nfsstats.rpccnt[NFSPROC_LOOKUP3]++;
860 len = cnp->cn_namelen;
861 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32) +
862 NFSX_UNSIGNED4 + nfsm_rndup(len)(((len)+3)&(~0x3)));
863 nfsm_fhtom(&info, dvp, info.nmi_v3);
864 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
865
866 info.nmi_procp = cnp->cn_proc;
867 info.nmi_cred = cnp->cn_cred;
868 error = nfs_request(dvp, NFSPROC_LOOKUP3, &info);
869
870 if (error) {
871 if (info.nmi_v3)
872 nfsm_postop_attr(dvp, attrflag);
873 m_freem(info.nmi_mrep);
874 goto nfsmout;
875 }
876
877 nfsm_getfh(fhp, fhsize, info.nmi_v3);
878
879 /*
880 * Handle RENAME case...
881 */
882 if (cnp->cn_nameiop == RENAME3 && wantparent && (flags & ISLASTCN0x008000)) {
883 if (NFS_CMPFH(np, fhp, fhsize)) {
884 m_freem(info.nmi_mrep);
885 return (EISDIR21);
886 }
887 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
888 if (error) {
889 m_freem(info.nmi_mrep);
890 return (error);
891 }
892 newvp = NFSTOV(np)((np)->n_vnode);
893 if (info.nmi_v3) {
894 nfsm_postop_attr(newvp, attrflag);
895 nfsm_postop_attr(dvp, attrflag);
896 } else
897 nfsm_loadattr(newvp, NULL);
898 *vpp = newvp;
899 m_freem(info.nmi_mrep);
900 cnp->cn_flags |= SAVENAME0x000800;
901 if (!lockparent) {
902 VOP_UNLOCK(dvp);
903 cnp->cn_flags |= PDIRUNLOCK0x200000;
904 }
905 return (0);
906 }
907
908 /*
909 * The postop attr handling is duplicated for each if case,
910 * because it should be done while dvp is locked (unlocking
911 * dvp is different for each case).
912 */
913
914 if (NFS_CMPFH(np, fhp, fhsize)) {
915 vref(dvp);
916 newvp = dvp;
917 if (info.nmi_v3) {
918 nfsm_postop_attr(newvp, attrflag);
919 nfsm_postop_attr(dvp, attrflag);
920 } else
921 nfsm_loadattr(newvp, NULL);
922 } else if (flags & ISDOTDOT0x002000) {
923 VOP_UNLOCK(dvp);
924 cnp->cn_flags |= PDIRUNLOCK0x200000;
925
926 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
927 if (error) {
928 if (vn_lock(dvp, LK_EXCLUSIVE0x0001UL | LK_RETRY0x2000UL) == 0)
929 cnp->cn_flags &= ~PDIRUNLOCK0x200000;
930 m_freem(info.nmi_mrep);
931 return (error);
932 }
933 newvp = NFSTOV(np)((np)->n_vnode);
934
935 if (info.nmi_v3) {
936 nfsm_postop_attr(newvp, attrflag);
937 nfsm_postop_attr(dvp, attrflag);
938 } else
939 nfsm_loadattr(newvp, NULL);
940
941 if (lockparent && (flags & ISLASTCN0x008000)) {
942 if ((error = vn_lock(dvp, LK_EXCLUSIVE0x0001UL))) {
943 m_freem(info.nmi_mrep);
944 vput(newvp);
945 return error;
946 }
947 cnp->cn_flags &= ~PDIRUNLOCK0x200000;
948 }
949
950 } else {
951 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
952 if (error) {
953 m_freem(info.nmi_mrep);
954 return error;
955 }
956 newvp = NFSTOV(np)((np)->n_vnode);
957 if (info.nmi_v3) {
958 nfsm_postop_attr(newvp, attrflag);
959 nfsm_postop_attr(dvp, attrflag);
960 } else
961 nfsm_loadattr(newvp, NULL);
962 if (!lockparent || !(flags & ISLASTCN0x008000)) {
963 VOP_UNLOCK(dvp);
964 cnp->cn_flags |= PDIRUNLOCK0x200000;
965 }
966 }
967
968 if (cnp->cn_nameiop != LOOKUP0 && (flags & ISLASTCN0x008000))
969 cnp->cn_flags |= SAVENAME0x000800;
970 if ((cnp->cn_flags & MAKEENTRY0x004000) &&
971 (cnp->cn_nameiop != DELETE2 || !(flags & ISLASTCN0x008000))) {
972 nfs_cache_enter(dvp, newvp, cnp);
973 }
974
975 *vpp = newvp;
976 m_freem(info.nmi_mrep);
977
978nfsmout:
979 if (error) {
980 /*
981 * We get here only because of errors returned by
982 * the RPC. Otherwise we'll have returned above
983 * (the nfsm_* macros will jump to nfsmout
984 * on error).
985 */
986 if (error == ENOENT2 && (cnp->cn_flags & MAKEENTRY0x004000) &&
987 cnp->cn_nameiop != CREATE1) {
988 nfs_cache_enter(dvp, NULL((void *)0), cnp);
989 }
990 if (newvp != NULLVP((struct vnode *)((void *)0))) {
991 if (newvp != dvp)
992 vput(newvp);
993 else
994 vrele(newvp);
995 }
996 if ((cnp->cn_nameiop == CREATE1 || cnp->cn_nameiop == RENAME3) &&
997 (flags & ISLASTCN0x008000) && error == ENOENT2) {
998 if (dvp->v_mount->mnt_flag & MNT_RDONLY0x00000001)
999 error = EROFS30;
1000 else
1001 error = EJUSTRETURN-2;
1002 }
1003 if (cnp->cn_nameiop != LOOKUP0 && (flags & ISLASTCN0x008000))
1004 cnp->cn_flags |= SAVENAME0x000800;
1005 *vpp = NULL((void *)0);
1006 }
1007 return (error);
1008}
1009
1010/*
1011 * nfs read call.
1012 * Just call nfs_bioread() to do the work.
1013 */
1014int
1015nfs_read(void *v)
1016{
1017 struct vop_read_args *ap = v;
1018 struct vnode *vp = ap->a_vp;
1019
1020 if (vp->v_type != VREG)
1021 return (EPERM1);
1022 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
1023}
1024
1025/*
1026 * nfs readlink call
1027 */
1028int
1029nfs_readlink(void *v)
1030{
1031 struct vop_readlink_args *ap = v;
1032 struct vnode *vp = ap->a_vp;
1033
1034 if (vp->v_type != VLNK)
1035 return (EPERM1);
1036 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred));
1037}
1038
1039/*
1040 * Lock an inode.
1041 */
1042int
1043nfs_lock(void *v)
1044{
1045 struct vop_lock_args *ap = v;
1046 struct vnode *vp = ap->a_vp;
1047
1048 return rrw_enter(&VTONFS(vp)((struct nfsnode *)(vp)->v_data)->n_lock, ap->a_flags & LK_RWFLAGS(0x0001UL|0x0002UL|0x0040UL|0x0080UL|0x0100UL));
1049}
1050
1051/*
1052 * Unlock an inode.
1053 */
1054int
1055nfs_unlock(void *v)
1056{
1057 struct vop_unlock_args *ap = v;
1058 struct vnode *vp = ap->a_vp;
1059
1060 rrw_exit(&VTONFS(vp)((struct nfsnode *)(vp)->v_data)->n_lock);
1061 return 0;
1062}
1063
1064/*
1065 * Check for a locked inode.
1066 */
1067int
1068nfs_islocked(void *v)
1069{
1070 struct vop_islocked_args *ap = v;
1071
1072 return rrw_status(&VTONFS(ap->a_vp)((struct nfsnode *)(ap->a_vp)->v_data)->n_lock);
1073}
1074
1075/*
1076 * Do a readlink rpc.
1077 * Called by nfs_doio() from below the buffer cache.
1078 */
1079int
1080nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
1081{
1082 struct nfsm_info info;
1083 u_int32_t *tl;
1084 int32_t t1;
1085 caddr_t cp2;
1086 int error = 0, len, attrflag;
1087
1088 info.nmi_v3 = NFS_ISV3(vp);
1089
1090 nfsstats.rpccnt[NFSPROC_READLINK5]++;
1091 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32));
1092 nfsm_fhtom(&info, vp, info.nmi_v3);
1093
1094 info.nmi_procp = curproc;
1095 info.nmi_cred = cred;
1096 error = nfs_request(vp, NFSPROC_READLINK5, &info);
1097
1098 if (info.nmi_v3)
1099 nfsm_postop_attr(vp, attrflag);
1100 if (!error) {
1101 nfsm_strsiz(len, NFS_MAXPATHLEN);
1102 nfsm_mtouio(uiop, len)if ((len) > 0 && (t1 = nfsm_mbuftouio(&info.nmi_md
, (uiop), (len), &info.nmi_dpos)) != 0) { error = t1; m_freem
(info.nmi_mrep); goto nfsmout; }
;
1103 }
1104
1105 m_freem(info.nmi_mrep);
1106
1107nfsmout:
1108 return (error);
1109}
1110
1111/*
1112 * nfs read rpc call
1113 * Ditto above
1114 */
1115int
1116nfs_readrpc(struct vnode *vp, struct uio *uiop)
1117{
1118 struct nfsm_info info;
1119 u_int32_t *tl;
1120 int32_t t1;
1121 caddr_t cp2;
1122 struct nfsmount *nmp;
1123 int error = 0, len, retlen, tsiz, eof, attrflag;
1124
1125 info.nmi_v3 = NFS_ISV3(vp)(((struct nfsmount *)(((vp)->v_mount)->mnt_data))->nm_flag
& 0x00000200)
;
1126
1127 eof = 0;
1128
1129 nmp = VFSTONFS(vp->v_mount)((struct nfsmount *)((vp->v_mount)->mnt_data));
1130 tsiz = uiop->uio_resid;
1131 if (uiop->uio_offset + tsiz > 0xffffffff && !info.nmi_v3)
1132 return (EFBIG27);
1133 while (tsiz > 0) {
1134 nfsstats.rpccnt[NFSPROC_READ6]++;
1135 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1136 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32) +
1137 NFSX_UNSIGNED4 * 3);
1138 nfsm_fhtom(&info, vp, info.nmi_v3);
1139 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED4 * 3);
1140 if (info.nmi_v3) {
1141 txdr_hyper(uiop->uio_offset, tl)do { ((u_int32_t *)(tl))[0] = (__uint32_t)(__builtin_constant_p
((u_int32_t)((uiop->uio_offset) >> 32)) ? (__uint32_t
)(((__uint32_t)((u_int32_t)((uiop->uio_offset) >> 32
)) & 0xff) << 24 | ((__uint32_t)((u_int32_t)((uiop->
uio_offset) >> 32)) & 0xff00) << 8 | ((__uint32_t
)((u_int32_t)((uiop->uio_offset) >> 32)) & 0xff0000
) >> 8 | ((__uint32_t)((u_int32_t)((uiop->uio_offset
) >> 32)) & 0xff000000) >> 24) : __swap32md((
u_int32_t)((uiop->uio_offset) >> 32))); ((u_int32_t *
)(tl))[1] = (__uint32_t)(__builtin_constant_p((u_int32_t)((uiop
->uio_offset) & 0xffffffff)) ? (__uint32_t)(((__uint32_t
)((u_int32_t)((uiop->uio_offset) & 0xffffffff)) & 0xff
) << 24 | ((__uint32_t)((u_int32_t)((uiop->uio_offset
) & 0xffffffff)) & 0xff00) << 8 | ((__uint32_t)
((u_int32_t)((uiop->uio_offset) & 0xffffffff)) & 0xff0000
) >> 8 | ((__uint32_t)((u_int32_t)((uiop->uio_offset
) & 0xffffffff)) & 0xff000000) >> 24) : __swap32md
((u_int32_t)((uiop->uio_offset) & 0xffffffff))); } while
(0)
;
1142 *(tl + 2) = txdr_unsigned(len)((__uint32_t)(__builtin_constant_p((int32_t)(len)) ? (__uint32_t
)(((__uint32_t)((int32_t)(len)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(len)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(len)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(
len)) & 0xff000000) >> 24) : __swap32md((int32_t)(len
))))
;
1143 } else {
1144 *tl++ = txdr_unsigned(uiop->uio_offset)((__uint32_t)(__builtin_constant_p((int32_t)(uiop->uio_offset
)) ? (__uint32_t)(((__uint32_t)((int32_t)(uiop->uio_offset
)) & 0xff) << 24 | ((__uint32_t)((int32_t)(uiop->
uio_offset)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(uiop->uio_offset)) & 0xff0000) >> 8 | ((__uint32_t
)((int32_t)(uiop->uio_offset)) & 0xff000000) >> 24
) : __swap32md((int32_t)(uiop->uio_offset))))
;
1145 *tl++ = txdr_unsigned(len)((__uint32_t)(__builtin_constant_p((int32_t)(len)) ? (__uint32_t
)(((__uint32_t)((int32_t)(len)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(len)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(len)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(
len)) & 0xff000000) >> 24) : __swap32md((int32_t)(len
))))
;
1146 *tl = 0;
1147 }
1148
1149 info.nmi_procp = curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc
;
1150 info.nmi_cred = VTONFS(vp)((struct nfsnode *)(vp)->v_data)->n_rcred;
1151 error = nfs_request(vp, NFSPROC_READ6, &info);
1152 if (info.nmi_v3)
1153 nfsm_postop_attr(vp, attrflag){ if (info.nmi_mrep != ((void *)0)) { struct vnode *ttvp = (vp
); { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info
.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (4)) {
(tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (4); }
else if ((t1 = nfsm_disct(&info.nmi_md, &info.nmi_dpos
, (4), t1, &cp2)) != 0) { error = t1; m_freem(info.nmi_mrep
); goto nfsmout; } else { (tl) = (u_int32_t *)cp2; } }; if ((
(attrflag) = ((int)(__uint32_t)(__builtin_constant_p((int32_t
)(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl)) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*tl)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl)) & 0xff000000) >> 24
) : __swap32md((int32_t)(*tl))))) != 0) { if ((t1 = nfs_loadattrcache
(&ttvp, &info.nmi_md, &info.nmi_dpos, ((void *)0)
)) != 0) { error = t1; (attrflag) = 0; m_freem(info.nmi_mrep)
; goto nfsmout; } (vp) = ttvp; } } }
;
1154 if (error) {
1155 m_freem(info.nmi_mrep);
1156 goto nfsmout;
1157 }
1158
1159 if (info.nmi_v3) {
1160 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED){ t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info.nmi_md
->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (2 * 4)) { (tl
) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (2 * 4); }
else if ((t1 = nfsm_disct(&info.nmi_md, &info.nmi_dpos
, (2 * 4), t1, &cp2)) != 0) { error = t1; m_freem(info.nmi_mrep
); goto nfsmout; } else { (tl) = (u_int32_t *)cp2; } }
;
1161 eof = fxdr_unsigned(int, *(tl + 1))((int)(__uint32_t)(__builtin_constant_p((int32_t)(*(tl + 1)))
? (__uint32_t)(((__uint32_t)((int32_t)(*(tl + 1))) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*(tl + 1))) & 0xff00
) << 8 | ((__uint32_t)((int32_t)(*(tl + 1))) & 0xff0000
) >> 8 | ((__uint32_t)((int32_t)(*(tl + 1))) & 0xff000000
) >> 24) : __swap32md((int32_t)(*(tl + 1)))))
;
1162 } else {
1163 nfsm_loadattr(vp, NULL){ struct vnode *ttvp = (vp); if ((t1 = nfs_loadattrcache(&
ttvp, &info.nmi_md, &info.nmi_dpos, (((void *)0)))) !=
0) { error = t1; m_freem(info.nmi_mrep); goto nfsmout; } (vp
) = ttvp; }
;
1164 }
1165
1166 nfsm_strsiz(retlen, nmp->nm_rsize){ { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info.
nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (4)) { (
tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (4); } else
if ((t1 = nfsm_disct(&info.nmi_md, &info.nmi_dpos, (
4), t1, &cp2)) != 0) { error = t1; m_freem(info.nmi_mrep)
; goto nfsmout; } else { (tl) = (u_int32_t *)cp2; } }; if (((
retlen) = ((int32_t)(__uint32_t)(__builtin_constant_p((int32_t
)(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl)) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*tl)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl)) & 0xff000000) >> 24
) : __swap32md((int32_t)(*tl))))) < 0 || (retlen) > (nmp
->nm_rsize)) { m_freem(info.nmi_mrep); error = 72; goto nfsmout
; } }
;
1167 nfsm_mtouio(uiop, retlen)if ((retlen) > 0 && (t1 = nfsm_mbuftouio(&info
.nmi_md, (uiop), (retlen), &info.nmi_dpos)) != 0) { error
= t1; m_freem(info.nmi_mrep); goto nfsmout; }
;
1168 m_freem(info.nmi_mrep);
1169 tsiz -= retlen;
1170 if (info.nmi_v3) {
1171 if (eof || retlen == 0)
1172 tsiz = 0;
1173 } else if (retlen < len)
1174 tsiz = 0;
1175 }
1176
1177nfsmout:
1178 return (error);
1179}
1180
1181/*
1182 * nfs write call
1183 */
1184int
1185nfs_writerpc(struct vnode *vp, struct uio *uiop, int *iomode, int *must_commit)
1186{
1187 struct nfsm_info info;
1188 u_int32_t *tl;
1189 int32_t t1, backup;
1190 caddr_t cp2;
1191 struct nfsmount *nmp = VFSTONFS(vp->v_mount)((struct nfsmount *)((vp->v_mount)->mnt_data));
1192 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR0, rlen, commit;
1193 int committed = NFSV3WRITE_FILESYNC2;
1194
1195 info.nmi_v3 = NFS_ISV3(vp)(((struct nfsmount *)(((vp)->v_mount)->mnt_data))->nm_flag
& 0x00000200)
;
1196
1197#ifdef DIAGNOSTIC1
1198 if (uiop->uio_iovcnt != 1)
1199 panic("nfs: writerpc iovcnt > 1");
1200#endif
1201 *must_commit = 0;
1202 tsiz = uiop->uio_resid;
1203 if (uiop->uio_offset + tsiz > 0xffffffff && !info.nmi_v3)
1204 return (EFBIG27);
1205 while (tsiz > 0) {
1206 nfsstats.rpccnt[NFSPROC_WRITE7]++;
1207 len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz;
1208 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32)
1209 + 5 * NFSX_UNSIGNED4 + nfsm_rndup(len)(((len)+3)&(~0x3)));
1210 nfsm_fhtom(&info, vp, info.nmi_v3);
1211 if (info.nmi_v3) {
1212 tl = nfsm_build(&info.nmi_mb, 5 * NFSX_UNSIGNED4);
1213 txdr_hyper(uiop->uio_offset, tl)do { ((u_int32_t *)(tl))[0] = (__uint32_t)(__builtin_constant_p
((u_int32_t)((uiop->uio_offset) >> 32)) ? (__uint32_t
)(((__uint32_t)((u_int32_t)((uiop->uio_offset) >> 32
)) & 0xff) << 24 | ((__uint32_t)((u_int32_t)((uiop->
uio_offset) >> 32)) & 0xff00) << 8 | ((__uint32_t
)((u_int32_t)((uiop->uio_offset) >> 32)) & 0xff0000
) >> 8 | ((__uint32_t)((u_int32_t)((uiop->uio_offset
) >> 32)) & 0xff000000) >> 24) : __swap32md((
u_int32_t)((uiop->uio_offset) >> 32))); ((u_int32_t *
)(tl))[1] = (__uint32_t)(__builtin_constant_p((u_int32_t)((uiop
->uio_offset) & 0xffffffff)) ? (__uint32_t)(((__uint32_t
)((u_int32_t)((uiop->uio_offset) & 0xffffffff)) & 0xff
) << 24 | ((__uint32_t)((u_int32_t)((uiop->uio_offset
) & 0xffffffff)) & 0xff00) << 8 | ((__uint32_t)
((u_int32_t)((uiop->uio_offset) & 0xffffffff)) & 0xff0000
) >> 8 | ((__uint32_t)((u_int32_t)((uiop->uio_offset
) & 0xffffffff)) & 0xff000000) >> 24) : __swap32md
((u_int32_t)((uiop->uio_offset) & 0xffffffff))); } while
(0)
;
1214 tl += 2;
1215 *tl++ = txdr_unsigned(len)((__uint32_t)(__builtin_constant_p((int32_t)(len)) ? (__uint32_t
)(((__uint32_t)((int32_t)(len)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(len)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(len)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(
len)) & 0xff000000) >> 24) : __swap32md((int32_t)(len
))))
;
1216 *tl++ = txdr_unsigned(*iomode)((__uint32_t)(__builtin_constant_p((int32_t)(*iomode)) ? (__uint32_t
)(((__uint32_t)((int32_t)(*iomode)) & 0xff) << 24 |
((__uint32_t)((int32_t)(*iomode)) & 0xff00) << 8 |
((__uint32_t)((int32_t)(*iomode)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*iomode)) & 0xff000000) >>
24) : __swap32md((int32_t)(*iomode))))
;
1217 *tl = txdr_unsigned(len)((__uint32_t)(__builtin_constant_p((int32_t)(len)) ? (__uint32_t
)(((__uint32_t)((int32_t)(len)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(len)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(len)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(
len)) & 0xff000000) >> 24) : __swap32md((int32_t)(len
))))
;
1218 } else {
1219 u_int32_t x;
1220
1221 tl = nfsm_build(&info.nmi_mb, 4 * NFSX_UNSIGNED4);
1222 /* Set both "begin" and "current" to non-garbage. */
1223 x = txdr_unsigned((u_int32_t)uiop->uio_offset)((__uint32_t)(__builtin_constant_p((int32_t)((u_int32_t)uiop->
uio_offset)) ? (__uint32_t)(((__uint32_t)((int32_t)((u_int32_t
)uiop->uio_offset)) & 0xff) << 24 | ((__uint32_t
)((int32_t)((u_int32_t)uiop->uio_offset)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)((u_int32_t)uiop->uio_offset))
& 0xff0000) >> 8 | ((__uint32_t)((int32_t)((u_int32_t
)uiop->uio_offset)) & 0xff000000) >> 24) : __swap32md
((int32_t)((u_int32_t)uiop->uio_offset))))
;
1224 *tl++ = x; /* "begin offset" */
1225 *tl++ = x; /* "current offset" */
1226 x = txdr_unsigned(len)((__uint32_t)(__builtin_constant_p((int32_t)(len)) ? (__uint32_t
)(((__uint32_t)((int32_t)(len)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(len)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(len)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(
len)) & 0xff000000) >> 24) : __swap32md((int32_t)(len
))))
;
1227 *tl++ = x; /* total to this offset */
1228 *tl = x; /* size of this write */
1229
1230 }
1231 nfsm_uiotombuf(&info.nmi_mb, uiop, len);
1232
1233 info.nmi_procp = curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc
;
1234 info.nmi_cred = VTONFS(vp)((struct nfsnode *)(vp)->v_data)->n_wcred;
1235 error = nfs_request(vp, NFSPROC_WRITE7, &info);
1236 if (info.nmi_v3) {
1237 wccflag = NFSV3_WCCCHK1;
1238 nfsm_wcc_data(vp, wccflag)do { if (info.nmi_mrep != ((void *)0)) { struct timespec _mtime
; int ttattrf, ttretf = 0; { t1 = ((caddr_t)((info.nmi_md)->
m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info
.nmi_dpos += (4); } else if ((t1 = nfsm_disct(&info.nmi_md
, &info.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1;
m_freem(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t
*)cp2; } }; if (*tl == nfs_true) { { t1 = ((caddr_t)((info.nmi_md
)->m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (6 * 4)) { (tl) = (u_int32_t *)(info.nmi_dpos)
; info.nmi_dpos += (6 * 4); } else if ((t1 = nfsm_disct(&
info.nmi_md, &info.nmi_dpos, (6 * 4), t1, &cp2)) != 0
) { error = t1; m_freem(info.nmi_mrep); goto nfsmout; } else {
(tl) = (u_int32_t *)cp2; } }; do { (&_mtime)->tv_sec =
(__uint32_t)(__builtin_constant_p(((struct nfsv3_time *)(tl +
2))->nfsv3_sec) ? (__uint32_t)(((__uint32_t)(((struct nfsv3_time
*)(tl + 2))->nfsv3_sec) & 0xff) << 24 | ((__uint32_t
)(((struct nfsv3_time *)(tl + 2))->nfsv3_sec) & 0xff00
) << 8 | ((__uint32_t)(((struct nfsv3_time *)(tl + 2))->
nfsv3_sec) & 0xff0000) >> 8 | ((__uint32_t)(((struct
nfsv3_time *)(tl + 2))->nfsv3_sec) & 0xff000000) >>
24) : __swap32md(((struct nfsv3_time *)(tl + 2))->nfsv3_sec
)); (&_mtime)->tv_nsec = (__uint32_t)(__builtin_constant_p
(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec) ? (__uint32_t
)(((__uint32_t)(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec
) & 0xff) << 24 | ((__uint32_t)(((struct nfsv3_time
*)(tl + 2))->nfsv3_nsec) & 0xff00) << 8 | ((__uint32_t
)(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec) & 0xff0000
) >> 8 | ((__uint32_t)(((struct nfsv3_time *)(tl + 2))->
nfsv3_nsec) & 0xff000000) >> 24) : __swap32md(((struct
nfsv3_time *)(tl + 2))->nfsv3_nsec)); } while (0); if (wccflag
) { ttretf = (((&((struct nfsnode *)(vp)->v_data)->
n_mtime)->tv_sec == (&_mtime)->tv_sec) ? ((&((struct
nfsnode *)(vp)->v_data)->n_mtime)->tv_nsec != (&
_mtime)->tv_nsec) : ((&((struct nfsnode *)(vp)->v_data
)->n_mtime)->tv_sec != (&_mtime)->tv_sec)); } } {
if (info.nmi_mrep != ((void *)0)) { struct vnode *ttvp = ((vp
)); { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info
.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (4)) {
(tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (4); }
else if ((t1 = nfsm_disct(&info.nmi_md, &info.nmi_dpos
, (4), t1, &cp2)) != 0) { error = t1; m_freem(info.nmi_mrep
); goto nfsmout; } else { (tl) = (u_int32_t *)cp2; } }; if ((
(ttattrf) = ((int)(__uint32_t)(__builtin_constant_p((int32_t)
(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl)) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*tl)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl)) & 0xff000000) >> 24
) : __swap32md((int32_t)(*tl))))) != 0) { if ((t1 = nfs_loadattrcache
(&ttvp, &info.nmi_md, &info.nmi_dpos, ((void *)0)
)) != 0) { error = t1; (ttattrf) = 0; m_freem(info.nmi_mrep);
goto nfsmout; } ((vp)) = ttvp; } } }; if (wccflag) { (wccflag
) = ttretf; } else { (wccflag) = ttattrf; } } } while (0)
;
1239 }
1240
1241 if (error) {
1242 m_freem(info.nmi_mrep);
1243 goto nfsmout;
1244 }
1245
1246 if (info.nmi_v3) {
1247 wccflag = NFSV3_WCCCHK1;
1248 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED{ t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info.nmi_md
->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (2 * 4 + 8)) {
(tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (2 * 4
+ 8); } else if ((t1 = nfsm_disct(&info.nmi_md, &info
.nmi_dpos, (2 * 4 + 8), t1, &cp2)) != 0) { error = t1; m_freem
(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t *)cp2
; } }
1249 + NFSX_V3WRITEVERF){ t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info.nmi_md
->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (2 * 4 + 8)) {
(tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (2 * 4
+ 8); } else if ((t1 = nfsm_disct(&info.nmi_md, &info
.nmi_dpos, (2 * 4 + 8), t1, &cp2)) != 0) { error = t1; m_freem
(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t *)cp2
; } }
;
1250 rlen = fxdr_unsigned(int, *tl++)((int)(__uint32_t)(__builtin_constant_p((int32_t)(*tl++)) ? (
__uint32_t)(((__uint32_t)((int32_t)(*tl++)) & 0xff) <<
24 | ((__uint32_t)((int32_t)(*tl++)) & 0xff00) << 8
| ((__uint32_t)((int32_t)(*tl++)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl++)) & 0xff000000) >>
24) : __swap32md((int32_t)(*tl++))))
;
1251 if (rlen <= 0) {
1252 error = NFSERR_IO5;
1253 break;
1254 } else if (rlen < len) {
1255 backup = len - rlen;
1256 uiop->uio_iov->iov_base =
1257 (char *)uiop->uio_iov->iov_base -
1258 backup;
1259 uiop->uio_iov->iov_len += backup;
1260 uiop->uio_offset -= backup;
1261 uiop->uio_resid += backup;
1262 len = rlen;
1263 }
1264 commit = fxdr_unsigned(int, *tl++)((int)(__uint32_t)(__builtin_constant_p((int32_t)(*tl++)) ? (
__uint32_t)(((__uint32_t)((int32_t)(*tl++)) & 0xff) <<
24 | ((__uint32_t)((int32_t)(*tl++)) & 0xff00) << 8
| ((__uint32_t)((int32_t)(*tl++)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl++)) & 0xff000000) >>
24) : __swap32md((int32_t)(*tl++))))
;
1265
1266 /*
1267 * Return the lowest commitment level
1268 * obtained by any of the RPCs.
1269 */
1270 if (committed == NFSV3WRITE_FILESYNC2)
1271 committed = commit;
1272 else if (committed == NFSV3WRITE_DATASYNC1 &&
1273 commit == NFSV3WRITE_UNSTABLE0)
1274 committed = commit;
1275 if ((nmp->nm_flag & NFSMNT_HASWRITEVERF0x00040000) == 0) {
1276 bcopy(tl, nmp->nm_verf,
1277 NFSX_V3WRITEVERF8);
1278 nmp->nm_flag |= NFSMNT_HASWRITEVERF0x00040000;
1279 } else if (bcmp(tl,
1280 nmp->nm_verf, NFSX_V3WRITEVERF8)) {
1281 *must_commit = 1;
1282 bcopy(tl, nmp->nm_verf,
1283 NFSX_V3WRITEVERF8);
1284 }
1285 } else {
1286 nfsm_loadattr(vp, NULL){ struct vnode *ttvp = (vp); if ((t1 = nfs_loadattrcache(&
ttvp, &info.nmi_md, &info.nmi_dpos, (((void *)0)))) !=
0) { error = t1; m_freem(info.nmi_mrep); goto nfsmout; } (vp
) = ttvp; }
;
1287 }
1288 if (wccflag)
1289 VTONFS(vp)((struct nfsnode *)(vp)->v_data)->n_mtime = VTONFS(vp)((struct nfsnode *)(vp)->v_data)->n_vattr.va_mtime;
1290 m_freem(info.nmi_mrep);
1291 tsiz -= len;
1292 }
1293nfsmout:
1294 *iomode = committed;
1295 if (error)
1296 uiop->uio_resid = tsiz;
1297 return (error);
1298}
1299
1300/*
1301 * nfs mknod rpc
1302 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1303 * mode set to specify the file type and the size field for rdev.
1304 */
1305int
1306nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
1307 struct vattr *vap)
1308{
1309 struct nfsv2_sattr *sp;
1310 struct nfsm_info info;
1311 u_int32_t *tl;
1312 int32_t t1;
1313 struct vnode *newvp = NULL((void *)0);
1314 struct nfsnode *np = NULL((void *)0);
1315 char *cp2;
1316 int error = 0, wccflag = NFSV3_WCCRATTR0, gotvp = 0;
1317 u_int32_t rdev;
1318
1319 info.nmi_v3 = NFS_ISV3(dvp)(((struct nfsmount *)(((dvp)->v_mount)->mnt_data))->
nm_flag & 0x00000200)
;
1320
1321 if (vap->va_type == VCHR || vap->va_type == VBLK)
1322 rdev = txdr_unsigned(vap->va_rdev)((__uint32_t)(__builtin_constant_p((int32_t)(vap->va_rdev)
) ? (__uint32_t)(((__uint32_t)((int32_t)(vap->va_rdev)) &
0xff) << 24 | ((__uint32_t)((int32_t)(vap->va_rdev)
) & 0xff00) << 8 | ((__uint32_t)((int32_t)(vap->
va_rdev)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t
)(vap->va_rdev)) & 0xff000000) >> 24) : __swap32md
((int32_t)(vap->va_rdev))))
;
1323 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1324 rdev = nfs_xdrneg1;
1325 else {
1326 VOP_ABORTOP(dvp, cnp);
1327 return (EOPNOTSUPP45);
1328 }
1329 nfsstats.rpccnt[NFSPROC_MKNOD11]++;
1330 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32) +
1331 4 * NFSX_UNSIGNED4 + nfsm_rndup(cnp->cn_namelen)(((cnp->cn_namelen)+3)&(~0x3)) +
1332 NFSX_SATTR(info.nmi_v3)((info.nmi_v3) ? 60 : 32));
1333 nfsm_fhtom(&info, dvp, info.nmi_v3);
1334 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN)if ((cnp->cn_namelen) > (255)) { m_freem(info.nmi_mreq)
; error = 63; goto nfsmout; } nfsm_strtombuf(&info.nmi_mb
, (cnp->cn_nameptr), (cnp->cn_namelen))
;
1335
1336 if (info.nmi_v3) {
1337 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED4);
1338 *tl++ = vtonfsv3_type(vap->va_type)((__uint32_t)(__builtin_constant_p((int32_t)(nfsv3_type[((int32_t
)(vap->va_type))])) ? (__uint32_t)(((__uint32_t)((int32_t)
(nfsv3_type[((int32_t)(vap->va_type))])) & 0xff) <<
24 | ((__uint32_t)((int32_t)(nfsv3_type[((int32_t)(vap->va_type
))])) & 0xff00) << 8 | ((__uint32_t)((int32_t)(nfsv3_type
[((int32_t)(vap->va_type))])) & 0xff0000) >> 8 |
((__uint32_t)((int32_t)(nfsv3_type[((int32_t)(vap->va_type
))])) & 0xff000000) >> 24) : __swap32md((int32_t)(nfsv3_type
[((int32_t)(vap->va_type))]))))
;
1339 nfsm_v3attrbuild(&info.nmi_mb, vap, 0);
1340 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1341 tl = nfsm_build(&info.nmi_mb, 2 * NFSX_UNSIGNED4);
1342 *tl++ = txdr_unsigned(major(vap->va_rdev))((__uint32_t)(__builtin_constant_p((int32_t)((((unsigned)(vap
->va_rdev) >> 8) & 0xff))) ? (__uint32_t)(((__uint32_t
)((int32_t)((((unsigned)(vap->va_rdev) >> 8) & 0xff
))) & 0xff) << 24 | ((__uint32_t)((int32_t)((((unsigned
)(vap->va_rdev) >> 8) & 0xff))) & 0xff00) <<
8 | ((__uint32_t)((int32_t)((((unsigned)(vap->va_rdev) >>
8) & 0xff))) & 0xff0000) >> 8 | ((__uint32_t)(
(int32_t)((((unsigned)(vap->va_rdev) >> 8) & 0xff
))) & 0xff000000) >> 24) : __swap32md((int32_t)((((
unsigned)(vap->va_rdev) >> 8) & 0xff)))))
;
1343 *tl = txdr_unsigned(minor(vap->va_rdev))((__uint32_t)(__builtin_constant_p((int32_t)(((unsigned)((vap
->va_rdev) & 0xff) | (((vap->va_rdev) & 0xffff0000
) >> 8)))) ? (__uint32_t)(((__uint32_t)((int32_t)(((unsigned
)((vap->va_rdev) & 0xff) | (((vap->va_rdev) & 0xffff0000
) >> 8)))) & 0xff) << 24 | ((__uint32_t)((int32_t
)(((unsigned)((vap->va_rdev) & 0xff) | (((vap->va_rdev
) & 0xffff0000) >> 8)))) & 0xff00) << 8 |
((__uint32_t)((int32_t)(((unsigned)((vap->va_rdev) & 0xff
) | (((vap->va_rdev) & 0xffff0000) >> 8)))) &
0xff0000) >> 8 | ((__uint32_t)((int32_t)(((unsigned)((
vap->va_rdev) & 0xff) | (((vap->va_rdev) & 0xffff0000
) >> 8)))) & 0xff000000) >> 24) : __swap32md(
(int32_t)(((unsigned)((vap->va_rdev) & 0xff) | (((vap->
va_rdev) & 0xffff0000) >> 8))))))
;
1344 }
1345 } else {
1346 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR32);
1347 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode)((__uint32_t)(__builtin_constant_p((int32_t)(((vap->va_type
) == VFIFO) ? (int)((vttoif_tab[(int)(VCHR)]) | ((vap->va_mode
))) : (int)((vttoif_tab[(int)((vap->va_type))]) | ((vap->
va_mode))))) ? (__uint32_t)(((__uint32_t)((int32_t)(((vap->
va_type) == VFIFO) ? (int)((vttoif_tab[(int)(VCHR)]) | ((vap->
va_mode))) : (int)((vttoif_tab[(int)((vap->va_type))]) | (
(vap->va_mode))))) & 0xff) << 24 | ((__uint32_t)
((int32_t)(((vap->va_type) == VFIFO) ? (int)((vttoif_tab[(
int)(VCHR)]) | ((vap->va_mode))) : (int)((vttoif_tab[(int)
((vap->va_type))]) | ((vap->va_mode))))) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(((vap->va_type) == VFIFO) ? (
int)((vttoif_tab[(int)(VCHR)]) | ((vap->va_mode))) : (int)
((vttoif_tab[(int)((vap->va_type))]) | ((vap->va_mode))
))) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(((vap
->va_type) == VFIFO) ? (int)((vttoif_tab[(int)(VCHR)]) | (
(vap->va_mode))) : (int)((vttoif_tab[(int)((vap->va_type
))]) | ((vap->va_mode))))) & 0xff000000) >> 24) :
__swap32md((int32_t)(((vap->va_type) == VFIFO) ? (int)((vttoif_tab
[(int)(VCHR)]) | ((vap->va_mode))) : (int)((vttoif_tab[(int
)((vap->va_type))]) | ((vap->va_mode)))))))
;
1348 sp->sa_uid = nfs_xdrneg1;
1349 sp->sa_gid = nfs_xdrneg1;
1350 sp->sa_size = rdev;
1351 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1352 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1353 }
1354
1355 KASSERT(cnp->cn_proc == curproc)((cnp->cn_proc == ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_curproc) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/nfs/nfs_vnops.c", 1355, "cnp->cn_proc == curproc"
))
;
1356 info.nmi_procp = cnp->cn_proc;
1357 info.nmi_cred = cnp->cn_cred;
1358 error = nfs_request(dvp, NFSPROC_MKNOD11, &info);
1359 if (!error) {
1360 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp){ struct nfsnode *ttnp; nfsfh_t *ttfhp; int ttfhsize; if (info
.nmi_v3) { { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)
) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >=
(4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos +=
(4); } else if ((t1 = nfsm_disct(&info.nmi_md, &info
.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1; m_freem(info
.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t *)cp2; }
}; (gotvp) = ((int)(__uint32_t)(__builtin_constant_p((int32_t
)(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl)) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*tl)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl)) & 0xff000000) >> 24
) : __swap32md((int32_t)(*tl)))); } else (gotvp) = 1; if (gotvp
) { { if ((info.nmi_v3)) { { t1 = ((caddr_t)((info.nmi_md)->
m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info
.nmi_dpos += (4); } else if ((t1 = nfsm_disct(&info.nmi_md
, &info.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1;
m_freem(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t
*)cp2; } }; if (((ttfhsize) = ((int)(__uint32_t)(__builtin_constant_p
((int32_t)(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl))
& 0xff) << 24 | ((__uint32_t)((int32_t)(*tl)) &
0xff00) << 8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000
) >> 8 | ((__uint32_t)((int32_t)(*tl)) & 0xff000000
) >> 24) : __swap32md((int32_t)(*tl))))) <= 0 || (ttfhsize
) > 64) { m_freem(info.nmi_mrep); error = 72; goto nfsmout
; } } else (ttfhsize) = 32; { t1 = ((caddr_t)((info.nmi_md)->
m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= ((((ttfhsize)+3)&(~0x3)))) { ((ttfhp)) = (
nfsfh_t *)(info.nmi_dpos); info.nmi_dpos += ((((ttfhsize)+3)&
(~0x3))); } else if ((t1 = nfsm_disct(&info.nmi_md, &
info.nmi_dpos, ((((ttfhsize)+3)&(~0x3))), t1, &cp2)) !=
0) { error = t1; m_freem(info.nmi_mrep); goto nfsmout; } else
{ ((ttfhp)) = (nfsfh_t *)cp2; } }; }; if ((t1 = nfs_nget((dvp
)->v_mount, ttfhp, ttfhsize, &ttnp)) != 0) { error = t1
; m_freem(info.nmi_mrep); goto nfsmout; } (newvp) = ((ttnp)->
n_vnode); } if (info.nmi_v3) { { t1 = ((caddr_t)((info.nmi_md
)->m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info
.nmi_dpos += (4); } else if ((t1 = nfsm_disct(&info.nmi_md
, &info.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1;
m_freem(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t
*)cp2; } }; if (gotvp) (gotvp) = ((int)(__uint32_t)(__builtin_constant_p
((int32_t)(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl))
& 0xff) << 24 | ((__uint32_t)((int32_t)(*tl)) &
0xff00) << 8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000
) >> 8 | ((__uint32_t)((int32_t)(*tl)) & 0xff000000
) >> 24) : __swap32md((int32_t)(*tl)))); else if (((int
)(__uint32_t)(__builtin_constant_p((int32_t)(*tl)) ? (__uint32_t
)(((__uint32_t)((int32_t)(*tl)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(*tl)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(*tl)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(
*tl)) & 0xff000000) >> 24) : __swap32md((int32_t)(*
tl))))) { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) +
info.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (
84)) { info.nmi_dpos += (84); } else if ((t1 = nfs_adv(&info
.nmi_md, &info.nmi_dpos, (84), t1)) != 0) { error = t1; m_freem
(info.nmi_mrep); goto nfsmout; } }; } if (gotvp) { struct vnode
*ttvp = ((newvp)); if ((t1 = nfs_loadattrcache(&ttvp, &
info.nmi_md, &info.nmi_dpos, (((void *)0)))) != 0) { error
= t1; m_freem(info.nmi_mrep); goto nfsmout; } ((newvp)) = ttvp
; }; }
;
1361 if (!gotvp) {
1362 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1363 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
1364 if (!error)
1365 newvp = NFSTOV(np)((np)->n_vnode);
1366 }
1367 }
1368 if (info.nmi_v3)
1369 nfsm_wcc_data(dvp, wccflag)do { if (info.nmi_mrep != ((void *)0)) { struct timespec _mtime
; int ttattrf, ttretf = 0; { t1 = ((caddr_t)((info.nmi_md)->
m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info
.nmi_dpos += (4); } else if ((t1 = nfsm_disct(&info.nmi_md
, &info.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1;
m_freem(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t
*)cp2; } }; if (*tl == nfs_true) { { t1 = ((caddr_t)((info.nmi_md
)->m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (6 * 4)) { (tl) = (u_int32_t *)(info.nmi_dpos)
; info.nmi_dpos += (6 * 4); } else if ((t1 = nfsm_disct(&
info.nmi_md, &info.nmi_dpos, (6 * 4), t1, &cp2)) != 0
) { error = t1; m_freem(info.nmi_mrep); goto nfsmout; } else {
(tl) = (u_int32_t *)cp2; } }; do { (&_mtime)->tv_sec =
(__uint32_t)(__builtin_constant_p(((struct nfsv3_time *)(tl +
2))->nfsv3_sec) ? (__uint32_t)(((__uint32_t)(((struct nfsv3_time
*)(tl + 2))->nfsv3_sec) & 0xff) << 24 | ((__uint32_t
)(((struct nfsv3_time *)(tl + 2))->nfsv3_sec) & 0xff00
) << 8 | ((__uint32_t)(((struct nfsv3_time *)(tl + 2))->
nfsv3_sec) & 0xff0000) >> 8 | ((__uint32_t)(((struct
nfsv3_time *)(tl + 2))->nfsv3_sec) & 0xff000000) >>
24) : __swap32md(((struct nfsv3_time *)(tl + 2))->nfsv3_sec
)); (&_mtime)->tv_nsec = (__uint32_t)(__builtin_constant_p
(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec) ? (__uint32_t
)(((__uint32_t)(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec
) & 0xff) << 24 | ((__uint32_t)(((struct nfsv3_time
*)(tl + 2))->nfsv3_nsec) & 0xff00) << 8 | ((__uint32_t
)(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec) & 0xff0000
) >> 8 | ((__uint32_t)(((struct nfsv3_time *)(tl + 2))->
nfsv3_nsec) & 0xff000000) >> 24) : __swap32md(((struct
nfsv3_time *)(tl + 2))->nfsv3_nsec)); } while (0); if (wccflag
) { ttretf = (((&((struct nfsnode *)(dvp)->v_data)->
n_mtime)->tv_sec == (&_mtime)->tv_sec) ? ((&((struct
nfsnode *)(dvp)->v_data)->n_mtime)->tv_nsec != (&
_mtime)->tv_nsec) : ((&((struct nfsnode *)(dvp)->v_data
)->n_mtime)->tv_sec != (&_mtime)->tv_sec)); } } {
if (info.nmi_mrep != ((void *)0)) { struct vnode *ttvp = ((dvp
)); { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info
.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (4)) {
(tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (4); }
else if ((t1 = nfsm_disct(&info.nmi_md, &info.nmi_dpos
, (4), t1, &cp2)) != 0) { error = t1; m_freem(info.nmi_mrep
); goto nfsmout; } else { (tl) = (u_int32_t *)cp2; } }; if ((
(ttattrf) = ((int)(__uint32_t)(__builtin_constant_p((int32_t)
(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl)) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*tl)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl)) & 0xff000000) >> 24
) : __swap32md((int32_t)(*tl))))) != 0) { if ((t1 = nfs_loadattrcache
(&ttvp, &info.nmi_md, &info.nmi_dpos, ((void *)0)
)) != 0) { error = t1; (ttattrf) = 0; m_freem(info.nmi_mrep);
goto nfsmout; } ((dvp)) = ttvp; } } }; if (wccflag) { (wccflag
) = ttretf; } else { (wccflag) = ttattrf; } } } while (0)
;
1370 m_freem(info.nmi_mrep);
1371
1372nfsmout:
1373 if (error) {
1374 if (newvp)
1375 vput(newvp);
1376 } else {
1377 if (cnp->cn_flags & MAKEENTRY0x004000)
1378 nfs_cache_enter(dvp, newvp, cnp);
1379 *vpp = newvp;
1380 }
1381 pool_put(&namei_pool, cnp->cn_pnbuf);
1382 VTONFS(dvp)((struct nfsnode *)(dvp)->v_data)->n_flag |= NMODIFIED0x0004;
1383 if (!wccflag)
1384 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp))((((struct nfsnode *)(dvp)->v_data))->n_attrstamp = 0);
1385 return (error);
1386}
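The comment above nfs_mknodrpc() sums up the NFSv2 kludge in one line; a minimal stand-alone sketch of the same packing, with simplified hypothetical names (no XDR byte-order conversion, no real kernel types), looks like this: the v2 wire sattr has no rdev field, so the file type is folded into the IFMT bits of sa_mode and the device number travels in sa_size, exactly as the v2 branch of the function above does.

#include <stdint.h>

struct v2_sattr_model {
	uint32_t sa_mode;	/* IFMT type bits | permission bits */
	uint32_t sa_uid, sa_gid;
	uint32_t sa_size;	/* (ab)used to carry rdev for VCHR/VBLK */
};

#define MODEL_IFCHR	0020000U	/* character-device IFMT bits */
#define MODEL_XDR_NEG1	0xffffffffU	/* "don't set" marker, like nfs_xdrneg1 */

void
fill_v2_mknod(struct v2_sattr_model *sp, uint32_t mode, uint32_t rdev)
{
	sp->sa_mode = MODEL_IFCHR | mode;	/* file type rides in the mode */
	sp->sa_uid = MODEL_XDR_NEG1;		/* leave owner/group untouched */
	sp->sa_gid = MODEL_XDR_NEG1;
	sp->sa_size = rdev;			/* size field carries the device number */
}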
1387
1388/*
1389 * nfs mknod vop
1390 * just call nfs_mknodrpc() to do the work.
1391 */
1392int
1393nfs_mknod(void *v)
1394{
1395 struct vop_mknod_args *ap = v;
1396 struct vnode *newvp;
1397 int error;
1398
1399 error = nfs_mknodrpc(ap->a_dvp, &newvp, ap->a_cnp, ap->a_vap);
1400 if (!error)
1401 vput(newvp);
1402
1403 VN_KNOTE(ap->a_dvp, NOTE_WRITE)do { struct klist *__list = (&ap->a_dvp->v_selectinfo
.si_note); if (__list != ((void *)0)) knote(__list, (0x0002))
; } while (0)
;
1404
1405 return (error);
1406}
1407
1408int
1409nfs_create(void *v)
1410{
1411 struct vop_create_args *ap = v;
1412 struct vnode *dvp = ap->a_dvp;
1413 struct vattr *vap = ap->a_vap;
1414 struct componentname *cnp = ap->a_cnp;
1415 struct nfsv2_sattr *sp;
1416 struct nfsm_info info;
1417 struct timespec ts;
1418 u_int32_t *tl;
1419 int32_t t1;
1420 struct nfsnode *np = NULL((void *)0);
1421 struct vnode *newvp = NULL((void *)0);
1422 caddr_t cp2;
1423 int error = 0, wccflag = NFSV3_WCCRATTR0, gotvp = 0, fmode = 0;
1424
1425 info.nmi_v3 = NFS_ISV3(dvp)(((struct nfsmount *)(((dvp)->v_mount)->mnt_data))->
nm_flag & 0x00000200)
;
1426
1427 /*
1428 * Oops, not for me..
1429 */
1430 if (vap->va_type == VSOCK)
1431 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1432
1433 if (vap->va_vaflags & VA_EXCLUSIVE0x02)
1434 fmode |= O_EXCL0x0800;
1435
1436again:
1437 nfsstats.rpccnt[NFSPROC_CREATE8]++;
1438 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32) +
1439 2 * NFSX_UNSIGNED4 + nfsm_rndup(cnp->cn_namelen)(((cnp->cn_namelen)+3)&(~0x3)) +
1440 NFSX_SATTR(info.nmi_v3)((info.nmi_v3) ? 60 : 32));
1441 nfsm_fhtom(&info, dvp, info.nmi_v3);
1442 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN)if ((cnp->cn_namelen) > (255)) { m_freem(info.nmi_mreq)
; error = 63; goto nfsmout; } nfsm_strtombuf(&info.nmi_mb
, (cnp->cn_nameptr), (cnp->cn_namelen))
;
1443 if (info.nmi_v3) {
1444 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED4);
1445 if (fmode & O_EXCL0x0800) {
1446 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE)((__uint32_t)(__builtin_constant_p((int32_t)(2)) ? (__uint32_t
)(((__uint32_t)((int32_t)(2)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(2)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(2)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(2)
) & 0xff000000) >> 24) : __swap32md((int32_t)(2))))
;
1447 tl = nfsm_build(&info.nmi_mb, NFSX_V3CREATEVERF8);
1448 arc4random_buf(tl, sizeof(*tl) * 2);
1449 } else {
1450 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED)((__uint32_t)(__builtin_constant_p((int32_t)(0)) ? (__uint32_t
)(((__uint32_t)((int32_t)(0)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(0)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(0)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(0)
) & 0xff000000) >> 24) : __swap32md((int32_t)(0))))
;
1451 nfsm_v3attrbuild(&info.nmi_mb, vap, 0);
1452 }
1453 } else {
1454 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR32);
1455 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode)((__uint32_t)(__builtin_constant_p((int32_t)(((vap->va_type
) == VFIFO) ? (int)((vttoif_tab[(int)(VCHR)]) | ((vap->va_mode
))) : (int)((vttoif_tab[(int)((vap->va_type))]) | ((vap->
va_mode))))) ? (__uint32_t)(((__uint32_t)((int32_t)(((vap->
va_type) == VFIFO) ? (int)((vttoif_tab[(int)(VCHR)]) | ((vap->
va_mode))) : (int)((vttoif_tab[(int)((vap->va_type))]) | (
(vap->va_mode))))) & 0xff) << 24 | ((__uint32_t)
((int32_t)(((vap->va_type) == VFIFO) ? (int)((vttoif_tab[(
int)(VCHR)]) | ((vap->va_mode))) : (int)((vttoif_tab[(int)
((vap->va_type))]) | ((vap->va_mode))))) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(((vap->va_type) == VFIFO) ? (
int)((vttoif_tab[(int)(VCHR)]) | ((vap->va_mode))) : (int)
((vttoif_tab[(int)((vap->va_type))]) | ((vap->va_mode))
))) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(((vap
->va_type) == VFIFO) ? (int)((vttoif_tab[(int)(VCHR)]) | (
(vap->va_mode))) : (int)((vttoif_tab[(int)((vap->va_type
))]) | ((vap->va_mode))))) & 0xff000000) >> 24) :
__swap32md((int32_t)(((vap->va_type) == VFIFO) ? (int)((vttoif_tab
[(int)(VCHR)]) | ((vap->va_mode))) : (int)((vttoif_tab[(int
)((vap->va_type))]) | ((vap->va_mode)))))))
;
1456 sp->sa_uid = nfs_xdrneg1;
1457 sp->sa_gid = nfs_xdrneg1;
1458 sp->sa_size = 0;
1459 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1460 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1461 }
1462
1463 KASSERT(cnp->cn_proc == curproc)((cnp->cn_proc == ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_curproc) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/nfs/nfs_vnops.c", 1463, "cnp->cn_proc == curproc"
))
;
1464 info.nmi_procp = cnp->cn_proc;
1465 info.nmi_cred = cnp->cn_cred;
1466 error = nfs_request(dvp, NFSPROC_CREATE8, &info);
1467 if (!error) {
1468 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp){ struct nfsnode *ttnp; nfsfh_t *ttfhp; int ttfhsize; if (info
.nmi_v3) { { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)
) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >=
(4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos +=
(4); } else if ((t1 = nfsm_disct(&info.nmi_md, &info
.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1; m_freem(info
.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t *)cp2; }
}; (gotvp) = ((int)(__uint32_t)(__builtin_constant_p((int32_t
)(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl)) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*tl)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl)) & 0xff000000) >> 24
) : __swap32md((int32_t)(*tl)))); } else (gotvp) = 1; if (gotvp
) { { if ((info.nmi_v3)) { { t1 = ((caddr_t)((info.nmi_md)->
m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info
.nmi_dpos += (4); } else if ((t1 = nfsm_disct(&info.nmi_md
, &info.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1;
m_freem(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t
*)cp2; } }; if (((ttfhsize) = ((int)(__uint32_t)(__builtin_constant_p
((int32_t)(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl))
& 0xff) << 24 | ((__uint32_t)((int32_t)(*tl)) &
0xff00) << 8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000
) >> 8 | ((__uint32_t)((int32_t)(*tl)) & 0xff000000
) >> 24) : __swap32md((int32_t)(*tl))))) <= 0 || (ttfhsize
) > 64) { m_freem(info.nmi_mrep); error = 72; goto nfsmout
; } } else (ttfhsize) = 32; { t1 = ((caddr_t)((info.nmi_md)->
m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= ((((ttfhsize)+3)&(~0x3)))) { ((ttfhp)) = (
nfsfh_t *)(info.nmi_dpos); info.nmi_dpos += ((((ttfhsize)+3)&
(~0x3))); } else if ((t1 = nfsm_disct(&info.nmi_md, &
info.nmi_dpos, ((((ttfhsize)+3)&(~0x3))), t1, &cp2)) !=
0) { error = t1; m_freem(info.nmi_mrep); goto nfsmout; } else
{ ((ttfhp)) = (nfsfh_t *)cp2; } }; }; if ((t1 = nfs_nget((dvp
)->v_mount, ttfhp, ttfhsize, &ttnp)) != 0) { error = t1
; m_freem(info.nmi_mrep); goto nfsmout; } (newvp) = ((ttnp)->
n_vnode); } if (info.nmi_v3) { { t1 = ((caddr_t)((info.nmi_md
)->m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info
.nmi_dpos += (4); } else if ((t1 = nfsm_disct(&info.nmi_md
, &info.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1;
m_freem(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t
*)cp2; } }; if (gotvp) (gotvp) = ((int)(__uint32_t)(__builtin_constant_p
((int32_t)(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl))
& 0xff) << 24 | ((__uint32_t)((int32_t)(*tl)) &
0xff00) << 8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000
) >> 8 | ((__uint32_t)((int32_t)(*tl)) & 0xff000000
) >> 24) : __swap32md((int32_t)(*tl)))); else if (((int
)(__uint32_t)(__builtin_constant_p((int32_t)(*tl)) ? (__uint32_t
)(((__uint32_t)((int32_t)(*tl)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(*tl)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(*tl)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(
*tl)) & 0xff000000) >> 24) : __swap32md((int32_t)(*
tl))))) { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) +
info.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (
84)) { info.nmi_dpos += (84); } else if ((t1 = nfs_adv(&info
.nmi_md, &info.nmi_dpos, (84), t1)) != 0) { error = t1; m_freem
(info.nmi_mrep); goto nfsmout; } }; } if (gotvp) { struct vnode
*ttvp = ((newvp)); if ((t1 = nfs_loadattrcache(&ttvp, &
info.nmi_md, &info.nmi_dpos, (((void *)0)))) != 0) { error
= t1; m_freem(info.nmi_mrep); goto nfsmout; } ((newvp)) = ttvp
; }; }
;
1469 if (!gotvp) {
1470 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1471 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
1472 if (!error)
1473 newvp = NFSTOV(np)((np)->n_vnode);
1474 }
1475 }
1476 if (info.nmi_v3)
1477 nfsm_wcc_data(dvp, wccflag)do { if (info.nmi_mrep != ((void *)0)) { struct timespec _mtime
; int ttattrf, ttretf = 0; { t1 = ((caddr_t)((info.nmi_md)->
m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info
.nmi_dpos += (4); } else if ((t1 = nfsm_disct(&info.nmi_md
, &info.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1;
m_freem(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t
*)cp2; } }; if (*tl == nfs_true) { { t1 = ((caddr_t)((info.nmi_md
)->m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (6 * 4)) { (tl) = (u_int32_t *)(info.nmi_dpos)
; info.nmi_dpos += (6 * 4); } else if ((t1 = nfsm_disct(&
info.nmi_md, &info.nmi_dpos, (6 * 4), t1, &cp2)) != 0
) { error = t1; m_freem(info.nmi_mrep); goto nfsmout; } else {
(tl) = (u_int32_t *)cp2; } }; do { (&_mtime)->tv_sec =
(__uint32_t)(__builtin_constant_p(((struct nfsv3_time *)(tl +
2))->nfsv3_sec) ? (__uint32_t)(((__uint32_t)(((struct nfsv3_time
*)(tl + 2))->nfsv3_sec) & 0xff) << 24 | ((__uint32_t
)(((struct nfsv3_time *)(tl + 2))->nfsv3_sec) & 0xff00
) << 8 | ((__uint32_t)(((struct nfsv3_time *)(tl + 2))->
nfsv3_sec) & 0xff0000) >> 8 | ((__uint32_t)(((struct
nfsv3_time *)(tl + 2))->nfsv3_sec) & 0xff000000) >>
24) : __swap32md(((struct nfsv3_time *)(tl + 2))->nfsv3_sec
)); (&_mtime)->tv_nsec = (__uint32_t)(__builtin_constant_p
(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec) ? (__uint32_t
)(((__uint32_t)(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec
) & 0xff) << 24 | ((__uint32_t)(((struct nfsv3_time
*)(tl + 2))->nfsv3_nsec) & 0xff00) << 8 | ((__uint32_t
)(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec) & 0xff0000
) >> 8 | ((__uint32_t)(((struct nfsv3_time *)(tl + 2))->
nfsv3_nsec) & 0xff000000) >> 24) : __swap32md(((struct
nfsv3_time *)(tl + 2))->nfsv3_nsec)); } while (0); if (wccflag
) { ttretf = (((&((struct nfsnode *)(dvp)->v_data)->
n_mtime)->tv_sec == (&_mtime)->tv_sec) ? ((&((struct
nfsnode *)(dvp)->v_data)->n_mtime)->tv_nsec != (&
_mtime)->tv_nsec) : ((&((struct nfsnode *)(dvp)->v_data
)->n_mtime)->tv_sec != (&_mtime)->tv_sec)); } } {
if (info.nmi_mrep != ((void *)0)) { struct vnode *ttvp = ((dvp
)); { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info
.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (4)) {
(tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (4); }
else if ((t1 = nfsm_disct(&info.nmi_md, &info.nmi_dpos
, (4), t1, &cp2)) != 0) { error = t1; m_freem(info.nmi_mrep
); goto nfsmout; } else { (tl) = (u_int32_t *)cp2; } }; if ((
(ttattrf) = ((int)(__uint32_t)(__builtin_constant_p((int32_t)
(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl)) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*tl)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl)) & 0xff000000) >> 24
) : __swap32md((int32_t)(*tl))))) != 0) { if ((t1 = nfs_loadattrcache
(&ttvp, &info.nmi_md, &info.nmi_dpos, ((void *)0)
)) != 0) { error = t1; (ttattrf) = 0; m_freem(info.nmi_mrep);
goto nfsmout; } ((dvp)) = ttvp; } } }; if (wccflag) { (wccflag
) = ttretf; } else { (wccflag) = ttattrf; } } } while (0)
;
1478 m_freem(info.nmi_mrep);
1479
1480nfsmout:
1481 if (error) {
1482 if (newvp) {
1483 vput(newvp);
1484 newvp = NULL((void *)0);
1485 }
1486 if (info.nmi_v3 && (fmode & O_EXCL0x0800) && error == NFSERR_NOTSUPP10004) {
1487 fmode &= ~O_EXCL0x0800;
1488 goto again;
1489 }
1490 } else if (info.nmi_v3 && (fmode & O_EXCL0x0800)) {
1491 getnanotime(&ts);
1492 if (vap->va_atime.tv_nsec == VNOVAL(-1))
1493 vap->va_atime = ts;
1494 if (vap->va_mtime.tv_nsec == VNOVAL(-1))
1495 vap->va_mtime = ts;
1496 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_proc);
1497 }
1498 if (!error) {
1499 if (cnp->cn_flags & MAKEENTRY0x004000)
1500 nfs_cache_enter(dvp, newvp, cnp);
1501 *ap->a_vpp = newvp;
1502 }
1503 pool_put(&namei_pool, cnp->cn_pnbuf);
1504 VTONFS(dvp)((struct nfsnode *)(dvp)->v_data)->n_flag |= NMODIFIED0x0004;
1505 if (!wccflag)
1506 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp))((((struct nfsnode *)(dvp)->v_data))->n_attrstamp = 0);
1507 VN_KNOTE(ap->a_dvp, NOTE_WRITE)do { struct klist *__list = (&ap->a_dvp->v_selectinfo
.si_note); if (__list != ((void *)0)) knote(__list, (0x0002))
; } while (0)
;
1508 return (error);
1509}
1510
1511/*
1512 * nfs file remove call
1513 * To try and make nfs semantics closer to ufs semantics, a file that has
1514 * other processes using the vnode is renamed instead of removed and then
1515 * removed later on the last close.
1516 * - If v_usecount > 1
1517 * If a rename is not already in the works
1518 * call nfs_sillyrename() to set it up
1519 * else
1520 * do the remove rpc
1521 */
1522int
1523nfs_remove(void *v)
1524{
1525 struct vop_remove_args *ap = v;
1526 struct vnode *vp = ap->a_vp;
1527 struct vnode *dvp = ap->a_dvp;
1528 struct componentname *cnp = ap->a_cnp;
1529 struct nfsnode *np = VTONFS(vp)((struct nfsnode *)(vp)->v_data);
1530 int error = 0;
1531 struct vattr vattr;
1532
1533#ifdef DIAGNOSTIC1
1534 if ((cnp->cn_flags & HASBUF0x000400) == 0)
1535 panic("nfs_remove: no name");
1536 if (vp->v_usecount < 1)
1537 panic("nfs_remove: bad v_usecount");
1538#endif
1539 if (vp->v_type == VDIR)
1540 error = EPERM1;
1541 else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1542 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 &&
1543 vattr.va_nlink > 1)) {
1544 /*
1545 * Purge the name cache so that the chance of a lookup for
1546 * the name succeeding while the remove is in progress is
1547 * minimized. Without node locking it can still happen, such
1548 * that an I/O op returns ESTALE, but since you get this if
1549 * another host removes the file..
1550 */
1551 cache_purge(vp);
1552 /*
1553 * throw away biocache buffers, mainly to avoid
1554 * unnecessary delayed writes later.
1555 */
1556 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_proc);
1557 /* Do the rpc */
1558 if (error != EINTR4)
1559 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1560 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc);
1561 /*
1562 * Kludge City: If the first reply to the remove rpc is lost..
1563 * the reply to the retransmitted request will be ENOENT
1564 * since the file was in fact removed
1565 * Therefore, we cheat and return success.
1566 */
1567 if (error == ENOENT2)
1568 error = 0;
1569 } else if (!np->n_sillyrename)
1570 error = nfs_sillyrename(dvp, vp, cnp);
1571 pool_put(&namei_pool, cnp->cn_pnbuf);
1572 NFS_INVALIDATE_ATTRCACHE(np)((np)->n_attrstamp = 0);
1573 VN_KNOTE(vp, NOTE_DELETE)do { struct klist *__list = (&vp->v_selectinfo.si_note
); if (__list != ((void *)0)) knote(__list, (0x0001)); } while
(0)
;
1574 VN_KNOTE(dvp, NOTE_WRITE)do { struct klist *__list = (&dvp->v_selectinfo.si_note
); if (__list != ((void *)0)) knote(__list, (0x0002)); } while
(0)
;
1575 if (vp == dvp)
1576 vrele(vp);
1577 else
1578 vput(vp);
1579 vput(dvp);
1580 return (error);
1581}
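The block comment above nfs_remove() lays out the remove-versus-sillyrename policy; reduced to plain flags (a hedged sketch with hypothetical names, folding the VOP_GETATTR success check into a known nlink value), the decision reads:

enum remove_action { RM_EPERM, RM_RPC_NOW, RM_SILLYRENAME, RM_ALREADY_DEFERRED };

enum remove_action
remove_policy(int is_dir, int usecount, int has_sillyrename, int nlink)
{
	if (is_dir)
		return RM_EPERM;		/* directories are removed via RMDIR */
	if (usecount == 1 ||			/* last user: remove for real, or */
	    (has_sillyrename && nlink > 1))	/* already renamed and other links remain */
		return RM_RPC_NOW;
	if (!has_sillyrename)
		return RM_SILLYRENAME;		/* rename to .nfsXXXX; remove on last close */
	return RM_ALREADY_DEFERRED;		/* busy and already silly-renamed: nothing to send now */
}

The fourth case is the quiet one in the code above: nothing is sent, because the earlier rename already guarantees the file survives until the last close.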
1582
1583/*
1584 * nfs file remove rpc called from nfs_inactive
1585 */
1586int
1587nfs_removeit(struct sillyrename *sp)
1588{
1589 KASSERT(VOP_ISLOCKED(sp->s_dvp))((VOP_ISLOCKED(sp->s_dvp)) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/nfs/nfs_vnops.c", 1589, "VOP_ISLOCKED(sp->s_dvp)"
))
;
1. Assuming the condition is true
2. '?' condition is true
1590 /*
1591 * Make sure that the directory vnode is still valid.
1592 *
1593 * NFS can potentially try to nuke a silly *after* the directory
1594 * has already been pushed out on a forced unmount. Since the silly
1595 * is going to go away anyway, this is fine.
1596 */
1597 if (sp->s_dvp->v_type == VBAD)
3. Assuming field 'v_type' is not equal to VBAD
4. Taking false branch
1598 return (0);
1599 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
5. Calling 'nfs_removerpc'
1600 NULL((void *)0)));
1601}
1602
1603/*
1604 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1605 */
1606int
1607nfs_removerpc(struct vnode *dvp, char *name, int namelen, struct ucred *cred,
1608 struct proc *proc)
1609{
1610 struct nfsm_info info;
1611 u_int32_t *tl;
1612 int32_t t1;
1613 caddr_t cp2;
1614 int error = 0, wccflag = NFSV3_WCCRATTR0;
1615
1616 info.nmi_v3 = NFS_ISV3(dvp)(((struct nfsmount *)(((dvp)->v_mount)->mnt_data))->
nm_flag & 0x00000200)
;
1617
1618 nfsstats.rpccnt[NFSPROC_REMOVE12]++;
1619 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32) +
6. Assuming field 'nmi_v3' is 0
7. '?' condition is false
1620 NFSX_UNSIGNED4 + nfsm_rndup(namelen)(((namelen)+3)&(~0x3)));
1621 nfsm_fhtom(&info, dvp, info.nmi_v3);
1622 nfsm_strtom(name, namelen, NFS_MAXNAMLEN)if ((namelen) > (255)) { m_freem(info.nmi_mreq); error = 63
; goto nfsmout; } nfsm_strtombuf(&info.nmi_mb, (name), (namelen
))
;
8. Assuming 'namelen' is <= 255
9. Taking false branch
1623
1624 info.nmi_procp = proc;
1625 info.nmi_cred = cred;
1626 error = nfs_request(dvp, NFSPROC_REMOVE12, &info);
1627 if (info.nmi_v3)
10. Assuming field 'nmi_v3' is not equal to 0
11. Taking true branch
1628 nfsm_wcc_data(dvp, wccflag)do { if (info.nmi_mrep != ((void *)0)) { struct timespec _mtime
; int ttattrf, ttretf = 0; { t1 = ((caddr_t)((info.nmi_md)->
m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info
.nmi_dpos += (4); } else if ((t1 = nfsm_disct(&info.nmi_md
, &info.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1;
m_freem(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t
*)cp2; } }; if (*tl == nfs_true) { { t1 = ((caddr_t)((info.nmi_md
)->m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (6 * 4)) { (tl) = (u_int32_t *)(info.nmi_dpos)
; info.nmi_dpos += (6 * 4); } else if ((t1 = nfsm_disct(&
info.nmi_md, &info.nmi_dpos, (6 * 4), t1, &cp2)) != 0
) { error = t1; m_freem(info.nmi_mrep); goto nfsmout; } else {
(tl) = (u_int32_t *)cp2; } }; do { (&_mtime)->tv_sec =
(__uint32_t)(__builtin_constant_p(((struct nfsv3_time *)(tl +
2))->nfsv3_sec) ? (__uint32_t)(((__uint32_t)(((struct nfsv3_time
*)(tl + 2))->nfsv3_sec) & 0xff) << 24 | ((__uint32_t
)(((struct nfsv3_time *)(tl + 2))->nfsv3_sec) & 0xff00
) << 8 | ((__uint32_t)(((struct nfsv3_time *)(tl + 2))->
nfsv3_sec) & 0xff0000) >> 8 | ((__uint32_t)(((struct
nfsv3_time *)(tl + 2))->nfsv3_sec) & 0xff000000) >>
24) : __swap32md(((struct nfsv3_time *)(tl + 2))->nfsv3_sec
)); (&_mtime)->tv_nsec = (__uint32_t)(__builtin_constant_p
(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec) ? (__uint32_t
)(((__uint32_t)(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec
) & 0xff) << 24 | ((__uint32_t)(((struct nfsv3_time
*)(tl + 2))->nfsv3_nsec) & 0xff00) << 8 | ((__uint32_t
)(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec) & 0xff0000
) >> 8 | ((__uint32_t)(((struct nfsv3_time *)(tl + 2))->
nfsv3_nsec) & 0xff000000) >> 24) : __swap32md(((struct
nfsv3_time *)(tl + 2))->nfsv3_nsec)); } while (0); if (wccflag
) { ttretf = (((&((struct nfsnode *)(dvp)->v_data)->
n_mtime)->tv_sec == (&_mtime)->tv_sec) ? ((&((struct
nfsnode *)(dvp)->v_data)->n_mtime)->tv_nsec != (&
_mtime)->tv_nsec) : ((&((struct nfsnode *)(dvp)->v_data
)->n_mtime)->tv_sec != (&_mtime)->tv_sec)); } } {
if (info.nmi_mrep != ((void *)0)) { struct vnode *ttvp = ((dvp
)); { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info
.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (4)) {
(tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (4); }
else if ((t1 = nfsm_disct(&info.nmi_md, &info.nmi_dpos
, (4), t1, &cp2)) != 0) { error = t1; m_freem(info.nmi_mrep
); goto nfsmout; } else { (tl) = (u_int32_t *)cp2; } }; if ((
(ttattrf) = ((int)(__uint32_t)(__builtin_constant_p((int32_t)
(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl)) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*tl)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl)) & 0xff000000) >> 24
) : __swap32md((int32_t)(*tl))))) != 0) { if ((t1 = nfs_loadattrcache
(&ttvp, &info.nmi_md, &info.nmi_dpos, ((void *)0)
)) != 0) { error = t1; (ttattrf) = 0; m_freem(info.nmi_mrep);
goto nfsmout; } ((dvp)) = ttvp; } } }; if (wccflag) { (wccflag
) = ttretf; } else { (wccflag) = ttattrf; } } } while (0)
;
12. Assuming field 'nmi_mrep' is not equal to null
13. Taking true branch
14. 'ttattrf' declared without an initial value
15. Assuming 't1' is < 4
16. Taking false branch
17. Assuming the condition is false
18. Taking false branch
19. Assuming the condition is false
20. Taking false branch
21. Assuming field 'nmi_mrep' is equal to null
22. Taking false branch
23. Taking false branch
24. Assigned value is garbage or undefined
1629 m_freem(info.nmi_mrep);
1630
1631nfsmout:
1632 VTONFS(dvp)((struct nfsnode *)(dvp)->v_data)->n_flag |= NMODIFIED0x0004;
1633 if (!wccflag)
1634 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp))((((struct nfsnode *)(dvp)->v_data))->n_attrstamp = 0);
1635 return (error);
1636}
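This function contains the assignment flagged in the bug summary (line 1628, column 3). Following the numbered path: the expansion of nfsm_wcc_data(dvp, wccflag) declares ttattrf without an initializer (step 14); after the opaque nfsm_disct() call in steps 15-18, the analyzer seemingly no longer trusts what it assumed about info.nmi_mrep at step 12 (the call takes pointers into info), the "before" attributes are taken as absent (steps 19-20), and the nested info.nmi_mrep != NULL check is assumed false (steps 21-22), so ttattrf is never written. wccflag is still NFSV3_WCCRATTR (0), so the trailing else branch executes wccflag = ttattrf with an indeterminate value (steps 23-24). Whether that path is reachable in practice depends on whether nfsm_disct() can really clear nmi_mrep, which the analyzer cannot see from this translation unit. A stripped-down model of the same shape, with hypothetical names, reproduces the report:

#include <stddef.h>

struct reply_info {
	void *mrep;			/* reply mbuf chain; NULL when the RPC failed */
};

/* stand-in for nfsm_disct()/nfs_loadattrcache(): opaque helper */
int model_parse(struct reply_info *info);

int
model_wcc_check(struct reply_info *info, int wccflag)
{
	int ttattrf;			/* step 14: no initial value */
	int ttretf = 0;

	if (info->mrep != NULL) {			/* steps 12-13 */
		if (model_parse(info) != 0)		/* steps 15-18: opaque call */
			return wccflag;
		/* steps 19-20: "before" attributes absent, ttretf stays 0 */
		if (info->mrep != NULL)			/* step 21: assumed NULL now */
			ttattrf = model_parse(info);	/* only write to ttattrf */
		if (wccflag)				/* step 23: false, wccflag == 0 */
			wccflag = ttretf;
		else
			wccflag = ttattrf;		/* step 24: may read garbage */
	}
	return wccflag;
}

In the model, giving ttattrf an initial value (or caching the mrep test in one place) makes the warning go away; whether the real macro should change is a separate question from whether this report is a true positive.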
1637
1638/*
1639 * nfs file rename call
1640 */
1641int
1642nfs_rename(void *v)
1643{
1644 struct vop_rename_args *ap = v;
1645 struct vnode *fvp = ap->a_fvp;
1646 struct vnode *tvp = ap->a_tvp;
1647 struct vnode *fdvp = ap->a_fdvp;
1648 struct vnode *tdvp = ap->a_tdvp;
1649 struct componentname *tcnp = ap->a_tcnp;
1650 struct componentname *fcnp = ap->a_fcnp;
1651 int error;
1652
1653#ifdef DIAGNOSTIC1
1654 if ((tcnp->cn_flags & HASBUF0x000400) == 0 ||
1655 (fcnp->cn_flags & HASBUF0x000400) == 0)
1656 panic("nfs_rename: no name");
1657#endif
1658 /* Check for cross-device rename */
1659 if ((fvp->v_mount != tdvp->v_mount) ||
1660 (tvp && (fvp->v_mount != tvp->v_mount))) {
1661 error = EXDEV18;
1662 goto out;
1663 }
1664
1665 /*
1666 * If the tvp exists and is in use, sillyrename it before doing the
1667 * rename of the new file over it.
1668 */
1669 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)((struct nfsnode *)(tvp)->v_data)->n_sillyrename &&
1670 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
1671 VN_KNOTE(tvp, NOTE_DELETE)do { struct klist *__list = (&tvp->v_selectinfo.si_note
); if (__list != ((void *)0)) knote(__list, (0x0001)); } while
(0)
;
1672 vput(tvp);
1673 tvp = NULL((void *)0);
1674 }
1675
1676 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1677 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1678 tcnp->cn_proc);
1679
1680 VN_KNOTE(fdvp, NOTE_WRITE)do { struct klist *__list = (&fdvp->v_selectinfo.si_note
); if (__list != ((void *)0)) knote(__list, (0x0002)); } while
(0)
;
1681 VN_KNOTE(tdvp, NOTE_WRITE)do { struct klist *__list = (&tdvp->v_selectinfo.si_note
); if (__list != ((void *)0)) knote(__list, (0x0002)); } while
(0)
;
1682
1683 if (fvp->v_type == VDIR) {
1684 if (tvp != NULL((void *)0) && tvp->v_type == VDIR)
1685 cache_purge(tdvp);
1686 cache_purge(fdvp);
1687 }
1688out:
1689 if (tdvp == tvp)
1690 vrele(tdvp);
1691 else
1692 vput(tdvp);
1693 if (tvp)
1694 vput(tvp);
1695 vrele(fdvp);
1696 vrele(fvp);
1697 /*
1698 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1699 */
1700 if (error == ENOENT2)
1701 error = 0;
1702 return (error);
1703}
1704
1705/*
1706 * nfs file rename rpc called from nfs_remove() above
1707 */
1708int
1709nfs_renameit(struct vnode *sdvp, struct componentname *scnp,
1710 struct sillyrename *sp)
1711{
1712 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
1713 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc
));
1714}
1715
1716/*
1717 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1718 */
1719int
1720nfs_renamerpc(struct vnode *fdvp, char *fnameptr, int fnamelen,
1721 struct vnode *tdvp, char *tnameptr, int tnamelen, struct ucred *cred,
1722 struct proc *proc)
1723{
1724 struct nfsm_info info;
1725 u_int32_t *tl;
1726 int32_t t1;
1727 caddr_t cp2;
1728 int error = 0, fwccflag = NFSV3_WCCRATTR0, twccflag = NFSV3_WCCRATTR0;
1729
1730 info.nmi_v3 = NFS_ISV3(fdvp)(((struct nfsmount *)(((fdvp)->v_mount)->mnt_data))->
nm_flag & 0x00000200)
;
1731
1732 nfsstats.rpccnt[NFSPROC_RENAME14]++;
1733 info.nmi_mb = info.nmi_mreq = nfsm_reqhead((NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32) +
1734 NFSX_UNSIGNED4) * 2 + nfsm_rndup(fnamelen)(((fnamelen)+3)&(~0x3)) + nfsm_rndup(tnamelen)(((tnamelen)+3)&(~0x3)));
1735 nfsm_fhtom(&info, fdvp, info.nmi_v3);
1736 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN)if ((fnamelen) > (255)) { m_freem(info.nmi_mreq); error = 63
; goto nfsmout; } nfsm_strtombuf(&info.nmi_mb, (fnameptr)
, (fnamelen))
;
1737 nfsm_fhtom(&info, tdvp, info.nmi_v3);
1738 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN)if ((tnamelen) > (255)) { m_freem(info.nmi_mreq); error = 63
; goto nfsmout; } nfsm_strtombuf(&info.nmi_mb, (tnameptr)
, (tnamelen))
;
1739
1740 info.nmi_procp = proc;
1741 info.nmi_cred = cred;
1742 error = nfs_request(fdvp, NFSPROC_RENAME14, &info);
1743 if (info.nmi_v3) {
 1744 nfsm_wcc_data(fdvp, fwccflag);
 1745 nfsm_wcc_data(tdvp, twccflag);
1746 }
1747 m_freem(info.nmi_mrep);
1748
1749nfsmout:
1750 VTONFS(fdvp)((struct nfsnode *)(fdvp)->v_data)->n_flag |= NMODIFIED0x0004;
1751 VTONFS(tdvp)((struct nfsnode *)(tdvp)->v_data)->n_flag |= NMODIFIED0x0004;
1752 if (!fwccflag)
1753 NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp))((((struct nfsnode *)(fdvp)->v_data))->n_attrstamp = 0);
1754 if (!twccflag)
1755 NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp))((((struct nfsnode *)(tdvp)->v_data))->n_attrstamp = 0);
1756 return (error);
1757}
1758
1759/*
1760 * nfs hard link create call
1761 */
1762int
1763nfs_link(void *v)
1764{
1765 struct vop_link_args *ap = v;
1766 struct vnode *vp = ap->a_vp;
1767 struct vnode *dvp = ap->a_dvp;
1768 struct componentname *cnp = ap->a_cnp;
1769 struct nfsm_info info;
1770 u_int32_t *tl;
1771 int32_t t1;
1772 caddr_t cp2;
1773 int error = 0, wccflag = NFSV3_WCCRATTR0, attrflag = 0;
1774
 1775 info.nmi_v3 = NFS_ISV3(vp);
1776
1777 if (dvp->v_mount != vp->v_mount) {
1778 pool_put(&namei_pool, cnp->cn_pnbuf);
1779 vput(dvp);
1780 return (EXDEV18);
1781 }
1782 error = vn_lock(vp, LK_EXCLUSIVE0x0001UL);
1783 if (error != 0) {
1784 VOP_ABORTOP(dvp, cnp);
1785 vput(dvp);
1786 return (error);
1787 }
1788
1789 /*
1790 * Push all writes to the server, so that the attribute cache
1791 * doesn't get "out of sync" with the server.
1792 * XXX There should be a better way!
1793 */
1794 VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT1, cnp->cn_proc);
1795
1796 nfsstats.rpccnt[NFSPROC_LINK15]++;
1797 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(2 * NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32) +
1798 NFSX_UNSIGNED4 + nfsm_rndup(cnp->cn_namelen)(((cnp->cn_namelen)+3)&(~0x3)));
1799 nfsm_fhtom(&info, vp, info.nmi_v3);
1800 nfsm_fhtom(&info, dvp, info.nmi_v3);
 1801 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1802
1803 info.nmi_procp = cnp->cn_proc;
1804 info.nmi_cred = cnp->cn_cred;
1805 error = nfs_request(vp, NFSPROC_LINK15, &info);
1806 if (info.nmi_v3) {
 1807 nfsm_postop_attr(vp, attrflag);
 1808 nfsm_wcc_data(dvp, wccflag);
1809 }
1810 m_freem(info.nmi_mrep);
1811nfsmout:
1812 pool_put(&namei_pool, cnp->cn_pnbuf);
1813 VTONFS(dvp)((struct nfsnode *)(dvp)->v_data)->n_flag |= NMODIFIED0x0004;
1814 if (!attrflag)
1815 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp))((((struct nfsnode *)(vp)->v_data))->n_attrstamp = 0);
1816 if (!wccflag)
1817 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp))((((struct nfsnode *)(dvp)->v_data))->n_attrstamp = 0);
1818
 1819 VN_KNOTE(vp, NOTE_LINK);
 1820 VN_KNOTE(dvp, NOTE_WRITE);
1821 VOP_UNLOCK(vp);
1822 vput(dvp);
1823 return (error);
1824}
1825
1826/*
1827 * nfs symbolic link create call
1828 */
1829int
1830nfs_symlink(void *v)
1831{
1832 struct vop_symlink_args *ap = v;
1833 struct vnode *dvp = ap->a_dvp;
1834 struct vattr *vap = ap->a_vap;
1835 struct componentname *cnp = ap->a_cnp;
1836 struct nfsv2_sattr *sp;
1837 struct nfsm_info info;
1838 u_int32_t *tl;
1839 int32_t t1;
1840 caddr_t cp2;
1841 int slen, error = 0, wccflag = NFSV3_WCCRATTR0, gotvp;
1842 struct vnode *newvp = NULL((void *)0);
1843
 1844 info.nmi_v3 = NFS_ISV3(dvp);
1845
1846 nfsstats.rpccnt[NFSPROC_SYMLINK10]++;
1847 slen = strlen(ap->a_target);
1848 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32) +
1849 2 * NFSX_UNSIGNED4 + nfsm_rndup(cnp->cn_namelen)(((cnp->cn_namelen)+3)&(~0x3)) + nfsm_rndup(slen)(((slen)+3)&(~0x3)) +
1850 NFSX_SATTR(info.nmi_v3)((info.nmi_v3) ? 60 : 32));
1851 nfsm_fhtom(&info, dvp, info.nmi_v3);
 1852 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1853 if (info.nmi_v3)
1854 nfsm_v3attrbuild(&info.nmi_mb, vap, 0);
 1855 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
1856 if (!info.nmi_v3) {
1857 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR32);
 1858 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
1859 sp->sa_uid = nfs_xdrneg1;
1860 sp->sa_gid = nfs_xdrneg1;
1861 sp->sa_size = nfs_xdrneg1;
1862 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1863 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1864 }
1865
1866 info.nmi_procp = cnp->cn_proc;
1867 info.nmi_cred = cnp->cn_cred;
1868 error = nfs_request(dvp, NFSPROC_SYMLINK10, &info);
1869 if (info.nmi_v3) {
1870 if (!error)
 1871 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp);
 1872 nfsm_wcc_data(dvp, wccflag);
1873 }
1874 m_freem(info.nmi_mrep);
1875
1876nfsmout:
1877 if (newvp)
1878 vput(newvp);
1879 pool_put(&namei_pool, cnp->cn_pnbuf);
1880 VTONFS(dvp)((struct nfsnode *)(dvp)->v_data)->n_flag |= NMODIFIED0x0004;
1881 if (!wccflag)
1882 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp))((((struct nfsnode *)(dvp)->v_data))->n_attrstamp = 0);
 1883 VN_KNOTE(dvp, NOTE_WRITE);
1884 vput(dvp);
1885 return (error);
1886}
1887
1888/*
1889 * nfs make dir call
1890 */
1891int
1892nfs_mkdir(void *v)
1893{
1894 struct vop_mkdir_args *ap = v;
1895 struct vnode *dvp = ap->a_dvp;
1896 struct vattr *vap = ap->a_vap;
1897 struct componentname *cnp = ap->a_cnp;
1898 struct nfsv2_sattr *sp;
1899 struct nfsm_info info;
1900 u_int32_t *tl;
1901 int32_t t1;
1902 int len;
1903 struct nfsnode *np = NULL((void *)0);
1904 struct vnode *newvp = NULL((void *)0);
1905 caddr_t cp2;
1906 int error = 0, wccflag = NFSV3_WCCRATTR0;
1907 int gotvp = 0;
1908
 1909 info.nmi_v3 = NFS_ISV3(dvp);
1910
1911 len = cnp->cn_namelen;
1912 nfsstats.rpccnt[NFSPROC_MKDIR9]++;
1913 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32) +
1914 NFSX_UNSIGNED4 + nfsm_rndup(len)(((len)+3)&(~0x3)) + NFSX_SATTR(info.nmi_v3)((info.nmi_v3) ? 60 : 32));
1915 nfsm_fhtom(&info, dvp, info.nmi_v3);
 1916 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
1917
1918 if (info.nmi_v3) {
1919 nfsm_v3attrbuild(&info.nmi_mb, vap, 0);
1920 } else {
1921 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR32);
 1922 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
1923 sp->sa_uid = nfs_xdrneg1;
1924 sp->sa_gid = nfs_xdrneg1;
1925 sp->sa_size = nfs_xdrneg1;
1926 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1927 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1928 }
1929
1930 info.nmi_procp = cnp->cn_proc;
1931 info.nmi_cred = cnp->cn_cred;
1932 error = nfs_request(dvp, NFSPROC_MKDIR9, &info);
1933 if (!error)
 1934 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp);
1935 if (info.nmi_v3)
 1936 nfsm_wcc_data(dvp, wccflag);
1937 m_freem(info.nmi_mrep);
1938
1939nfsmout:
1940 VTONFS(dvp)((struct nfsnode *)(dvp)->v_data)->n_flag |= NMODIFIED0x0004;
1941 if (!wccflag)
1942 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp))((((struct nfsnode *)(dvp)->v_data))->n_attrstamp = 0);
1943
1944 if (error == 0 && newvp == NULL((void *)0)) {
1945 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
1946 cnp->cn_proc, &np);
1947 if (!error) {
1948 newvp = NFSTOV(np)((np)->n_vnode);
1949 if (newvp->v_type != VDIR)
1950 error = EEXIST17;
1951 }
1952 }
1953 if (error) {
1954 if (newvp)
1955 vput(newvp);
1956 } else {
 1957 VN_KNOTE(dvp, NOTE_WRITE|NOTE_LINK);
1958 if (cnp->cn_flags & MAKEENTRY0x004000)
1959 nfs_cache_enter(dvp, newvp, cnp);
1960 *ap->a_vpp = newvp;
1961 }
1962 pool_put(&namei_pool, cnp->cn_pnbuf);
1963 vput(dvp);
1964 return (error);
1965}
1966
1967/*
1968 * nfs remove directory call
1969 */
1970int
1971nfs_rmdir(void *v)
1972{
1973 struct vop_rmdir_args *ap = v;
1974 struct vnode *vp = ap->a_vp;
1975 struct vnode *dvp = ap->a_dvp;
1976 struct componentname *cnp = ap->a_cnp;
1977 struct nfsm_info info;
1978 u_int32_t *tl;
1979 int32_t t1;
1980 caddr_t cp2;
1981 int error = 0, wccflag = NFSV3_WCCRATTR0;
1982
 1983 info.nmi_v3 = NFS_ISV3(dvp);
1984
1985 nfsstats.rpccnt[NFSPROC_RMDIR13]++;
1986 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32) +
1987 NFSX_UNSIGNED4 + nfsm_rndup(cnp->cn_namelen)(((cnp->cn_namelen)+3)&(~0x3)));
1988 nfsm_fhtom(&info, dvp, info.nmi_v3);
 1989 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1990
1991 info.nmi_procp = cnp->cn_proc;
1992 info.nmi_cred = cnp->cn_cred;
1993 error = nfs_request(dvp, NFSPROC_RMDIR13, &info);
1994 if (info.nmi_v3)
 1995 nfsm_wcc_data(dvp, wccflag);
1996 m_freem(info.nmi_mrep);
1997
1998nfsmout:
1999 pool_put(&namei_pool, cnp->cn_pnbuf);
2000 VTONFS(dvp)((struct nfsnode *)(dvp)->v_data)->n_flag |= NMODIFIED0x0004;
2001 if (!wccflag)
2002 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp))((((struct nfsnode *)(dvp)->v_data))->n_attrstamp = 0);
2003
 2004 VN_KNOTE(dvp, NOTE_WRITE|NOTE_LINK);
 2005 VN_KNOTE(vp, NOTE_DELETE);
2006
2007 cache_purge(vp);
2008 vput(vp);
2009 vput(dvp);
2010 /*
 2011 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
2012 */
2013 if (error == ENOENT2)
2014 error = 0;
2015 return (error);
2016}
2017
2018
2019/*
2020 * The readdir logic below has a big design bug. It stores the NFS cookie in
2021 * the returned uio->uio_offset but does not store the verifier (it cannot).
 2022 * Instead, the code stores the verifier in the nfsnode and applies that
 2023 * verifier to all cookies, no matter what verifier was originally with
2024 * the cookie.
2025 *
2026 * From a practical standpoint, this is not a problem since almost all
2027 * NFS servers do not change the validity of cookies across deletes
2028 * and inserts.
2029 */
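
A minimal user-space sketch of the packing that comment describes (an editorial illustration, not part of nfs_vnops.c): each NFS cookie is two 32-bit XDR words, and fxdr_hyper()/txdr_hyper() fold them into and out of the single 64-bit value that uio_offset and d_off can carry; the verifier has no slot there, which is why it is kept in the nfsnode. Byte-order conversion is omitted and the helper names below are made up for illustration.

#include <stdint.h>

/* Fold the two XDR cookie words into a 64-bit offset (the fxdr_hyper() idea). */
static uint64_t
cookie_to_offset(uint32_t w0, uint32_t w1)
{
	return ((uint64_t)w0 << 32) | w1;
}

/* Split the offset back into the two cookie words (the txdr_hyper() idea). */
static void
offset_to_cookie(uint64_t off, uint32_t *w0, uint32_t *w1)
{
	*w0 = (uint32_t)(off >> 32);
	*w1 = (uint32_t)(off & 0xffffffff);
}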
2030
2031struct nfs_dirent {
2032 u_int32_t cookie[2];
2033 struct dirent dirent;
2034};
2035
2036#define NFS_DIRHDSIZ	(sizeof (struct nfs_dirent) - (MAXNAMLEN + 1))
2037#define NFS_DIRENT_OVERHEAD	offsetof(struct nfs_dirent, dirent)
2038
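As a rough illustration of how these macros are used (a sketch inferred from the code below, not taken from the file): nfs_readdirrpc() sizes each staging record as DIRENT_RECSIZE(name length) plus NFS_DIRENT_OVERHEAD for the cookie words stored in front of the dirent, and nfs_readdir() later subtracts that overhead from d_reclen before the record is copied out to userland. The function name and parameters below are hypothetical.

#include <stddef.h>

/*
 * Illustrative only: dirent_name_off is offsetof(struct dirent, d_name),
 * cookie_overhead is offsetof(struct nfs_dirent, dirent).  The rounding
 * mirrors DIRENT_RECSIZE(); the result is what nfs_readdirrpc() calls tlen.
 */
static size_t
nfs_staging_recsize(size_t dirent_name_off, size_t cookie_overhead, int namelen)
{
	size_t drec = (dirent_name_off + (size_t)namelen + 1 + 7) & ~(size_t)7;

	return drec + cookie_overhead;
}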
2039/*
2040 * nfs readdir call
2041 */
2042int
2043nfs_readdir(void *v)
2044{
2045 struct vop_readdir_args *ap = v;
2046 struct vnode *vp = ap->a_vp;
2047 struct nfsnode *np = VTONFS(vp)((struct nfsnode *)(vp)->v_data);
2048 struct uio *uio = ap->a_uio;
2049 int tresid, error = 0;
2050 struct vattr vattr;
2051 int cnt;
2052 u_int64_t newoff = uio->uio_offset;
2053 struct nfsmount *nmp = VFSTONFS(vp->v_mount)((struct nfsmount *)((vp->v_mount)->mnt_data));
2054 struct uio readdir_uio;
2055 struct iovec readdir_iovec;
2056 struct proc * p = uio->uio_procp;
2057 int done = 0, eof = 0;
2058 struct ucred *cred = ap->a_cred;
2059 void *data;
2060
2061 if (vp->v_type != VDIR)
2062 return (EPERM1);
2063 /*
2064 * First, check for hit on the EOF offset cache
2065 */
2066 if (np->n_direofoffsetn_un2.nd_direof != 0 &&
2067 uio->uio_offset == np->n_direofoffsetn_un2.nd_direof) {
2068 if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 &&
 2069 timespeccmp(&np->n_mtime, &vattr.va_mtime, ==)) {
2070 nfsstats.direofcache_hits++;
2071 *ap->a_eofflag = 1;
2072 return (0);
2073 }
2074 }
2075
2076 if (uio->uio_resid < NFS_FABLKSIZE512)
2077 return (EINVAL22);
2078
2079 tresid = uio->uio_resid;
2080
2081 if (uio->uio_rw != UIO_READ)
2082 return (EINVAL22);
2083
2084 if ((nmp->nm_flag & (NFSMNT_NFSV30x00000200 | NFSMNT_GOTFSINFO0x00100000)) == NFSMNT_NFSV30x00000200)
2085 (void)nfs_fsinfo(nmp, vp, cred, p);
2086
2087 cnt = 5;
2088
2089 /* M_ZERO to avoid leaking kernel data in dirent padding */
2090 data = malloc(NFS_DIRBLKSIZ1024, M_TEMP127, M_WAITOK0x0001|M_ZERO0x0008);
2091 do {
2092 struct nfs_dirent *ndp = data;
2093
2094 readdir_iovec.iov_len = NFS_DIRBLKSIZ1024;
2095 readdir_iovec.iov_base = data;
2096 readdir_uio.uio_offset = newoff;
2097 readdir_uio.uio_iov = &readdir_iovec;
2098 readdir_uio.uio_iovcnt = 1;
2099 readdir_uio.uio_segflg = UIO_SYSSPACE;
2100 readdir_uio.uio_rw = UIO_READ;
2101 readdir_uio.uio_resid = NFS_DIRBLKSIZ1024;
 2102 readdir_uio.uio_procp = curproc;
2103
2104 if (nmp->nm_flag & NFSMNT_RDIRPLUS0x00010000) {
2105 error = nfs_readdirplusrpc(vp, &readdir_uio, cred,
2106 &eof, p);
2107 if (error == NFSERR_NOTSUPP10004)
2108 nmp->nm_flag &= ~NFSMNT_RDIRPLUS0x00010000;
2109 }
2110 if ((nmp->nm_flag & NFSMNT_RDIRPLUS0x00010000) == 0)
2111 error = nfs_readdirrpc(vp, &readdir_uio, cred, &eof);
2112
2113 if (error == NFSERR_BAD_COOKIE10003)
2114 error = EINVAL22;
2115
2116 while (error == 0 &&
2117 ndp < (struct nfs_dirent *)readdir_iovec.iov_base) {
2118 struct dirent *dp = &ndp->dirent;
2119 int reclen = dp->d_reclen;
2120
2121 dp->d_reclen -= NFS_DIRENT_OVERHEAD__builtin_offsetof(struct nfs_dirent, dirent);
 2122 dp->d_off = fxdr_hyper(&ndp->cookie[0]);
2123
2124 if (uio->uio_resid < dp->d_reclen) {
2125 eof = 0;
2126 done = 1;
2127 break;
2128 }
2129
2130 if ((error = uiomove(dp, dp->d_reclen, uio)))
2131 break;
2132
 2133 newoff = fxdr_hyper(&ndp->cookie[0]);
2134
2135 ndp = (struct nfs_dirent *)((u_int8_t *)ndp + reclen);
2136 }
2137 } while (!error && !done && !eof && cnt--);
2138
2139 free(data, M_TEMP127, NFS_DIRBLKSIZ1024);
2140 data = NULL((void *)0);
2141
2142 uio->uio_offset = newoff;
2143
2144 if (!error && (eof || uio->uio_resid == tresid)) {
2145 nfsstats.direofcache_misses++;
2146 *ap->a_eofflag = 1;
2147 return (0);
2148 }
2149
2150 *ap->a_eofflag = 0;
2151 return (error);
2152}
2153
2154
2155/*
 2156 * The function below stuffs the cookies in after the name
2157 */
2158
2159/*
2160 * Readdir rpc call.
2161 */
2162int
2163nfs_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
2164 int *end_of_directory)
2165{
2166 int len, left;
2167 struct nfs_dirent *ndp = NULL((void *)0);
2168 struct dirent *dp = NULL((void *)0);
2169 struct nfsm_info info;
2170 u_int32_t *tl;
2171 caddr_t cp;
2172 int32_t t1;
2173 caddr_t cp2;
2174 nfsuint64 cookie;
2175 struct nfsmount *nmp = VFSTONFS(vp->v_mount)((struct nfsmount *)((vp->v_mount)->mnt_data));
2176 struct nfsnode *dnp = VTONFS(vp)((struct nfsnode *)(vp)->v_data);
2177 u_quad_t fileno;
2178 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
2179 int attrflag;
2180
 2181 info.nmi_v3 = NFS_ISV3(vp);
2182
2183#ifdef DIAGNOSTIC1
2184 if (uiop->uio_iovcnt != 1 ||
2185 (uiop->uio_resid & (NFS_DIRBLKSIZ1024 - 1)))
2186 panic("nfs readdirrpc bad uio");
2187#endif
2188
 2189 txdr_hyper(uiop->uio_offset, &cookie.nfsuquad[0]);
2190
2191 /*
2192 * Loop around doing readdir rpc's of size nm_readdirsize
2193 * truncated to a multiple of NFS_READDIRBLKSIZ.
 2194 * The stopping criterion is EOF or a full buffer.
2195 */
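For the truncation that comment mentions, a small standalone sketch (an editorial illustration assuming NFS_READDIRBLKSIZ stays a power of two, 512 here; the helper name and the one-block floor are made up): the request size is masked down to a whole number of directory blocks.

#define NFS_READDIRBLKSIZ	512	/* power of two */

/* Round a readdir request size down to a multiple of the block size. */
static unsigned int
readdir_request_size(unsigned int nm_readdirsize)
{
	unsigned int sz = nm_readdirsize & ~(NFS_READDIRBLKSIZ - 1U);

	return sz != 0 ? sz : NFS_READDIRBLKSIZ;	/* at least one block */
}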
2196 while (more_dirs && bigenough) {
2197 nfsstats.rpccnt[NFSPROC_READDIR16]++;
2198 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32)
2199 + NFSX_READDIR(info.nmi_v3)((info.nmi_v3) ? (5 * 4) : (2 * 4)));
2200 nfsm_fhtom(&info, vp, info.nmi_v3);
2201 if (info.nmi_v3) {
2202 tl = nfsm_build(&info.nmi_mb, 5 * NFSX_UNSIGNED4);
2203 *tl++ = cookie.nfsuquad[0];
2204 *tl++ = cookie.nfsuquad[1];
2205 if (cookie.nfsuquad[0] == 0 &&
2206 cookie.nfsuquad[1] == 0) {
2207 *tl++ = 0;
2208 *tl++ = 0;
2209 } else {
2210 *tl++ = dnp->n_cookieverfn_un1.nd_cookieverf.nfsuquad[0];
2211 *tl++ = dnp->n_cookieverfn_un1.nd_cookieverf.nfsuquad[1];
2212 }
2213 } else {
2214 tl = nfsm_build(&info.nmi_mb, 2 * NFSX_UNSIGNED4);
2215 *tl++ = cookie.nfsuquad[1];
2216 }
 2217 *tl = txdr_unsigned(nmp->nm_readdirsize);
2218
2219 info.nmi_procp = uiop->uio_procp;
2220 info.nmi_cred = cred;
2221 error = nfs_request(vp, NFSPROC_READDIR16, &info);
2222 if (info.nmi_v3)
 2223 nfsm_postop_attr(vp, attrflag);
2224
2225 if (error) {
2226 m_freem(info.nmi_mrep);
2227 goto nfsmout;
2228 }
2229
2230 if (info.nmi_v3) {
 2231 nfsm_dissect(tl, u_int32_t *,
 2232     2 * NFSX_UNSIGNED);
2233 dnp->n_cookieverfn_un1.nd_cookieverf.nfsuquad[0] = *tl++;
2234 dnp->n_cookieverfn_un1.nd_cookieverf.nfsuquad[1] = *tl;
2235 }
2236
 2237 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
 2238 more_dirs = fxdr_unsigned(int, *tl);
2239
2240 /* loop thru the dir entries, doctoring them to dirent form */
2241 while (more_dirs && bigenough) {
2242 if (info.nmi_v3) {
 2243 nfsm_dissect(tl, u_int32_t *,
 2244     3 * NFSX_UNSIGNED);
 2245 fileno = fxdr_hyper(tl);
 2246 len = fxdr_unsigned(int, *(tl + 2));
2247 } else {
 2248 nfsm_dissect(tl, u_int32_t *,
 2249     2 * NFSX_UNSIGNED);
 2250 fileno = fxdr_unsigned(u_quad_t, *tl++);
 2251 len = fxdr_unsigned(int, *tl);
2252 }
2253 if (len <= 0 || len > NFS_MAXNAMLEN255) {
2254 error = EBADRPC72;
2255 m_freem(info.nmi_mrep);
2256 goto nfsmout;
2257 }
 2258 tlen = DIRENT_RECSIZE(len) + NFS_DIRENT_OVERHEAD;
2259 left = NFS_READDIRBLKSIZ512 - blksiz;
2260 if (tlen > left) {
2261 dp->d_reclen += left;
2262 uiop->uio_iov->iov_base += left;
2263 uiop->uio_iov->iov_len -= left;
2264 uiop->uio_resid -= left;
2265 blksiz = 0;
2266 }
2267 if (tlen > uiop->uio_resid)
2268 bigenough = 0;
2269 if (bigenough) {
2270 ndp = (struct nfs_dirent *)
2271 uiop->uio_iov->iov_base;
2272 dp = &ndp->dirent;
2273 dp->d_fileno = fileno;
2274 dp->d_namlen = len;
2275 dp->d_reclen = tlen;
2276 dp->d_type = DT_UNKNOWN0;
2277 blksiz += tlen;
2278 if (blksiz == NFS_READDIRBLKSIZ512)
2279 blksiz = 0;
2280 uiop->uio_resid -= NFS_DIRHDSIZ(sizeof (struct nfs_dirent) - (255 + 1));
2281 uiop->uio_iov->iov_base =
2282 (char *)uiop->uio_iov->iov_base +
2283 NFS_DIRHDSIZ(sizeof (struct nfs_dirent) - (255 + 1));
2284 uiop->uio_iov->iov_len -= NFS_DIRHDSIZ(sizeof (struct nfs_dirent) - (255 + 1));
 2285 nfsm_mtouio(uiop, len);
2286 cp = uiop->uio_iov->iov_base;
2287 tlen -= NFS_DIRHDSIZ(sizeof (struct nfs_dirent) - (255 + 1)) + len;
2288 *cp = '\0'; /* null terminate */
2289 uiop->uio_iov->iov_base += tlen;
2290 uiop->uio_iov->iov_len -= tlen;
2291 uiop->uio_resid -= tlen;
2292 } else
2293 nfsm_adv(nfsm_rndup(len)){ t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info.nmi_md
->m_hdr.mh_len - info.nmi_dpos; if (t1 >= ((((len)+3)&
(~0x3)))) { info.nmi_dpos += ((((len)+3)&(~0x3))); } else
if ((t1 = nfs_adv(&info.nmi_md, &info.nmi_dpos, ((((
len)+3)&(~0x3))), t1)) != 0) { error = t1; m_freem(info.nmi_mrep
); goto nfsmout; } }
;
2294 if (info.nmi_v3) {
2295 nfsm_dissect(tl, u_int32_t *,
2296 3 * NFSX_UNSIGNED);
2297 } else {
2298 nfsm_dissect(tl, u_int32_t *,
2299 2 * NFSX_UNSIGNED);
2300 }
2301 if (bigenough) {
2302 if (info.nmi_v3) {
2303 ndp->cookie[0] = cookie.nfsuquad[0] =
2304 *tl++;
2305 } else
2306 ndp->cookie[0] = 0;
2307
2308 ndp->cookie[1] = cookie.nfsuquad[1] = *tl++;
2309 } else if (info.nmi_v3)
2310 tl += 2;
2311 else
2312 tl++;
2313 more_dirs = fxdr_unsigned(int, *tl);
2314 }
2315 /*
2316 * If at end of rpc data, get the eof boolean
2317 */
2318 if (!more_dirs) {
2319 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2320 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2321 }
2322 m_freem(info.nmi_mrep);
2323 }
2324 /*
2325 * Fill last record, iff any, out to a multiple of NFS_READDIRBLKSIZ
2326 * by increasing d_reclen for the last record.
2327 */
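/*
 * Worked example of the padding below (client-side bookkeeping only):
 * with NFS_READDIRBLKSIZ of 512 and blksiz of 468 after the last entry,
 * left is 44, so the final d_reclen grows by 44 bytes and the uio is
 * advanced past the slack, leaving the caller's buffer consumed in
 * whole 512-byte chunks.
 */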
2328 if (blksiz > 0) {
2329 left = NFS_READDIRBLKSIZ - blksiz;
2330 dp->d_reclen += left;
2331 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base +
2332 left;
2333 uiop->uio_iov->iov_len -= left;
2334 uiop->uio_resid -= left;
2335 }
2336
2337 /*
2338 * We are now either at the end of the directory or have filled the
2339 * block.
2340 */
2341 if (bigenough) {
2342 dnp->n_direofoffset = fxdr_hyper(&cookie.nfsuquad[0]);
2343 if (end_of_directory) *end_of_directory = 1;
2344 } else {
2345 if (uiop->uio_resid > 0)
2346 printf("EEK! readdirrpc resid > 0\n");
2347 }
2348
2349nfsmout:
2350 return (error);
2351}
2352
2353/*
2354 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2355 */
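/*
 * In the V3 READDIRPLUS reply (RFC 1813) each entry also carries post-op
 * attributes and a file handle, which the loop below uses to prime
 * nfsnodes and the name cache while converting entries to dirents.
 */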
2356int
2357nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
2358 int *end_of_directory, struct proc *p)
2359{
2360 int len, left;
2361 struct nfs_dirent *ndirp = NULL;
2362 struct dirent *dp = NULL;
2363 struct nfsm_info info;
2364 u_int32_t *tl;
2365 caddr_t cp;
2366 int32_t t1;
2367 struct vnode *newvp;
2368 caddr_t cp2, dpossav1, dpossav2;
2369 struct mbuf *mdsav1, *mdsav2;
2370 struct nameidata nami, *ndp = &nami;
2371 struct componentname *cnp = &ndp->ni_cnd;
2372 nfsuint64 cookie;
2373 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2374 struct nfsnode *dnp = VTONFS(vp), *np;
2375 nfsfh_t *fhp;
2376 u_quad_t fileno;
2377 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2378 int attrflag, fhsize;
2379
2380#ifdef DIAGNOSTIC
2381 if (uiop->uio_iovcnt != 1 ||
2382 (uiop->uio_resid & (NFS_DIRBLKSIZ - 1)))
2383 panic("nfs readdirplusrpc bad uio");
2384#endif
2385 NDINIT(ndp, 0, 0, UIO_SYSSPACE, NULL, p);
2386 ndp->ni_dvp = vp;
2387 newvp = NULLVP;
2388
2389 txdr_hyper(uiop->uio_offset, &cookie.nfsuquad[0]);
2390
2391 /*
2392 * Loop around doing readdir rpc's of size nm_readdirsize
2393 * truncated to a multiple of NFS_READDIRBLKSIZ.
2394 * The stopping criteria is EOF or buffer full.
2395 */
2396 while (more_dirs && bigenough) {
2397 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2398 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2399 nfsm_fhtom(&info, vp, 1);
2400 tl = nfsm_build(&info.nmi_mb, 6 * NFSX_UNSIGNED);
2401 *tl++ = cookie.nfsuquad[0];
2402 *tl++ = cookie.nfsuquad[1];
2403 if (cookie.nfsuquad[0] == 0 &&
2404 cookie.nfsuquad[1] == 0) {
2405 *tl++ = 0;
2406 *tl++ = 0;
2407 } else {
2408 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2409 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2410 }
2411 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2412 *tl = txdr_unsigned(nmp->nm_rsize);
2413
2414 info.nmi_procp = uiop->uio_procp;
2415 info.nmi_cred = cred;
2416 error = nfs_request(vp, NFSPROC_READDIRPLUS, &info);
2417 nfsm_postop_attr(vp, attrflag);
2418 if (error) {
2419 m_freem(info.nmi_mrep);
2420 goto nfsmout;
2421 }
2422
2423 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2424 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2425 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2426 more_dirs = fxdr_unsigned(int, *tl);
2427
2428 /* loop thru the dir entries, doctoring them to 4bsd form */
2429 while (more_dirs && bigenough) {
2430 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2431 fileno = fxdr_hyper(tl);
2432 len = fxdr_unsigned(int, *(tl + 2));
2433 if (len <= 0 || len > NFS_MAXNAMLEN) {
2434 error = EBADRPC;
2435 m_freem(info.nmi_mrep);
2436 goto nfsmout;
2437 }
2438 tlen = DIRENT_RECSIZE(len) + NFS_DIRENT_OVERHEAD;
2439 left = NFS_READDIRBLKSIZ - blksiz;
2440 if (tlen > left) {
2441 dp->d_reclen += left;
2442 uiop->uio_iov->iov_base =
2443 (char *)uiop->uio_iov->iov_base + left;
2444 uiop->uio_iov->iov_len -= left;
2445 uiop->uio_resid -= left;
2446 blksiz = 0;
2447 }
2448 if (tlen > uiop->uio_resid)
2449 bigenough = 0;
2450 if (bigenough) {
2451 ndirp = (struct nfs_dirent *)
2452 uiop->uio_iov->iov_base;
2453 dp = &ndirp->dirent;
2454 dp->d_fileno = fileno;
2455 dp->d_namlen = len;
2456 dp->d_reclen = tlen;
2457 dp->d_type = DT_UNKNOWN;
2458 blksiz += tlen;
2459 if (blksiz == NFS_READDIRBLKSIZ)
2460 blksiz = 0;
2461 uiop->uio_resid -= NFS_DIRHDSIZ;
2462 uiop->uio_iov->iov_base =
2463 (char *)uiop->uio_iov->iov_base +
2464 NFS_DIRHDSIZ;
2465 uiop->uio_iov->iov_len -= NFS_DIRHDSIZ;
2466 cnp->cn_nameptr = uiop->uio_iov->iov_base;
2467 cnp->cn_namelen = len;
2468 nfsm_mtouio(uiop, len);
2469 cp = uiop->uio_iov->iov_base;
2470 tlen -= NFS_DIRHDSIZ + len;
2471 *cp = '\0';
2472 uiop->uio_iov->iov_base += tlen;
2473 uiop->uio_iov->iov_len -= tlen;
2474 uiop->uio_resid -= tlen;
2475 } else
2476 nfsm_adv(nfsm_rndup(len));
2477 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2478 if (bigenough) {
2479 ndirp->cookie[0] = cookie.nfsuquad[0] = *tl++;
2480 ndirp->cookie[1] = cookie.nfsuquad[1] = *tl++;
2481 } else
2482 tl += 2;
2483
2484 /*
2485 * Since the attributes are before the file handle
2486 * (sigh), we must skip over the attributes and then
2487 * come back and get them.
2488 */
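/*
 * Wire layout of one READDIRPLUS entry (RFC 1813 entryplus3): fileid
 * (8 bytes), name length + name, cookie (8 bytes), then the optional
 * name_attributes and the optional name_handle.  dpossav1/mdsav1 below
 * remember where the attributes start so they can be re-parsed once the
 * file handle is known.
 */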
2489 attrflag = fxdr_unsigned(int, *tl);
2490 if (attrflag) {
2491 dpossav1 = info.nmi_dpos;
2492 mdsav1 = info.nmi_md;
2493 nfsm_adv(NFSX_V3FATTR);
2494 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2495 doit = fxdr_unsigned(int, *tl);
2496 if (doit) {
2497 nfsm_getfh(fhp, fhsize, 1);
2498 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2499 vref(vp);
2500 newvp = vp;
2501 np = dnp;
2502 } else {
2503 error = nfs_nget(vp->v_mount,
2504 fhp, fhsize, &np);
2505 if (error)
2506 doit = 0;
2507 else
2508 newvp = NFSTOV(np);
2509 }
2510 }
2511 if (doit && bigenough) {
2512 dpossav2 = info.nmi_dpos;
2513 info.nmi_dpos = dpossav1;
2514 mdsav2 = info.nmi_md;
2515 info.nmi_md = mdsav1;
2516 nfsm_loadattr(newvp, NULL);
2517 info.nmi_dpos = dpossav2;
2518 info.nmi_md = mdsav2;
2519 dp->d_type = IFTODT(
2520 VTTOIF(np->n_vattr.va_type));
2521 if (cnp->cn_namelen <=
2522 NAMECACHE_MAXLEN) {
2523 ndp->ni_vp = newvp;
2524 cache_purge(ndp->ni_dvp);
2525 nfs_cache_enter(ndp->ni_dvp,
2526 ndp->ni_vp, cnp);
2527 }
2528 }
2529 } else {
2530 /* Just skip over the file handle */
2531 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2532 i = fxdr_unsigned(int, *tl);
2533 if (i > 0)
2534 nfsm_adv(nfsm_rndup(i));
2535 }
2536 if (newvp != NULLVP) {
2537 if (newvp == vp)
2538 vrele(newvp);
2539 else
2540 vput(newvp);
2541 newvp = NULLVP;
2542 }
2543 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2544 more_dirs = fxdr_unsigned(int, *tl);
2545 }
2546 /*
2547 * If at end of rpc data, get the eof boolean
2548 */
2549 if (!more_dirs) {
2550 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2551 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2552 }
2553 m_freem(info.nmi_mrep);
2554 }
2555 /*
2556 * Fill last record, iff any, out to a multiple of NFS_READDIRBLKSIZ
2557 * by increasing d_reclen for the last record.
2558 */
2559 if (blksiz > 0) {
2560 left = NFS_READDIRBLKSIZ - blksiz;
2561 dp->d_reclen += left;
2562 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base +
2563 left;
2564 uiop->uio_iov->iov_len -= left;
2565 uiop->uio_resid -= left;
2566 }
2567
2568 /*
2569 * We are now either at the end of the directory or have filled the
2570 * block.
2571 */
2572 if (bigenough) {
2573 dnp->n_direofoffset = fxdr_hyper(&cookie.nfsuquad[0]);
2574 if (end_of_directory) *end_of_directory = 1;
2575 } else {
2576 if (uiop->uio_resid > 0)
2577 printf("EEK! readdirplusrpc resid > 0\n");
2578 }
2579
2580nfsmout:
2581 if (newvp != NULLVP) {
2582 if (newvp == vp)
2583 vrele(newvp);
2584 else
2585 vput(newvp);
2586 }
2587 return (error);
2588}
2589
2590/*
2591 * Silly rename. To make the stateless NFS filesystem look a little more
2592 * like "ufs", a remove of an active vnode is translated into a rename to
2593 * a funny looking filename that is removed by nfs_inactive() on the
2594 * nfsnode. There is the potential for another process on a different
2595 * client to create the same funny name between the time nfs_lookitup()
2596 * fails and nfs_rename() completes, but...
2597 */
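/*
 * Example of a generated name: ".nfs3A94D20C517BF06E" (".nfs" followed by
 * two 32-bit random values printed as hex, per the snprintf below).  The
 * rename target lingers until nfs_inactive() removes it when the last
 * reference to the renamed vnode goes away.
 */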
2598int
2599nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
2600{
2601 struct sillyrename *sp;
2602 struct nfsnode *np;
2603 int error;
2604
2605 cache_purge(dvp);
2606 np = VTONFS(vp);
2607 sp = malloc(sizeof(*sp), M_NFSREQ, M_WAITOK);
2608 sp->s_cred = crdup(cnp->cn_cred);
2609 sp->s_dvp = dvp;
2610 vref(dvp);
2611
2612 if (vp->v_type == VDIR) {
2613#ifdef DIAGNOSTIC
2614 printf("nfs: sillyrename dir\n");
2615#endif
2616 error = EINVAL;
2617 goto bad;
2618 }
2619
2620 /* Try lookitups until we get one that isn't there */
2621 while (1) {
2622 /* Fudge together a funny name */
2623 u_int32_t rnd[2];
2624
2625 arc4random_buf(&rnd, sizeof rnd);
2626 sp->s_namlen = snprintf(sp->s_name, sizeof sp->s_name,
2627 ".nfs%08X%08X", rnd[0], rnd[1]);
2628 if (sp->s_namlen > sizeof sp->s_name)
2629 sp->s_namlen = strlen(sp->s_name);
2630
2631 if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2632 cnp->cn_proc, NULL))
2633 break;
2634 }
2635
2636 error = nfs_renameit(dvp, cnp, sp);
2637 if (error)
2638 goto bad;
2639 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2640 cnp->cn_proc, &np);
2641 np->n_sillyrename = sp;
2642 return (0);
2643bad:
2644 vrele(sp->s_dvp);
2645 crfree(sp->s_cred);
2646 free(sp, M_NFSREQ, sizeof(*sp));
2647 return (error);
2648}
2649
2650/*
2651 * Look up a file name and optionally either update the file handle or
2652 * allocate an nfsnode, depending on the value of npp.
2653 * npp == NULL --> just do the lookup
2654 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2655 * handled too
2656 * *npp != NULL --> update the file handle in the vnode
2657 */
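/*
 * nfs_sillyrename() above uses both forms: npp == NULL as a pure
 * existence probe while picking an unused ".nfs..." name, and &np to
 * refresh the nfsnode after the rename so the sillyrename record can be
 * attached to it.
 */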
2658int
2659nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred,
2660 struct proc *procp, struct nfsnode **npp)
2661{
2662 struct nfsm_info info;
2663 u_int32_t *tl;
2664 int32_t t1;
2665 struct vnode *newvp = NULL;
2666 struct nfsnode *np, *dnp = VTONFS(dvp);
2667 caddr_t cp2;
2668 int error = 0, fhlen, attrflag = 0;
2669 nfsfh_t *nfhp;
2670
2671 info.nmi_v3 = NFS_ISV3(dvp);
2672
2673 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2674 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + NFSX_UNSIGNED +
2675 nfsm_rndup(len));
2676 nfsm_fhtom(&info, dvp, info.nmi_v3);
2677 nfsm_strtom(name, len, NFS_MAXNAMLEN);
2678
2679 info.nmi_procp = procp;
2680 info.nmi_cred = cred;
2681 error = nfs_request(dvp, NFSPROC_LOOKUP, &info);
2682 if (error && !info.nmi_v3) {
2683 m_freem(info.nmi_mrep);
2684 goto nfsmout;
2685 }
2686
2687 if (npp && !error) {
2688 nfsm_getfh(nfhp, fhlen, info.nmi_v3);
2689 if (*npp) {
2690 np = *npp;
2691 np->n_fhp = &np->n_fh;
2692 bcopy(nfhp, np->n_fhp, fhlen);
2693 np->n_fhsize = fhlen;
2694 newvp = NFSTOV(np);
2695 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2696 vref(dvp);
2697 newvp = dvp;
2698 np = dnp;
2699 } else {
2700 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2701 if (error) {
2702 m_freem(info.nmi_mrep);
2703 return (error);
2704 }
2705 newvp = NFSTOV(np);
2706 }
2707 if (info.nmi_v3) {
2708 nfsm_postop_attr(newvp, attrflag);
2709 if (!attrflag && *npp == NULL) {
2710 m_freem(info.nmi_mrep);
2711 if (newvp == dvp)
2712 vrele(newvp);
2713 else
2714 vput(newvp);
2715 return (ENOENT);
2716 }
2717 } else
2718 nfsm_loadattr(newvp, NULL);
2719 }
2720 m_freem(info.nmi_mrep);
2721nfsmout:
2722 if (npp && *npp == NULL) {
2723 if (error) {
2724 if (newvp == dvp)
2725 vrele(newvp);
2726 else
2727 vput(newvp);
2728 } else
2729 *npp = np;
2730 }
2731 return (error);
2732}
2733
2734/*
2735 * Nfs Version 3 commit rpc
2736 */
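/*
 * COMMIT (RFC 1813) flushes data the server accepted with UNSTABLE writes.
 * The server returns its write verifier; if it differs from the copy cached
 * in nm_verf the server has likely rebooted and lost the unstable data, so
 * NFSERR_STALEWRITEVERF is returned and callers arrange for the affected
 * buffers to be written again (see nfs_clearcommit()).
 */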
2737int
2738nfs_commit(struct vnode *vp, u_quad_t offset, int cnt, struct proc *procp)
2739{
2740 struct nfsm_info info;
2741 u_int32_t *tl;
2742 int32_t t1;
2743 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2744 caddr_t cp2;
2745 int error = 0, wccflag = NFSV3_WCCRATTR;
2746
2747 if ((nmp->nm_flag & NFSMNT_HASWRITEVERF) == 0)
2748 return (0);
2749 nfsstats.rpccnt[NFSPROC_COMMIT]++;
2750 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(1));
2751 nfsm_fhtom(&info, vp, 1);
2752
2753 tl = nfsm_build(&info.nmi_mb, 3 * NFSX_UNSIGNED);
2754 txdr_hyper(offset, tl);
2755 tl += 2;
2756 *tl = txdr_unsigned(cnt);
2757
2758 info.nmi_procp = procp;
2759 info.nmi_cred = VTONFS(vp)->n_wcred;
2760 error = nfs_request(vp, NFSPROC_COMMIT, &info);
2761 nfsm_wcc_data(vp, wccflag);
2762
2763 if (!error) {
2764 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
2765 if (bcmp(nmp->nm_verf, tl,
2766 NFSX_V3WRITEVERF)) {
2767 bcopy(tl, nmp->nm_verf,
2768 NFSX_V3WRITEVERF);
2769 error = NFSERR_STALEWRITEVERF;
2770 }
2771 }
2772 m_freem(info.nmi_mrep);
2773
2774nfsmout:
2775 return (error);
2776}
2777
2778/*
2779 * Kludge City..
2780 * - make nfs_bmap() essentially a no-op that does no translation
2781 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
2782 * (Maybe I could use the process's page mapping, but I was concerned that
2783 * Kernel Write might not be enabled and also figured copyout() would do
2784 * a lot more work than bcopy() and also it currently happens in the
2785 * context of the swapper process (2).
2786 */
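/*
 * Example of the translation below: with an 8192-byte f_iosize,
 * btodb(8192) is 16, so logical block bn maps to physical block bn * 16
 * in DEV_BSIZE (512-byte) units; no real device geometry is involved.
 */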
2787int
2788nfs_bmap(void *v)
2789{
2790 struct vop_bmap_args *ap = v;
2791 struct vnode *vp = ap->a_vp;
2792
2793 if (ap->a_vpp != NULL)
2794 *ap->a_vpp = vp;
2795 if (ap->a_bnp != NULL)
2796 *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
2797 return (0);
2798}
2799
2800/*
2801 * Strategy routine.
2802 * For async requests when nfsiod(s) are running, queue the request by
2803 * calling nfs_asyncio(), otherwise just all nfs_doio() to do the
2804 * request.
2805 */
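/*
 * nfs_asyncio() returns non-zero when no nfsiod can take the buffer, in
 * which case the request falls back to a synchronous nfs_doio() in the
 * caller's context below.
 */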
2806int
2807nfs_strategy(void *v)
2808{
2809 struct vop_strategy_args *ap = v;
2810 struct buf *bp = ap->a_bp;
2811 struct proc *p;
2812 int error = 0;
2813
2814 if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC))
2815 panic("nfs physio/async");
2816 if (bp->b_flags & B_ASYNC)
2817 p = NULL;
2818 else
2819 p = curproc; /* XXX */
2820 /*
2821 * If the op is asynchronous and an i/o daemon is waiting,
2822 * queue the request, wake it up and wait for completion;
2823 * otherwise just do it ourselves.
2824 */
2825 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp, 0))
2826 error = nfs_doio(bp, p);
2827 return (error);
2828}
2829
2830/*
2831 * fsync vnode op. Just call nfs_flush() with commit == 1.
2832 */
2833int
2834nfs_fsync(void *v)
2835{
2836 struct vop_fsync_args *ap = v;
2837
2838 return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_p, 1));
2839}
2840
2841/*
2842 * Flush all the blocks associated with a vnode.
2843 * Walk through the buffer pool and push any dirty pages
2844 * associated with the vnode.
2845 */
2846int
2847nfs_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct proc *p,
2848 int commit)
2849{
2850 struct nfsnode *np = VTONFS(vp);
2851 struct buf *bp;
2852 int i;
2853 struct buf *nbp;
2854 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2855 uint64_t slptimeo = INFSLP;
2856 int s, error = 0, slpflag = 0, retv, bvecpos;
2857 int passone = 1;
2858 u_quad_t off = (u_quad_t)-1, endoff = 0, toff;
2859#ifndef NFS_COMMITBVECSIZ
2860#define NFS_COMMITBVECSIZ 20
2861#endif
2862 struct buf *bvec[NFS_COMMITBVECSIZ];
2863
2864 if (nmp->nm_flag & NFSMNT_INT)
2865 slpflag = PCATCH;
2866 if (!commit)
2867 passone = 0;
2868 /*
2869 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2870 * server, but has not been committed to stable storage on the server
2871 * yet. On the first pass, the byte range is worked out and the commit
2872 * rpc is done. On the second pass, nfs_writebp() is called to do the
2873 * job.
2874 */
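/*
 * In outline: each pass first gathers up to NFS_COMMITBVECSIZ such
 * buffers and covers them with a single COMMIT over [off, endoff); the
 * first pass then only starts asynchronous writes for buffers that do
 * not need a commit, and the second pass (passone cleared) pushes
 * whatever is still dirty through VOP_BWRITE().
 */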
2875again:
2876 bvecpos = 0;
2877 if (NFS_ISV3(vp) && commit) {
2878 s = splbio();
2879 LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
2880 if (bvecpos >= NFS_COMMITBVECSIZ)
2881 break;
2882 if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
2883 != (B_DELWRI | B_NEEDCOMMIT))
2884 continue;
2885 bremfree(bp);
2886 bp->b_flags |= B_WRITEINPROG;
2887 buf_acquire(bp);
2888
2889 /*
2890 * A list of these buffers is kept so that the
2891 * second loop knows which buffers have actually
2892 * been committed. This is necessary, since there
2893 * may be a race between the commit rpc and new
2894 * uncommitted writes on the file.
2895 */
2896 bvec[bvecpos++] = bp;
2897 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2898 bp->b_dirtyoff;
2899 if (toff < off)
2900 off = toff;
2901 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2902 if (toff > endoff)
2903 endoff = toff;
2904 }
2905 splx(s);
2906 }
2907 if (bvecpos > 0) {
2908 /*
2909 * Commit data on the server, as required.
2910 */
2911 bcstats.pendingwrites++;
2912 bcstats.numwrites++;
2913 retv = nfs_commit(vp, off, (int)(endoff - off), p);
2914 if (retv == NFSERR_STALEWRITEVERF)
2915 nfs_clearcommit(vp->v_mount);
2916 /*
2917 * Now, either mark the blocks I/O done or mark the
2918 * blocks dirty, depending on whether the commit
2919 * succeeded.
2920 */
2921 for (i = 0; i < bvecpos; i++) {
2922 bp = bvec[i];
2923 bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG);
2924 if (retv) {
2925 if (i == 0)
2926 bcstats.pendingwrites--;
2927 brelse(bp);
2928 } else {
2929 if (i > 0)
2930 bcstats.pendingwrites++;
2931 s = splbio();
2932 buf_undirty(bp);
2933 vp->v_numoutput++;
2934 bp->b_flags |= B_ASYNC;
2935 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
2936 bp->b_dirtyoff = bp->b_dirtyend = 0;
2937 biodone(bp);
2938 splx(s);
2939 }
2940 }
2941 }
2942
2943 /*
2944 * Start/do any write(s) that are required.
2945 */
2946loop:
2947 s = splbio();
2948 LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
2949 if (bp->b_flags & B_BUSY) {
2950 if (waitfor != MNT_WAIT || passone)
2951 continue;
2952 bp->b_flags |= B_WANTED;
2953 error = tsleep_nsec(bp, slpflag | (PRIBIO + 1),
2954 "nfsfsync", slptimeo);
2955 splx(s);
2956 if (error) {
2957 if (nfs_sigintr(nmp, NULL, p))
2958 return (EINTR);
2959 if (slpflag == PCATCH) {
2960 slpflag = 0;
2961 slptimeo = SEC_TO_NSEC(2);
2962 }
2963 }
2964 goto loop;
2965 }
2966 if ((bp->b_flags & B_DELWRI) == 0)
2967 panic("nfs_fsync: not dirty");
2968 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT))
2969 continue;
2970 bremfree(bp);
2971 if (passone || !commit) {
2972 bp->b_flags |= B_ASYNC;
2973 } else {
2974 bp->b_flags |= (B_ASYNC|B_WRITEINPROG|B_NEEDCOMMIT);
2975 }
2976 buf_acquire(bp);
2977 splx(s);
2978 VOP_BWRITE(bp);
2979 goto loop;
2980 }
2981 splx(s);
2982 if (passone) {
2983 passone = 0;
2984 goto again;
2985 }
2986 if (waitfor == MNT_WAIT) {
2987 loop2:
2988 s = splbio();
2989 error = vwaitforio(vp, slpflag, "nfs_fsync", slptimeo);
2990 splx(s);
2991 if (error) {
2992 if (nfs_sigintr(nmp, NULL, p))
2993 return (EINTR);
2994 if (slpflag == PCATCH) {
2995 slpflag = 0;
2996 slptimeo = SEC_TO_NSEC(2);
2997 }
2998 goto loop2;
2999 }
3000
3001 if (!LIST_EMPTY(&vp->v_dirtyblkhd) && commit) {
3002#if 0
3003 vprint("nfs_fsync: dirty", vp);
3004#endif
3005 goto loop;
3006 }
3007 }
3008 if (np->n_flag & NWRITEERR) {
3009 error = np->n_error;
3010 np->n_flag &= ~NWRITEERR;
3011 }
3012 return (error);
3013}
3014
3015/*
3016 * Return POSIX pathconf information applicable to nfs.
3017 * Fake it. For v3 we could ask the server, but such code
3018 * hasn't been written yet.
3019 */
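/*
 * NFSv3 does define a PATHCONF RPC (RFC 1813) that reports linkmax,
 * name_max, no_trunc and chown_restricted from the server; the values
 * below are fixed client-side answers instead.
 */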
3020/* ARGSUSED */
3021int
3022nfs_pathconf(void *v)
3023{
3024 struct vop_pathconf_args *ap = v;
3025 struct nfsmount *nmp = VFSTONFS(ap->a_vp->v_mount);
3026 int error = 0;
3027
3028 switch (ap->a_name) {
3029 case _PC_LINK_MAX:
3030 *ap->a_retval = LINK_MAX;
3031 break;
3032 case _PC_NAME_MAX:
3033 *ap->a_retval = NAME_MAX;
3034 break;
3035 case _PC_CHOWN_RESTRICTED:
3036 *ap->a_retval = 1;
3037 break;
3038 case _PC_NO_TRUNC:
3039 *ap->a_retval = 1;
3040 break;
3041 case _PC_ALLOC_SIZE_MIN:
3042 *ap->a_retval = NFS_FABLKSIZE;
3043 break;
3044 case _PC_FILESIZEBITS:
3045 *ap->a_retval = 64;
3046 break;
3047 case _PC_REC_INCR_XFER_SIZE:
3048 *ap->a_retval = min(nmp->nm_rsize, nmp->nm_wsize);
3049 break;
3050 case _PC_REC_MAX_XFER_SIZE:
3051 *ap->a_retval = -1; /* means ``unlimited'' */
3052 break;
3053 case _PC_REC_MIN_XFER_SIZE:
3054 *ap->a_retval = min(nmp->nm_rsize, nmp->nm_wsize);
3055 break;
3056 case _PC_REC_XFER_ALIGN:
3057 *ap->a_retval = PAGE_SIZE;
3058 break;
3059 case _PC_SYMLINK_MAX:
3060 *ap->a_retval = MAXPATHLEN;
3061 break;
3062 case _PC_2_SYMLINKS:
3063 *ap->a_retval = 1;
3064 break;
3065 case _PC_TIMESTAMP_RESOLUTION:
3066 *ap->a_retval = NFS_ISV3(ap->a_vp) ? 1 : 1000;
3067 break;
3068 default:
3069 error = EINVAL;
3070 break;
3071 }
3072
3073 return (error);
3074}
3075
3076/*
3077 * NFS advisory byte-level locks.
3078 */
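/*
 * Locks are kept entirely on the client via lf_advlock(); nothing is sent
 * to the server, so locks taken here are not seen by other clients of the
 * same server.
 */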
3079int
3080nfs_advlock(void *v)
3081{
3082 struct vop_advlock_args *ap = v;
3083 struct nfsnode *np = VTONFS(ap->a_vp);
3084
3085 return (lf_advlock(&np->n_lockf, np->n_size, ap->a_id, ap->a_op,
3086 ap->a_fl, ap->a_flags));
3087}
3088
3089/*
3090 * Print out the contents of an nfsnode.
3091 */
3092int
3093nfs_print(void *v)
3094{
3095 struct vop_print_args *ap = v;
3096 struct vnode *vp = ap->a_vp;
3097 struct nfsnode *np = VTONFS(vp);
3098
3099 printf("tag VT_NFS, fileid %lld fsid 0x%lx",
3100 np->n_vattr.va_fileid, np->n_vattr.va_fsid);
3101#ifdef FIFO
3102 if (vp->v_type == VFIFO)
3103 fifo_printinfo(vp);
3104#endif
3105 printf("\n");
3106 return (0);
3107}
3108
3109/*
3110 * Just call nfs_writebp() with the force argument set to 1.
3111 */
3112int
3113nfs_bwrite(void *v)
3114{
3115 struct vop_bwrite_args *ap = v;
3116
3117 return (nfs_writebp(ap->a_bp, 1));
3118}
3119
3120/*
3121 * This is a clone of vop_generic_bwrite(), except that B_WRITEINPROG isn't set unless
3122 * the force flag is set, and it also handles the B_NEEDCOMMIT flag.
3123 */
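/*
 * When the buffer only needs a commit (B_NEEDCOMMIT set, no write in
 * progress), a COMMIT RPC is attempted first, widened to the whole
 * to-be-committed range when possible; if it succeeds the data never has
 * to be rewritten and the buffer is finished with biodone(), otherwise
 * the buffer falls through to a real write via VOP_STRATEGY().
 */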
3124int
3125nfs_writebp(struct buf *bp, int force)
3126{
3127 int oldflags = bp->b_flags, retv = 1;
3128 struct proc *p = curproc; /* XXX */
3129 off_t off;
3130 size_t cnt;
3131 int s;
3132 struct vnode *vp;
3133 struct nfsnode *np;
3134
3135 if (!(bp->b_flags & B_BUSY))
3136 panic("bwrite: buffer is not busy???");
3137
3138 vp = bp->b_vp;
3139 np = VTONFS(vp);
3140
3141 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
3142
3143 s = splbio();
3144 buf_undirty(bp);
3145
3146 if ((oldflags & B_ASYNC) && !(oldflags & B_DELWRI) && p)
3147 ++p->p_ru.ru_oublock;
3148
3149 bp->b_vp->v_numoutput++;
3150 splx(s);
3151
3152 /*
3153 * If B_NEEDCOMMIT is set, a commit rpc may do the trick. If not,
3154 * an actual write will have to be scheduled via VOP_STRATEGY().
3155 * If B_WRITEINPROG is already set, then push it with a write anyhow.
3156 */
3157 if ((oldflags & (B_NEEDCOMMIT | B_WRITEINPROG)) == B_NEEDCOMMIT) {
3158 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
3159 cnt = bp->b_dirtyend - bp->b_dirtyoff;
3160
3161 rw_enter_write(&np->n_commitlock);
3162 if (!(bp->b_flags & B_NEEDCOMMIT)) {
3163 rw_exit_write(&np->n_commitlock);
3164 return (0);
3165 }
3166
3167 /*
3168 * If it's already been committed by somebody else,
3169 * bail.
3170 */
3171 if (!nfs_in_committed_range(vp, bp)) {
3172 int pushedrange = 0;
3173 /*
3174 * Since we're going to do this, push as much
3175 * as we can.
3176 */
3177
3178 if (nfs_in_tobecommitted_range(vp, bp)) {
3179 pushedrange = 1;
3180 off = np->n_pushlo;
3181 cnt = np->n_pushhi - np->n_pushlo;
3182 }
3183
3184 bp->b_flags |= B_WRITEINPROG;
3185 bcstats.pendingwrites++;
3186 bcstats.numwrites++;
3187 retv = nfs_commit(bp->b_vp, off, cnt, curproc);
3188 bp->b_flags &= ~B_WRITEINPROG;
3189
3190 if (retv == 0) {
3191 if (pushedrange)
3192 nfs_merge_commit_ranges(vp);
3193 else
3194 nfs_add_committed_range(vp, bp);
3195 } else
3196 bcstats.pendingwrites--;
3197 } else
3198 retv = 0; /* It has already been committed. */
3199
3200 rw_exit_write(&np->n_commitlock);
3201 if (!retv) {
3202 bp->b_dirtyoff = bp->b_dirtyend = 0;
3203 bp->b_flags &= ~B_NEEDCOMMIT;
3204 s = splbio();
3205 biodone(bp);
3206 splx(s);
3207 } else if (retv == NFSERR_STALEWRITEVERF)
3208 nfs_clearcommit(bp->b_vp->v_mount);
3209 }
3210 if (retv) {
3211 buf_flip_dma(bp);
3212 if (force)
3213 bp->b_flags |= B_WRITEINPROG;
3214 VOP_STRATEGY(bp->b_vp, bp);
3215 }
3216
3217 if ((oldflags & B_ASYNC) == 0) {
3218 int rtval;
3219
3220 bp->b_flags |= B_RAW;
3221 rtval = biowait(bp);
3222 if (!(oldflags & B_DELWRI) && p) {
3223 ++p->p_ru.ru_oublock;
3224 }
3225 brelse(bp);
3226 return (rtval);
3227 }
3228
3229 return (0);
3230}
3231
3232/*
3233 * nfs special file access vnode op.
3234 * Essentially just get vattr and then imitate iaccess() since the device is
3235 * local to the client.
3236 */
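/*
 * Device nodes on an NFS mount name devices on the client, so instead of
 * an ACCESS RPC the mode is checked locally with vaccess() against the
 * attributes fetched by VOP_GETATTR() below.
 */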
3237int
3238nfsspec_access(void *v)
3239{
3240 struct vop_access_args *ap = v;
3241 struct vattr va;
3242 struct vnode *vp = ap->a_vp;
3243 int error;
3244
3245 /*
3246 * Disallow write attempts on filesystems mounted read-only;
3247 * unless the file is a socket, fifo, or a block or character
3248 * device resident on the filesystem.
3249 */
3250 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3251 switch (vp->v_type) {
3252 case VREG:
3253 case VDIR:
3254 case VLNK:
3255 return (EROFS);
3256 default:
3257 break;
3258 }
3259 }
3260
3261 error = VOP_GETATTR(vp, &va, ap->a_cred, ap->a_p);
3262 if (error)
3263 return (error);
3264
3265 return (vaccess(vp->v_type, va.va_mode, va.va_uid, va.va_gid,
3266 ap->a_mode, ap->a_cred));
3267}
3268
3269int
3270nfs_poll(void *v)
3271{
3272 struct vop_poll_args *ap = v;
3273
3274 /*
3275 * We should really check to see if I/O is possible.
3276 */
3277 return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
3278}
3279
3280/*
3281 * Read wrapper for special devices.
3282 */
3283int
3284nfsspec_read(void *v)
3285{
3286 struct vop_read_args *ap = v;
3287 struct nfsnode *np = VTONFS(ap->a_vp);
3288
3289 /*
3290 * Set access flag.
3291 */
3292 np->n_flag |= NACC;
3293 getnanotime(&np->n_atim);
3294 return (spec_read(ap));
3295}
3296
3297/*
3298 * Write wrapper for special devices.
3299 */
3300int
3301nfsspec_write(void *v)
3302{
3303 struct vop_write_args *ap = v;
3304 struct nfsnode *np = VTONFS(ap->a_vp);
3305
3306 /*
3307 * Set update flag.
3308 */
3309 np->n_flag |= NUPD;
3310 getnanotime(&np->n_mtim);
3311 return (spec_write(ap));
3312}
3313
3314/*
3315 * Close wrapper for special devices.
3316 *
3317 * Update the times on the nfsnode then do device close.
3318 */
3319int
3320nfsspec_close(void *v)
3321{
3322 struct vop_close_args *ap = v;
3323 struct vnode *vp = ap->a_vp;
3324 struct nfsnode *np = VTONFS(vp);
3325 struct vattr vattr;
3326
3327 if (np->n_flag & (NACC | NUPD)) {
3328 np->n_flag |= NCHG;
3329 if (vp->v_usecount == 1 &&
3330 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3331 VATTR_NULL(&vattr);
3332 if (np->n_flag & NACC)
3333 vattr.va_atime = np->n_atim;
3334 if (np->n_flag & NUPD)
3335 vattr.va_mtime = np->n_mtim;
3336 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3337 }
3338 }
3339 return (spec_close(ap));
3340}
3341
3342#ifdef FIFO
3343/*
3344 * Read wrapper for fifos.
3345 */
3346int
3347nfsfifo_read(void *v)
3348{
3349 struct vop_read_args *ap = v;
3350 struct nfsnode *np = VTONFS(ap->a_vp);
3351
3352 /*
3353 * Set access flag.
3354 */
3355 np->n_flag |= NACC;
3356 getnanotime(&np->n_atim);
3357 return (fifo_read(ap));
3358}
3359
3360/*
3361 * Write wrapper for fifos.
3362 */
3363int
3364nfsfifo_write(void *v)
3365{
3366 struct vop_write_args *ap = v;
3367 struct nfsnode *np = VTONFS(ap->a_vp);
3368
3369 /*
3370 * Set update flag.
3371 */
3372 np->n_flag |= NUPD;
3373 getnanotime(&np->n_mtim);
3374 return (fifo_write(ap));
3375}
3376
3377/*
3378 * Close wrapper for fifos.
3379 *
3380 * Update the times on the nfsnode then do fifo close.
3381 */
3382int
3383nfsfifo_close(void *v)
3384{
3385 struct vop_close_args *ap = v;
3386 struct vnode *vp = ap->a_vp;
3387 struct nfsnode *np = VTONFS(vp);
3388 struct vattr vattr;
3389
3390 if (np->n_flag & (NACC | NUPD)) {
3391 if (np->n_flag & NACC) {
3392 getnanotime(&np->n_atim);
3393 }
3394 if (np->n_flag & NUPD) {
3395 getnanotime(&np->n_mtim);
3396 }
3397 np->n_flag |= NCHG;
3398 if (vp->v_usecount == 1 &&
3399 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3400 VATTR_NULL(&vattr);
3401 if (np->n_flag & NACC)
3402 vattr.va_atime = np->n_atim;
3403 if (np->n_flag & NUPD)
3404 vattr.va_mtime = np->n_mtim;
3405 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3406 }
3407 }
3408 return (fifo_close(ap));
3409}
3410
3411int
3412nfsfifo_reclaim(void *v)
3413{
3414 fifo_reclaim(v);
3415 return (nfs_reclaim(v));
3416}
3417#endif /* ! FIFO */