Bug Summary

File: nfs/nfs_vnops.c
Warning: line 1364, column 3
Assigned value is garbage or undefined
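
The flagged statement itself (nfs_vnops.c line 1364) lies beyond the excerpt below. For orientation only: this warning class comes from the core uninitialized-value checkers enabled in the invocation shown further down, and it fires when the right-hand side of an assignment may be read before any execution path has written it. A minimal, hypothetical C sketch of the pattern (not taken from nfs_vnops.c):

	/* Hypothetical example of "Assigned value is garbage or undefined". */
	int
	copy_flag(int have_value)
	{
		int src;	/* written only when have_value is nonzero */
		int dst;

		if (have_value)
			src = 1;
		dst = src;	/* analyzer: src may be uninitialized here */
		return (dst);
	}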

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name nfs_vnops.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/nfs/nfs_vnops.c
1/* $OpenBSD: nfs_vnops.c,v 1.193 2023/04/26 10:00:37 beck Exp $ */
2/* $NetBSD: nfs_vnops.c,v 1.62.4.1 1996/07/08 20:26:52 jtc Exp $ */
3
4/*
5 * Copyright (c) 1989, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * Rick Macklem at The University of Guelph.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
36 */
37
38
39/*
40 * vnode op calls for Sun NFS version 2 and 3
41 */
42
43#include <sys/param.h>
44#include <sys/kernel.h>
45#include <sys/systm.h>
46#include <sys/resourcevar.h>
47#include <sys/proc.h>
48#include <sys/mount.h>
49#include <sys/buf.h>
50#include <sys/malloc.h>
51#include <sys/pool.h>
52#include <sys/mbuf.h>
53#include <sys/conf.h>
54#include <sys/namei.h>
55#include <sys/vnode.h>
56#include <sys/lock.h>
57#include <sys/dirent.h>
58#include <sys/fcntl.h>
59#include <sys/lockf.h>
60#include <sys/queue.h>
61#include <sys/specdev.h>
62#include <sys/unistd.h>
63
64#include <miscfs/fifofs/fifo.h>
65
66#include <nfs/rpcv2.h>
67#include <nfs/nfsproto.h>
68#include <nfs/nfs.h>
69#include <nfs/nfsnode.h>
70#include <nfs/nfsmount.h>
71#include <nfs/xdr_subs.h>
72#include <nfs/nfsm_subs.h>
73#include <nfs/nfs_var.h>
74
75#include <uvm/uvm_extern.h>
76
77#include <netinet/in.h>
78
79int nfs_access(void *);
80int nfs_advlock(void *);
81int nfs_bmap(void *);
82int nfs_bwrite(void *);
83int nfs_close(void *);
84int nfs_commit(struct vnode *, u_quad_t, int, struct proc *);
85int nfs_create(void *);
86int nfs_flush(struct vnode *, struct ucred *, int, struct proc *, int);
87int nfs_fsync(void *);
88int nfs_getattr(void *);
89int nfs_getreq(struct nfsrv_descript *, struct nfsd *, int);
90int nfs_islocked(void *);
91int nfs_link(void *);
92int nfs_lock(void *);
93int nfs_lookitup(struct vnode *, char *, int, struct ucred *, struct proc *,
94 struct nfsnode **);
95int nfs_lookup(void *);
96int nfs_mkdir(void *);
97int nfs_mknod(void *);
98int nfs_mknodrpc(struct vnode *, struct vnode **, struct componentname *,
99 struct vattr *);
100int nfs_null(struct vnode *, struct ucred *, struct proc *);
101int nfs_open(void *);
102int nfs_pathconf(void *);
103int nfs_print(void *);
104int nfs_read(void *);
105int nfs_readdir(void *);
106int nfs_readdirplusrpc(struct vnode *, struct uio *, struct ucred *, int *,
107 struct proc *);
108int nfs_readdirrpc(struct vnode *, struct uio *, struct ucred *, int *);
109int nfs_remove(void *);
110int nfs_removerpc(struct vnode *, char *, int, struct ucred *, struct proc *);
111int nfs_rename(void *);
112int nfs_renameit(struct vnode *, struct componentname *, struct sillyrename *);
113int nfs_renamerpc(struct vnode *, char *, int, struct vnode *, char *, int,
114 struct ucred *, struct proc *);
115int nfs_rmdir(void *);
116int nfs_setattr(void *);
117int nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *,
118 struct proc *);
119int nfs_sillyrename(struct vnode *, struct vnode *,
120 struct componentname *);
121int nfs_strategy(void *);
122int nfs_symlink(void *);
123int nfs_unlock(void *);
124
125void nfs_cache_enter(struct vnode *, struct vnode *, struct componentname *);
126
127int nfsfifo_close(void *);
128int nfsfifo_read(void *);
129int nfsfifo_reclaim(void *);
130int nfsfifo_write(void *);
131
132int nfsspec_access(void *);
133int nfsspec_close(void *);
134int nfsspec_read(void *);
135int nfsspec_write(void *);
136
137/* Global vfs data structures for nfs. */
138const struct vops nfs_vops = {
139 .vop_lookup = nfs_lookup,
140 .vop_create = nfs_create,
141 .vop_mknod = nfs_mknod,
142 .vop_open = nfs_open,
143 .vop_close = nfs_close,
144 .vop_access = nfs_access,
145 .vop_getattr = nfs_getattr,
146 .vop_setattr = nfs_setattr,
147 .vop_read = nfs_read,
148 .vop_write = nfs_write,
149 .vop_ioctl = nfs_ioctl((int (*)(void *))enoioctl),
150 .vop_kqfilter = nfs_kqfilter,
151 .vop_revoke = vop_generic_revoke,
152 .vop_fsync = nfs_fsync,
153 .vop_remove = nfs_remove,
154 .vop_link = nfs_link,
155 .vop_rename = nfs_rename,
156 .vop_mkdir = nfs_mkdir,
157 .vop_rmdir = nfs_rmdir,
158 .vop_symlink = nfs_symlink,
159 .vop_readdir = nfs_readdir,
160 .vop_readlink = nfs_readlink,
161 .vop_abortop = vop_generic_abortop,
162 .vop_inactive = nfs_inactive,
163 .vop_reclaim = nfs_reclaim,
164 .vop_lock = nfs_lock,
165 .vop_unlock = nfs_unlock,
166 .vop_bmap = nfs_bmap,
167 .vop_strategy = nfs_strategy,
168 .vop_print = nfs_print,
169 .vop_islocked = nfs_islocked,
170 .vop_pathconf = nfs_pathconf,
171 .vop_advlock = nfs_advlock,
172 .vop_bwrite = nfs_bwrite
173};
174
175/* Special device vnode ops. */
176const struct vops nfs_specvops = {
177 .vop_close = nfsspec_close,
178 .vop_access = nfsspec_access,
179 .vop_getattr = nfs_getattr,
180 .vop_setattr = nfs_setattr,
181 .vop_read = nfsspec_read,
182 .vop_write = nfsspec_write,
183 .vop_fsync = nfs_fsync,
184 .vop_inactive = nfs_inactive,
185 .vop_reclaim = nfs_reclaim,
186 .vop_lock = nfs_lock,
187 .vop_unlock = nfs_unlock,
188 .vop_print = nfs_print,
189 .vop_islocked = nfs_islocked,
190
191 /* XXX: Keep in sync with spec_vops. */
192 .vop_lookup = vop_generic_lookup,
193 .vop_create = vop_generic_badop,
194 .vop_mknod = vop_generic_badop,
195 .vop_open = spec_open,
196 .vop_ioctl = spec_ioctl,
197 .vop_kqfilter = spec_kqfilter,
198 .vop_revoke = vop_generic_revoke,
199 .vop_remove = vop_generic_badop,
200 .vop_link = vop_generic_badop,
201 .vop_rename = vop_generic_badop,
202 .vop_mkdir = vop_generic_badop,
203 .vop_rmdir = vop_generic_badop,
204 .vop_symlink = vop_generic_badop,
205 .vop_readdir = vop_generic_badop,
206 .vop_readlink = vop_generic_badop,
207 .vop_abortop = vop_generic_badop,
208 .vop_bmap = vop_generic_bmap,
209 .vop_strategy = spec_strategy,
210 .vop_pathconf = spec_pathconf,
211 .vop_advlock = spec_advlock,
212 .vop_bwrite = vop_generic_bwrite,
213};
214
215#ifdef FIFO1
216const struct vops nfs_fifovops = {
217 .vop_close = nfsfifo_close,
218 .vop_access = nfsspec_access,
219 .vop_getattr = nfs_getattr,
220 .vop_setattr = nfs_setattr,
221 .vop_read = nfsfifo_read,
222 .vop_write = nfsfifo_write,
223 .vop_fsync = nfs_fsync,
224 .vop_inactive = nfs_inactive,
225 .vop_reclaim = nfsfifo_reclaim,
226 .vop_lock = nfs_lock,
227 .vop_unlock = nfs_unlock,
228 .vop_print = nfs_print,
229 .vop_islocked = nfs_islocked,
230 .vop_bwrite = vop_generic_bwrite,
231
232 /* XXX: Keep in sync with fifo_vops. */
233 .vop_lookup = vop_generic_lookup,
234 .vop_create = vop_generic_badop,
235 .vop_mknod = vop_generic_badop,
236 .vop_open = fifo_open,
237 .vop_ioctl = fifo_ioctl,
238 .vop_kqfilter = fifo_kqfilter,
239 .vop_revoke = vop_generic_revoke,
240 .vop_remove = vop_generic_badop,
241 .vop_link = vop_generic_badop,
242 .vop_rename = vop_generic_badop,
243 .vop_mkdir = vop_generic_badop,
244 .vop_rmdir = vop_generic_badop,
245 .vop_symlink = vop_generic_badop,
246 .vop_readdir = vop_generic_badop,
247 .vop_readlink = vop_generic_badop,
248 .vop_abortop = vop_generic_badop,
249 .vop_bmap = vop_generic_bmap,
250 .vop_strategy = vop_generic_badop,
251 .vop_pathconf = fifo_pathconf,
252 .vop_advlock = fifo_advlock,
253};
254#endif /* FIFO */
255
256/*
257 * Global variables
258 */
259extern u_int32_t nfs_true, nfs_false;
260extern u_int32_t nfs_xdrneg1;
261extern struct nfsstats nfsstats;
262extern nfstype nfsv3_type[9];
263int nfs_numasync = 0;
264
265void
266nfs_cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
267{
268 struct nfsnode *np;
269
270 if (vp != NULL((void *)0)) {
271 np = VTONFS(vp)((struct nfsnode *)(vp)->v_data);
272 np->n_ctime = np->n_vattr.va_ctime.tv_sec;
273 } else {
274 np = VTONFS(dvp)((struct nfsnode *)(dvp)->v_data);
275 if (!np->n_ctime)
276 np->n_ctime = np->n_vattr.va_mtime.tv_sec;
277 }
278
279 cache_enter(dvp, vp, cnp);
280}
281
282/*
283 * nfs null call from vfs.
284 */
285int
286nfs_null(struct vnode *vp, struct ucred *cred, struct proc *procp)
287{
288 struct nfsm_info info;
289 int error = 0;
290
291 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(0);
292 error = nfs_request(vp, NFSPROC_NULL0, &info);
293 m_freem(info.nmi_mrep);
294 return (error);
295}
296
297/*
298 * nfs access vnode op.
299 * For nfs version 2, just return ok. File accesses may fail later.
300 * For nfs version 3, use the access rpc to check accessibility. If file modes
301 * are changed on the server, accesses might still fail later.
302 */
303int
304nfs_access(void *v)
305{
306 struct vop_access_args *ap = v;
307 struct vnode *vp = ap->a_vp;
308 u_int32_t *tl;
309 int32_t t1;
310 caddr_t cp2;
311 int error = 0, attrflag;
312 u_int32_t mode, rmode;
313 int v3 = NFS_ISV3(vp);
314 int cachevalid;
315 struct nfsm_info info;
316
317 struct nfsnode *np = VTONFS(vp)((struct nfsnode *)(vp)->v_data);
318
319 /*
320 * Disallow write attempts on filesystems mounted read-only;
321 * unless the file is a socket, fifo, or a block or character
322 * device resident on the filesystem.
323 */
324 if ((ap->a_mode & VWRITE00200) && (vp->v_mount->mnt_flag & MNT_RDONLY0x00000001)) {
325 switch (vp->v_type) {
326 case VREG:
327 case VDIR:
328 case VLNK:
329 return (EROFS30);
330 default:
331 break;
332 }
333 }
334
335 /*
336 * Check access cache first. If a request has been made for this uid
337 * shortly before, use the cached result.
338 */
339 cachevalid = (np->n_accstamp != -1 &&
340 (gettime() - np->n_accstamp) < nfs_attrtimeo(np) &&
341 np->n_accuid == ap->a_cred->cr_uid);
342
343 if (cachevalid) {
344 if (!np->n_accerror) {
345 if ((np->n_accmode & ap->a_mode) == ap->a_mode)
346 return (np->n_accerror);
347 } else if ((np->n_accmode & ap->a_mode) == np->n_accmode)
348 return (np->n_accerror);
349 }
350
351 /*
352 * For nfs v3, do an access rpc, otherwise you are stuck emulating
353 * ufs_access() locally using the vattr. This may not be correct,
354 * since the server may apply other access criteria such as
355 * client uid-->server uid mapping that we do not know about, but
356 * this is better than just returning anything that is lying about
357 * in the cache.
358 */
359 if (v3) {
360 nfsstats.rpccnt[NFSPROC_ACCESS4]++;
361 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(v3)((v3) ? (64 + 4) : 32) + NFSX_UNSIGNED4);
362 nfsm_fhtom(&info, vp, v3);
363 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED4);
364 if (ap->a_mode & VREAD00400)
365 mode = NFSV3ACCESS_READ0x01;
366 else
367 mode = 0;
368 if (vp->v_type == VDIR) {
369 if (ap->a_mode & VWRITE00200)
370 mode |= (NFSV3ACCESS_MODIFY0x04 | NFSV3ACCESS_EXTEND0x08 |
371 NFSV3ACCESS_DELETE0x10);
372 if (ap->a_mode & VEXEC00100)
373 mode |= NFSV3ACCESS_LOOKUP0x02;
374 } else {
375 if (ap->a_mode & VWRITE00200)
376 mode |= (NFSV3ACCESS_MODIFY0x04 | NFSV3ACCESS_EXTEND0x08);
377 if (ap->a_mode & VEXEC00100)
378 mode |= NFSV3ACCESS_EXECUTE0x20;
379 }
380 *tl = txdr_unsigned(mode);
381
382 info.nmi_procp = ap->a_p;
383 info.nmi_cred = ap->a_cred;
384 error = nfs_request(vp, NFSPROC_ACCESS4, &info);
385
386 nfsm_postop_attr(vp, attrflag);
387 if (error) {
388 m_freem(info.nmi_mrep);
389 goto nfsmout;
390 }
391
392 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
393 rmode = fxdr_unsigned(u_int32_t, *tl);
394 /*
395 * The NFS V3 spec does not clarify whether or not
396 * the returned access bits can be a superset of
397 * the ones requested, so...
398 */
399 if ((rmode & mode) != mode)
400 error = EACCES13;
401
402 m_freem(info.nmi_mrep);
403 } else
404 return (nfsspec_access(ap));
405
406
407 /*
408 * If we got the same result as for a previous, different request, OR
409 * it in. Don't update the timestamp in that case.
410 */
411 if (!error || error == EACCES13) {
412 if (cachevalid && np->n_accstamp != -1 &&
413 error == np->n_accerror) {
414 if (!error)
415 np->n_accmode |= ap->a_mode;
416 else {
417 if ((np->n_accmode & ap->a_mode) == ap->a_mode)
418 np->n_accmode = ap->a_mode;
419 }
420 } else {
421 np->n_accstamp = gettime();
422 np->n_accuid = ap->a_cred->cr_uid;
423 np->n_accmode = ap->a_mode;
424 np->n_accerror = error;
425 }
426 }
427nfsmout:
428 return (error);
429}
430
431/*
432 * nfs open vnode op
433 * Check to see if the type is ok
434 * and that deletion is not in progress.
435 * For paged in text files, you will need to flush the page cache
436 * if consistency is lost.
437 */
438int
439nfs_open(void *v)
440{
441 struct vop_open_args *ap = v;
442 struct vnode *vp = ap->a_vp;
443 struct nfsnode *np = VTONFS(vp)((struct nfsnode *)(vp)->v_data);
444 struct vattr vattr;
445 int error;
446
447 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
448#ifdef DIAGNOSTIC1
449 printf("open eacces vtyp=%d\n",vp->v_type);
450#endif
451 return (EACCES13);
452 }
453
454 /*
455 * Initialize read and write creds here, for swapfiles
456 * and other paths that don't set the creds themselves.
457 */
458
459 if (ap->a_mode & FREAD0x0001) {
460 if (np->n_rcred) {
461 crfree(np->n_rcred);
462 }
463 np->n_rcred = ap->a_cred;
464 crhold(np->n_rcred);
465 }
466 if (ap->a_mode & FWRITE0x0002) {
467 if (np->n_wcred) {
468 crfree(np->n_wcred);
469 }
470 np->n_wcred = ap->a_cred;
471 crhold(np->n_wcred);
472 }
473
474 if (np->n_flag & NMODIFIED0x0004) {
475 error = nfs_vinvalbuf(vp, V_SAVE0x0001, ap->a_cred, ap->a_p);
476 if (error == EINTR4)
477 return (error);
478 uvm_vnp_uncache(vp);
479 NFS_INVALIDATE_ATTRCACHE(np)((np)->n_attrstamp = 0);
480 if (vp->v_type == VDIR)
481 np->n_direofoffsetn_un2.nd_direof = 0;
482 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
483 if (error)
484 return (error);
485 np->n_mtime = vattr.va_mtime;
486 } else {
487 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
488 if (error)
489 return (error);
490 if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) {
491 if (vp->v_type == VDIR)
492 np->n_direofoffsetn_un2.nd_direof = 0;
493 error = nfs_vinvalbuf(vp, V_SAVE0x0001, ap->a_cred, ap->a_p);
494 if (error == EINTR4)
495 return (error);
496 uvm_vnp_uncache(vp);
497 np->n_mtime = vattr.va_mtime;
498 }
499 }
500 /* For open/close consistency. */
501 NFS_INVALIDATE_ATTRCACHE(np)((np)->n_attrstamp = 0);
502 return (0);
503}
504
505/*
506 * nfs close vnode op
507 * What an NFS client should do upon close after writing is a debatable issue.
508 * Most NFS clients push delayed writes to the server upon close, basically for
509 * two reasons:
510 * 1 - So that any write errors may be reported back to the client process
511 * doing the close system call. By far the two most likely errors are
512 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
513 * 2 - To put a worst case upper bound on cache inconsistency between
514 * multiple clients for the file.
515 * There is also a consistency problem for Version 2 of the protocol w.r.t.
516 * not being able to tell if other clients are writing a file concurrently,
517 * since there is no way of knowing if the changed modify time in the reply
518 * is only due to the write for this client.
519 * (NFS Version 3 provides weak cache consistency data in the reply that
520 * should be sufficient to detect and handle this case.)
521 *
522 * The current code does the following:
523 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
524 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
525 * or commit them (this satisfies 1 and 2 except for the
526 * case where the server crashes after this close but
527 * before the commit RPC, which is felt to be "good
528 * enough". Changing the last argument to nfs_flush() to
529 * a 1 would force a commit operation, if it is felt a
530 * commit is necessary now.
531 */
532int
533nfs_close(void *v)
534{
535 struct vop_close_args *ap = v;
536 struct vnode *vp = ap->a_vp;
537 struct nfsnode *np = VTONFS(vp)((struct nfsnode *)(vp)->v_data);
538 int error = 0;
539
540 if (vp->v_type == VREG) {
541 if (np->n_flag & NMODIFIED0x0004) {
542 if (NFS_ISV3(vp)) {
543 error = nfs_flush(vp, ap->a_cred, MNT_WAIT1, ap->a_p, 0);
544 np->n_flag &= ~NMODIFIED0x0004;
545 } else
546 error = nfs_vinvalbuf(vp, V_SAVE0x0001, ap->a_cred, ap->a_p);
547 NFS_INVALIDATE_ATTRCACHE(np)((np)->n_attrstamp = 0);
548 }
549 if (np->n_flag & NWRITEERR0x0008) {
550 np->n_flag &= ~NWRITEERR0x0008;
551 error = np->n_error;
552 }
553 }
554 return (error);
555}
556
557/*
558 * nfs getattr call from vfs.
559 */
560int
561nfs_getattr(void *v)
562{
563 struct vop_getattr_args *ap = v;
564 struct vnode *vp = ap->a_vp;
565 struct nfsnode *np = VTONFS(vp)((struct nfsnode *)(vp)->v_data);
566 struct nfsm_info info;
567 int32_t t1;
568 int error = 0;
569
570 info.nmi_v3 = NFS_ISV3(vp);
571
572 /*
573 * Update local times for special files.
574 */
575 if (np->n_flag & (NACC0x0100 | NUPD0x0200))
576 np->n_flag |= NCHG0x0400;
577 /*
578 * First look in the cache.
579 */
580 if (nfs_getattrcache(vp, ap->a_vap) == 0)
581 return (0);
582
583 nfsstats.rpccnt[NFSPROC_GETATTR1]++;
584 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32));
585 nfsm_fhtom(&info, vp, info.nmi_v3);
586 info.nmi_procp = ap->a_p;
587 info.nmi_cred = ap->a_cred;
588 error = nfs_request(vp, NFSPROC_GETATTR1, &info);
589 if (!error)
590 nfsm_loadattr(vp, ap->a_vap);
591 m_freem(info.nmi_mrep);
592nfsmout:
593 return (error);
594}
595
596/*
597 * nfs setattr call.
598 */
599int
600nfs_setattr(void *v)
601{
602 struct vop_setattr_args *ap = v;
603 struct vnode *vp = ap->a_vp;
604 struct nfsnode *np = VTONFS(vp)((struct nfsnode *)(vp)->v_data);
605 struct vattr *vap = ap->a_vap;
606 int hint = NOTE_ATTRIB0x0008;
607 int error = 0;
608 u_quad_t tsize = 0;
609
610 /*
611 * Setting of flags is not supported.
612 */
613 if (vap->va_flags != VNOVAL(-1))
614 return (EOPNOTSUPP45);
615
616 /*
617 * Disallow write attempts if the filesystem is mounted read-only.
618 */
619 if ((vap->va_uid != (uid_t)VNOVAL(-1) ||
620 vap->va_gid != (gid_t)VNOVAL(-1) ||
621 vap->va_atime.tv_nsec != VNOVAL(-1) ||
622 vap->va_mtime.tv_nsec != VNOVAL(-1) ||
623 vap->va_mode != (mode_t)VNOVAL(-1)) &&
624 (vp->v_mount->mnt_flag & MNT_RDONLY0x00000001))
625 return (EROFS30);
626 if (vap->va_size != VNOVAL(-1)) {
627 switch (vp->v_type) {
628 case VDIR:
629 return (EISDIR21);
630 case VCHR:
631 case VBLK:
632 case VSOCK:
633 case VFIFO:
634 if (vap->va_mtime.tv_nsec == VNOVAL(-1) &&
635 vap->va_atime.tv_nsec == VNOVAL(-1) &&
636 vap->va_mode == (mode_t)VNOVAL(-1) &&
637 vap->va_uid == (uid_t)VNOVAL(-1) &&
638 vap->va_gid == (gid_t)VNOVAL(-1))
639 return (0);
640 vap->va_size = VNOVAL(-1);
641 break;
642 default:
643 /*
644 * Disallow write attempts if the filesystem is
645 * mounted read-only.
646 */
647 if (vp->v_mount->mnt_flag & MNT_RDONLY0x00000001)
648 return (EROFS30);
649 if (vap->va_size == 0)
650 error = nfs_vinvalbuf(vp, 0,
651 ap->a_cred, ap->a_p);
652 else
653 error = nfs_vinvalbuf(vp, V_SAVE0x0001,
654 ap->a_cred, ap->a_p);
655 if (error)
656 return (error);
657 tsize = np->n_size;
658 np->n_size = np->n_vattr.va_size = vap->va_size;
659 uvm_vnp_setsize(vp, np->n_size);
660 };
661 } else if ((vap->va_mtime.tv_nsec != VNOVAL(-1) ||
662 vap->va_atime.tv_nsec != VNOVAL(-1)) &&
663 vp->v_type == VREG &&
664 (error = nfs_vinvalbuf(vp, V_SAVE0x0001, ap->a_cred,
665 ap->a_p)) == EINTR4)
666 return (error);
667 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p);
668 if (error && vap->va_size != VNOVAL(-1)) {
669 np->n_size = np->n_vattr.va_size = tsize;
670 uvm_vnp_setsize(vp, np->n_size);
671 }
672
673 if (vap->va_size != VNOVAL(-1) && vap->va_size < tsize)
674 hint |= NOTE_TRUNCATE0x0080;
675
676 VN_KNOTE(vp, hint)knote_locked(&vp->v_klist, (hint)); /* XXX setattrrpc? */
677
678 return (error);
679}
680
681/*
682 * Do an nfs setattr rpc.
683 */
684int
685nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred,
686 struct proc *procp)
687{
688 struct nfsv2_sattr *sp;
689 struct nfsm_info info;
690 int32_t t1;
691 caddr_t cp2;
692 u_int32_t *tl;
693 int error = 0, wccflag = NFSV3_WCCRATTR0;
694 int v3 = NFS_ISV3(vp);
695
696 info.nmi_v3 = NFS_ISV3(vp);
697
698 nfsstats.rpccnt[NFSPROC_SETATTR2]++;
699 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(v3)((v3) ? (64 + 4) : 32) + NFSX_SATTR(v3)((v3) ? 60 : 32));
700 nfsm_fhtom(&info, vp, v3);
701
702 if (info.nmi_v3) {
703 nfsm_v3attrbuild(&info.nmi_mb, vap, 1);
704 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED4);
705 *tl = nfs_false;
706 } else {
707 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR32);
708 if (vap->va_mode == (mode_t)VNOVAL(-1))
709 sp->sa_mode = nfs_xdrneg1;
710 else
711 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
712 if (vap->va_uid == (uid_t)VNOVAL(-1))
713 sp->sa_uid = nfs_xdrneg1;
714 else
715 sp->sa_uid = txdr_unsigned(vap->va_uid);
716 if (vap->va_gid == (gid_t)VNOVAL(-1))
717 sp->sa_gid = nfs_xdrneg1;
718 else
719 sp->sa_gid = txdr_unsigned(vap->va_gid);
720 sp->sa_size = txdr_unsigned(vap->va_size);
721 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
722 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
723 }
724
725 info.nmi_procp = procp;
726 info.nmi_cred = cred;
727 error = nfs_request(vp, NFSPROC_SETATTR2, &info);
728
729 if (info.nmi_v3)
730 nfsm_wcc_data(vp, wccflag);
731 else if (error == 0)
732 nfsm_loadattr(vp, NULL);
733
734 m_freem(info.nmi_mrep);
735nfsmout:
736 return (error);
737}
738
739/*
740 * nfs lookup call, one step at a time...
741 * First look in cache
742 * If not found, unlock the directory nfsnode and do the rpc
743 */
744int
745nfs_lookup(void *v)
746{
747 struct vop_lookup_args *ap = v;
748 struct componentname *cnp = ap->a_cnp;
749 struct vnode *dvp = ap->a_dvp;
750 struct vnode **vpp = ap->a_vpp;
751 struct nfsm_info info;
752 int flags;
753 struct vnode *newvp;
754 u_int32_t *tl;
755 int32_t t1;
756 struct nfsmount *nmp;
757 caddr_t cp2;
758 long len;
759 nfsfh_t *fhp;
760 struct nfsnode *np;
761 int lockparent, wantparent, error = 0, attrflag, fhsize;
762
763 info.nmi_v3 = NFS_ISV3(dvp);
764
765 cnp->cn_flags &= ~PDIRUNLOCK0x200000;
766 flags = cnp->cn_flags;
767
768 *vpp = NULLVP((struct vnode *)((void *)0));
769 newvp = NULLVP((struct vnode *)((void *)0));
770 if ((flags & ISLASTCN0x008000) && (dvp->v_mount->mnt_flag & MNT_RDONLY0x00000001) &&
771 (cnp->cn_nameiop == DELETE2 || cnp->cn_nameiop == RENAME3))
772 return (EROFS30);
773 if (dvp->v_type != VDIR)
774 return (ENOTDIR20);
775 lockparent = flags & LOCKPARENT0x0008;
776 wantparent = flags & (LOCKPARENT0x0008|WANTPARENT0x0010);
777 nmp = VFSTONFS(dvp->v_mount)((struct nfsmount *)((dvp->v_mount)->mnt_data));
778 np = VTONFS(dvp)((struct nfsnode *)(dvp)->v_data);
779
780 /*
781 * Before tediously performing a linear scan of the directory,
782 * check the name cache to see if the directory/name pair
783 * we are looking for is known already.
784 * If the directory/name pair is found in the name cache,
785 * we have to ensure the directory has not changed from
786 * the time the cache entry has been created. If it has,
787 * the cache entry has to be ignored.
788 */
789 if ((error = cache_lookup(dvp, vpp, cnp)) >= 0) {
790 struct vattr vattr;
791 int err2;
792
793 if (error && error != ENOENT2) {
794 *vpp = NULLVP((struct vnode *)((void *)0));
795 return (error);
796 }
797
798 if (cnp->cn_flags & PDIRUNLOCK0x200000) {
799 err2 = vn_lock(dvp, LK_EXCLUSIVE0x0001UL | LK_RETRY0x2000UL);
800 if (err2 != 0) {
801 *vpp = NULLVP((struct vnode *)((void *)0));
802 return (err2);
803 }
804 cnp->cn_flags &= ~PDIRUNLOCK0x200000;
805 }
806
807 err2 = VOP_ACCESS(dvp, VEXEC00100, cnp->cn_cred, cnp->cn_proc);
808 if (err2 != 0) {
809 if (error == 0) {
810 if (*vpp != dvp)
811 vput(*vpp);
812 else
813 vrele(*vpp);
814 }
815 *vpp = NULLVP((struct vnode *)((void *)0));
816 return (err2);
817 }
818
819 if (error == ENOENT2) {
820 if (!VOP_GETATTR(dvp, &vattr, cnp->cn_cred,
821 cnp->cn_proc) && vattr.va_mtime.tv_sec ==
822 VTONFS(dvp)((struct nfsnode *)(dvp)->v_data)->n_ctime)
823 return (ENOENT2);
824 cache_purge(dvp);
825 np->n_ctime = 0;
826 goto dorpc;
827 }
828
829 newvp = *vpp;
830 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, cnp->cn_proc)
831 && vattr.va_ctime.tv_sec == VTONFS(newvp)((struct nfsnode *)(newvp)->v_data)->n_ctime)
832 {
833 nfsstats.lookupcache_hits++;
834 if (cnp->cn_nameiop != LOOKUP0 && (flags & ISLASTCN0x008000))
835 cnp->cn_flags |= SAVENAME0x000800;
836 if ((!lockparent || !(flags & ISLASTCN0x008000)) &&
837 newvp != dvp) {
838 VOP_UNLOCK(dvp);
839 cnp->cn_flags |= PDIRUNLOCK0x200000;
840 }
841 return (0);
842 }
843 cache_purge(newvp);
844 if (newvp != dvp)
845 vput(newvp);
846 else
847 vrele(newvp);
848 *vpp = NULLVP((struct vnode *)((void *)0));
849 }
850dorpc:
851 error = 0;
852 newvp = NULLVP((struct vnode *)((void *)0));
853 nfsstats.lookupcache_misses++;
854 nfsstats.rpccnt[NFSPROC_LOOKUP3]++;
855 len = cnp->cn_namelen;
856 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32) +
857 NFSX_UNSIGNED4 + nfsm_rndup(len)(((len)+3)&(~0x3)));
858 nfsm_fhtom(&info, dvp, info.nmi_v3);
859 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
860
861 info.nmi_procp = cnp->cn_proc;
862 info.nmi_cred = cnp->cn_cred;
863 error = nfs_request(dvp, NFSPROC_LOOKUP3, &info);
864
865 if (error) {
866 if (info.nmi_v3)
867 nfsm_postop_attr(dvp, attrflag);
868 m_freem(info.nmi_mrep);
869 goto nfsmout;
870 }
871
872 nfsm_getfh(fhp, fhsize, info.nmi_v3);
873
874 /*
875 * Handle RENAME case...
876 */
877 if (cnp->cn_nameiop == RENAME3 && wantparent && (flags & ISLASTCN0x008000)) {
878 if (NFS_CMPFH(np, fhp, fhsize)) {
879 m_freem(info.nmi_mrep);
880 return (EISDIR21);
881 }
882 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
883 if (error) {
884 m_freem(info.nmi_mrep);
885 return (error);
886 }
887 newvp = NFSTOV(np)((np)->n_vnode);
888 if (info.nmi_v3) {
889 nfsm_postop_attr(newvp, attrflag);
890 nfsm_postop_attr(dvp, attrflag);
891 } else
892 nfsm_loadattr(newvp, NULL);
893 *vpp = newvp;
894 m_freem(info.nmi_mrep);
895 cnp->cn_flags |= SAVENAME0x000800;
896 if (!lockparent) {
897 VOP_UNLOCK(dvp);
898 cnp->cn_flags |= PDIRUNLOCK0x200000;
899 }
900 return (0);
901 }
902
903 /*
904 * The postop attr handling is duplicated for each if case,
905 * because it should be done while dvp is locked (unlocking
906 * dvp is different for each case).
907 */
908
909 if (NFS_CMPFH(np, fhp, fhsize)) {
910 vref(dvp);
911 newvp = dvp;
912 if (info.nmi_v3) {
913 nfsm_postop_attr(newvp, attrflag);
914 nfsm_postop_attr(dvp, attrflag);
915 } else
916 nfsm_loadattr(newvp, NULL);
917 } else if (flags & ISDOTDOT0x002000) {
918 VOP_UNLOCK(dvp);
919 cnp->cn_flags |= PDIRUNLOCK0x200000;
920
921 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
922 if (error) {
923 if (vn_lock(dvp, LK_EXCLUSIVE0x0001UL | LK_RETRY0x2000UL) == 0)
924 cnp->cn_flags &= ~PDIRUNLOCK0x200000;
925 m_freem(info.nmi_mrep);
926 return (error);
927 }
928 newvp = NFSTOV(np)((np)->n_vnode);
929
930 if (info.nmi_v3) {
931 nfsm_postop_attr(newvp, attrflag);
932 nfsm_postop_attr(dvp, attrflag);
933 } else
934 nfsm_loadattr(newvp, NULL);
935
936 if (lockparent && (flags & ISLASTCN0x008000)) {
937 if ((error = vn_lock(dvp, LK_EXCLUSIVE0x0001UL))) {
938 m_freem(info.nmi_mrep);
939 vput(newvp);
940 return error;
941 }
942 cnp->cn_flags &= ~PDIRUNLOCK0x200000;
943 }
944
945 } else {
946 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
947 if (error) {
948 m_freem(info.nmi_mrep);
949 return error;
950 }
951 newvp = NFSTOV(np)((np)->n_vnode);
952 if (info.nmi_v3) {
953 nfsm_postop_attr(newvp, attrflag);
954 nfsm_postop_attr(dvp, attrflag);
955 } else
956 nfsm_loadattr(newvp, NULL);
957 if (!lockparent || !(flags & ISLASTCN0x008000)) {
958 VOP_UNLOCK(dvp);
959 cnp->cn_flags |= PDIRUNLOCK0x200000;
960 }
961 }
962
963 if (cnp->cn_nameiop != LOOKUP0 && (flags & ISLASTCN0x008000))
964 cnp->cn_flags |= SAVENAME0x000800;
965 if ((cnp->cn_flags & MAKEENTRY0x004000) &&
966 (cnp->cn_nameiop != DELETE2 || !(flags & ISLASTCN0x008000))) {
967 nfs_cache_enter(dvp, newvp, cnp);
968 }
969
970 *vpp = newvp;
971 m_freem(info.nmi_mrep);
972
973nfsmout:
974 if (error) {
975 /*
976 * We get here only because of errors returned by
977 * the RPC. Otherwise we'll have returned above
978 * (the nfsm_* macros will jump to nfsmout
979 * on error).
980 */
981 if (error == ENOENT2 && (cnp->cn_flags & MAKEENTRY0x004000) &&
982 cnp->cn_nameiop != CREATE1) {
983 nfs_cache_enter(dvp, NULL((void *)0), cnp);
984 }
985 if (newvp != NULLVP((struct vnode *)((void *)0))) {
986 if (newvp != dvp)
987 vput(newvp);
988 else
989 vrele(newvp);
990 }
991 if ((cnp->cn_nameiop == CREATE1 || cnp->cn_nameiop == RENAME3) &&
992 (flags & ISLASTCN0x008000) && error == ENOENT2) {
993 if (dvp->v_mount->mnt_flag & MNT_RDONLY0x00000001)
994 error = EROFS30;
995 else
996 error = EJUSTRETURN-2;
997 }
998 if (cnp->cn_nameiop != LOOKUP0 && (flags & ISLASTCN0x008000))
999 cnp->cn_flags |= SAVENAME0x000800;
1000 *vpp = NULL((void *)0);
1001 }
1002 return (error);
1003}
1004
1005/*
1006 * nfs read call.
1007 * Just call nfs_bioread() to do the work.
1008 */
1009int
1010nfs_read(void *v)
1011{
1012 struct vop_read_args *ap = v;
1013 struct vnode *vp = ap->a_vp;
1014
1015 if (vp->v_type != VREG)
1016 return (EPERM1);
1017 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
1018}
1019
1020/*
1021 * nfs readlink call
1022 */
1023int
1024nfs_readlink(void *v)
1025{
1026 struct vop_readlink_args *ap = v;
1027 struct vnode *vp = ap->a_vp;
1028
1029 if (vp->v_type != VLNK)
1030 return (EPERM1);
1031 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred));
1032}
1033
1034/*
1035 * Lock an inode.
1036 */
1037int
1038nfs_lock(void *v)
1039{
1040 struct vop_lock_args *ap = v;
1041 struct vnode *vp = ap->a_vp;
1042
1043 return rrw_enter(&VTONFS(vp)((struct nfsnode *)(vp)->v_data)->n_lock, ap->a_flags & LK_RWFLAGS(0x0001UL|0x0002UL|0x0040UL|0x0080UL|0x0100UL));
1044}
1045
1046/*
1047 * Unlock an inode.
1048 */
1049int
1050nfs_unlock(void *v)
1051{
1052 struct vop_unlock_args *ap = v;
1053 struct vnode *vp = ap->a_vp;
1054
1055 rrw_exit(&VTONFS(vp)((struct nfsnode *)(vp)->v_data)->n_lock);
1056 return 0;
1057}
1058
1059/*
1060 * Check for a locked inode.
1061 */
1062int
1063nfs_islocked(void *v)
1064{
1065 struct vop_islocked_args *ap = v;
1066
1067 return rrw_status(&VTONFS(ap->a_vp)((struct nfsnode *)(ap->a_vp)->v_data)->n_lock);
1068}
1069
1070/*
1071 * Do a readlink rpc.
1072 * Called by nfs_doio() from below the buffer cache.
1073 */
1074int
1075nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
1076{
1077 struct nfsm_info info;
1078 u_int32_t *tl;
1079 int32_t t1;
1080 caddr_t cp2;
1081 int error = 0, len, attrflag;
1082
1083 info.nmi_v3 = NFS_ISV3(vp);
1084
1085 nfsstats.rpccnt[NFSPROC_READLINK5]++;
1086 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32));
1087 nfsm_fhtom(&info, vp, info.nmi_v3);
1088
1089 info.nmi_procp = curproc;
1090 info.nmi_cred = cred;
1091 error = nfs_request(vp, NFSPROC_READLINK5, &info);
1092
1093 if (info.nmi_v3)
1094 nfsm_postop_attr(vp, attrflag);
1095 if (!error) {
1096 nfsm_strsiz(len, NFS_MAXPATHLEN);
1097 nfsm_mtouio(uiop, len);
1098 }
1099
1100 m_freem(info.nmi_mrep);
1101
1102nfsmout:
1103 return (error);
1104}
1105
1106/*
1107 * nfs read rpc call
1108 * Ditto above
1109 */
1110int
1111nfs_readrpc(struct vnode *vp, struct uio *uiop)
1112{
1113 struct nfsm_info info;
1114 u_int32_t *tl;
1115 int32_t t1;
1116 caddr_t cp2;
1117 struct nfsmount *nmp;
1118 int error = 0, len, retlen, tsiz, eof, attrflag;
1119
1120 info.nmi_v3 = NFS_ISV3(vp)(((struct nfsmount *)(((vp)->v_mount)->mnt_data))->nm_flag
& 0x00000200)
;
1121
1122 eof = 0;
1123
1124 nmp = VFSTONFS(vp->v_mount)((struct nfsmount *)((vp->v_mount)->mnt_data));
1125 tsiz = uiop->uio_resid;
1126 if (uiop->uio_offset + tsiz > 0xffffffff && !info.nmi_v3)
1127 return (EFBIG27);
1128 while (tsiz > 0) {
1129 nfsstats.rpccnt[NFSPROC_READ6]++;
1130 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1131 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32) +
1132 NFSX_UNSIGNED4 * 3);
1133 nfsm_fhtom(&info, vp, info.nmi_v3);
1134 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED4 * 3);
1135 if (info.nmi_v3) {
1136 txdr_hyper(uiop->uio_offset, tl)do { ((u_int32_t *)(tl))[0] = (__uint32_t)(__builtin_constant_p
((u_int32_t)((uiop->uio_offset) >> 32)) ? (__uint32_t
)(((__uint32_t)((u_int32_t)((uiop->uio_offset) >> 32
)) & 0xff) << 24 | ((__uint32_t)((u_int32_t)((uiop->
uio_offset) >> 32)) & 0xff00) << 8 | ((__uint32_t
)((u_int32_t)((uiop->uio_offset) >> 32)) & 0xff0000
) >> 8 | ((__uint32_t)((u_int32_t)((uiop->uio_offset
) >> 32)) & 0xff000000) >> 24) : __swap32md((
u_int32_t)((uiop->uio_offset) >> 32))); ((u_int32_t *
)(tl))[1] = (__uint32_t)(__builtin_constant_p((u_int32_t)((uiop
->uio_offset) & 0xffffffff)) ? (__uint32_t)(((__uint32_t
)((u_int32_t)((uiop->uio_offset) & 0xffffffff)) & 0xff
) << 24 | ((__uint32_t)((u_int32_t)((uiop->uio_offset
) & 0xffffffff)) & 0xff00) << 8 | ((__uint32_t)
((u_int32_t)((uiop->uio_offset) & 0xffffffff)) & 0xff0000
) >> 8 | ((__uint32_t)((u_int32_t)((uiop->uio_offset
) & 0xffffffff)) & 0xff000000) >> 24) : __swap32md
((u_int32_t)((uiop->uio_offset) & 0xffffffff))); } while
(0)
;
1137 *(tl + 2) = txdr_unsigned(len)((__uint32_t)(__builtin_constant_p((int32_t)(len)) ? (__uint32_t
)(((__uint32_t)((int32_t)(len)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(len)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(len)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(
len)) & 0xff000000) >> 24) : __swap32md((int32_t)(len
))))
;
1138 } else {
1139 *tl++ = txdr_unsigned(uiop->uio_offset)((__uint32_t)(__builtin_constant_p((int32_t)(uiop->uio_offset
)) ? (__uint32_t)(((__uint32_t)((int32_t)(uiop->uio_offset
)) & 0xff) << 24 | ((__uint32_t)((int32_t)(uiop->
uio_offset)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(uiop->uio_offset)) & 0xff0000) >> 8 | ((__uint32_t
)((int32_t)(uiop->uio_offset)) & 0xff000000) >> 24
) : __swap32md((int32_t)(uiop->uio_offset))))
;
1140 *tl++ = txdr_unsigned(len)((__uint32_t)(__builtin_constant_p((int32_t)(len)) ? (__uint32_t
)(((__uint32_t)((int32_t)(len)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(len)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(len)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(
len)) & 0xff000000) >> 24) : __swap32md((int32_t)(len
))))
;
1141 *tl = 0;
1142 }
1143
1144 info.nmi_procp = curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc
;
1145 info.nmi_cred = VTONFS(vp)((struct nfsnode *)(vp)->v_data)->n_rcred;
1146 error = nfs_request(vp, NFSPROC_READ6, &info);
1147 if (info.nmi_v3)
1148 nfsm_postop_attr(vp, attrflag){ if (info.nmi_mrep != ((void *)0)) { struct vnode *ttvp = (vp
); { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info
.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (4)) {
(tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (4); }
else if ((t1 = nfsm_disct(&info.nmi_md, &info.nmi_dpos
, (4), t1, &cp2)) != 0) { error = t1; m_freem(info.nmi_mrep
); goto nfsmout; } else { (tl) = (u_int32_t *)cp2; } }; if ((
(attrflag) = ((int)(__uint32_t)(__builtin_constant_p((int32_t
)(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl)) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*tl)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl)) & 0xff000000) >> 24
) : __swap32md((int32_t)(*tl))))) != 0) { if ((t1 = nfs_loadattrcache
(&ttvp, &info.nmi_md, &info.nmi_dpos, ((void *)0)
)) != 0) { error = t1; (attrflag) = 0; m_freem(info.nmi_mrep)
; goto nfsmout; } (vp) = ttvp; } } }
;
1149 if (error) {
1150 m_freem(info.nmi_mrep);
1151 goto nfsmout;
1152 }
1153
1154 if (info.nmi_v3) {
1155 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED){ t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info.nmi_md
->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (2 * 4)) { (tl
) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (2 * 4); }
else if ((t1 = nfsm_disct(&info.nmi_md, &info.nmi_dpos
, (2 * 4), t1, &cp2)) != 0) { error = t1; m_freem(info.nmi_mrep
); goto nfsmout; } else { (tl) = (u_int32_t *)cp2; } }
;
1156 eof = fxdr_unsigned(int, *(tl + 1))((int)(__uint32_t)(__builtin_constant_p((int32_t)(*(tl + 1)))
? (__uint32_t)(((__uint32_t)((int32_t)(*(tl + 1))) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*(tl + 1))) & 0xff00
) << 8 | ((__uint32_t)((int32_t)(*(tl + 1))) & 0xff0000
) >> 8 | ((__uint32_t)((int32_t)(*(tl + 1))) & 0xff000000
) >> 24) : __swap32md((int32_t)(*(tl + 1)))))
;
1157 } else {
1158 nfsm_loadattr(vp, NULL){ struct vnode *ttvp = (vp); if ((t1 = nfs_loadattrcache(&
ttvp, &info.nmi_md, &info.nmi_dpos, (((void *)0)))) !=
0) { error = t1; m_freem(info.nmi_mrep); goto nfsmout; } (vp
) = ttvp; }
;
1159 }
1160
1161 nfsm_strsiz(retlen, nmp->nm_rsize){ { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info.
nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (4)) { (
tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (4); } else
if ((t1 = nfsm_disct(&info.nmi_md, &info.nmi_dpos, (
4), t1, &cp2)) != 0) { error = t1; m_freem(info.nmi_mrep)
; goto nfsmout; } else { (tl) = (u_int32_t *)cp2; } }; if (((
retlen) = ((int32_t)(__uint32_t)(__builtin_constant_p((int32_t
)(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl)) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*tl)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl)) & 0xff000000) >> 24
) : __swap32md((int32_t)(*tl))))) < 0 || (retlen) > (nmp
->nm_rsize)) { m_freem(info.nmi_mrep); error = 72; goto nfsmout
; } }
;
1162 nfsm_mtouio(uiop, retlen)if ((retlen) > 0 && (t1 = nfsm_mbuftouio(&info
.nmi_md, (uiop), (retlen), &info.nmi_dpos)) != 0) { error
= t1; m_freem(info.nmi_mrep); goto nfsmout; }
;
1163 m_freem(info.nmi_mrep);
1164 tsiz -= retlen;
1165 if (info.nmi_v3) {
1166 if (eof || retlen == 0)
1167 tsiz = 0;
1168 } else if (retlen < len)
1169 tsiz = 0;
1170 }
1171
1172nfsmout:
1173 return (error);
1174}
1175
1176/*
1177 * nfs write call
1178 */
1179int
1180nfs_writerpc(struct vnode *vp, struct uio *uiop, int *iomode, int *must_commit)
1181{
1182 struct nfsm_info info;
1183 u_int32_t *tl;
1184 int32_t t1, backup;
1185 caddr_t cp2;
1186 struct nfsmount *nmp = VFSTONFS(vp->v_mount)((struct nfsmount *)((vp->v_mount)->mnt_data));
1187 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR0, rlen, commit;
1188 int committed = NFSV3WRITE_FILESYNC2;
1189
1190 info.nmi_v3 = NFS_ISV3(vp)(((struct nfsmount *)(((vp)->v_mount)->mnt_data))->nm_flag
& 0x00000200)
;
1191
1192#ifdef DIAGNOSTIC1
1193 if (uiop->uio_iovcnt != 1)
1194 panic("nfs: writerpc iovcnt > 1");
1195#endif
1196 *must_commit = 0;
1197 tsiz = uiop->uio_resid;
1198 if (uiop->uio_offset + tsiz > 0xffffffff && !info.nmi_v3)
1199 return (EFBIG27);
1200 while (tsiz > 0) {
1201 nfsstats.rpccnt[NFSPROC_WRITE7]++;
1202 len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz;
1203 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32)
1204 + 5 * NFSX_UNSIGNED4 + nfsm_rndup(len)(((len)+3)&(~0x3)));
1205 nfsm_fhtom(&info, vp, info.nmi_v3);
1206 if (info.nmi_v3) {
1207 tl = nfsm_build(&info.nmi_mb, 5 * NFSX_UNSIGNED4);
1208 txdr_hyper(uiop->uio_offset, tl)do { ((u_int32_t *)(tl))[0] = (__uint32_t)(__builtin_constant_p
((u_int32_t)((uiop->uio_offset) >> 32)) ? (__uint32_t
)(((__uint32_t)((u_int32_t)((uiop->uio_offset) >> 32
)) & 0xff) << 24 | ((__uint32_t)((u_int32_t)((uiop->
uio_offset) >> 32)) & 0xff00) << 8 | ((__uint32_t
)((u_int32_t)((uiop->uio_offset) >> 32)) & 0xff0000
) >> 8 | ((__uint32_t)((u_int32_t)((uiop->uio_offset
) >> 32)) & 0xff000000) >> 24) : __swap32md((
u_int32_t)((uiop->uio_offset) >> 32))); ((u_int32_t *
)(tl))[1] = (__uint32_t)(__builtin_constant_p((u_int32_t)((uiop
->uio_offset) & 0xffffffff)) ? (__uint32_t)(((__uint32_t
)((u_int32_t)((uiop->uio_offset) & 0xffffffff)) & 0xff
) << 24 | ((__uint32_t)((u_int32_t)((uiop->uio_offset
) & 0xffffffff)) & 0xff00) << 8 | ((__uint32_t)
((u_int32_t)((uiop->uio_offset) & 0xffffffff)) & 0xff0000
) >> 8 | ((__uint32_t)((u_int32_t)((uiop->uio_offset
) & 0xffffffff)) & 0xff000000) >> 24) : __swap32md
((u_int32_t)((uiop->uio_offset) & 0xffffffff))); } while
(0)
;
1209 tl += 2;
1210 *tl++ = txdr_unsigned(len)((__uint32_t)(__builtin_constant_p((int32_t)(len)) ? (__uint32_t
)(((__uint32_t)((int32_t)(len)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(len)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(len)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(
len)) & 0xff000000) >> 24) : __swap32md((int32_t)(len
))))
;
1211 *tl++ = txdr_unsigned(*iomode)((__uint32_t)(__builtin_constant_p((int32_t)(*iomode)) ? (__uint32_t
)(((__uint32_t)((int32_t)(*iomode)) & 0xff) << 24 |
((__uint32_t)((int32_t)(*iomode)) & 0xff00) << 8 |
((__uint32_t)((int32_t)(*iomode)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*iomode)) & 0xff000000) >>
24) : __swap32md((int32_t)(*iomode))))
;
1212 *tl = txdr_unsigned(len)((__uint32_t)(__builtin_constant_p((int32_t)(len)) ? (__uint32_t
)(((__uint32_t)((int32_t)(len)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(len)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(len)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(
len)) & 0xff000000) >> 24) : __swap32md((int32_t)(len
))))
;
1213 } else {
1214 u_int32_t x;
1215
1216 tl = nfsm_build(&info.nmi_mb, 4 * NFSX_UNSIGNED4);
1217 /* Set both "begin" and "current" to non-garbage. */
1218 x = txdr_unsigned((u_int32_t)uiop->uio_offset)((__uint32_t)(__builtin_constant_p((int32_t)((u_int32_t)uiop->
uio_offset)) ? (__uint32_t)(((__uint32_t)((int32_t)((u_int32_t
)uiop->uio_offset)) & 0xff) << 24 | ((__uint32_t
)((int32_t)((u_int32_t)uiop->uio_offset)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)((u_int32_t)uiop->uio_offset))
& 0xff0000) >> 8 | ((__uint32_t)((int32_t)((u_int32_t
)uiop->uio_offset)) & 0xff000000) >> 24) : __swap32md
((int32_t)((u_int32_t)uiop->uio_offset))))
;
1219 *tl++ = x; /* "begin offset" */
1220 *tl++ = x; /* "current offset" */
1221 x = txdr_unsigned(len)((__uint32_t)(__builtin_constant_p((int32_t)(len)) ? (__uint32_t
)(((__uint32_t)((int32_t)(len)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(len)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(len)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(
len)) & 0xff000000) >> 24) : __swap32md((int32_t)(len
))))
;
1222 *tl++ = x; /* total to this offset */
1223 *tl = x; /* size of this write */
1224
1225 }
1226 nfsm_uiotombuf(&info.nmi_mb, uiop, len);
1227
1228 info.nmi_procp = curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc
;
1229 info.nmi_cred = VTONFS(vp)((struct nfsnode *)(vp)->v_data)->n_wcred;
1230 error = nfs_request(vp, NFSPROC_WRITE7, &info);
1231 if (info.nmi_v3) {
1232 wccflag = NFSV3_WCCCHK1;
1233 nfsm_wcc_data(vp, wccflag)do { if (info.nmi_mrep != ((void *)0)) { struct timespec _mtime
; int ttattrf, ttretf = 0; { t1 = ((caddr_t)((info.nmi_md)->
m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info
.nmi_dpos += (4); } else if ((t1 = nfsm_disct(&info.nmi_md
, &info.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1;
m_freem(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t
*)cp2; } }; if (*tl == nfs_true) { { t1 = ((caddr_t)((info.nmi_md
)->m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (6 * 4)) { (tl) = (u_int32_t *)(info.nmi_dpos)
; info.nmi_dpos += (6 * 4); } else if ((t1 = nfsm_disct(&
info.nmi_md, &info.nmi_dpos, (6 * 4), t1, &cp2)) != 0
) { error = t1; m_freem(info.nmi_mrep); goto nfsmout; } else {
(tl) = (u_int32_t *)cp2; } }; do { (&_mtime)->tv_sec =
(__uint32_t)(__builtin_constant_p(((struct nfsv3_time *)(tl +
2))->nfsv3_sec) ? (__uint32_t)(((__uint32_t)(((struct nfsv3_time
*)(tl + 2))->nfsv3_sec) & 0xff) << 24 | ((__uint32_t
)(((struct nfsv3_time *)(tl + 2))->nfsv3_sec) & 0xff00
) << 8 | ((__uint32_t)(((struct nfsv3_time *)(tl + 2))->
nfsv3_sec) & 0xff0000) >> 8 | ((__uint32_t)(((struct
nfsv3_time *)(tl + 2))->nfsv3_sec) & 0xff000000) >>
24) : __swap32md(((struct nfsv3_time *)(tl + 2))->nfsv3_sec
)); (&_mtime)->tv_nsec = (__uint32_t)(__builtin_constant_p
(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec) ? (__uint32_t
)(((__uint32_t)(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec
) & 0xff) << 24 | ((__uint32_t)(((struct nfsv3_time
*)(tl + 2))->nfsv3_nsec) & 0xff00) << 8 | ((__uint32_t
)(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec) & 0xff0000
) >> 8 | ((__uint32_t)(((struct nfsv3_time *)(tl + 2))->
nfsv3_nsec) & 0xff000000) >> 24) : __swap32md(((struct
nfsv3_time *)(tl + 2))->nfsv3_nsec)); } while (0); if (wccflag
) { ttretf = (((&((struct nfsnode *)(vp)->v_data)->
n_mtime)->tv_sec == (&_mtime)->tv_sec) ? ((&((struct
nfsnode *)(vp)->v_data)->n_mtime)->tv_nsec != (&
_mtime)->tv_nsec) : ((&((struct nfsnode *)(vp)->v_data
)->n_mtime)->tv_sec != (&_mtime)->tv_sec)); } } {
if (info.nmi_mrep != ((void *)0)) { struct vnode *ttvp = ((vp
)); { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info
.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (4)) {
(tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (4); }
else if ((t1 = nfsm_disct(&info.nmi_md, &info.nmi_dpos
, (4), t1, &cp2)) != 0) { error = t1; m_freem(info.nmi_mrep
); goto nfsmout; } else { (tl) = (u_int32_t *)cp2; } }; if ((
(ttattrf) = ((int)(__uint32_t)(__builtin_constant_p((int32_t)
(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl)) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*tl)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl)) & 0xff000000) >> 24
) : __swap32md((int32_t)(*tl))))) != 0) { if ((t1 = nfs_loadattrcache
(&ttvp, &info.nmi_md, &info.nmi_dpos, ((void *)0)
)) != 0) { error = t1; (ttattrf) = 0; m_freem(info.nmi_mrep);
goto nfsmout; } ((vp)) = ttvp; } } }; if (wccflag) { (wccflag
) = ttretf; } else { (wccflag) = ttattrf; } } } while (0)
;
1234 }
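Editor's note: nfsm_wcc_data() is the macro named in the bug summary, and its expansion recurs below in nfs_mknodrpc(), nfs_create() and nfs_removerpc(). Roughly, and again as a hand-simplified paraphrase with the nfsmout error paths elided and the mtime comparison written as timespeccmp() for brevity, it reduces to:

	/* Simplified sketch of nfsm_wcc_data(vp, wccflag). */
	do {
		if (info.nmi_mrep != NULL) {
			struct timespec _mtime;
			int ttattrf, ttretf = 0;	/* ttattrf has no initializer */

			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
			if (*tl == nfs_true) {
				/* pre-op attributes: size, mtime, ctime */
				nfsm_dissect(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
				fxdr_nfsv3time(tl + 2, &_mtime);
				if (wccflag)
					ttretf = timespeccmp(&VTONFS(vp)->n_mtime,
					    &_mtime, !=);
			}
			nfsm_postop_attr(vp, ttattrf);
			wccflag = wccflag ? ttretf : ttattrf;
		}
	} while (0);

The only assignment here that can leave its target undefined is the last one: ttretf always has a value, while ttattrf only gets one if the nested nfsm_postop_attr() body runs.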
1235
1236 if (error) {
1237 m_freem(info.nmi_mrep);
1238 goto nfsmout;
1239 }
1240
1241 if (info.nmi_v3) {
1242 wccflag = NFSV3_WCCCHK1;
1243 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED{ t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info.nmi_md
->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (2 * 4 + 8)) {
(tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (2 * 4
+ 8); } else if ((t1 = nfsm_disct(&info.nmi_md, &info
.nmi_dpos, (2 * 4 + 8), t1, &cp2)) != 0) { error = t1; m_freem
(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t *)cp2
; } }
1244 + NFSX_V3WRITEVERF){ t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info.nmi_md
->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (2 * 4 + 8)) {
(tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (2 * 4
+ 8); } else if ((t1 = nfsm_disct(&info.nmi_md, &info
.nmi_dpos, (2 * 4 + 8), t1, &cp2)) != 0) { error = t1; m_freem
(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t *)cp2
; } }
;
1245 rlen = fxdr_unsigned(int, *tl++)((int)(__uint32_t)(__builtin_constant_p((int32_t)(*tl++)) ? (
__uint32_t)(((__uint32_t)((int32_t)(*tl++)) & 0xff) <<
24 | ((__uint32_t)((int32_t)(*tl++)) & 0xff00) << 8
| ((__uint32_t)((int32_t)(*tl++)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl++)) & 0xff000000) >>
24) : __swap32md((int32_t)(*tl++))))
;
1246 if (rlen <= 0) {
1247 error = NFSERR_IO5;
1248 break;
1249 } else if (rlen < len) {
1250 backup = len - rlen;
1251 uiop->uio_iov->iov_base =
1252 (char *)uiop->uio_iov->iov_base -
1253 backup;
1254 uiop->uio_iov->iov_len += backup;
1255 uiop->uio_offset -= backup;
1256 uiop->uio_resid += backup;
1257 len = rlen;
1258 }
1259 commit = fxdr_unsigned(int, *tl++)((int)(__uint32_t)(__builtin_constant_p((int32_t)(*tl++)) ? (
__uint32_t)(((__uint32_t)((int32_t)(*tl++)) & 0xff) <<
24 | ((__uint32_t)((int32_t)(*tl++)) & 0xff00) << 8
| ((__uint32_t)((int32_t)(*tl++)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl++)) & 0xff000000) >>
24) : __swap32md((int32_t)(*tl++))))
;
1260
1261 /*
1262 * Return the lowest commitment level
1263 * obtained by any of the RPCs.
1264 */
1265 if (committed == NFSV3WRITE_FILESYNC2)
1266 committed = commit;
1267 else if (committed == NFSV3WRITE_DATASYNC1 &&
1268 commit == NFSV3WRITE_UNSTABLE0)
1269 committed = commit;
1270 if ((nmp->nm_flag & NFSMNT_HASWRITEVERF0x00040000) == 0) {
1271 bcopy(tl, nmp->nm_verf,
1272 NFSX_V3WRITEVERF8);
1273 nmp->nm_flag |= NFSMNT_HASWRITEVERF0x00040000;
1274 } else if (bcmp(tl,
1275 nmp->nm_verf, NFSX_V3WRITEVERF8)) {
1276 *must_commit = 1;
1277 bcopy(tl, nmp->nm_verf,
1278 NFSX_V3WRITEVERF8);
1279 }
1280 } else {
1281 nfsm_loadattr(vp, NULL){ struct vnode *ttvp = (vp); if ((t1 = nfs_loadattrcache(&
ttvp, &info.nmi_md, &info.nmi_dpos, (((void *)0)))) !=
0) { error = t1; m_freem(info.nmi_mrep); goto nfsmout; } (vp
) = ttvp; }
;
1282 }
1283 if (wccflag)
1284 VTONFS(vp)((struct nfsnode *)(vp)->v_data)->n_mtime = VTONFS(vp)((struct nfsnode *)(vp)->v_data)->n_vattr.va_mtime;
1285 m_freem(info.nmi_mrep);
1286 tsiz -= len;
1287 }
1288nfsmout:
1289 *iomode = committed;
1290 if (error)
1291 uiop->uio_resid = tsiz;
1292 return (error);
1293}
1294
1295/*
1296 * nfs mknod rpc
1297 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1298 * mode set to specify the file type and the size field for rdev.
1299 */
1300int
1301nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
1302 struct vattr *vap)
1303{
1304 struct nfsv2_sattr *sp;
1305 struct nfsm_info info;
1306 u_int32_t *tl;
1307 int32_t t1;
1308 struct vnode *newvp = NULL((void *)0);
1309 struct nfsnode *np = NULL((void *)0);
1310 char *cp2;
1311 int error = 0, wccflag = NFSV3_WCCRATTR0, gotvp = 0;
1312 u_int32_t rdev;
1313
1314 info.nmi_v3 = NFS_ISV3(dvp)(((struct nfsmount *)(((dvp)->v_mount)->mnt_data))->
nm_flag & 0x00000200)
;
1315
1316 if (vap->va_type == VCHR || vap->va_type == VBLK)
1. Assuming field 'va_type' is not equal to VCHR
2. Assuming field 'va_type' is not equal to VBLK
1317 rdev = txdr_unsigned(vap->va_rdev)((__uint32_t)(__builtin_constant_p((int32_t)(vap->va_rdev)
) ? (__uint32_t)(((__uint32_t)((int32_t)(vap->va_rdev)) &
0xff) << 24 | ((__uint32_t)((int32_t)(vap->va_rdev)
) & 0xff00) << 8 | ((__uint32_t)((int32_t)(vap->
va_rdev)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t
)(vap->va_rdev)) & 0xff000000) >> 24) : __swap32md
((int32_t)(vap->va_rdev))))
;
1318 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
3. Assuming field 'va_type' is equal to VFIFO
1319 rdev = nfs_xdrneg1;
1320 else {
1321 VOP_ABORTOP(dvp, cnp);
1322 return (EOPNOTSUPP45);
1323 }
1324 nfsstats.rpccnt[NFSPROC_MKNOD11]++;
1325 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32) +
4. Assuming field 'nmi_v3' is 0
5. '?' condition is false
1326 4 * NFSX_UNSIGNED4 + nfsm_rndup(cnp->cn_namelen)(((cnp->cn_namelen)+3)&(~0x3)) +
1327 NFSX_SATTR(info.nmi_v3)((info.nmi_v3) ? 60 : 32));
6. '?' condition is false
1328 nfsm_fhtom(&info, dvp, info.nmi_v3);
1329 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN)if ((cnp->cn_namelen) > (255)) { m_freem(info.nmi_mreq)
; error = 63; goto nfsmout; } nfsm_strtombuf(&info.nmi_mb
, (cnp->cn_nameptr), (cnp->cn_namelen))
;
7. Assuming field 'cn_namelen' is <= 255
8. Taking false branch
1330
1331 if (info.nmi_v3) {
9. Assuming field 'nmi_v3' is not equal to 0
10. Taking true branch
1332 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED4);
1333 *tl++ = vtonfsv3_type(vap->va_type)((__uint32_t)(__builtin_constant_p((int32_t)(nfsv3_type[((int32_t
)(vap->va_type))])) ? (__uint32_t)(((__uint32_t)((int32_t)
(nfsv3_type[((int32_t)(vap->va_type))])) & 0xff) <<
24 | ((__uint32_t)((int32_t)(nfsv3_type[((int32_t)(vap->va_type
))])) & 0xff00) << 8 | ((__uint32_t)((int32_t)(nfsv3_type
[((int32_t)(vap->va_type))])) & 0xff0000) >> 8 |
((__uint32_t)((int32_t)(nfsv3_type[((int32_t)(vap->va_type
))])) & 0xff000000) >> 24) : __swap32md((int32_t)(nfsv3_type
[((int32_t)(vap->va_type))]))))
;
11. '?' condition is false
1334 nfsm_v3attrbuild(&info.nmi_mb, vap, 0);
1335 if (vap->va_type == VCHR || vap->va_type == VBLK) {
12. Assuming field 'va_type' is not equal to VCHR
13. Assuming field 'va_type' is not equal to VBLK
1336 tl = nfsm_build(&info.nmi_mb, 2 * NFSX_UNSIGNED4);
1337 *tl++ = txdr_unsigned(major(vap->va_rdev))((__uint32_t)(__builtin_constant_p((int32_t)((((unsigned)(vap
->va_rdev) >> 8) & 0xff))) ? (__uint32_t)(((__uint32_t
)((int32_t)((((unsigned)(vap->va_rdev) >> 8) & 0xff
))) & 0xff) << 24 | ((__uint32_t)((int32_t)((((unsigned
)(vap->va_rdev) >> 8) & 0xff))) & 0xff00) <<
8 | ((__uint32_t)((int32_t)((((unsigned)(vap->va_rdev) >>
8) & 0xff))) & 0xff0000) >> 8 | ((__uint32_t)(
(int32_t)((((unsigned)(vap->va_rdev) >> 8) & 0xff
))) & 0xff000000) >> 24) : __swap32md((int32_t)((((
unsigned)(vap->va_rdev) >> 8) & 0xff)))))
;
1338 *tl = txdr_unsigned(minor(vap->va_rdev))((__uint32_t)(__builtin_constant_p((int32_t)(((unsigned)((vap
->va_rdev) & 0xff) | (((vap->va_rdev) & 0xffff0000
) >> 8)))) ? (__uint32_t)(((__uint32_t)((int32_t)(((unsigned
)((vap->va_rdev) & 0xff) | (((vap->va_rdev) & 0xffff0000
) >> 8)))) & 0xff) << 24 | ((__uint32_t)((int32_t
)(((unsigned)((vap->va_rdev) & 0xff) | (((vap->va_rdev
) & 0xffff0000) >> 8)))) & 0xff00) << 8 |
((__uint32_t)((int32_t)(((unsigned)((vap->va_rdev) & 0xff
) | (((vap->va_rdev) & 0xffff0000) >> 8)))) &
0xff0000) >> 8 | ((__uint32_t)((int32_t)(((unsigned)((
vap->va_rdev) & 0xff) | (((vap->va_rdev) & 0xffff0000
) >> 8)))) & 0xff000000) >> 24) : __swap32md(
(int32_t)(((unsigned)((vap->va_rdev) & 0xff) | (((vap->
va_rdev) & 0xffff0000) >> 8))))))
;
1339 }
1340 } else {
1341 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR32);
1342 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode)((__uint32_t)(__builtin_constant_p((int32_t)(((vap->va_type
) == VFIFO) ? (int)((vttoif_tab[(int)(VCHR)]) | ((vap->va_mode
))) : (int)((vttoif_tab[(int)((vap->va_type))]) | ((vap->
va_mode))))) ? (__uint32_t)(((__uint32_t)((int32_t)(((vap->
va_type) == VFIFO) ? (int)((vttoif_tab[(int)(VCHR)]) | ((vap->
va_mode))) : (int)((vttoif_tab[(int)((vap->va_type))]) | (
(vap->va_mode))))) & 0xff) << 24 | ((__uint32_t)
((int32_t)(((vap->va_type) == VFIFO) ? (int)((vttoif_tab[(
int)(VCHR)]) | ((vap->va_mode))) : (int)((vttoif_tab[(int)
((vap->va_type))]) | ((vap->va_mode))))) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(((vap->va_type) == VFIFO) ? (
int)((vttoif_tab[(int)(VCHR)]) | ((vap->va_mode))) : (int)
((vttoif_tab[(int)((vap->va_type))]) | ((vap->va_mode))
))) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(((vap
->va_type) == VFIFO) ? (int)((vttoif_tab[(int)(VCHR)]) | (
(vap->va_mode))) : (int)((vttoif_tab[(int)((vap->va_type
))]) | ((vap->va_mode))))) & 0xff000000) >> 24) :
__swap32md((int32_t)(((vap->va_type) == VFIFO) ? (int)((vttoif_tab
[(int)(VCHR)]) | ((vap->va_mode))) : (int)((vttoif_tab[(int
)((vap->va_type))]) | ((vap->va_mode)))))))
;
1343 sp->sa_uid = nfs_xdrneg1;
1344 sp->sa_gid = nfs_xdrneg1;
1345 sp->sa_size = rdev;
1346 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1347 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1348 }
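Editor's note: as the expanded vtonfsv2_mode() above shows, the v2 kludge described in the function comment means a VFIFO, the type assumed on this path (step 3), goes out with the IFMT bits of a character device in sa_mode and nfs_xdrneg1 (0xffffffff) in sa_size, while a real VCHR or VBLK node carries the XDR-encoded va_rdev in sa_size, so the server can recover both the file type and the device number from those two fields.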
1349
1350 KASSERT(cnp->cn_proc == curproc)((cnp->cn_proc == ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_curproc) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/nfs/nfs_vnops.c", 1350, "cnp->cn_proc == curproc"
))
;
14. Taking false branch
15. Assuming field 'cn_proc' is equal to field 'ci_curproc'
16. '?' condition is true
1351 info.nmi_procp = cnp->cn_proc;
1352 info.nmi_cred = cnp->cn_cred;
1353 error = nfs_request(dvp, NFSPROC_MKNOD11, &info);
1354 if (!error) {
17. Assuming 'error' is not equal to 0
18. Taking false branch
1355 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp){ struct nfsnode *ttnp; nfsfh_t *ttfhp; int ttfhsize; if (info
.nmi_v3) { { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)
) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >=
(4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos +=
(4); } else if ((t1 = nfsm_disct(&info.nmi_md, &info
.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1; m_freem(info
.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t *)cp2; }
}; (gotvp) = ((int)(__uint32_t)(__builtin_constant_p((int32_t
)(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl)) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*tl)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl)) & 0xff000000) >> 24
) : __swap32md((int32_t)(*tl)))); } else (gotvp) = 1; if (gotvp
) { { if ((info.nmi_v3)) { { t1 = ((caddr_t)((info.nmi_md)->
m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info
.nmi_dpos += (4); } else if ((t1 = nfsm_disct(&info.nmi_md
, &info.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1;
m_freem(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t
*)cp2; } }; if (((ttfhsize) = ((int)(__uint32_t)(__builtin_constant_p
((int32_t)(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl))
& 0xff) << 24 | ((__uint32_t)((int32_t)(*tl)) &
0xff00) << 8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000
) >> 8 | ((__uint32_t)((int32_t)(*tl)) & 0xff000000
) >> 24) : __swap32md((int32_t)(*tl))))) <= 0 || (ttfhsize
) > 64) { m_freem(info.nmi_mrep); error = 72; goto nfsmout
; } } else (ttfhsize) = 32; { t1 = ((caddr_t)((info.nmi_md)->
m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= ((((ttfhsize)+3)&(~0x3)))) { ((ttfhp)) = (
nfsfh_t *)(info.nmi_dpos); info.nmi_dpos += ((((ttfhsize)+3)&
(~0x3))); } else if ((t1 = nfsm_disct(&info.nmi_md, &
info.nmi_dpos, ((((ttfhsize)+3)&(~0x3))), t1, &cp2)) !=
0) { error = t1; m_freem(info.nmi_mrep); goto nfsmout; } else
{ ((ttfhp)) = (nfsfh_t *)cp2; } }; }; if ((t1 = nfs_nget((dvp
)->v_mount, ttfhp, ttfhsize, &ttnp)) != 0) { error = t1
; m_freem(info.nmi_mrep); goto nfsmout; } (newvp) = ((ttnp)->
n_vnode); } if (info.nmi_v3) { { t1 = ((caddr_t)((info.nmi_md
)->m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info
.nmi_dpos += (4); } else if ((t1 = nfsm_disct(&info.nmi_md
, &info.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1;
m_freem(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t
*)cp2; } }; if (gotvp) (gotvp) = ((int)(__uint32_t)(__builtin_constant_p
((int32_t)(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl))
& 0xff) << 24 | ((__uint32_t)((int32_t)(*tl)) &
0xff00) << 8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000
) >> 8 | ((__uint32_t)((int32_t)(*tl)) & 0xff000000
) >> 24) : __swap32md((int32_t)(*tl)))); else if (((int
)(__uint32_t)(__builtin_constant_p((int32_t)(*tl)) ? (__uint32_t
)(((__uint32_t)((int32_t)(*tl)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(*tl)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(*tl)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(
*tl)) & 0xff000000) >> 24) : __swap32md((int32_t)(*
tl))))) { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) +
info.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (
84)) { info.nmi_dpos += (84); } else if ((t1 = nfs_adv(&info
.nmi_md, &info.nmi_dpos, (84), t1)) != 0) { error = t1; m_freem
(info.nmi_mrep); goto nfsmout; } }; } if (gotvp) { struct vnode
*ttvp = ((newvp)); if ((t1 = nfs_loadattrcache(&ttvp, &
info.nmi_md, &info.nmi_dpos, (((void *)0)))) != 0) { error
= t1; m_freem(info.nmi_mrep); goto nfsmout; } ((newvp)) = ttvp
; }; }
;
1356 if (!gotvp) {
1357 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1358 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
1359 if (!error)
1360 newvp = NFSTOV(np)((np)->n_vnode);
1361 }
1362 }
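Editor's note: the nfsm_mtofh() expansion above (and again in nfs_create() below) is the reply-to-vnode step. A rough sketch follows; the helper names used for the handle and skip steps (nfsm_getfh(), nfsm_adv()) are assumptions made for readability, the byte counts come from the expansion, and error paths are elided.

	/* Rough shape of nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp). */
	if (info.nmi_v3) {
		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
		gotvp = fxdr_unsigned(int, *tl);	/* file handle follows? */
	} else
		gotvp = 1;
	if (gotvp) {
		nfsm_getfh(ttfhp, ttfhsize, info.nmi_v3);
		t1 = nfs_nget(dvp->v_mount, ttfhp, ttfhsize, &ttnp);
		newvp = NFSTOV(ttnp);
	}
	if (info.nmi_v3) {
		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);	/* attrs follow? */
		if (gotvp)
			gotvp = fxdr_unsigned(int, *tl);
		else if (fxdr_unsigned(int, *tl))
			nfsm_adv(NFSX_V3FATTR);		/* skip the 84-byte block */
	}
	if (gotvp)
		nfsm_loadattr(newvp, NULL);

gotvp ends up 0 when the server returned no usable handle or attributes, which is why the !gotvp fallback to nfs_lookitup() exists.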
1363 if (info.nmi_v3)
19. Assuming field 'nmi_v3' is not equal to 0
20. Taking true branch
1364 nfsm_wcc_data(dvp, wccflag)do { if (info.nmi_mrep != ((void *)0)) { struct timespec _mtime
; int ttattrf, ttretf = 0; { t1 = ((caddr_t)((info.nmi_md)->
m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info
.nmi_dpos += (4); } else if ((t1 = nfsm_disct(&info.nmi_md
, &info.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1;
m_freem(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t
*)cp2; } }; if (*tl == nfs_true) { { t1 = ((caddr_t)((info.nmi_md
)->m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (6 * 4)) { (tl) = (u_int32_t *)(info.nmi_dpos)
; info.nmi_dpos += (6 * 4); } else if ((t1 = nfsm_disct(&
info.nmi_md, &info.nmi_dpos, (6 * 4), t1, &cp2)) != 0
) { error = t1; m_freem(info.nmi_mrep); goto nfsmout; } else {
(tl) = (u_int32_t *)cp2; } }; do { (&_mtime)->tv_sec =
(__uint32_t)(__builtin_constant_p(((struct nfsv3_time *)(tl +
2))->nfsv3_sec) ? (__uint32_t)(((__uint32_t)(((struct nfsv3_time
*)(tl + 2))->nfsv3_sec) & 0xff) << 24 | ((__uint32_t
)(((struct nfsv3_time *)(tl + 2))->nfsv3_sec) & 0xff00
) << 8 | ((__uint32_t)(((struct nfsv3_time *)(tl + 2))->
nfsv3_sec) & 0xff0000) >> 8 | ((__uint32_t)(((struct
nfsv3_time *)(tl + 2))->nfsv3_sec) & 0xff000000) >>
24) : __swap32md(((struct nfsv3_time *)(tl + 2))->nfsv3_sec
)); (&_mtime)->tv_nsec = (__uint32_t)(__builtin_constant_p
(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec) ? (__uint32_t
)(((__uint32_t)(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec
) & 0xff) << 24 | ((__uint32_t)(((struct nfsv3_time
*)(tl + 2))->nfsv3_nsec) & 0xff00) << 8 | ((__uint32_t
)(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec) & 0xff0000
) >> 8 | ((__uint32_t)(((struct nfsv3_time *)(tl + 2))->
nfsv3_nsec) & 0xff000000) >> 24) : __swap32md(((struct
nfsv3_time *)(tl + 2))->nfsv3_nsec)); } while (0); if (wccflag
) { ttretf = (((&((struct nfsnode *)(dvp)->v_data)->
n_mtime)->tv_sec == (&_mtime)->tv_sec) ? ((&((struct
nfsnode *)(dvp)->v_data)->n_mtime)->tv_nsec != (&
_mtime)->tv_nsec) : ((&((struct nfsnode *)(dvp)->v_data
)->n_mtime)->tv_sec != (&_mtime)->tv_sec)); } } {
if (info.nmi_mrep != ((void *)0)) { struct vnode *ttvp = ((dvp
)); { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info
.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (4)) {
(tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (4); }
else if ((t1 = nfsm_disct(&info.nmi_md, &info.nmi_dpos
, (4), t1, &cp2)) != 0) { error = t1; m_freem(info.nmi_mrep
); goto nfsmout; } else { (tl) = (u_int32_t *)cp2; } }; if ((
(ttattrf) = ((int)(__uint32_t)(__builtin_constant_p((int32_t)
(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl)) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*tl)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl)) & 0xff000000) >> 24
) : __swap32md((int32_t)(*tl))))) != 0) { if ((t1 = nfs_loadattrcache
(&ttvp, &info.nmi_md, &info.nmi_dpos, ((void *)0)
)) != 0) { error = t1; (ttattrf) = 0; m_freem(info.nmi_mrep);
goto nfsmout; } ((dvp)) = ttvp; } } }; if (wccflag) { (wccflag
) = ttretf; } else { (wccflag) = ttattrf; } } } while (0)
;
21. Assuming field 'nmi_mrep' is not equal to null
22. Taking true branch
23. 'ttattrf' declared without an initial value
24. Assuming 't1' is < 4
25. Taking false branch
26. Assuming the condition is false
27. Taking false branch
28. Assuming the condition is false
29. Taking false branch
30. Assuming field 'nmi_mrep' is equal to null
31. Taking false branch
32. Taking false branch
33. Assigned value is garbage or undefined
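Editor's note: this is the path behind the "Assigned value is garbage or undefined" report at source line 1364. nfs_request() is assumed to fail (step 17), so the reply-handling block above is skipped, yet nfsm_wcc_data() is still expanded. Inside it, ttattrf is declared without a value (step 23), the pre-op attribute branch is not taken, and the analyzer then lets the second info.nmi_mrep test, the one guarding the nested nfsm_postop_attr() body, come out NULL (step 30) even though the first test saw it non-NULL (step 21); ttattrf is therefore never written and the trailing "(wccflag) = ttattrf" copies an indeterminate value (step 33). Nothing in between writes info.nmi_mrep, so unless nfs_request() can leave nmi_mrep in an indeterminate state on error this looks like a false positive, but it is cheap to silence either way. A minimal, hypothetical tweak, assuming the macro is the nfsm_wcc_data() definition in sys/nfs/nfsm_subs.h, is to give ttattrf a defined starting value the same way ttretf already gets one:

	/* Hypothetical one-line change inside the nfsm_wcc_data() definition. */
	int ttattrf = 0, ttretf = 0;	/* was: int ttattrf, ttretf = 0; */

An alternative would be to test error before parsing the wcc data at this call site, but nfs_writerpc() above shows the existing convention is to parse it even when the RPC returned an NFS-level error that still carries a reply, so the initializer is the less intrusive option.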
1365 m_freem(info.nmi_mrep);
1366
1367nfsmout:
1368 if (error) {
1369 if (newvp)
1370 vput(newvp);
1371 } else {
1372 if (cnp->cn_flags & MAKEENTRY0x004000)
1373 nfs_cache_enter(dvp, newvp, cnp);
1374 *vpp = newvp;
1375 }
1376 pool_put(&namei_pool, cnp->cn_pnbuf);
1377 VTONFS(dvp)((struct nfsnode *)(dvp)->v_data)->n_flag |= NMODIFIED0x0004;
1378 if (!wccflag)
1379 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp))((((struct nfsnode *)(dvp)->v_data))->n_attrstamp = 0);
1380 return (error);
1381}
1382
1383/*
1384 * nfs mknod vop
1385 * just call nfs_mknodrpc() to do the work.
1386 */
1387int
1388nfs_mknod(void *v)
1389{
1390 struct vop_mknod_args *ap = v;
1391 struct vnode *newvp;
1392 int error;
1393
1394 error = nfs_mknodrpc(ap->a_dvp, &newvp, ap->a_cnp, ap->a_vap);
1395 if (!error)
1396 vput(newvp);
1397
1398 VN_KNOTE(ap->a_dvp, NOTE_WRITE)knote_locked(&ap->a_dvp->v_klist, (0x0002));
1399
1400 return (error);
1401}
1402
1403int
1404nfs_create(void *v)
1405{
1406 struct vop_create_args *ap = v;
1407 struct vnode *dvp = ap->a_dvp;
1408 struct vattr *vap = ap->a_vap;
1409 struct componentname *cnp = ap->a_cnp;
1410 struct nfsv2_sattr *sp;
1411 struct nfsm_info info;
1412 struct timespec ts;
1413 u_int32_t *tl;
1414 int32_t t1;
1415 struct nfsnode *np = NULL((void *)0);
1416 struct vnode *newvp = NULL((void *)0);
1417 caddr_t cp2;
1418 int error = 0, wccflag = NFSV3_WCCRATTR0, gotvp = 0, fmode = 0;
1419
1420 info.nmi_v3 = NFS_ISV3(dvp)(((struct nfsmount *)(((dvp)->v_mount)->mnt_data))->
nm_flag & 0x00000200)
;
1421
1422 /*
1423 * Oops, not for me..
1424 */
1425 if (vap->va_type == VSOCK)
1426 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1427
1428 if (vap->va_vaflags & VA_EXCLUSIVE0x02)
1429 fmode |= O_EXCL0x0800;
1430
1431again:
1432 nfsstats.rpccnt[NFSPROC_CREATE8]++;
1433 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32) +
1434 2 * NFSX_UNSIGNED4 + nfsm_rndup(cnp->cn_namelen)(((cnp->cn_namelen)+3)&(~0x3)) +
1435 NFSX_SATTR(info.nmi_v3)((info.nmi_v3) ? 60 : 32));
1436 nfsm_fhtom(&info, dvp, info.nmi_v3);
1437 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN)if ((cnp->cn_namelen) > (255)) { m_freem(info.nmi_mreq)
; error = 63; goto nfsmout; } nfsm_strtombuf(&info.nmi_mb
, (cnp->cn_nameptr), (cnp->cn_namelen))
;
1438 if (info.nmi_v3) {
1439 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED4);
1440 if (fmode & O_EXCL0x0800) {
1441 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE)((__uint32_t)(__builtin_constant_p((int32_t)(2)) ? (__uint32_t
)(((__uint32_t)((int32_t)(2)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(2)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(2)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(2)
) & 0xff000000) >> 24) : __swap32md((int32_t)(2))))
;
1442 tl = nfsm_build(&info.nmi_mb, NFSX_V3CREATEVERF8);
1443 arc4random_buf(tl, sizeof(*tl) * 2);
1444 } else {
1445 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED)((__uint32_t)(__builtin_constant_p((int32_t)(0)) ? (__uint32_t
)(((__uint32_t)((int32_t)(0)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(0)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(0)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(0)
) & 0xff000000) >> 24) : __swap32md((int32_t)(0))))
;
1446 nfsm_v3attrbuild(&info.nmi_mb, vap, 0);
1447 }
1448 } else {
1449 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR32);
1450 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode)((__uint32_t)(__builtin_constant_p((int32_t)(((vap->va_type
) == VFIFO) ? (int)((vttoif_tab[(int)(VCHR)]) | ((vap->va_mode
))) : (int)((vttoif_tab[(int)((vap->va_type))]) | ((vap->
va_mode))))) ? (__uint32_t)(((__uint32_t)((int32_t)(((vap->
va_type) == VFIFO) ? (int)((vttoif_tab[(int)(VCHR)]) | ((vap->
va_mode))) : (int)((vttoif_tab[(int)((vap->va_type))]) | (
(vap->va_mode))))) & 0xff) << 24 | ((__uint32_t)
((int32_t)(((vap->va_type) == VFIFO) ? (int)((vttoif_tab[(
int)(VCHR)]) | ((vap->va_mode))) : (int)((vttoif_tab[(int)
((vap->va_type))]) | ((vap->va_mode))))) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(((vap->va_type) == VFIFO) ? (
int)((vttoif_tab[(int)(VCHR)]) | ((vap->va_mode))) : (int)
((vttoif_tab[(int)((vap->va_type))]) | ((vap->va_mode))
))) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(((vap
->va_type) == VFIFO) ? (int)((vttoif_tab[(int)(VCHR)]) | (
(vap->va_mode))) : (int)((vttoif_tab[(int)((vap->va_type
))]) | ((vap->va_mode))))) & 0xff000000) >> 24) :
__swap32md((int32_t)(((vap->va_type) == VFIFO) ? (int)((vttoif_tab
[(int)(VCHR)]) | ((vap->va_mode))) : (int)((vttoif_tab[(int
)((vap->va_type))]) | ((vap->va_mode)))))))
;
1451 sp->sa_uid = nfs_xdrneg1;
1452 sp->sa_gid = nfs_xdrneg1;
1453 sp->sa_size = 0;
1454 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1455 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1456 }
1457
1458 KASSERT(cnp->cn_proc == curproc)((cnp->cn_proc == ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_curproc) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/nfs/nfs_vnops.c", 1458, "cnp->cn_proc == curproc"
))
;
1459 info.nmi_procp = cnp->cn_proc;
1460 info.nmi_cred = cnp->cn_cred;
1461 error = nfs_request(dvp, NFSPROC_CREATE8, &info);
1462 if (!error) {
1463 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp){ struct nfsnode *ttnp; nfsfh_t *ttfhp; int ttfhsize; if (info
.nmi_v3) { { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)
) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >=
(4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos +=
(4); } else if ((t1 = nfsm_disct(&info.nmi_md, &info
.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1; m_freem(info
.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t *)cp2; }
}; (gotvp) = ((int)(__uint32_t)(__builtin_constant_p((int32_t
)(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl)) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*tl)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl)) & 0xff000000) >> 24
) : __swap32md((int32_t)(*tl)))); } else (gotvp) = 1; if (gotvp
) { { if ((info.nmi_v3)) { { t1 = ((caddr_t)((info.nmi_md)->
m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info
.nmi_dpos += (4); } else if ((t1 = nfsm_disct(&info.nmi_md
, &info.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1;
m_freem(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t
*)cp2; } }; if (((ttfhsize) = ((int)(__uint32_t)(__builtin_constant_p
((int32_t)(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl))
& 0xff) << 24 | ((__uint32_t)((int32_t)(*tl)) &
0xff00) << 8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000
) >> 8 | ((__uint32_t)((int32_t)(*tl)) & 0xff000000
) >> 24) : __swap32md((int32_t)(*tl))))) <= 0 || (ttfhsize
) > 64) { m_freem(info.nmi_mrep); error = 72; goto nfsmout
; } } else (ttfhsize) = 32; { t1 = ((caddr_t)((info.nmi_md)->
m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= ((((ttfhsize)+3)&(~0x3)))) { ((ttfhp)) = (
nfsfh_t *)(info.nmi_dpos); info.nmi_dpos += ((((ttfhsize)+3)&
(~0x3))); } else if ((t1 = nfsm_disct(&info.nmi_md, &
info.nmi_dpos, ((((ttfhsize)+3)&(~0x3))), t1, &cp2)) !=
0) { error = t1; m_freem(info.nmi_mrep); goto nfsmout; } else
{ ((ttfhp)) = (nfsfh_t *)cp2; } }; }; if ((t1 = nfs_nget((dvp
)->v_mount, ttfhp, ttfhsize, &ttnp)) != 0) { error = t1
; m_freem(info.nmi_mrep); goto nfsmout; } (newvp) = ((ttnp)->
n_vnode); } if (info.nmi_v3) { { t1 = ((caddr_t)((info.nmi_md
)->m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info
.nmi_dpos += (4); } else if ((t1 = nfsm_disct(&info.nmi_md
, &info.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1;
m_freem(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t
*)cp2; } }; if (gotvp) (gotvp) = ((int)(__uint32_t)(__builtin_constant_p
((int32_t)(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl))
& 0xff) << 24 | ((__uint32_t)((int32_t)(*tl)) &
0xff00) << 8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000
) >> 8 | ((__uint32_t)((int32_t)(*tl)) & 0xff000000
) >> 24) : __swap32md((int32_t)(*tl)))); else if (((int
)(__uint32_t)(__builtin_constant_p((int32_t)(*tl)) ? (__uint32_t
)(((__uint32_t)((int32_t)(*tl)) & 0xff) << 24 | ((__uint32_t
)((int32_t)(*tl)) & 0xff00) << 8 | ((__uint32_t)((int32_t
)(*tl)) & 0xff0000) >> 8 | ((__uint32_t)((int32_t)(
*tl)) & 0xff000000) >> 24) : __swap32md((int32_t)(*
tl))))) { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) +
info.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (
84)) { info.nmi_dpos += (84); } else if ((t1 = nfs_adv(&info
.nmi_md, &info.nmi_dpos, (84), t1)) != 0) { error = t1; m_freem
(info.nmi_mrep); goto nfsmout; } }; } if (gotvp) { struct vnode
*ttvp = ((newvp)); if ((t1 = nfs_loadattrcache(&ttvp, &
info.nmi_md, &info.nmi_dpos, (((void *)0)))) != 0) { error
= t1; m_freem(info.nmi_mrep); goto nfsmout; } ((newvp)) = ttvp
; }; }
;
1464 if (!gotvp) {
1465 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1466 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
1467 if (!error)
1468 newvp = NFSTOV(np)((np)->n_vnode);
1469 }
1470 }
1471 if (info.nmi_v3)
1472 nfsm_wcc_data(dvp, wccflag)do { if (info.nmi_mrep != ((void *)0)) { struct timespec _mtime
; int ttattrf, ttretf = 0; { t1 = ((caddr_t)((info.nmi_md)->
m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info
.nmi_dpos += (4); } else if ((t1 = nfsm_disct(&info.nmi_md
, &info.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1;
m_freem(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t
*)cp2; } }; if (*tl == nfs_true) { { t1 = ((caddr_t)((info.nmi_md
)->m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (6 * 4)) { (tl) = (u_int32_t *)(info.nmi_dpos)
; info.nmi_dpos += (6 * 4); } else if ((t1 = nfsm_disct(&
info.nmi_md, &info.nmi_dpos, (6 * 4), t1, &cp2)) != 0
) { error = t1; m_freem(info.nmi_mrep); goto nfsmout; } else {
(tl) = (u_int32_t *)cp2; } }; do { (&_mtime)->tv_sec =
(__uint32_t)(__builtin_constant_p(((struct nfsv3_time *)(tl +
2))->nfsv3_sec) ? (__uint32_t)(((__uint32_t)(((struct nfsv3_time
*)(tl + 2))->nfsv3_sec) & 0xff) << 24 | ((__uint32_t
)(((struct nfsv3_time *)(tl + 2))->nfsv3_sec) & 0xff00
) << 8 | ((__uint32_t)(((struct nfsv3_time *)(tl + 2))->
nfsv3_sec) & 0xff0000) >> 8 | ((__uint32_t)(((struct
nfsv3_time *)(tl + 2))->nfsv3_sec) & 0xff000000) >>
24) : __swap32md(((struct nfsv3_time *)(tl + 2))->nfsv3_sec
)); (&_mtime)->tv_nsec = (__uint32_t)(__builtin_constant_p
(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec) ? (__uint32_t
)(((__uint32_t)(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec
) & 0xff) << 24 | ((__uint32_t)(((struct nfsv3_time
*)(tl + 2))->nfsv3_nsec) & 0xff00) << 8 | ((__uint32_t
)(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec) & 0xff0000
) >> 8 | ((__uint32_t)(((struct nfsv3_time *)(tl + 2))->
nfsv3_nsec) & 0xff000000) >> 24) : __swap32md(((struct
nfsv3_time *)(tl + 2))->nfsv3_nsec)); } while (0); if (wccflag
) { ttretf = (((&((struct nfsnode *)(dvp)->v_data)->
n_mtime)->tv_sec == (&_mtime)->tv_sec) ? ((&((struct
nfsnode *)(dvp)->v_data)->n_mtime)->tv_nsec != (&
_mtime)->tv_nsec) : ((&((struct nfsnode *)(dvp)->v_data
)->n_mtime)->tv_sec != (&_mtime)->tv_sec)); } } {
if (info.nmi_mrep != ((void *)0)) { struct vnode *ttvp = ((dvp
)); { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info
.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (4)) {
(tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (4); }
else if ((t1 = nfsm_disct(&info.nmi_md, &info.nmi_dpos
, (4), t1, &cp2)) != 0) { error = t1; m_freem(info.nmi_mrep
); goto nfsmout; } else { (tl) = (u_int32_t *)cp2; } }; if ((
(ttattrf) = ((int)(__uint32_t)(__builtin_constant_p((int32_t)
(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl)) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*tl)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl)) & 0xff000000) >> 24
) : __swap32md((int32_t)(*tl))))) != 0) { if ((t1 = nfs_loadattrcache
(&ttvp, &info.nmi_md, &info.nmi_dpos, ((void *)0)
)) != 0) { error = t1; (ttattrf) = 0; m_freem(info.nmi_mrep);
goto nfsmout; } ((dvp)) = ttvp; } } }; if (wccflag) { (wccflag
) = ttretf; } else { (wccflag) = ttattrf; } } } while (0)
;
1473 m_freem(info.nmi_mrep);
1474
1475nfsmout:
1476 if (error) {
1477 if (newvp) {
1478 vput(newvp);
1479 newvp = NULL((void *)0);
1480 }
1481 if (info.nmi_v3 && (fmode & O_EXCL0x0800) && error == NFSERR_NOTSUPP10004) {
1482 fmode &= ~O_EXCL0x0800;
1483 goto again;
1484 }
1485 } else if (info.nmi_v3 && (fmode & O_EXCL0x0800)) {
1486 getnanotime(&ts);
1487 if (vap->va_atime.tv_nsec == VNOVAL(-1))
1488 vap->va_atime = ts;
1489 if (vap->va_mtime.tv_nsec == VNOVAL(-1))
1490 vap->va_mtime = ts;
1491 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_proc);
1492 }
1493 if (!error) {
1494 if (cnp->cn_flags & MAKEENTRY0x004000)
1495 nfs_cache_enter(dvp, newvp, cnp);
1496 *ap->a_vpp = newvp;
1497 }
1498 pool_put(&namei_pool, cnp->cn_pnbuf);
1499 VTONFS(dvp)((struct nfsnode *)(dvp)->v_data)->n_flag |= NMODIFIED0x0004;
1500 if (!wccflag)
1501 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp))((((struct nfsnode *)(dvp)->v_data))->n_attrstamp = 0);
1502 VN_KNOTE(ap->a_dvp, NOTE_WRITE)knote_locked(&ap->a_dvp->v_klist, (0x0002));
1503 return (error);
1504}
1505
1506/*
1507 * nfs file remove call
1508 * To try and make nfs semantics closer to ufs semantics, a file that has
1509 * other processes using the vnode is renamed instead of removed and then
1510 * removed later on the last close.
1511 * - If v_usecount > 1
1512 * If a rename is not already in the works
1513 * call nfs_sillyrename() to set it up
1514 * else
1515 * do the remove rpc
1516 */
1517int
1518nfs_remove(void *v)
1519{
1520 struct vop_remove_args *ap = v;
1521 struct vnode *vp = ap->a_vp;
1522 struct vnode *dvp = ap->a_dvp;
1523 struct componentname *cnp = ap->a_cnp;
1524 struct nfsnode *np = VTONFS(vp)((struct nfsnode *)(vp)->v_data);
1525 int error = 0;
1526 struct vattr vattr;
1527
1528#ifdef DIAGNOSTIC1
1529 if ((cnp->cn_flags & HASBUF0x000400) == 0)
1530 panic("nfs_remove: no name");
1531 if (vp->v_usecount < 1)
1532 panic("nfs_remove: bad v_usecount");
1533#endif
1534 if (vp->v_type == VDIR)
1535 error = EPERM1;
1536 else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1537 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 &&
1538 vattr.va_nlink > 1)) {
1539 /*
1540 * Purge the name cache so that the chance of a lookup for
1541 * the name succeeding while the remove is in progress is
1542 * minimized. Without node locking it can still happen, such
1543 * that an I/O op returns ESTALE, but since you get this if
1544 * another host removes the file..
1545 */
1546 cache_purge(vp);
1547 /*
1548 * throw away biocache buffers, mainly to avoid
1549 * unnecessary delayed writes later.
1550 */
1551 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_proc);
1552 /* Do the rpc */
1553 if (error != EINTR4)
1554 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1555 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc);
1556 /*
1557 * Kludge City: If the first reply to the remove rpc is lost..
1558 * the reply to the retransmitted request will be ENOENT
1559 * since the file was in fact removed
1560 * Therefore, we cheat and return success.
1561 */
1562 if (error == ENOENT2)
1563 error = 0;
1564 } else if (!np->n_sillyrename)
1565 error = nfs_sillyrename(dvp, vp, cnp);
1566 pool_put(&namei_pool, cnp->cn_pnbuf);
1567 NFS_INVALIDATE_ATTRCACHE(np)((np)->n_attrstamp = 0);
1568 VN_KNOTE(vp, NOTE_DELETE)knote_locked(&vp->v_klist, (0x0001));
1569 VN_KNOTE(dvp, NOTE_WRITE)knote_locked(&dvp->v_klist, (0x0002));
1570 if (vp == dvp)
1571 vrele(vp);
1572 else
1573 vput(vp);
1574 vput(dvp);
1575 return (error);
1576}
1577
1578/*
1579 * nfs file remove rpc called from nfs_inactive
1580 */
1581int
1582nfs_removeit(struct sillyrename *sp)
1583{
1584 KASSERT(VOP_ISLOCKED(sp->s_dvp))((VOP_ISLOCKED(sp->s_dvp)) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/nfs/nfs_vnops.c", 1584, "VOP_ISLOCKED(sp->s_dvp)"
))
;
1585 /*
1586 * Make sure that the directory vnode is still valid.
1587 *
1588 * NFS can potentially try to nuke a silly *after* the directory
1589 * has already been pushed out on a forced unmount. Since the silly
1590 * is going to go away anyway, this is fine.
1591 */
1592 if (sp->s_dvp->v_type == VBAD)
1593 return (0);
1594 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1595 NULL((void *)0)));
1596}
1597
1598/*
1599 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1600 */
1601int
1602nfs_removerpc(struct vnode *dvp, char *name, int namelen, struct ucred *cred,
1603 struct proc *proc)
1604{
1605 struct nfsm_info info;
1606 u_int32_t *tl;
1607 int32_t t1;
1608 caddr_t cp2;
1609 int error = 0, wccflag = NFSV3_WCCRATTR0;
1610
1611 info.nmi_v3 = NFS_ISV3(dvp)(((struct nfsmount *)(((dvp)->v_mount)->mnt_data))->
nm_flag & 0x00000200)
;
1612
1613 nfsstats.rpccnt[NFSPROC_REMOVE12]++;
1614 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32) +
1615 NFSX_UNSIGNED4 + nfsm_rndup(namelen)(((namelen)+3)&(~0x3)));
1616 nfsm_fhtom(&info, dvp, info.nmi_v3);
1617 nfsm_strtom(name, namelen, NFS_MAXNAMLEN)if ((namelen) > (255)) { m_freem(info.nmi_mreq); error = 63
; goto nfsmout; } nfsm_strtombuf(&info.nmi_mb, (name), (namelen
))
;
1618
1619 info.nmi_procp = proc;
1620 info.nmi_cred = cred;
1621 error = nfs_request(dvp, NFSPROC_REMOVE12, &info);
1622 if (info.nmi_v3)
1623 nfsm_wcc_data(dvp, wccflag)do { if (info.nmi_mrep != ((void *)0)) { struct timespec _mtime
; int ttattrf, ttretf = 0; { t1 = ((caddr_t)((info.nmi_md)->
m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (4)) { (tl) = (u_int32_t *)(info.nmi_dpos); info
.nmi_dpos += (4); } else if ((t1 = nfsm_disct(&info.nmi_md
, &info.nmi_dpos, (4), t1, &cp2)) != 0) { error = t1;
m_freem(info.nmi_mrep); goto nfsmout; } else { (tl) = (u_int32_t
*)cp2; } }; if (*tl == nfs_true) { { t1 = ((caddr_t)((info.nmi_md
)->m_hdr.mh_data)) + info.nmi_md->m_hdr.mh_len - info.nmi_dpos
; if (t1 >= (6 * 4)) { (tl) = (u_int32_t *)(info.nmi_dpos)
; info.nmi_dpos += (6 * 4); } else if ((t1 = nfsm_disct(&
info.nmi_md, &info.nmi_dpos, (6 * 4), t1, &cp2)) != 0
) { error = t1; m_freem(info.nmi_mrep); goto nfsmout; } else {
(tl) = (u_int32_t *)cp2; } }; do { (&_mtime)->tv_sec =
(__uint32_t)(__builtin_constant_p(((struct nfsv3_time *)(tl +
2))->nfsv3_sec) ? (__uint32_t)(((__uint32_t)(((struct nfsv3_time
*)(tl + 2))->nfsv3_sec) & 0xff) << 24 | ((__uint32_t
)(((struct nfsv3_time *)(tl + 2))->nfsv3_sec) & 0xff00
) << 8 | ((__uint32_t)(((struct nfsv3_time *)(tl + 2))->
nfsv3_sec) & 0xff0000) >> 8 | ((__uint32_t)(((struct
nfsv3_time *)(tl + 2))->nfsv3_sec) & 0xff000000) >>
24) : __swap32md(((struct nfsv3_time *)(tl + 2))->nfsv3_sec
)); (&_mtime)->tv_nsec = (__uint32_t)(__builtin_constant_p
(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec) ? (__uint32_t
)(((__uint32_t)(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec
) & 0xff) << 24 | ((__uint32_t)(((struct nfsv3_time
*)(tl + 2))->nfsv3_nsec) & 0xff00) << 8 | ((__uint32_t
)(((struct nfsv3_time *)(tl + 2))->nfsv3_nsec) & 0xff0000
) >> 8 | ((__uint32_t)(((struct nfsv3_time *)(tl + 2))->
nfsv3_nsec) & 0xff000000) >> 24) : __swap32md(((struct
nfsv3_time *)(tl + 2))->nfsv3_nsec)); } while (0); if (wccflag
) { ttretf = (((&((struct nfsnode *)(dvp)->v_data)->
n_mtime)->tv_sec == (&_mtime)->tv_sec) ? ((&((struct
nfsnode *)(dvp)->v_data)->n_mtime)->tv_nsec != (&
_mtime)->tv_nsec) : ((&((struct nfsnode *)(dvp)->v_data
)->n_mtime)->tv_sec != (&_mtime)->tv_sec)); } } {
if (info.nmi_mrep != ((void *)0)) { struct vnode *ttvp = ((dvp
)); { t1 = ((caddr_t)((info.nmi_md)->m_hdr.mh_data)) + info
.nmi_md->m_hdr.mh_len - info.nmi_dpos; if (t1 >= (4)) {
(tl) = (u_int32_t *)(info.nmi_dpos); info.nmi_dpos += (4); }
else if ((t1 = nfsm_disct(&info.nmi_md, &info.nmi_dpos
, (4), t1, &cp2)) != 0) { error = t1; m_freem(info.nmi_mrep
); goto nfsmout; } else { (tl) = (u_int32_t *)cp2; } }; if ((
(ttattrf) = ((int)(__uint32_t)(__builtin_constant_p((int32_t)
(*tl)) ? (__uint32_t)(((__uint32_t)((int32_t)(*tl)) & 0xff
) << 24 | ((__uint32_t)((int32_t)(*tl)) & 0xff00) <<
8 | ((__uint32_t)((int32_t)(*tl)) & 0xff0000) >> 8
| ((__uint32_t)((int32_t)(*tl)) & 0xff000000) >> 24
) : __swap32md((int32_t)(*tl))))) != 0) { if ((t1 = nfs_loadattrcache
(&ttvp, &info.nmi_md, &info.nmi_dpos, ((void *)0)
)) != 0) { error = t1; (ttattrf) = 0; m_freem(info.nmi_mrep);
goto nfsmout; } ((dvp)) = ttvp; } } }; if (wccflag) { (wccflag
) = ttretf; } else { (wccflag) = ttattrf; } } } while (0)
;
1624 m_freem(info.nmi_mrep);
1625
1626nfsmout:
1627 VTONFS(dvp)((struct nfsnode *)(dvp)->v_data)->n_flag |= NMODIFIED0x0004;
1628 if (!wccflag)
1629 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp))((((struct nfsnode *)(dvp)->v_data))->n_attrstamp = 0);
1630 return (error);
1631}
1632
1633/*
1634 * nfs file rename call
1635 */
1636int
1637nfs_rename(void *v)
1638{
1639 struct vop_rename_args *ap = v;
1640 struct vnode *fvp = ap->a_fvp;
1641 struct vnode *tvp = ap->a_tvp;
1642 struct vnode *fdvp = ap->a_fdvp;
1643 struct vnode *tdvp = ap->a_tdvp;
1644 struct componentname *tcnp = ap->a_tcnp;
1645 struct componentname *fcnp = ap->a_fcnp;
1646 int error;
1647
1648#ifdef DIAGNOSTIC1
1649 if ((tcnp->cn_flags & HASBUF0x000400) == 0 ||
1650 (fcnp->cn_flags & HASBUF0x000400) == 0)
1651 panic("nfs_rename: no name");
1652#endif
1653 /* Check for cross-device rename */
1654 if ((fvp->v_mount != tdvp->v_mount) ||
1655 (tvp && (fvp->v_mount != tvp->v_mount))) {
1656 error = EXDEV18;
1657 goto out;
1658 }
1659
1660 /*
1661 * If the tvp exists and is in use, sillyrename it before doing the
1662 * rename of the new file over it.
1663 */
1664 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)((struct nfsnode *)(tvp)->v_data)->n_sillyrename &&
1665 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
1666 VN_KNOTE(tvp, NOTE_DELETE)knote_locked(&tvp->v_klist, (0x0001));
1667 vput(tvp);
1668 tvp = NULL((void *)0);
1669 }
1670
1671 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1672 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1673 tcnp->cn_proc);
1674
1675 VN_KNOTE(fdvp, NOTE_WRITE)knote_locked(&fdvp->v_klist, (0x0002));
1676 VN_KNOTE(tdvp, NOTE_WRITE)knote_locked(&tdvp->v_klist, (0x0002));
1677
1678 if (fvp->v_type == VDIR) {
1679 if (tvp != NULL((void *)0) && tvp->v_type == VDIR)
1680 cache_purge(tdvp);
1681 cache_purge(fdvp);
1682 }
1683out:
1684 if (tdvp == tvp)
1685 vrele(tdvp);
1686 else
1687 vput(tdvp);
1688 if (tvp)
1689 vput(tvp);
1690 vrele(fdvp);
1691 vrele(fvp);
1692 /*
1693 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1694 */
1695 if (error == ENOENT2)
1696 error = 0;
1697 return (error);
1698}
1699
1700/*
1701 * nfs file rename rpc called from nfs_remove() above
1702 */
1703int
1704nfs_renameit(struct vnode *sdvp, struct componentname *scnp,
1705 struct sillyrename *sp)
1706{
1707 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
1708 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc
));
1709}
1710
1711/*
1712 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1713 */
1714int
1715nfs_renamerpc(struct vnode *fdvp, char *fnameptr, int fnamelen,
1716 struct vnode *tdvp, char *tnameptr, int tnamelen, struct ucred *cred,
1717 struct proc *proc)
1718{
1719 struct nfsm_info info;
1720 u_int32_t *tl;
1721 int32_t t1;
1722 caddr_t cp2;
1723 int error = 0, fwccflag = NFSV3_WCCRATTR0, twccflag = NFSV3_WCCRATTR0;
1724
1725 info.nmi_v3 = NFS_ISV3(fdvp)(((struct nfsmount *)(((fdvp)->v_mount)->mnt_data))->
nm_flag & 0x00000200)
;
1726
1727 nfsstats.rpccnt[NFSPROC_RENAME14]++;
1728 info.nmi_mb = info.nmi_mreq = nfsm_reqhead((NFSX_FH(info.nmi_v3)((info.nmi_v3) ? (64 + 4) : 32) +
1729 NFSX_UNSIGNED4) * 2 + nfsm_rndup(fnamelen)(((fnamelen)+3)&(~0x3)) + nfsm_rndup(tnamelen)(((tnamelen)+3)&(~0x3)));
1730 nfsm_fhtom(&info, fdvp, info.nmi_v3);
1731 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN)if ((fnamelen) > (255)) { m_freem(info.nmi_mreq); error = 63
; goto nfsmout; } nfsm_strtombuf(&info.nmi_mb, (fnameptr)
, (fnamelen))
;
1732 nfsm_fhtom(&info, tdvp, info.nmi_v3);
1733 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN)if ((tnamelen) > (255)) { m_freem(info.nmi_mreq); error = 63
; goto nfsmout; } nfsm_strtombuf(&info.nmi_mb, (tnameptr)
, (tnamelen))
;
1734
1735 info.nmi_procp = proc;
1736 info.nmi_cred = cred;
1737 error = nfs_request(fdvp, NFSPROC_RENAME14, &info);
1738 if (info.nmi_v3) {
1739 nfsm_wcc_data(fdvp, fwccflag);
1740 nfsm_wcc_data(tdvp, twccflag);
1741 }
1742 m_freem(info.nmi_mrep);
1743
1744nfsmout:
1745 VTONFS(fdvp)->n_flag |= NMODIFIED;
1746 VTONFS(tdvp)->n_flag |= NMODIFIED;
1747 if (!fwccflag)
1748 NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp));
1749 if (!twccflag)
1750 NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp));
1751 return (error);
1752}
1753
1754/*
1755 * nfs hard link create call
1756 */
1757int
1758nfs_link(void *v)
1759{
1760 struct vop_link_args *ap = v;
1761 struct vnode *vp = ap->a_vp;
1762 struct vnode *dvp = ap->a_dvp;
1763 struct componentname *cnp = ap->a_cnp;
1764 struct nfsm_info info;
1765 u_int32_t *tl;
1766 int32_t t1;
1767 caddr_t cp2;
1768 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
1769
1770 info.nmi_v3 = NFS_ISV3(vp);
1771
1772 if (dvp->v_mount != vp->v_mount) {
1773 pool_put(&namei_pool, cnp->cn_pnbuf);
1774 vput(dvp);
1775 return (EXDEV);
1776 }
1777 error = vn_lock(vp, LK_EXCLUSIVE);
1778 if (error != 0) {
1779 VOP_ABORTOP(dvp, cnp);
1780 vput(dvp);
1781 return (error);
1782 }
1783
1784 /*
1785 * Push all writes to the server, so that the attribute cache
1786 * doesn't get "out of sync" with the server.
1787 * XXX There should be a better way!
1788 */
1789 VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_proc);
1790
1791 nfsstats.rpccnt[NFSPROC_LINK]++;
1792 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(2 * NFSX_FH(info.nmi_v3) +
1793 NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1794 nfsm_fhtom(&info, vp, info.nmi_v3);
1795 nfsm_fhtom(&info, dvp, info.nmi_v3);
1796 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1797
1798 info.nmi_procp = cnp->cn_proc;
1799 info.nmi_cred = cnp->cn_cred;
1800 error = nfs_request(vp, NFSPROC_LINK, &info);
1801 if (info.nmi_v3) {
1802 nfsm_postop_attr(vp, attrflag);
1803 nfsm_wcc_data(dvp, wccflag);
1804 }
1805 m_freem(info.nmi_mrep);
1806nfsmout:
1807 pool_put(&namei_pool, cnp->cn_pnbuf);
1808 VTONFS(dvp)->n_flag |= NMODIFIED;
1809 if (!attrflag)
1810 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp));
1811 if (!wccflag)
1812 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1813
1814 VN_KNOTE(vp, NOTE_LINK);
1815 VN_KNOTE(dvp, NOTE_WRITE);
1816 VOP_UNLOCK(vp);
1817 vput(dvp);
1818 return (error);
1819}
1820
1821/*
1822 * nfs symbolic link create call
1823 */
1824int
1825nfs_symlink(void *v)
1826{
1827 struct vop_symlink_args *ap = v;
1828 struct vnode *dvp = ap->a_dvp;
1829 struct vattr *vap = ap->a_vap;
1830 struct componentname *cnp = ap->a_cnp;
1831 struct nfsv2_sattr *sp;
1832 struct nfsm_info info;
1833 u_int32_t *tl;
1834 int32_t t1;
1835 caddr_t cp2;
1836 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
1837 struct vnode *newvp = NULL;
1838
1839 info.nmi_v3 = NFS_ISV3(dvp);
1840
1841 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
1842 slen = strlen(ap->a_target);
1843 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) +
1844 2 * NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) +
1845 NFSX_SATTR(info.nmi_v3));
1846 nfsm_fhtom(&info, dvp, info.nmi_v3);
1847 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1848 if (info.nmi_v3)
1849 nfsm_v3attrbuild(&info.nmi_mb, vap, 0);
1850 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
1851 if (!info.nmi_v3) {
1852 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR);
1853 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
1854 sp->sa_uid = nfs_xdrneg1;
1855 sp->sa_gid = nfs_xdrneg1;
1856 sp->sa_size = nfs_xdrneg1;
1857 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1858 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1859 }
1860
1861 info.nmi_procp = cnp->cn_proc;
1862 info.nmi_cred = cnp->cn_cred;
1863 error = nfs_request(dvp, NFSPROC_SYMLINK, &info);
1864 if (info.nmi_v3) {
1865 if (!error)
1866 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp);
1867 nfsm_wcc_data(dvp, wccflag);
1868 }
1869 m_freem(info.nmi_mrep);
1870
1871nfsmout:
1872 if (newvp)
1873 vput(newvp);
1874 pool_put(&namei_pool, cnp->cn_pnbuf);
1875 VTONFS(dvp)->n_flag |= NMODIFIED;
1876 if (!wccflag)
1877 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1878 VN_KNOTE(dvp, NOTE_WRITE);
1879 vput(dvp);
1880 return (error);
1881}
1882
1883/*
1884 * nfs make dir call
1885 */
1886int
1887nfs_mkdir(void *v)
1888{
1889 struct vop_mkdir_args *ap = v;
1890 struct vnode *dvp = ap->a_dvp;
1891 struct vattr *vap = ap->a_vap;
1892 struct componentname *cnp = ap->a_cnp;
1893 struct nfsv2_sattr *sp;
1894 struct nfsm_info info;
1895 u_int32_t *tl;
1896 int32_t t1;
1897 int len;
1898 struct nfsnode *np = NULL;
1899 struct vnode *newvp = NULL;
1900 caddr_t cp2;
1901 int error = 0, wccflag = NFSV3_WCCRATTR;
1902 int gotvp = 0;
1903
1904 info.nmi_v3 = NFS_ISV3(dvp);
1905
1906 len = cnp->cn_namelen;
1907 nfsstats.rpccnt[NFSPROC_MKDIR]++;
1908 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) +
1909 NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(info.nmi_v3));
1910 nfsm_fhtom(&info, dvp, info.nmi_v3);
1911 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
1912
1913 if (info.nmi_v3) {
1914 nfsm_v3attrbuild(&info.nmi_mb, vap, 0);
1915 } else {
1916 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR);
1917 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
1918 sp->sa_uid = nfs_xdrneg1;
1919 sp->sa_gid = nfs_xdrneg1;
1920 sp->sa_size = nfs_xdrneg1;
1921 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1922 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1923 }
1924
1925 info.nmi_procp = cnp->cn_proc;
1926 info.nmi_cred = cnp->cn_cred;
1927 error = nfs_request(dvp, NFSPROC_MKDIR, &info);
1928 if (!error)
1929 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp);
1930 if (info.nmi_v3)
1931 nfsm_wcc_data(dvp, wccflag);
1932 m_freem(info.nmi_mrep);
1933
1934nfsmout:
1935 VTONFS(dvp)->n_flag |= NMODIFIED;
1936 if (!wccflag)
1937 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1938
1939 if (error == 0 && newvp == NULL) {
1940 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
1941 cnp->cn_proc, &np);
1942 if (!error) {
1943 newvp = NFSTOV(np);
1944 if (newvp->v_type != VDIR)
1945 error = EEXIST;
1946 }
1947 }
1948 if (error) {
1949 if (newvp)
1950 vput(newvp);
1951 } else {
1952 VN_KNOTE(dvp, NOTE_WRITE|NOTE_LINK);
1953 if (cnp->cn_flags & MAKEENTRY)
1954 nfs_cache_enter(dvp, newvp, cnp);
1955 *ap->a_vpp = newvp;
1956 }
1957 pool_put(&namei_pool, cnp->cn_pnbuf);
1958 vput(dvp);
1959 return (error);
1960}
1961
1962/*
1963 * nfs remove directory call
1964 */
1965int
1966nfs_rmdir(void *v)
1967{
1968 struct vop_rmdir_args *ap = v;
1969 struct vnode *vp = ap->a_vp;
1970 struct vnode *dvp = ap->a_dvp;
1971 struct componentname *cnp = ap->a_cnp;
1972 struct nfsm_info info;
1973 u_int32_t *tl;
1974 int32_t t1;
1975 caddr_t cp2;
1976 int error = 0, wccflag = NFSV3_WCCRATTR;
1977
1978 info.nmi_v3 = NFS_ISV3(dvp);
1979
1980 nfsstats.rpccnt[NFSPROC_RMDIR]++;
1981 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) +
1982 NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1983 nfsm_fhtom(&info, dvp, info.nmi_v3);
1984 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1985
1986 info.nmi_procp = cnp->cn_proc;
1987 info.nmi_cred = cnp->cn_cred;
1988 error = nfs_request(dvp, NFSPROC_RMDIR, &info);
1989 if (info.nmi_v3)
1990 nfsm_wcc_data(dvp, wccflag);
1991 m_freem(info.nmi_mrep);
1992
1993nfsmout:
1994 pool_put(&namei_pool, cnp->cn_pnbuf);
1995 VTONFS(dvp)->n_flag |= NMODIFIED;
1996 if (!wccflag)
1997 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1998
1999 VN_KNOTE(dvp, NOTE_WRITE|NOTE_LINK);
2000 VN_KNOTE(vp, NOTE_DELETE);
2001
2002 cache_purge(vp);
2003 vput(vp);
2004 vput(dvp);
2005 /*
2006 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
2007 */
2008 if (error == ENOENT)
2009 error = 0;
2010 return (error);
2011}
2012
2013
2014/*
2015 * The readdir logic below has a big design bug. It stores the NFS cookie in
2016 * the returned uio->uio_offset but does not store the verifier (it cannot).
2017 * Instead, the code stores the verifier in the nfsnode and applies that
2018 * verifier to all cookies, no matter what verifier was originally with
2019 * the cookie.
2020 *
2021 * From a practical standpoint, this is not a problem since almost all
2022 * NFS servers do not change the validity of cookies across deletes
2023 * and inserts.
2024 */
2025
2026struct nfs_dirent {
2027 u_int32_t cookie[2];
2028 struct dirent dirent;
2029};
2030
2031#define NFS_DIRHDSIZ (sizeof (struct nfs_dirent) - (MAXNAMLEN + 1))
2032#define NFS_DIRENT_OVERHEAD offsetof(struct nfs_dirent, dirent)
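/*
 * Editor's note (illustrative sketch, not part of the analyzed source):
 * each entry produced by the readdir RPC code is prefixed with the
 * 64-bit NFS cookie, carried as two 32-bit words in XDR (big-endian)
 * byte order; NFS_DIRENT_OVERHEAD is exactly that prefix, and
 * NFS_DIRHDSIZ is the fixed header without the name buffer.  A plain C
 * equivalent of the fxdr_hyper() recombination applied to these cookies:
 */
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl(); userland stand-in for the kernel byte-swap macros */

static uint64_t
nfs_cookie_to_host(const uint32_t cookie[2])
{
	/* cookie[0] is the most significant word on the wire */
	return ((uint64_t)ntohl(cookie[0]) << 32) | ntohl(cookie[1]);
}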
2033
2034/*
2035 * nfs readdir call
2036 */
2037int
2038nfs_readdir(void *v)
2039{
2040 struct vop_readdir_args *ap = v;
2041 struct vnode *vp = ap->a_vp;
2042 struct nfsnode *np = VTONFS(vp);
2043 struct uio *uio = ap->a_uio;
2044 int tresid, error = 0;
2045 struct vattr vattr;
2046 int cnt;
2047 u_int64_t newoff = uio->uio_offset;
2048 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2049 struct uio readdir_uio;
2050 struct iovec readdir_iovec;
2051 struct proc * p = uio->uio_procp;
2052 int done = 0, eof = 0;
2053 struct ucred *cred = ap->a_cred;
2054 void *data;
2055
2056 if (vp->v_type != VDIR)
2057 return (EPERM);
2058 /*
2059 * First, check for hit on the EOF offset cache
2060 */
2061 if (np->n_direofoffset != 0 &&
2062 uio->uio_offset == np->n_direofoffset) {
2063 if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 &&
2064 timespeccmp(&np->n_mtime, &vattr.va_mtime, ==)) {
2065 nfsstats.direofcache_hits++;
2066 *ap->a_eofflag = 1;
2067 return (0);
2068 }
2069 }
2070
2071 if (uio->uio_resid < NFS_FABLKSIZE)
2072 return (EINVAL);
2073
2074 tresid = uio->uio_resid;
2075
2076 if (uio->uio_rw != UIO_READ)
2077 return (EINVAL);
2078
2079 if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3)
2080 (void)nfs_fsinfo(nmp, vp, cred, p);
2081
2082 cnt = 5;
2083
2084 /* M_ZERO to avoid leaking kernel data in dirent padding */
2085 data = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK|M_ZERO);
2086 do {
2087 struct nfs_dirent *ndp = data;
2088
2089 readdir_iovec.iov_len = NFS_DIRBLKSIZ;
2090 readdir_iovec.iov_base = data;
2091 readdir_uio.uio_offset = newoff;
2092 readdir_uio.uio_iov = &readdir_iovec;
2093 readdir_uio.uio_iovcnt = 1;
2094 readdir_uio.uio_segflg = UIO_SYSSPACE;
2095 readdir_uio.uio_rw = UIO_READ;
2096 readdir_uio.uio_resid = NFS_DIRBLKSIZ;
2097 readdir_uio.uio_procp = curproc;
2098
2099 if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
2100 error = nfs_readdirplusrpc(vp, &readdir_uio, cred,
2101 &eof, p);
2102 if (error == NFSERR_NOTSUPP)
2103 nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
2104 }
2105 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
2106 error = nfs_readdirrpc(vp, &readdir_uio, cred, &eof);
2107
2108 if (error == NFSERR_BAD_COOKIE)
2109 error = EINVAL;
2110
2111 while (error == 0 &&
2112 ndp < (struct nfs_dirent *)readdir_iovec.iov_base) {
2113 struct dirent *dp = &ndp->dirent;
2114 int reclen = dp->d_reclen;
2115
2116 dp->d_reclen -= NFS_DIRENT_OVERHEAD;
2117 dp->d_off = fxdr_hyper(&ndp->cookie[0]);
2118
2119 if (uio->uio_resid < dp->d_reclen) {
2120 eof = 0;
2121 done = 1;
2122 break;
2123 }
2124
2125 if ((error = uiomove(dp, dp->d_reclen, uio)))
2126 break;
2127
2128 newoff = fxdr_hyper(&ndp->cookie[0]);
2129
2130 ndp = (struct nfs_dirent *)((u_int8_t *)ndp + reclen);
2131 }
2132 } while (!error && !done && !eof && cnt--);
2133
2134 free(data, M_TEMP, NFS_DIRBLKSIZ);
2135 data = NULL;
2136
2137 uio->uio_offset = newoff;
2138
2139 if (!error && (eof || uio->uio_resid == tresid)) {
2140 nfsstats.direofcache_misses++;
2141 *ap->a_eofflag = 1;
2142 return (0);
2143 }
2144
2145 *ap->a_eofflag = 0;
2146 return (error);
2147}
2148
2149
2150/*
2151 * The function below stuffs the cookies in after the name
2152 */
2153
2154/*
2155 * Readdir rpc call.
2156 */
2157int
2158nfs_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
2159 int *end_of_directory)
2160{
2161 int len, left;
2162 struct nfs_dirent *ndp = NULL;
2163 struct dirent *dp = NULL;
2164 struct nfsm_info info;
2165 u_int32_t *tl;
2166 caddr_t cp;
2167 int32_t t1;
2168 caddr_t cp2;
2169 nfsuint64 cookie;
2170 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2171 struct nfsnode *dnp = VTONFS(vp);
2172 u_quad_t fileno;
2173 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
2174 int attrflag;
2175
2176 info.nmi_v3 = NFS_ISV3(vp);
2177
2178#ifdef DIAGNOSTIC
2179 if (uiop->uio_iovcnt != 1 ||
2180 (uiop->uio_resid & (NFS_DIRBLKSIZ - 1)))
2181 panic("nfs readdirrpc bad uio");
2182#endif
2183
2184 txdr_hyper(uiop->uio_offset, &cookie.nfsuquad[0]);
2185
2186 /*
2187 * Loop around doing readdir rpc's of size nm_readdirsize
2188 * truncated to a multiple of NFS_READDIRBLKSIZ.
2189 * The stopping criteria is EOF or buffer full.
2190 */
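/*
 * Editor's note (illustrative sketch): NFS_READDIRBLKSIZ is 512 here, a
 * power of two, so "truncated to a multiple of NFS_READDIRBLKSIZ" is a
 * simple mask operation; the truncation itself presumably happens where
 * nm_readdirsize is established, not inside this loop.
 */
static unsigned int
trunc_to_readdirblk(unsigned int size)
{
	/* e.g. 1300 -> 1024, 512 -> 512, 511 -> 0 (assuming a 512-byte block) */
	return size & ~(512u - 1);
}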
2191 while (more_dirs && bigenough) {
2192 nfsstats.rpccnt[NFSPROC_READDIR]++;
2193 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)
2194 + NFSX_READDIR(info.nmi_v3));
2195 nfsm_fhtom(&info, vp, info.nmi_v3);
2196 if (info.nmi_v3) {
2197 tl = nfsm_build(&info.nmi_mb, 5 * NFSX_UNSIGNED);
2198 *tl++ = cookie.nfsuquad[0];
2199 *tl++ = cookie.nfsuquad[1];
2200 if (cookie.nfsuquad[0] == 0 &&
2201 cookie.nfsuquad[1] == 0) {
2202 *tl++ = 0;
2203 *tl++ = 0;
2204 } else {
2205 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2206 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2207 }
2208 } else {
2209 tl = nfsm_build(&info.nmi_mb, 2 * NFSX_UNSIGNED);
2210 *tl++ = cookie.nfsuquad[1];
2211 }
2212 *tl = txdr_unsigned(nmp->nm_readdirsize);
2213
2214 info.nmi_procp = uiop->uio_procp;
2215 info.nmi_cred = cred;
2216 error = nfs_request(vp, NFSPROC_READDIR, &info);
2217 if (info.nmi_v3)
2218 nfsm_postop_attr(vp, attrflag);
2219
2220 if (error) {
2221 m_freem(info.nmi_mrep);
2222 goto nfsmout;
2223 }
2224
2225 if (info.nmi_v3) {
2226 nfsm_dissect(tl, u_int32_t *,
2227 2 * NFSX_UNSIGNED);
2228 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2229 dnp->n_cookieverf.nfsuquad[1] = *tl;
2230 }
2231
2232 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2233 more_dirs = fxdr_unsigned(int, *tl);
2234
2235 /* loop thru the dir entries, doctoring them to dirent form */
2236 while (more_dirs && bigenough) {
2237 if (info.nmi_v3) {
2238 nfsm_dissect(tl, u_int32_t *,
2239 3 * NFSX_UNSIGNED);
2240 fileno = fxdr_hyper(tl);
2241 len = fxdr_unsigned(int, *(tl + 2));
2242 } else {
2243 nfsm_dissect(tl, u_int32_t *,
2244 2 * NFSX_UNSIGNED);
2245 fileno = fxdr_unsigned(u_quad_t, *tl++);
2246 len = fxdr_unsigned(int, *tl);
2247 }
2248 if (len <= 0 || len > NFS_MAXNAMLEN) {
2249 error = EBADRPC;
2250 m_freem(info.nmi_mrep);
2251 goto nfsmout;
2252 }
2253 tlen = DIRENT_RECSIZE(len) + NFS_DIRENT_OVERHEAD;
2254 left = NFS_READDIRBLKSIZ - blksiz;
2255 if (tlen > left) {
2256 dp->d_reclen += left;
2257 uiop->uio_iov->iov_base += left;
2258 uiop->uio_iov->iov_len -= left;
2259 uiop->uio_resid -= left;
2260 blksiz = 0;
2261 }
2262 if (tlen > uiop->uio_resid)
2263 bigenough = 0;
2264 if (bigenough) {
2265 ndp = (struct nfs_dirent *)
2266 uiop->uio_iov->iov_base;
2267 dp = &ndp->dirent;
2268 dp->d_fileno = fileno;
2269 dp->d_namlen = len;
2270 dp->d_reclen = tlen;
2271 dp->d_type = DT_UNKNOWN;
2272 blksiz += tlen;
2273 if (blksiz == NFS_READDIRBLKSIZ)
2274 blksiz = 0;
2275 uiop->uio_resid -= NFS_DIRHDSIZ;
2276 uiop->uio_iov->iov_base =
2277 (char *)uiop->uio_iov->iov_base +
2278 NFS_DIRHDSIZ;
2279 uiop->uio_iov->iov_len -= NFS_DIRHDSIZ;
2280 nfsm_mtouio(uiop, len);
2281 cp = uiop->uio_iov->iov_base;
2282 tlen -= NFS_DIRHDSIZ + len;
2283 *cp = '\0'; /* null terminate */
2284 uiop->uio_iov->iov_base += tlen;
2285 uiop->uio_iov->iov_len -= tlen;
2286 uiop->uio_resid -= tlen;
2287 } else
2288 nfsm_adv(nfsm_rndup(len));
2289 if (info.nmi_v3) {
2290 nfsm_dissect(tl, u_int32_t *,
2291 3 * NFSX_UNSIGNED);
2292 } else {
2293 nfsm_dissect(tl, u_int32_t *,
2294 2 * NFSX_UNSIGNED);
2295 }
2296 if (bigenough) {
2297 if (info.nmi_v3) {
2298 ndp->cookie[0] = cookie.nfsuquad[0] =
2299 *tl++;
2300 } else
2301 ndp->cookie[0] = 0;
2302
2303 ndp->cookie[1] = cookie.nfsuquad[1] = *tl++;
2304 } else if (info.nmi_v3)
2305 tl += 2;
2306 else
2307 tl++;
2308 more_dirs = fxdr_unsigned(int, *tl);
2309 }
2310 /*
2311 * If at end of rpc data, get the eof boolean
2312 */
2313 if (!more_dirs) {
2314 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2315 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2316 }
2317 m_freem(info.nmi_mrep);
2318 }
2319 /*
2320 * Fill last record, iff any, out to a multiple of NFS_READDIRBLKSIZ
2321 * by increasing d_reclen for the last record.
2322 */
2323 if (blksiz > 0) {
2324 left = NFS_READDIRBLKSIZ - blksiz;
2325 dp->d_reclen += left;
2326 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base +
2327 left;
2328 uiop->uio_iov->iov_len -= left;
2329 uiop->uio_resid -= left;
2330 }
2331
2332 /*
2333 * We are now either at the end of the directory or have filled the
2334 * block.
2335 */
2336 if (bigenough) {
2337 dnp->n_direofoffset = fxdr_hyper(&cookie.nfsuquad[0]);
2338 if (end_of_directory) *end_of_directory = 1;
2339 } else {
2340 if (uiop->uio_resid > 0)
2341 printf("EEK! readdirrpc resid > 0\n");
2342 }
2343
2344nfsmout:
2345 return (error);
2346}
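The d_reclen arithmetic at source line 2253 above is self-contained; a minimal userland sketch of the same rounding, mirroring the expanded DIRENT_RECSIZE shown in the listing (illustration only, not kernel code):

#include <dirent.h>
#include <stddef.h>
#include <stdio.h>

/* Header plus name plus NUL terminator, rounded up to an 8-byte boundary. */
static size_t
dirent_recsize(size_t namlen)
{
	return ((offsetof(struct dirent, d_name) + namlen + 1 + 7) & ~(size_t)7);
}

int
main(void)
{
	size_t len;

	for (len = 1; len <= 8; len++)
		printf("namlen %zu -> reclen %zu\n", len, dirent_recsize(len));
	return (0);
}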
2347
2348/*
2349 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2350 */
2351int
2352nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
2353 int *end_of_directory, struct proc *p)
2354{
2355 int len, left;
2356 struct nfs_dirent *ndirp = NULL;
2357 struct dirent *dp = NULL;
2358 struct nfsm_info info;
2359 u_int32_t *tl;
2360 caddr_t cp;
2361 int32_t t1;
2362 struct vnode *newvp;
2363 caddr_t cp2, dpossav1, dpossav2;
2364 struct mbuf *mdsav1, *mdsav2;
2365 struct nameidata nami, *ndp = &nami;
2366 struct componentname *cnp = &ndp->ni_cnd;
2367 nfsuint64 cookie;
2368 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2369 struct nfsnode *dnp = VTONFS(vp), *np;
2370 nfsfh_t *fhp;
2371 u_quad_t fileno;
2372 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2373 int attrflag, fhsize;
2374
2375#ifdef DIAGNOSTIC
2376 if (uiop->uio_iovcnt != 1 ||
2377 (uiop->uio_resid & (NFS_DIRBLKSIZ - 1)))
2378 panic("nfs readdirplusrpc bad uio");
2379#endif
2380 NDINIT(ndp, 0, 0, UIO_SYSSPACE, NULL, p);
2381 ndp->ni_dvp = vp;
2382 newvp = NULLVP;
2383
2384 txdr_hyper(uiop->uio_offset, &cookie.nfsuquad[0]);
2385
2386 /*
2387 * Loop around doing readdir rpc's of size nm_readdirsize
2388 * truncated to a multiple of NFS_READDIRBLKSIZ.
2389 * The stopping criteria is EOF or buffer full.
2390 */
2391 while (more_dirs && bigenough) {
2392 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2393 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2394 nfsm_fhtom(&info, vp, 1);
2395 tl = nfsm_build(&info.nmi_mb, 6 * NFSX_UNSIGNED);
2396 *tl++ = cookie.nfsuquad[0];
2397 *tl++ = cookie.nfsuquad[1];
2398 if (cookie.nfsuquad[0] == 0 &&
2399 cookie.nfsuquad[1] == 0) {
2400 *tl++ = 0;
2401 *tl++ = 0;
2402 } else {
2403 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2404 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2405 }
2406 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2407 *tl = txdr_unsigned(nmp->nm_rsize);
2408
2409 info.nmi_procp = uiop->uio_procp;
2410 info.nmi_cred = cred;
2411 error = nfs_request(vp, NFSPROC_READDIRPLUS, &info);
2412 nfsm_postop_attr(vp, attrflag);
2413 if (error) {
2414 m_freem(info.nmi_mrep);
2415 goto nfsmout;
2416 }
2417
2418 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2419 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2420 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2421 more_dirs = fxdr_unsigned(int, *tl);
2422
2423 /* loop thru the dir entries, doctoring them to 4bsd form */
2424 while (more_dirs && bigenough) {
2425 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2426 fileno = fxdr_hyper(tl);
2427 len = fxdr_unsigned(int, *(tl + 2));
2428 if (len <= 0 || len > NFS_MAXNAMLEN) {
2429 error = EBADRPC;
2430 m_freem(info.nmi_mrep);
2431 goto nfsmout;
2432 }
2433 tlen = DIRENT_RECSIZE(len) + NFS_DIRENT_OVERHEAD;
2434 left = NFS_READDIRBLKSIZ - blksiz;
2435 if (tlen > left) {
2436 dp->d_reclen += left;
2437 uiop->uio_iov->iov_base =
2438 (char *)uiop->uio_iov->iov_base + left;
2439 uiop->uio_iov->iov_len -= left;
2440 uiop->uio_resid -= left;
2441 blksiz = 0;
2442 }
2443 if (tlen > uiop->uio_resid)
2444 bigenough = 0;
2445 if (bigenough) {
2446 ndirp = (struct nfs_dirent *)
2447 uiop->uio_iov->iov_base;
2448 dp = &ndirp->dirent;
2449 dp->d_fileno = fileno;
2450 dp->d_namlen = len;
2451 dp->d_reclen = tlen;
2452 dp->d_type = DT_UNKNOWN;
2453 blksiz += tlen;
2454 if (blksiz == NFS_READDIRBLKSIZ)
2455 blksiz = 0;
2456 uiop->uio_resid -= NFS_DIRHDSIZ;
2457 uiop->uio_iov->iov_base =
2458 (char *)uiop->uio_iov->iov_base +
2459 NFS_DIRHDSIZ;
2460 uiop->uio_iov->iov_len -= NFS_DIRHDSIZ;
2461 cnp->cn_nameptr = uiop->uio_iov->iov_base;
2462 cnp->cn_namelen = len;
2463 nfsm_mtouio(uiop, len);
2464 cp = uiop->uio_iov->iov_base;
2465 tlen -= NFS_DIRHDSIZ + len;
2466 *cp = '\0';
2467 uiop->uio_iov->iov_base += tlen;
2468 uiop->uio_iov->iov_len -= tlen;
2469 uiop->uio_resid -= tlen;
2470 } else
2471 nfsm_adv(nfsm_rndup(len));
2472 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2473 if (bigenough) {
2474 ndirp->cookie[0] = cookie.nfsuquad[0] = *tl++;
2475 ndirp->cookie[1] = cookie.nfsuquad[1] = *tl++;
2476 } else
2477 tl += 2;
2478
2479 /*
2480 * Since the attributes are before the file handle
2481 * (sigh), we must skip over the attributes and then
2482 * come back and get them.
2483 */
2484 attrflag = fxdr_unsigned(int, *tl);
2485 if (attrflag) {
2486 dpossav1 = info.nmi_dpos;
2487 mdsav1 = info.nmi_md;
2488 nfsm_adv(NFSX_V3FATTR);
2489 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2490 doit = fxdr_unsigned(int, *tl);
2491 if (doit) {
2492 nfsm_getfh(fhp, fhsize, 1);
2493 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2494 vref(vp);
2495 newvp = vp;
2496 np = dnp;
2497 } else {
2498 error = nfs_nget(vp->v_mount,
2499 fhp, fhsize, &np);
2500 if (error)
2501 doit = 0;
2502 else
2503 newvp = NFSTOV(np);
2504 }
2505 }
2506 if (doit && bigenough) {
2507 dpossav2 = info.nmi_dpos;
2508 info.nmi_dpos = dpossav1;
2509 mdsav2 = info.nmi_md;
2510 info.nmi_md = mdsav1;
2511 nfsm_loadattr(newvp, NULL);
2512 info.nmi_dpos = dpossav2;
2513 info.nmi_md = mdsav2;
2514 dp->d_type = IFTODT(
2515 VTTOIF(np->n_vattr.va_type));
2516 if (cnp->cn_namelen <=
2517 NAMECACHE_MAXLEN) {
2518 ndp->ni_vp = newvp;
2519 cache_purge(ndp->ni_dvp);
2520 nfs_cache_enter(ndp->ni_dvp,
2521 ndp->ni_vp, cnp);
2522 }
2523 }
2524 } else {
2525 /* Just skip over the file handle */
2526 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2527 i = fxdr_unsigned(int, *tl);
2528 if (i > 0)
2529 nfsm_adv(nfsm_rndup(i));
2530 }
2531 if (newvp != NULLVP) {
2532 if (newvp == vp)
2533 vrele(newvp);
2534 else
2535 vput(newvp);
2536 newvp = NULLVP;
2537 }
2538 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2539 more_dirs = fxdr_unsigned(int, *tl);
2540 }
2541 /*
2542 * If at end of rpc data, get the eof boolean
2543 */
2544 if (!more_dirs) {
2545 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2546 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2547 }
2548 m_freem(info.nmi_mrep);
2549 }
2550 /*
2551 * Fill last record, iff any, out to a multiple of NFS_READDIRBLKSIZ
2552 * by increasing d_reclen for the last record.
2553 */
2554 if (blksiz > 0) {
2555 left = NFS_READDIRBLKSIZ - blksiz;
2556 dp->d_reclen += left;
2557 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base +
2558 left;
2559 uiop->uio_iov->iov_len -= left;
2560 uiop->uio_resid -= left;
2561 }
2562
2563 /*
2564 * We are now either at the end of the directory or have filled the
2565 * block.
2566 */
2567 if (bigenough) {
2568 dnp->n_direofoffset = fxdr_hyper(&cookie.nfsuquad[0]);
2569 if (end_of_directory) *end_of_directory = 1;
2570 } else {
2571 if (uiop->uio_resid > 0)
2572 printf("EEK! readdirplusrpc resid > 0\n");
2573 }
2574
2575nfsmout:
2576 if (newvp != NULLVP) {
2577 if (newvp == vp)
2578 vrele(newvp);
2579 else
2580 vput(newvp);
2581 }
2582 return (error);
2583}
2584
2585/*
2586 * Silly rename. To make the stateless NFS filesystem look a little
2587 * more like "ufs", a remove of an active vnode is translated to a rename
2588 * to a funny looking filename that is removed by nfs_inactive on the
2589 * nfsnode. There is the potential for another process on a different client
2590 * to create the same funny name between the time nfs_lookitup() fails and
2591 * nfs_rename() completes, but...
2592 */
2593int
2594nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
2595{
2596 struct sillyrename *sp;
2597 struct nfsnode *np;
2598 int error;
2599
2600 cache_purge(dvp);
2601 np = VTONFS(vp);
2602 sp = malloc(sizeof(*sp), M_NFSREQ, M_WAITOK);
2603 sp->s_cred = crdup(cnp->cn_cred);
2604 sp->s_dvp = dvp;
2605 vref(dvp);
2606
2607 if (vp->v_type == VDIR) {
2608#ifdef DIAGNOSTIC
2609 printf("nfs: sillyrename dir\n");
2610#endif
2611 error = EINVAL;
2612 goto bad;
2613 }
2614
2615 /* Try lookitups until we get one that isn't there */
2616 while (1) {
2617 /* Fudge together a funny name */
2618 u_int32_t rnd[2];
2619
2620 arc4random_buf(&rnd, sizeof rnd);
2621 sp->s_namlen = snprintf(sp->s_name, sizeof sp->s_name,
2622 ".nfs%08X%08X", rnd[0], rnd[1]);
2623 if (sp->s_namlen > sizeof sp->s_name)
2624 sp->s_namlen = strlen(sp->s_name);
2625
2626 if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2627 cnp->cn_proc, NULL))
2628 break;
2629 }
2630
2631 error = nfs_renameit(dvp, cnp, sp);
2632 if (error)
2633 goto bad;
2634 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2635 cnp->cn_proc, &np);
2636 np->n_sillyrename = sp;
2637 return (0);
2638bad:
2639 vrele(sp->s_dvp);
2640 crfree(sp->s_cred);
2641 free(sp, M_NFSREQ, sizeof(*sp));
2642 return (error);
2643}
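The funny-name construction in the loop above can be exercised on its own; a minimal userland sketch (the 24-byte buffer is an assumed stand-in for sp->s_name and is not taken from the kernel structure):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	char name[24];		/* assumed stand-in for sp->s_name */
	uint32_t rnd[2];
	int namlen;

	/* Same scheme as above: ".nfs" followed by 64 random bits in hex. */
	arc4random_buf(rnd, sizeof rnd);
	namlen = snprintf(name, sizeof name, ".nfs%08X%08X", rnd[0], rnd[1]);
	if (namlen >= (int)sizeof name)
		namlen = (int)sizeof name - 1;	/* output was truncated */
	printf("%s (namlen %d)\n", name, namlen);
	return (0);
}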
2644
2645/*
2646 * Look up a file name and optionally either update the file handle or
2647 * allocate an nfsnode, depending on the value of npp.
2648 * npp == NULL --> just do the lookup
2649 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2650 * handled too
2651 * *npp != NULL --> update the file handle in the vnode
2652 */
2653int
2654nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred,
2655 struct proc *procp, struct nfsnode **npp)
2656{
2657 struct nfsm_info info;
2658 u_int32_t *tl;
2659 int32_t t1;
2660 struct vnode *newvp = NULL;
2661 struct nfsnode *np, *dnp = VTONFS(dvp);
2662 caddr_t cp2;
2663 int error = 0, fhlen, attrflag = 0;
2664 nfsfh_t *nfhp;
2665
2666 info.nmi_v3 = NFS_ISV3(dvp);
2667
2668 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2669 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + NFSX_UNSIGNED +
2670 nfsm_rndup(len));
2671 nfsm_fhtom(&info, dvp, info.nmi_v3);
2672 nfsm_strtom(name, len, NFS_MAXNAMLEN);
2673
2674 info.nmi_procp = procp;
2675 info.nmi_cred = cred;
2676 error = nfs_request(dvp, NFSPROC_LOOKUP, &info);
2677 if (error && !info.nmi_v3) {
2678 m_freem(info.nmi_mrep);
2679 goto nfsmout;
2680 }
2681
2682 if (npp && !error) {
2683 nfsm_getfh(nfhp, fhlen, info.nmi_v3);
2684 if (*npp) {
2685 np = *npp;
2686 np->n_fhp = &np->n_fh;
2687 bcopy(nfhp, np->n_fhp, fhlen);
2688 np->n_fhsize = fhlen;
2689 newvp = NFSTOV(np);
2690 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2691 vref(dvp);
2692 newvp = dvp;
2693 np = dnp;
2694 } else {
2695 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2696 if (error) {
2697 m_freem(info.nmi_mrep);
2698 return (error);
2699 }
2700 newvp = NFSTOV(np);
2701 }
2702 if (info.nmi_v3) {
2703 nfsm_postop_attr(newvp, attrflag);
2704 if (!attrflag && *npp == NULL) {
2705 m_freem(info.nmi_mrep);
2706 if (newvp == dvp)
2707 vrele(newvp);
2708 else
2709 vput(newvp);
2710 return (ENOENT);
2711 }
2712 } else
2713 nfsm_loadattr(newvp, NULL);
2714 }
2715 m_freem(info.nmi_mrep);
2716nfsmout:
2717 if (npp && *npp == NULL) {
2718 if (error) {
2719 if (newvp == dvp)
2720 vrele(newvp);
2721 else
2722 vput(newvp);
2723 } else
2724 *npp = np;
2725 }
2726 return (error);
2727}
2728
2729/*
2730 * Nfs Version 3 commit rpc
2731 */
2732int
2733nfs_commit(struct vnode *vp, u_quad_t offset, int cnt, struct proc *procp)
2734{
2735 struct nfsm_info info;
2736 u_int32_t *tl;
2737 int32_t t1;
2738 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2739 caddr_t cp2;
2740 int error = 0, wccflag = NFSV3_WCCRATTR;
2741
2742 if ((nmp->nm_flag & NFSMNT_HASWRITEVERF) == 0)
2743 return (0);
2744 nfsstats.rpccnt[NFSPROC_COMMIT]++;
2745 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(1));
2746 nfsm_fhtom(&info, vp, 1);
2747
2748 tl = nfsm_build(&info.nmi_mb, 3 * NFSX_UNSIGNED);
2749 txdr_hyper(offset, tl);
2750 tl += 2;
2751 *tl = txdr_unsigned(cnt);
2752
2753 info.nmi_procp = procp;
2754 info.nmi_cred = VTONFS(vp)->n_wcred;
2755 error = nfs_request(vp, NFSPROC_COMMIT, &info);
2756 nfsm_wcc_data(vp, wccflag);
2757
2758 if (!error) {
2759 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
2760 if (bcmp(nmp->nm_verf, tl,
2761 NFSX_V3WRITEVERF)) {
2762 bcopy(tl, nmp->nm_verf,
2763 NFSX_V3WRITEVERF);
2764 error = NFSERR_STALEWRITEVERF;
2765 }
2766 }
2767 m_freem(info.nmi_mrep);
2768
2769nfsmout:
2770 return (error);
2771}
2772
2773/*
2774 * Kludge City..
2775 * - make nfs_bmap() essentially a no-op that does no translation
2776 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
2777 * (Maybe I could use the process's page mapping, but I was concerned that
2778 * Kernel Write might not be enabled and also figured copyout() would do
2779 * a lot more work than bcopy() and also it currently happens in the
2780 * context of the swapper process (2).)
2781 */
2782int
2783nfs_bmap(void *v)
2784{
2785 struct vop_bmap_args *ap = v;
2786 struct vnode *vp = ap->a_vp;
2787
2788 if (ap->a_vpp != NULL)
2789 *ap->a_vpp = vp;
2790 if (ap->a_bnp != NULL)
2791 *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
2792 return (0);
2793}
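nfs_bmap() above only scales the logical block number by btodb() of the mount's I/O size; a standalone arithmetic check, assuming the DEV_BSIZE of 512 (shift of 9) visible in the expanded listing and an example 8192-byte f_iosize:

#include <stdint.h>
#include <stdio.h>

#define DEV_BSHIFT	9			/* DEV_BSIZE == 512 */
#define btodb(bytes)	((bytes) >> DEV_BSHIFT)

int
main(void)
{
	int64_t bn = 7;				/* logical block within the file */
	long iosize = 8192;			/* example mnt_stat.f_iosize */

	/* Logical block 7 of 8 KB blocks maps to 512-byte device block 112. */
	printf("bn %lld -> device block %lld\n",
	    (long long)bn, (long long)(bn * btodb(iosize)));
	return (0);
}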
2794
2795/*
2796 * Strategy routine.
2797 * For async requests when nfsiod(s) are running, queue the request by
2798 * calling nfs_asyncio(), otherwise just call nfs_doio() to do the
2799 * request.
2800 */
2801int
2802nfs_strategy(void *v)
2803{
2804 struct vop_strategy_args *ap = v;
2805 struct buf *bp = ap->a_bp;
2806 struct proc *p;
2807 int error = 0;
2808
2809 if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC))
2810 panic("nfs physio/async");
2811 if (bp->b_flags & B_ASYNC)
2812 p = NULL;
2813 else
2814 p = curproc; /* XXX */
2815 /*
2816 * If the op is asynchronous and an i/o daemon is waiting
2817 * queue the request, wake it up and wait for completion
2818 * otherwise just do it ourselves.
2819 */
2820 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp, 0))
2821 error = nfs_doio(bp, p);
2822 return (error);
2823}
2824
2825/*
2826 * fsync vnode op. Just call nfs_flush() with commit == 1.
2827 */
2828int
2829nfs_fsync(void *v)
2830{
2831 struct vop_fsync_args *ap = v;
2832
2833 return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_p, 1));
2834}
2835
2836/*
2837 * Flush all the blocks associated with a vnode.
2838 * Walk through the buffer pool and push any dirty pages
2839 * associated with the vnode.
2840 */
2841int
2842nfs_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct proc *p,
2843 int commit)
2844{
2845 struct nfsnode *np = VTONFS(vp);
2846 struct buf *bp;
2847 int i;
2848 struct buf *nbp;
2849 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2850 uint64_t slptimeo = INFSLP;
2851 int s, error = 0, slpflag = 0, retv, bvecpos;
2852 int dirty, passone = 1;
2853 u_quad_t off = (u_quad_t)-1, endoff = 0, toff;
2854#ifndef NFS_COMMITBVECSIZ
2855#define NFS_COMMITBVECSIZ 20
2856#endif
2857 struct buf *bvec[NFS_COMMITBVECSIZ];
2858
2859 if (nmp->nm_flag & NFSMNT_INT)
2860 slpflag = PCATCH;
2861 if (!commit)
2862 passone = 0;
2863 /*
2864 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2865 * server, but has not been committed to stable storage on the server
2866 * yet. On the first pass, the byte range is worked out and the commit
2867 * rpc is done. On the second pass, nfs_writebp() is called to do the
2868 * job.
2869 */
2870again:
2871 bvecpos = 0;
2872 if (NFS_ISV3(vp) && commit) {
2873 s = splbio();
2874 LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
2875 if (bvecpos >= NFS_COMMITBVECSIZ)
2876 break;
2877 if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
2878 != (B_DELWRI | B_NEEDCOMMIT))
2879 continue;
2880 bremfree(bp);
2881 bp->b_flags |= B_WRITEINPROG;
2882 buf_acquire(bp);
2883
2884 /*
2885 * A list of these buffers is kept so that the
2886 * second loop knows which buffers have actually
2887 * been committed. This is necessary, since there
2888 * may be a race between the commit rpc and new
2889 * uncommitted writes on the file.
2890 */
2891 bvec[bvecpos++] = bp;
2892 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2893 bp->b_dirtyoff;
2894 if (toff < off)
2895 off = toff;
2896 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2897 if (toff > endoff)
2898 endoff = toff;
2899 }
2900 splx(s);
2901 }
2902 if (bvecpos > 0) {
2903 /*
2904 * Commit data on the server, as required.
2905 */
2906 bcstats.pendingwrites++;
2907 bcstats.numwrites++;
2908 retv = nfs_commit(vp, off, (int)(endoff - off), p);
2909 if (retv == NFSERR_STALEWRITEVERF)
2910 nfs_clearcommit(vp->v_mount);
2911 /*
2912 * Now, either mark the blocks I/O done or mark the
2913 * blocks dirty, depending on whether the commit
2914 * succeeded.
2915 */
2916 for (i = 0; i < bvecpos; i++) {
2917 bp = bvec[i];
2918 bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG);
2919 if (retv) {
2920 if (i == 0)
2921 bcstats.pendingwrites--;
2922 brelse(bp);
2923 } else {
2924 if (i > 0)
2925 bcstats.pendingwrites++;
2926 s = splbio();
2927 buf_undirty(bp);
2928 vp->v_numoutput++;
2929 bp->b_flags |= B_ASYNC;
2930 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
2931 bp->b_dirtyoff = bp->b_dirtyend = 0;
2932 biodone(bp);
2933 splx(s);
2934 }
2935 }
2936 }
2937
2938 /*
2939 * Start/do any write(s) that are required.
2940 */
2941loop:
2942 s = splbio();
2943 LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
2944 if (bp->b_flags & B_BUSY) {
2945 if (waitfor != MNT_WAIT || passone)
2946 continue;
2947 bp->b_flags |= B_WANTED;
2948 error = tsleep_nsec(bp, slpflag | (PRIBIO + 1),
2949 "nfsfsync", slptimeo);
2950 splx(s);
2951 if (error) {
2952 if (nfs_sigintr(nmp, NULL, p))
2953 return (EINTR);
2954 if (slpflag == PCATCH) {
2955 slpflag = 0;
2956 slptimeo = SEC_TO_NSEC(2);
2957 }
2958 }
2959 goto loop;
2960 }
2961 if ((bp->b_flags & B_DELWRI) == 0)
2962 panic("nfs_fsync: not dirty");
2963 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT))
2964 continue;
2965 bremfree(bp);
2966 if (passone || !commit) {
2967 bp->b_flags |= B_ASYNC;
2968 } else {
2969 bp->b_flags |= (B_ASYNC|B_WRITEINPROG|B_NEEDCOMMIT);
2970 }
2971 buf_acquire(bp);
2972 splx(s);
2973 VOP_BWRITE(bp);
2974 goto loop;
2975 }
2976 splx(s);
2977 if (passone) {
2978 passone = 0;
2979 goto again;
2980 }
2981 if (waitfor == MNT_WAIT) {
2982 loop2:
2983 s = splbio();
2984 error = vwaitforio(vp, slpflag, "nfs_fsync", slptimeo);
2985 if (error) {
2986 splx(s);
2987 if (nfs_sigintr(nmp, NULL, p))
2988 return (EINTR);
2989 if (slpflag == PCATCH) {
2990 slpflag = 0;
2991 slptimeo = SEC_TO_NSEC(2);
2992 }
2993 goto loop2;
2994 }
2995 dirty = (!LIST_EMPTY(&vp->v_dirtyblkhd) && commit);
2996 splx(s);
2997 if (dirty) {
2998#if 0
2999 vprint("nfs_fsync: dirty", vp);
3000#endif
3001 goto loop;
3002 }
3003 }
3004 if (np->n_flag & NWRITEERR) {
3005 error = np->n_error;
3006 np->n_flag &= ~NWRITEERR;
3007 }
3008 return (error);
3009}
3010
3011/*
3012 * Return POSIX pathconf information applicable to nfs.
3013 * Fake it. For v3 we could ask the server, but such code
3014 * hasn't been written yet.
3015 */
3016int
3017nfs_pathconf(void *v)
3018{
3019 struct vop_pathconf_args *ap = v;
3020 struct nfsmount *nmp = VFSTONFS(ap->a_vp->v_mount);
3021 int error = 0;
3022
3023 switch (ap->a_name) {
3024 case _PC_LINK_MAX:
3025 *ap->a_retval = LINK_MAX;
3026 break;
3027 case _PC_NAME_MAX:
3028 *ap->a_retval = NAME_MAX;
3029 break;
3030 case _PC_CHOWN_RESTRICTED:
3031 *ap->a_retval = 1;
3032 break;
3033 case _PC_NO_TRUNC:
3034 *ap->a_retval = 1;
3035 break;
3036 case _PC_ALLOC_SIZE_MIN:
3037 *ap->a_retval = NFS_FABLKSIZE;
3038 break;
3039 case _PC_FILESIZEBITS:
3040 *ap->a_retval = 64;
3041 break;
3042 case _PC_REC_INCR_XFER_SIZE:
3043 *ap->a_retval = min(nmp->nm_rsize, nmp->nm_wsize);
3044 break;
3045 case _PC_REC_MAX_XFER_SIZE:
3046 *ap->a_retval = -1; /* means ``unlimited'' */
3047 break;
3048 case _PC_REC_MIN_XFER_SIZE:
3049 *ap->a_retval = min(nmp->nm_rsize, nmp->nm_wsize);
3050 break;
3051 case _PC_REC_XFER_ALIGN:
3052 *ap->a_retval = PAGE_SIZE;
3053 break;
3054 case _PC_SYMLINK_MAX:
3055 *ap->a_retval = MAXPATHLEN;
3056 break;
3057 case _PC_2_SYMLINKS:
3058 *ap->a_retval = 1;
3059 break;
3060 case _PC_TIMESTAMP_RESOLUTION:
3061 *ap->a_retval = NFS_ISV3(ap->a_vp) ? 1 : 1000;
3062 break;
3063 default:
3064 error = EINVAL;
3065 break;
3066 }
3067
3068 return (error);
3069}
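These are the values a pathconf(2) call against an NFS mount hands back to userland; a trivial check (the mount point path is a placeholder):

#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	/* "/mnt" is a placeholder; point it at an NFS mount to see the values above. */
	long namemax = pathconf("/mnt", _PC_NAME_MAX);

	if (namemax == -1)
		perror("pathconf");
	else
		printf("_PC_NAME_MAX = %ld\n", namemax);
	return (0);
}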
3070
3071/*
3072 * NFS advisory byte-level locks.
3073 */
3074int
3075nfs_advlock(void *v)
3076{
3077 struct vop_advlock_args *ap = v;
3078 struct nfsnode *np = VTONFS(ap->a_vp);
3079
3080 return (lf_advlock(&np->n_lockf, np->n_size, ap->a_id, ap->a_op,
3081 ap->a_fl, ap->a_flags));
3082}
3083
3084/*
3085 * Print out the contents of an nfsnode.
3086 */
3087int
3088nfs_print(void *v)
3089{
3090 struct vop_print_args *ap = v;
3091 struct vnode *vp = ap->a_vp;
3092 struct nfsnode *np = VTONFS(vp);
3093
3094 printf("tag VT_NFS, fileid %lld fsid 0x%lx",
3095 np->n_vattr.va_fileid, np->n_vattr.va_fsid);
3096#ifdef FIFO
3097 if (vp->v_type == VFIFO)
3098 fifo_printinfo(vp);
3099#endif
3100 printf("\n");
3101 return (0);
3102}
3103
3104/*
3105 * Just call nfs_writebp() with the force argument set to 1.
3106 */
3107int
3108nfs_bwrite(void *v)
3109{
3110 struct vop_bwrite_args *ap = v;
3111
3112 return (nfs_writebp(ap->a_bp, 1));
3113}
3114
3115/*
3116 * This is a clone of vop_generic_bwrite(), except that B_WRITEINPROG isn't set unless
3117 * the force flag is one and it also handles the B_NEEDCOMMIT flag.
3118 */
3119int
3120nfs_writebp(struct buf *bp, int force)
3121{
3122 int oldflags = bp->b_flags, retv = 1;
3123 struct proc *p = curproc; /* XXX */
3124 off_t off;
3125 size_t cnt;
3126 int s;
3127 struct vnode *vp;
3128 struct nfsnode *np;
3129
3130 if (!(bp->b_flags & B_BUSY))
3131 panic("bwrite: buffer is not busy???");
3132
3133 vp = bp->b_vp;
3134 np = VTONFS(vp);
3135
3136 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
3137
3138 s = splbio();
3139 buf_undirty(bp);
3140
3141 if ((oldflags & B_ASYNC) && !(oldflags & B_DELWRI) && p)
3142 ++p->p_ru.ru_oublock;
3143
3144 bp->b_vp->v_numoutput++;
3145 splx(s);
3146
3147 /*
3148 * If B_NEEDCOMMIT is set, a commit rpc may do the trick. If not
3149 * an actual write will have to be scheduled via VOP_STRATEGY().
3150 * If B_WRITEINPROG is already set, then push it with a write anyhow.
3151 */
3152 if ((oldflags & (B_NEEDCOMMIT | B_WRITEINPROG)) == B_NEEDCOMMIT) {
3153 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
3154 cnt = bp->b_dirtyend - bp->b_dirtyoff;
3155
3156 rw_enter_write(&np->n_commitlock);
3157 if (!(bp->b_flags & B_NEEDCOMMIT)) {
3158 rw_exit_write(&np->n_commitlock);
3159 return (0);
3160 }
3161
3162 /*
3163 * If it's already been committed by somebody else,
3164 * bail.
3165 */
3166 if (!nfs_in_committed_range(vp, bp)) {
3167 int pushedrange = 0;
3168 /*
3169 * Since we're going to do this, push as much
3170 * as we can.
3171 */
3172
3173 if (nfs_in_tobecommitted_range(vp, bp)) {
3174 pushedrange = 1;
3175 off = np->n_pushlo;
3176 cnt = np->n_pushhi - np->n_pushlo;
3177 }
3178
3179 bp->b_flags |= B_WRITEINPROG;
3180 bcstats.pendingwrites++;
3181 bcstats.numwrites++;
3182 retv = nfs_commit(bp->b_vp, off, cnt, curproc);
3183 bp->b_flags &= ~B_WRITEINPROG;
3184
3185 if (retv == 0) {
3186 if (pushedrange)
3187 nfs_merge_commit_ranges(vp);
3188 else
3189 nfs_add_committed_range(vp, bp);
3190 } else
3191 bcstats.pendingwrites--;
3192 } else
3193 retv = 0; /* It has already been committed. */
3194
3195 rw_exit_write(&np->n_commitlock);
3196 if (!retv) {
3197 bp->b_dirtyoff = bp->b_dirtyend = 0;
3198 bp->b_flags &= ~B_NEEDCOMMIT;
3199 s = splbio();
3200 biodone(bp);
3201 splx(s);
3202 } else if (retv == NFSERR_STALEWRITEVERF)
3203 nfs_clearcommit(bp->b_vp->v_mount);
3204 }
3205 if (retv) {
3206 s = splbio();
3207 buf_flip_dma(bp);
3208 if (force)
3209 bp->b_flags |= B_WRITEINPROG;
3210 splx(s);
3211 VOP_STRATEGY(bp->b_vp, bp);
3212 }
3213
3214 if ((oldflags & B_ASYNC) == 0) {
3215 int rtval;
3216
3217 bp->b_flags |= B_RAW;
3218 rtval = biowait(bp);
3219 if (!(oldflags & B_DELWRI) && p) {
3220 ++p->p_ru.ru_oublock;
3221 }
3222 brelse(bp);
3223 return (rtval);
3224 }
3225
3226 return (0);
3227}
3228
3229/*
3230 * nfs special file access vnode op.
3231 * Essentially just get vattr and then imitate iaccess() since the device is
3232 * local to the client.
3233 */
3234int
3235nfsspec_access(void *v)
3236{
3237 struct vop_access_args *ap = v;
3238 struct vattr va;
3239 struct vnode *vp = ap->a_vp;
3240 int error;
3241
3242 /*
3243 * Disallow write attempts on filesystems mounted read-only;
3244 * unless the file is a socket, fifo, or a block or character
3245 * device resident on the filesystem.
3246 */
3247 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3248 switch (vp->v_type) {
3249 case VREG:
3250 case VDIR:
3251 case VLNK:
3252 return (EROFS);
3253 default:
3254 break;
3255 }
3256 }
3257
3258 error = VOP_GETATTR(vp, &va, ap->a_cred, ap->a_p);
3259 if (error)
3260 return (error);
3261
3262 return (vaccess(vp->v_type, va.va_mode, va.va_uid, va.va_gid,
3263 ap->a_mode, ap->a_cred));
3264}
3265
3266/*
3267 * Read wrapper for special devices.
3268 */
3269int
3270nfsspec_read(void *v)
3271{
3272 struct vop_read_args *ap = v;
3273 struct nfsnode *np = VTONFS(ap->a_vp);
3274
3275 /*
3276 * Set access flag.
3277 */
3278 np->n_flag |= NACC;
3279 getnanotime(&np->n_atim);
3280 return (spec_read(ap));
3281}
3282
3283/*
3284 * Write wrapper for special devices.
3285 */
3286int
3287nfsspec_write(void *v)
3288{
3289 struct vop_write_args *ap = v;
3290 struct nfsnode *np = VTONFS(ap->a_vp);
3291
3292 /*
3293 * Set update flag.
3294 */
3295 np->n_flag |= NUPD;
3296 getnanotime(&np->n_mtim);
3297 return (spec_write(ap));
3298}
3299
3300/*
3301 * Close wrapper for special devices.
3302 *
3303 * Update the times on the nfsnode then do device close.
3304 */
3305int
3306nfsspec_close(void *v)
3307{
3308 struct vop_close_args *ap = v;
3309 struct vnode *vp = ap->a_vp;
3310 struct nfsnode *np = VTONFS(vp);
3311 struct vattr vattr;
3312
3313 if (np->n_flag & (NACC | NUPD)) {
3314 np->n_flag |= NCHG;
3315 if (vp->v_usecount == 1 &&
3316 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3317 VATTR_NULL(&vattr);
3318 if (np->n_flag & NACC)
3319 vattr.va_atime = np->n_atim;
3320 if (np->n_flag & NUPD)
3321 vattr.va_mtime = np->n_mtim;
3322 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3323 }
3324 }
3325 return (spec_close(ap));
3326}
3327
3328#ifdef FIFO
3329/*
3330 * Read wrapper for fifos.
3331 */
3332int
3333nfsfifo_read(void *v)
3334{
3335 struct vop_read_args *ap = v;
3336 struct nfsnode *np = VTONFS(ap->a_vp);
3337
3338 /*
3339 * Set access flag.
3340 */
3341 np->n_flag |= NACC;
3342 getnanotime(&np->n_atim);
3343 return (fifo_read(ap));
3344}
3345
3346/*
3347 * Write wrapper for fifos.
3348 */
3349int
3350nfsfifo_write(void *v)
3351{
3352 struct vop_write_args *ap = v;
3353 struct nfsnode *np = VTONFS(ap->a_vp);
3354
3355 /*
3356 * Set update flag.
3357 */
3358 np->n_flag |= NUPD;
3359 getnanotime(&np->n_mtim);
3360 return (fifo_write(ap));
3361}
3362
3363/*
3364 * Close wrapper for fifos.
3365 *
3366 * Update the times on the nfsnode then do fifo close.
3367 */
3368int
3369nfsfifo_close(void *v)
3370{
3371 struct vop_close_args *ap = v;
3372 struct vnode *vp = ap->a_vp;
3373 struct nfsnode *np = VTONFS(vp);
3374 struct vattr vattr;
3375
3376 if (np->n_flag & (NACC | NUPD)) {
3377 if (np->n_flag & NACC) {
3378 getnanotime(&np->n_atim);
3379 }
3380 if (np->n_flag & NUPD) {
3381 getnanotime(&np->n_mtim);
3382 }
3383 np->n_flag |= NCHG;
3384 if (vp->v_usecount == 1 &&
3385 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3386 VATTR_NULL(&vattr);
3387 if (np->n_flag & NACC)
3388 vattr.va_atime = np->n_atim;
3389 if (np->n_flag & NUPD)
3390 vattr.va_mtime = np->n_mtim;
3391 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3392 }
3393 }
3394 return (fifo_close(ap));
3395}
3396
3397int
3398nfsfifo_reclaim(void *v)
3399{
3400 fifo_reclaim(v);
3401 return (nfs_reclaim(v));
3402}
3403#endif /* ! FIFO */