Bug Summary

File: kern/vfs_subr.c
Warning: line 1259, column 7
Access to field 'si_rdev' results in a dereference of a null pointer (loaded from field 'vu_specinfo')
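
The analyzer's path (steps 1-37 in the listing below) enters vcount() at line 1254, walks the special-device alias chain, and calls vgone() on an unused alias. vgonel() frees that vnode's specinfo and stores NULL into vu_specinfo (step 30, line 1182) before the `goto loop` in vcount() restarts the walk; on the second pass the checker assumes the same chain entry is revisited, so the vq->v_rdev read at line 1259, which expands to a load through vu_specinfo, dereferences NULL. In the kernel, vgonel() unlinks the vnode from the hash chain with SLIST_REMOVE() (line 1163) before clearing the pointer, so realizability depends on whether a stale entry can still be reached. The reduced sketch below, in plain C with hypothetical names and types, reproduces only the shape the checker reasons about:

	#include <stdlib.h>

	struct info { int rdev; };
	struct node { struct info *info; struct node *next; };

	/* Mimics vgonel(): frees and NULLs the info, but does not unlink. */
	static void
	gone(struct node *n)
	{
		free(n->info);
		n->info = NULL;		/* step 30 in the report */
	}

	/* Mimics vcount(): rescans the chain after revoking an alias. */
	static int
	count(struct node *head, struct node *np)
	{
		struct node *q;
		int total;
	loop:
		total = 0;
		for (q = head; q != NULL; q = q->next) {
			if (q->info->rdev != np->info->rdev)	/* step 37: q->info may be NULL here */
				continue;
			if (q != np) {
				gone(q);
				goto loop;	/* rescan reaches q again with q->info == NULL */
			}
			total++;
		}
		return (total);
	}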

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name vfs_subr.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/kern/vfs_subr.c
1/* $OpenBSD: vfs_subr.c,v 1.313 2021/10/25 10:24:54 claudio Exp $ */
2/* $NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $ */
3
4/*
5 * Copyright (c) 1989, 1993
6 * The Regents of the University of California. All rights reserved.
7 * (c) UNIX System Laboratories, Inc.
8 * All or some portions of this file are derived from material licensed
9 * to the University of California by American Telephone and Telegraph
10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11 * the permission of UNIX System Laboratories, Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94
38 */
39
40/*
41 * External virtual filesystem routines
42 */
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/proc.h>
47#include <sys/sysctl.h>
48#include <sys/mount.h>
49#include <sys/time.h>
50#include <sys/fcntl.h>
51#include <sys/kernel.h>
52#include <sys/conf.h>
53#include <sys/vnode.h>
54#include <sys/lock.h>
55#include <sys/lockf.h>
56#include <sys/stat.h>
57#include <sys/acct.h>
58#include <sys/namei.h>
59#include <sys/ucred.h>
60#include <sys/buf.h>
61#include <sys/errno.h>
62#include <sys/malloc.h>
63#include <sys/mbuf.h>
64#include <sys/syscallargs.h>
65#include <sys/pool.h>
66#include <sys/tree.h>
67#include <sys/specdev.h>
68#include <sys/atomic.h>
69
70#include <netinet/in.h>
71
72#include <uvm/uvm_extern.h>
73#include <uvm/uvm_vnode.h>
74
75#include "softraid.h"
76
77void sr_quiesce(void);
78
79enum vtype iftovt_tab[16] = {
80 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
81 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
82};
83
84int vttoif_tab[9] = {
 85 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
 86 S_IFSOCK, S_IFIFO, S_IFMT,
87};
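
These two tables drive the mode-bits-to-vnode-type conversions; sys/vnode.h wraps them in macros along these lines (a sketch of the standard BSD definitions, not quoted from this tree):

	/* Sketch of the accessors built on iftovt_tab[]/vttoif_tab[] above. */
	#define IFTOVT(mode)	(iftovt_tab[((mode) & S_IFMT) >> 12])
	#define VTTOIF(indx)	(vttoif_tab[(int)(indx)])
	#define MAKEIMODE(indx, mode)	(int)(VTTOIF(indx) | (mode))

For example, IFTOVT(S_IFDIR) shifts 0040000 down to index 4 and yields VDIR.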
88
89int prtactive = 0; /* 1 => print out reclaim of active vnodes */
90int suid_clear = 1; /* 1 => clear SUID / SGID on owner change */
91
92/*
93 * Insq/Remq for the vnode usage lists.
94 */
 95#define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs)
 96#define bufremvn(bp) { \
 97 LIST_REMOVE(bp, b_vnbufs); \
 98 LIST_NEXT(bp, b_vnbufs) = NOLIST; \
 99}
100
101struct freelst vnode_hold_list; /* list of vnodes referencing buffers */
102struct freelst vnode_free_list; /* vnode free list */
103
104struct mntlist mountlist; /* mounted filesystem list */
105
106void vclean(struct vnode *, int, struct proc *);
107
108void insmntque(struct vnode *, struct mount *);
109int getdevvp(dev_t, struct vnode **, enum vtype);
110
111int vfs_hang_addrlist(struct mount *, struct netexport *,
112 struct export_args *);
113int vfs_free_netcred(struct radix_node *, void *, u_int);
114void vfs_free_addrlist(struct netexport *);
115void vputonfreelist(struct vnode *);
116
117int vflush_vnode(struct vnode *, void *);
118int maxvnodes;
119
 120struct mutex vnode_mtx = MUTEX_INITIALIZER(IPL_BIO);
121
122void vfs_unmountall(void);
123
124#ifdef DEBUG
125void printlockedvnodes(void);
126#endif
127
128struct pool vnode_pool;
129struct pool uvm_vnode_pool;
130
131static inline int rb_buf_compare(const struct buf *b1, const struct buf *b2);
 132RBT_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);
133
134static inline int
135rb_buf_compare(const struct buf *b1, const struct buf *b2)
136{
137 if (b1->b_lblkno < b2->b_lblkno)
138 return(-1);
139 if (b1->b_lblkno > b2->b_lblkno)
140 return(1);
141 return(0);
142}
143
144/*
145 * Initialize the vnode management data structures.
146 */
147void
148vntblinit(void)
149{
150 /* buffer cache may need a vnode for each buffer */
151 maxvnodes = 2 * initialvnodes;
 152 pool_init(&vnode_pool, sizeof(struct vnode), 0, IPL_NONE,
 153 PR_WAITOK, "vnodes", NULL);
 154 pool_init(&uvm_vnode_pool, sizeof(struct uvm_vnode), 0, IPL_NONE,
 155 PR_WAITOK, "uvmvnodes", NULL);
 156 TAILQ_INIT(&vnode_hold_list);
 157 TAILQ_INIT(&vnode_free_list);
 158 TAILQ_INIT(&mountlist);
159 /*
160 * Initialize the filesystem syncer.
161 */
162 vn_initialize_syncerd();
163
 164#ifdef NFSSERVER
165 rn_init(sizeof(struct sockaddr_in));
166#endif /* NFSSERVER */
167}
168
169/*
170 * Allocate a mount point.
171 *
172 * The returned mount point is marked as busy.
173 */
174struct mount *
175vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp)
176{
177 struct mount *mp;
178
 179 mp = malloc(sizeof(*mp), M_MOUNT, M_WAITOK|M_ZERO);
 180 rw_init_flags(&mp->mnt_lock, "vfslock", RWL_IS_VNODE);
 181 (void)vfs_busy(mp, VB_READ|VB_NOWAIT);
 182
 183 TAILQ_INIT(&mp->mnt_vnodelist);
184 mp->mnt_vnodecovered = vp;
185
 186 atomic_inc_int(&vfsp->vfc_refcount);
187 mp->mnt_vfc = vfsp;
188 mp->mnt_op = vfsp->vfc_vfsops;
189 mp->mnt_flag = vfsp->vfc_flags;
 190 strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
191
192 return (mp);
193}
194
195/*
196 * Release a mount point.
197 */
198void
199vfs_mount_free(struct mount *mp)
200{
 201 atomic_dec_int(&mp->mnt_vfc->vfc_refcount);
 202 free(mp, M_MOUNT, sizeof(*mp));
203}
204
205/*
206 * Mark a mount point as busy. Used to synchronize access and to delay
207 * unmounting.
208 *
209 * Default behaviour is to attempt getting a READ lock and in case of an
210 * ongoing unmount, to wait for it to finish and then return failure.
211 */
212int
213vfs_busy(struct mount *mp, int flags)
214{
215 int rwflags = 0;
216
 217 if (flags & VB_WRITE)
 218 rwflags |= RW_WRITE;
 219 else
 220 rwflags |= RW_READ;
 221
 222 if (flags & VB_WAIT)
 223 rwflags |= RW_SLEEPFAIL;
 224 else
 225 rwflags |= RW_NOSLEEP;
 226
 227#ifdef WITNESS
 228 if (flags & VB_DUPOK)
 229 rwflags |= RW_DUPOK;
 230#endif
 231
 232 if (rw_enter(&mp->mnt_lock, rwflags))
 233 return (EBUSY);
234
235 return (0);
236}
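
Callers bracket filesystem access with vfs_busy()/vfs_unbusy() so an in-flight unmount cannot tear the mount down underneath them; printlockedvnodes() later in this file follows the same pattern. A minimal usage sketch:

	/* Sketch: hold the mount busy (read-side, no sleeping) while using it. */
	if (vfs_busy(mp, VB_READ|VB_NOWAIT) == 0) {
		/* ... operate on mp and its vnode list ... */
		vfs_unbusy(mp);
	}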
237
238/*
239 * Free a busy file system
240 */
241void
242vfs_unbusy(struct mount *mp)
243{
244 rw_exit(&mp->mnt_lock);
245}
246
247int
248vfs_isbusy(struct mount *mp)
249{
 250 if (RWLOCK_OWNER(&mp->mnt_lock) > 0)
251 return (1);
252 else
253 return (0);
254}
255
256/*
257 * Lookup a filesystem type, and if found allocate and initialize
258 * a mount structure for it.
259 *
260 * Devname is usually updated by mount(8) after booting.
261 */
262int
263vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
264{
265 struct vfsconf *vfsp;
266 struct mount *mp;
267
268 vfsp = vfs_byname(fstypename);
 269 if (vfsp == NULL)
 270 return (ENODEV);
 271 mp = vfs_mount_alloc(NULLVP, vfsp);
 272 mp->mnt_flag |= MNT_RDONLY;
 273 mp->mnt_stat.f_mntonname[0] = '/';
 274 copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN, NULL);
 275 copystr(devname, mp->mnt_stat.f_mntfromspec, MNAMELEN, NULL);
276 *mpp = mp;
277 return (0);
278 }
279
280/*
281 * Lookup a mount point by filesystem identifier.
282 */
283struct mount *
284vfs_getvfs(fsid_t *fsid)
285{
286 struct mount *mp;
287
 288 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
289 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
290 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
291 return (mp);
292 }
293 }
294
 295 return (NULL);
296}
297
298
299/*
300 * Get a new unique fsid
301 */
302void
303vfs_getnewfsid(struct mount *mp)
304{
305 static u_short xxxfs_mntid;
306
307 fsid_t tfsid;
308 int mtype;
309
310 mtype = mp->mnt_vfc->vfc_typenum;
 311 mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
 312 mp->mnt_stat.f_fsid.val[1] = mtype;
 313 if (xxxfs_mntid == 0)
 314 ++xxxfs_mntid;
 315 tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
 316 tfsid.val[1] = mtype;
 317 if (!TAILQ_EMPTY(&mountlist)) {
318 while (vfs_getvfs(&tfsid)) {
319 tfsid.val[0]++;
320 xxxfs_mntid++;
321 }
322 }
323 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
324}
325
326/*
327 * Set vnode attributes to VNOVAL
328 */
329void
330vattr_null(struct vattr *vap)
331{
332
333 vap->va_type = VNON;
334 /*
335 * Don't get fancy: u_quad_t = u_int = VNOVAL leaves the u_quad_t
336 * with 2^31-1 instead of 2^64-1. Just write'm out and let
337 * the compiler do its job.
338 */
 339 vap->va_mode = VNOVAL;
 340 vap->va_nlink = VNOVAL;
 341 vap->va_uid = VNOVAL;
 342 vap->va_gid = VNOVAL;
 343 vap->va_fsid = VNOVAL;
 344 vap->va_fileid = VNOVAL;
 345 vap->va_size = VNOVAL;
 346 vap->va_blocksize = VNOVAL;
 347 vap->va_atime.tv_sec = VNOVAL;
 348 vap->va_atime.tv_nsec = VNOVAL;
 349 vap->va_mtime.tv_sec = VNOVAL;
 350 vap->va_mtime.tv_nsec = VNOVAL;
 351 vap->va_ctime.tv_sec = VNOVAL;
 352 vap->va_ctime.tv_nsec = VNOVAL;
 353 vap->va_gen = VNOVAL;
 354 vap->va_flags = VNOVAL;
 355 vap->va_rdev = VNOVAL;
 356 vap->va_bytes = VNOVAL;
 357 vap->va_filerev = VNOVAL;
358 vap->va_vaflags = 0;
359}
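
Callers use vattr_null() to build a sparse attribute set: invalidate every field, then fill in only what should change, so the filesystem treats VNOVAL fields as "leave alone". A sketch of the common pattern (hypothetical snippet, not from this file):

	/* Sketch: truncate a file by setting only va_size. */
	struct vattr va;

	vattr_null(&va);
	va.va_size = 0;
	error = VOP_SETATTR(vp, &va, cred, p);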
360
361/*
362 * Routines having to do with the management of the vnode table.
363 */
364long numvnodes;
365
366/*
367 * Return the next vnode from the free list.
368 */
369int
370getnewvnode(enum vtagtype tag, struct mount *mp, const struct vops *vops,
371 struct vnode **vpp)
372{
 373 struct proc *p = curproc;
374 struct freelst *listhd;
375 static int toggle;
376 struct vnode *vp;
377 int s;
378
379 /*
380 * allow maxvnodes to increase if the buffer cache itself
381 * is big enough to justify it. (we don't shrink it ever)
382 */
383 maxvnodes = maxvnodes < bcstats.numbufs ? bcstats.numbufs
384 : maxvnodes;
385
386 /*
387 * We must choose whether to allocate a new vnode or recycle an
388 * existing one. The criterion for allocating a new one is that
389 * the total number of vnodes is less than the number desired or
390 * there are no vnodes on either free list. Generally we only
391 * want to recycle vnodes that have no buffers associated with
392 * them, so we look first on the vnode_free_list. If it is empty,
393 * we next consider vnodes with referencing buffers on the
394 * vnode_hold_list. The toggle ensures that half the time we
395 * will use a buffer from the vnode_hold_list, and half the time
396 * we will allocate a new one unless the list has grown to twice
397 * the desired size. We are reticent to recycle vnodes from the
398 * vnode_hold_list because we will lose the identity of all its
399 * referencing buffers.
400 */
401 toggle ^= 1;
402 if (numvnodes / 2 > maxvnodes)
403 toggle = 0;
404
 405 s = splbio();
 406 if ((numvnodes < maxvnodes) ||
 407 ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
 408 ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
 409 splx(s);
 410 vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
 411 vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
 412 vp->v_uvm->u_vnode = vp;
 413 uvm_obj_init(&vp->v_uvm->u_obj, &uvm_vnodeops, 0);
 414 RBT_INIT(buf_rb_bufs, &vp->v_bufs_tree);
 415 cache_tree_init(&vp->v_nc_tree);
 416 TAILQ_INIT(&vp->v_cache_dst);
417 numvnodes++;
 418 } else {
 419 TAILQ_FOREACH(vp, listhd, v_freelist) {
 420 if (VOP_ISLOCKED(vp) == 0)
 421 break;
 422 }
 423 /*
 424 * Unless this is a bad time of the month, at most
 425 * the first NCPUS items on the free list are
 426 * locked, so this is close enough to being empty.
 427 */
 428 if (vp == NULL) {
 429 splx(s);
 430 tablefull("vnode");
 431 *vpp = NULL;
 432 return (ENFILE);
 433 }
 434
 435#ifdef DIAGNOSTIC
 436 if (vp->v_usecount) {
 437 vprint("free vnode", vp);
 438 panic("free vnode isn't");
 439 }
 440#endif
 441
 442 TAILQ_REMOVE(listhd, vp, v_freelist);
 443 vp->v_bioflag &= ~VBIOONFREELIST;
 444 splx(s);
 445
 446 if (vp->v_type != VBAD)
 447 vgonel(vp, p);
 448#ifdef DIAGNOSTIC
 449 if (vp->v_data) {
 450 vprint("cleaned vnode", vp);
 451 panic("cleaned vnode isn't");
 452 }
 453 s = splbio();
 454 if (vp->v_numoutput)
 455 panic("Clean vnode has pending I/O's");
 456 splx(s);
 457#endif
 458 vp->v_flag = 0;
 459 vp->v_socket = NULL;
 460 }
461 cache_purge(vp);
462 vp->v_type = VNON;
463 vp->v_tag = tag;
464 vp->v_op = vops;
465 insmntque(vp, mp);
466 *vpp = vp;
467 vp->v_usecount = 1;
 468 vp->v_data = NULL;
469 return (0);
470}
471
472/*
473 * Move a vnode from one mount queue to another.
474 */
475void
476insmntque(struct vnode *vp, struct mount *mp)
477{
478 /*
479 * Delete from old mount point vnode list, if on one.
480 */
 481 if (vp->v_mount != NULL)
 482 TAILQ_REMOVE(&vp->v_mount->mnt_vnodelist, vp, v_mntvnodes);
 483 /*
 484 * Insert into list of vnodes for the new mount point, if available.
 485 */
 486 if ((vp->v_mount = mp) != NULL)
 487 TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
488}
489
490/*
491 * Create a vnode for a block device.
492 * Used for root filesystem, argdev, and swap areas.
493 * Also used for memory file system special devices.
494 */
495int
496bdevvp(dev_t dev, struct vnode **vpp)
497{
498 return (getdevvp(dev, vpp, VBLK));
499}
500
501/*
502 * Create a vnode for a character device.
503 * Used for console handling.
504 */
505int
506cdevvp(dev_t dev, struct vnode **vpp)
507{
508 return (getdevvp(dev, vpp, VCHR));
509}
510
511/*
512 * Create a vnode for a device.
513 * Used by bdevvp (block device) for root file system etc.,
514 * and by cdevvp (character device) for console.
515 */
516int
517getdevvp(dev_t dev, struct vnode **vpp, enum vtype type)
518{
519 struct vnode *vp;
520 struct vnode *nvp;
521 int error;
522
 523 if (dev == NODEV) {
 524 *vpp = NULLVP;
 525 return (0);
 526 }
 527 error = getnewvnode(VT_NON, NULL, &spec_vops, &nvp);
 528 if (error) {
 529 *vpp = NULLVP;
 530 return (error);
 531 }
 532 vp = nvp;
 533 vp->v_type = type;
 534 if ((nvp = checkalias(vp, dev, NULL)) != NULL) {
 535 vput(vp);
 536 vp = nvp;
 537 }
 538 if (vp->v_type == VCHR && cdevsw[major(vp->v_rdev)].d_type == D_TTY)
 539 vp->v_flag |= VISTTY;
540 *vpp = vp;
541 return (0);
542}
543
544/*
545 * Check to see if the new vnode represents a special device
546 * for which we already have a vnode (either because of
547 * bdevvp() or because of a different vnode representing
548 * the same block device). If such an alias exists, deallocate
549 * the existing contents and return the aliased vnode. The
550 * caller is responsible for filling it with its new contents.
551 */
552struct vnode *
553checkalias(struct vnode *nvp, dev_t nvp_rdev, struct mount *mp)
554{
 555 struct proc *p = curproc;
 556 struct vnode *vp;
 557 struct vnodechain *vchain;
 558
 559 if (nvp->v_type != VBLK && nvp->v_type != VCHR)
 560 return (NULLVP);
 561
 562 vchain = &speclisth[SPECHASH(nvp_rdev)];
 563loop:
 564 SLIST_FOREACH(vp, vchain, v_specnext) {
 565 if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type) {
 566 continue;
 567 }
 568 /*
 569 * Alias, but not in use, so flush it out.
 570 */
 571 if (vp->v_usecount == 0) {
 572 vgonel(vp, p);
 573 goto loop;
 574 }
 575 if (vget(vp, LK_EXCLUSIVE)) {
 576 goto loop;
 577 }
 578 break;
 579 }
 580
 581 /*
 582 * Common case is actually in the if statement
 583 */
 584 if (vp == NULL || !(vp->v_tag == VT_NON && vp->v_type == VBLK)) {
 585 nvp->v_specinfo = malloc(sizeof(struct specinfo), M_VNODE,
 586 M_WAITOK);
 587 nvp->v_rdev = nvp_rdev;
 588 nvp->v_hashchain = vchain;
 589 nvp->v_specmountpoint = NULL;
 590 nvp->v_speclockf = NULL;
 591 nvp->v_specbitmap = NULL;
 592 if (nvp->v_type == VCHR &&
 593 (cdevsw[major(nvp_rdev)].d_flags & D_CLONE) &&
 594 (minor(nvp_rdev) >> CLONE_SHIFT == 0)) {
 595 if (vp != NULLVP)
 596 nvp->v_specbitmap = vp->v_specbitmap;
 597 else
 598 nvp->v_specbitmap = malloc(CLONE_MAPSZ,
 599 M_VNODE, M_WAITOK | M_ZERO);
 600 }
 601 SLIST_INSERT_HEAD(vchain, nvp, v_specnext);
 602 if (vp != NULLVP) {
 603 nvp->v_flag |= VALIASED;
 604 vp->v_flag |= VALIASED;
 605 vput(vp);
 606 }
 607 return (NULLVP);
608 }
609
610 /*
611 * This code is the uncommon case. It is called in case
612 * we found an alias that was VT_NON && vtype of VBLK
613 * This means we found a block device that was created
614 * using bdevvp.
615 * An example of such a vnode is the root partition device vnode
616 * created in ffs_mountroot.
617 *
618 * The vnodes created by bdevvp should not be aliased (why?).
619 */
620
621 VOP_UNLOCK(vp);
622 vclean(vp, 0, p);
623 vp->v_op = nvp->v_op;
624 vp->v_tag = nvp->v_tag;
625 nvp->v_type = VNON;
626 insmntque(vp, mp);
627 return (vp);
628}
629
630/*
631 * Grab a particular vnode from the free list, increment its
632 * reference count and lock it. If the vnode lock bit is set,
633 * the vnode is being eliminated in vgone. In that case, we
634 * cannot grab it, so the process is awakened when the
635 * transition is completed, and an error code is returned to
636 * indicate that the vnode is no longer usable, possibly
637 * having been changed to a new file system type.
638 */
639int
640vget(struct vnode *vp, int flags)
641{
642 int error, s, onfreelist;
643
644 /*
645 * If the vnode is in the process of being cleaned out for
646 * another use, we wait for the cleaning to finish and then
647 * return failure. Cleaning is determined by checking that
648 * the VXLOCK flag is set.
649 */
 650 mtx_enter(&vnode_mtx);
 651 if (vp->v_lflag & VXLOCK) {
 652 if (flags & LK_NOWAIT) {
 653 mtx_leave(&vnode_mtx);
 654 return (EBUSY);
 655 }
 656
 657 vp->v_lflag |= VXWANT;
 658 msleep_nsec(vp, &vnode_mtx, PINOD, "vget", INFSLP);
 659 mtx_leave(&vnode_mtx);
 660 return (ENOENT);
 661 }
 662 mtx_leave(&vnode_mtx);
 663
 664 onfreelist = vp->v_bioflag & VBIOONFREELIST;
 665 if (vp->v_usecount == 0 && onfreelist) {
 666 s = splbio();
 667 if (vp->v_holdcnt > 0)
 668 TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
 669 else
 670 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
 671 vp->v_bioflag &= ~VBIOONFREELIST;
 672 splx(s);
 673 }
 674
 675 vp->v_usecount++;
 676 if (flags & LK_TYPE_MASK) {
677 if ((error = vn_lock(vp, flags)) != 0) {
678 vp->v_usecount--;
679 if (vp->v_usecount == 0 && onfreelist)
680 vputonfreelist(vp);
681 }
682 return (error);
683 }
684
685 return (0);
686}
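
The usual pairing is a locking vget() balanced by vput(), which unlocks and releases the reference in one call (see vput() below). A minimal sketch:

	/* Sketch: take a use reference plus the exclusive lock, then drop both. */
	if (vget(vp, LK_EXCLUSIVE) == 0) {
		/* ... vp is referenced and exclusively locked ... */
		vput(vp);
	}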
687
688
689/* Vnode reference. */
690void
691vref(struct vnode *vp)
692{
 693 KERNEL_ASSERT_LOCKED();
 694
 695#ifdef DIAGNOSTIC
 696 if (vp->v_usecount == 0)
 697 panic("vref used where vget required");
 698 if (vp->v_type == VNON)
 699 panic("vref on a VNON vnode");
 700#endif
701 vp->v_usecount++;
702}
703
704void
705vputonfreelist(struct vnode *vp)
706{
707 int s;
708 struct freelst *lst;
709
 710 s = splbio();
 711#ifdef DIAGNOSTIC
 712 if (vp->v_usecount != 0)
 713 panic("Use count is not zero!");
 714
 715 /*
 716 * If the hold count is still positive, one or many threads could still
 717 * be waiting on the vnode lock inside uvn_io().
 718 */
 719 if (vp->v_holdcnt == 0 && vp->v_lockcount != 0)
 720 panic("%s: lock count is not zero", __func__);
 721
 722 if (vp->v_bioflag & VBIOONFREELIST) {
 723 vprint("vnode already on free list: ", vp);
 724 panic("vnode already on free list");
 725 }
 726#endif
 727
 728 vp->v_bioflag |= VBIOONFREELIST;
 729 vp->v_bioflag &= ~VBIOERROR;
 730
 731 if (vp->v_holdcnt > 0)
 732 lst = &vnode_hold_list;
 733 else
 734 lst = &vnode_free_list;
 735
 736 if (vp->v_type == VBAD)
 737 TAILQ_INSERT_HEAD(lst, vp, v_freelist);
 738 else
 739 TAILQ_INSERT_TAIL(lst, vp, v_freelist);
 740
 741 splx(s);
742}
743
744/*
745 * vput(), just unlock and vrele()
746 */
747void
748vput(struct vnode *vp)
749{
 750 struct proc *p = curproc;
 751
 752#ifdef DIAGNOSTIC
 753 if (vp == NULL)
 754 panic("vput: null vp");
 755#endif
 756
 757#ifdef DIAGNOSTIC
 758 if (vp->v_usecount == 0) {
 759 vprint("vput: bad ref count", vp);
 760 panic("vput: ref cnt");
 761 }
 762#endif
 763 vp->v_usecount--;
 764 KASSERT(vp->v_usecount > 0 || vp->v_uvcount == 0);
 765 if (vp->v_usecount > 0) {
 766 VOP_UNLOCK(vp);
 767 return;
 768 }
 769
 770#ifdef DIAGNOSTIC
 771 if (vp->v_writecount != 0) {
 772 vprint("vput: bad writecount", vp);
 773 panic("vput: v_writecount != 0");
 774 }
 775#endif
 776
 777 VOP_INACTIVE(vp, p);
 778
 779 if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
 780 vputonfreelist(vp);
781}
782
783/*
784 * Vnode release - use for active VNODES.
785 * If count drops to zero, call inactive routine and return to freelist.
786 * Returns 0 if it did not sleep.
787 */
788int
789vrele(struct vnode *vp)
790{
 791 struct proc *p = curproc;
 792
 793#ifdef DIAGNOSTIC
 794 if (vp == NULL)
 795 panic("vrele: null vp");
 796#endif
 797#ifdef DIAGNOSTIC
 798 if (vp->v_usecount == 0) {
 799 vprint("vrele: bad ref count", vp);
 800 panic("vrele: ref cnt");
 801 }
 802#endif
 803 vp->v_usecount--;
 804 if (vp->v_usecount > 0) {
 805 return (0);
 806 }
 807
 808#ifdef DIAGNOSTIC
 809 if (vp->v_writecount != 0) {
 810 vprint("vrele: bad writecount", vp);
 811 panic("vrele: v_writecount != 0");
 812 }
 813#endif
 814
 815 if (vn_lock(vp, LK_EXCLUSIVE)) {
 816#ifdef DIAGNOSTIC
 817 vprint("vrele: cannot lock", vp);
 818#endif
 819 return (1);
 820 }
 821
 822 VOP_INACTIVE(vp, p);
 823
 824 if (vp->v_usecount == 0 && !(vp->v_bioflag & VBIOONFREELIST))
825 vputonfreelist(vp);
826 return (1);
827}
828
829/* Page or buffer structure gets a reference. */
830void
831vhold(struct vnode *vp)
832{
833 /*
834 * If it is on the freelist and the hold count is currently
835 * zero, move it to the hold list.
836 */
 837 if ((vp->v_bioflag & VBIOONFREELIST) &&
 838 vp->v_holdcnt == 0 && vp->v_usecount == 0) {
 839 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
 840 TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
 841 }
842 vp->v_holdcnt++;
843}
844
845/* Lose interest in a vnode. */
846void
847vdrop(struct vnode *vp)
848{
 849#ifdef DIAGNOSTIC
 850 if (vp->v_holdcnt == 0)
 851 panic("vdrop: zero holdcnt");
 852#endif
 853
 854 vp->v_holdcnt--;
 855
 856 /*
 857 * If it is on the holdlist and the hold count drops to
 858 * zero, move it to the free list.
 859 */
 860 if ((vp->v_bioflag & VBIOONFREELIST) &&
 861 vp->v_holdcnt == 0 && vp->v_usecount == 0) {
 862 TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
 863 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
 864 }
865}
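
vhold()/vdrop() bracket a buffer's (or page's) interest in a vnode: a held vnode migrates to vnode_hold_list, so getnewvnode() above deprioritizes recycling it, which would lose the identity of its buffers. Roughly (a sketch; in the tree the calls live in the buffer attach/detach paths):

	/* Sketch: keep vp on the hold list while a buffer references it. */
	vhold(vp);	/* attach: holdcnt 0 -> 1 moves vp to vnode_hold_list */
	/* ... buffer remains associated with vp ... */
	vdrop(vp);	/* detach: holdcnt 1 -> 0 moves vp back to vnode_free_list */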
866
867/*
868 * Remove any vnodes in the vnode table belonging to mount point mp.
869 *
870 * If MNT_NOFORCE is specified, there should not be any active ones,
871 * return error if any are found (nb: this is a user error, not a
872 * system error). If MNT_FORCE is specified, detach any active vnodes
873 * that are found.
874 */
875#ifdef DEBUG_SYSCTL
876int busyprt = 0; /* print out busy vnodes */
877struct ctldebug debug_vfs_busyprt = { "vfs_busyprt", &busyprt };
878#endif
879
880int
881vfs_mount_foreach_vnode(struct mount *mp,
882 int (*func)(struct vnode *, void *), void *arg) {
883 struct vnode *vp, *nvp;
884 int error = 0;
885
886loop:
 887 TAILQ_FOREACH_SAFE(vp, &mp->mnt_vnodelist, v_mntvnodes, nvp) {
888 if (vp->v_mount != mp)
889 goto loop;
890
891 error = func(vp, arg);
892
893 if (error != 0)
894 break;
895 }
896
897 return (error);
898}
899
900struct vflush_args {
901 struct vnode *skipvp;
902 int busy;
903 int flags;
904};
905
906int
907vflush_vnode(struct vnode *vp, void *arg)
908{
909 struct vflush_args *va = arg;
 910 struct proc *p = curproc;
911
912 if (vp == va->skipvp) {
913 return (0);
914 }
915
 916 if ((va->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
917 return (0);
918 }
919
920 /*
921 * If WRITECLOSE is set, only flush out regular file
922 * vnodes open for writing.
923 */
 924 if ((va->flags & WRITECLOSE) &&
925 (vp->v_writecount == 0 || vp->v_type != VREG)) {
926 return (0);
927 }
928
929 /*
930 * With v_usecount == 0, all we need to do is clear
931 * out the vnode data structures and we are done.
932 */
933 if (vp->v_usecount == 0) {
934 vgonel(vp, p);
935 return (0);
936 }
937
938 /*
939 * If FORCECLOSE is set, forcibly close the vnode.
940 * For block or character devices, revert to an
941 * anonymous device. For all other files, just kill them.
942 */
 943 if (va->flags & FORCECLOSE) {
944 if (vp->v_type != VBLK && vp->v_type != VCHR) {
945 vgonel(vp, p);
946 } else {
947 vclean(vp, 0, p);
948 vp->v_op = &spec_vops;
 949 insmntque(vp, NULL);
950 }
951 return (0);
952 }
953
954 /*
955 * If set, this is allowed to ignore vnodes which don't
956 * have changes pending to disk.
957 * XXX Might be nice to check per-fs "inode" flags, but
958 * generally the filesystem is sync'd already, right?
959 */
 960 if ((va->flags & IGNORECLEAN) &&
 961 LIST_EMPTY(&vp->v_dirtyblkhd))
962 return (0);
963
964#ifdef DEBUG_SYSCTL
965 if (busyprt)
966 vprint("vflush: busy vnode", vp);
967#endif
968 va->busy++;
969 return (0);
970}
971
972int
973vflush(struct mount *mp, struct vnode *skipvp, int flags)
974{
975 struct vflush_args va;
976 va.skipvp = skipvp;
977 va.busy = 0;
978 va.flags = flags;
979
980 vfs_mount_foreach_vnode(mp, vflush_vnode, &va);
981
982 if (va.busy)
 983 return (EBUSY);
984 return (0);
985}
986
987/*
988 * Disassociate the underlying file system from a vnode.
989 */
990void
991vclean(struct vnode *vp, int flags, struct proc *p)
992{
993 int active, do_wakeup = 0;
994
995 /*
996 * Check to see if the vnode is in use.
997 * If so we have to reference it before we clean it out
998 * so that its count cannot fall to zero and generate a
999 * race against ourselves to recycle it.
1000 */
1001 if ((active = vp->v_usecount) != 0)
1002 vp->v_usecount++;
1003
1004 /*
1005 * Prevent the vnode from being recycled or
1006 * brought into use while we clean it out.
1007 */
 1008 mtx_enter(&vnode_mtx);
 1009 if (vp->v_lflag & VXLOCK)
 1010 panic("vclean: deadlock");
 1011 vp->v_lflag |= VXLOCK;
 1012
 1013 if (vp->v_lockcount > 0) {
 1014 /*
 1015 * Ensure that any thread currently waiting on the same lock has
 1016 * observed that the vnode is about to be exclusively locked
 1017 * before continuing.
 1018 */
 1019 msleep_nsec(&vp->v_lockcount, &vnode_mtx, PINOD, "vop_lock",
 1020 INFSLP);
 1021 KASSERT(vp->v_lockcount == 0);
1022 }
1023 mtx_leave(&vnode_mtx);
1024
1025 /*
1026 * Even if the count is zero, the VOP_INACTIVE routine may still
1027 * have the object locked while it cleans it out. The VOP_LOCK
1028 * ensures that the VOP_INACTIVE routine is done with its work.
1029 * For active vnodes, it ensures that no other activity can
1030 * occur while the underlying object is being cleaned out.
1031 */
 1032 VOP_LOCK(vp, LK_EXCLUSIVE | LK_DRAIN);
1033
1034 /*
1035 * Clean out any VM data associated with the vnode.
1036 */
1037 uvm_vnp_terminate(vp);
1038 /*
1039 * Clean out any buffers associated with the vnode.
1040 */
 1041 if (flags & DOCLOSE)
 1042 vinvalbuf(vp, V_SAVE, NOCRED, p, 0, INFSLP);
1043 /*
1044 * If purging an active vnode, it must be closed and
1045 * deactivated before being reclaimed. Note that the
1046 * VOP_INACTIVE will unlock the vnode
1047 */
1048 if (active) {
 1049 if (flags & DOCLOSE)
 1050 VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
1051 VOP_INACTIVE(vp, p);
1052 } else {
1053 /*
1054 * Any other processes trying to obtain this lock must first
1055 * wait for VXLOCK to clear, then call the new lock operation.
1056 */
1057 VOP_UNLOCK(vp);
1058 }
1059
1060 /*
1061 * Reclaim the vnode.
1062 */
1063 if (VOP_RECLAIM(vp, p))
1064 panic("vclean: cannot reclaim");
1065 if (active) {
1066 vp->v_usecount--;
1067 if (vp->v_usecount == 0) {
1068 if (vp->v_holdcnt > 0)
1069 panic("vclean: not clean");
1070 vputonfreelist(vp);
1071 }
1072 }
1073 cache_purge(vp);
1074
1075 /*
1076 * Done with purge, notify sleepers of the grim news.
1077 */
1078 vp->v_op = &dead_vops;
 1079 VN_KNOTE(vp, NOTE_REVOKE);
1080 vp->v_tag = VT_NON;
1081#ifdef VFSLCKDEBUG
 1082 vp->v_flag &= ~VLOCKSWORK;
1083#endif
1084 mtx_enter(&vnode_mtx);
 1085 vp->v_lflag &= ~VXLOCK;
 1086 if (vp->v_lflag & VXWANT) {
 1087 vp->v_lflag &= ~VXWANT;
1088 do_wakeup = 1;
1089 }
1090 mtx_leave(&vnode_mtx);
1091 if (do_wakeup)
1092 wakeup(vp);
1093}
1094
1095/*
1096 * Recycle an unused vnode to the front of the free list.
1097 */
1098int
1099vrecycle(struct vnode *vp, struct proc *p)
1100{
1101 if (vp->v_usecount == 0) {
1102 vgonel(vp, p);
1103 return (1);
1104 }
1105 return (0);
1106}
1107
1108/*
1109 * Eliminate all activity associated with a vnode
1110 * in preparation for reuse.
1111 */
1112void
1113vgone(struct vnode *vp)
1114{
 1115 struct proc *p = curproc;
1116 vgonel(vp, p);
12
Calling 'vgonel'
32
Returning from 'vgonel'
1117}
1118
1119/*
1120 * vgone, with struct proc.
1121 */
1122void
1123vgonel(struct vnode *vp, struct proc *p)
1124{
1125 struct vnode *vq;
1126 struct vnode *vx;
1127
 1128 KASSERT(vp->v_uvcount == 0);
13
Assuming field 'v_uvcount' is equal to 0
14
'?' condition is true
1129
1130 /*
1131 * If a vgone (or vclean) is already in progress,
1132 * wait until it is done and return.
1133 */
1134 mtx_enter(&vnode_mtx);
 1135 if (vp->v_lflag & VXLOCK) {
15
Assuming the condition is false
16
Taking false branch
 1136 vp->v_lflag |= VXWANT;
 1137 msleep_nsec(vp, &vnode_mtx, PINOD, "vgone", INFSLP);
1138 mtx_leave(&vnode_mtx);
1139 return;
1140 }
1141 mtx_leave(&vnode_mtx);
1142
1143 /*
1144 * Clean out the filesystem specific data.
1145 */
 1146 vclean(vp, DOCLOSE, p);
1147 /*
1148 * Delete from old mount point vnode list, if on one.
1149 */
 1150 if (vp->v_mount != NULL)
17
Assuming field 'v_mount' is equal to NULL
18
Taking false branch
 1151 insmntque(vp, NULL);
1152 /*
1153 * If special device, remove it from special device alias list
1154 * if it is on one.
1155 */
1156 if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
19
Assuming field 'v_type' is not equal to VBLK
20
Assuming field 'v_type' is equal to VCHR
22
Taking true branch
 1157 vp->v_specinfo != NULL) {
21
Assuming field 'vu_specinfo' is not equal to NULL
 1158 if ((vp->v_flag & VALIASED) == 0 && vp->v_type == VCHR &&
23
Assuming the condition is true
23.1
Field 'v_type' is equal to VCHR
 1159 (cdevsw[major(vp->v_rdev)].d_flags & D_CLONE) &&
24
Assuming the condition is false
 1160 (minor(vp->v_rdev) >> CLONE_SHIFT == 0)) {
 1161 free(vp->v_specbitmap, M_VNODE, CLONE_MAPSZ);
1162 }
 1163 SLIST_REMOVE(vp->v_hashchain, vp, vnode, v_specnext);
25
Assuming 'vp' is equal to field 'slh_first'
26
Taking true branch
27
Loop condition is false. Exiting loop
28
Loop condition is false. Exiting loop
 1164 if (vp->v_flag & VALIASED) {
29
Taking false branch
 1165 vx = NULL;
 1166 SLIST_FOREACH(vq, vp->v_hashchain, v_specnext) {
 1167 if (vq->v_rdev != vp->v_rdev ||
1168 vq->v_type != vp->v_type)
1169 continue;
1170 if (vx)
1171 break;
1172 vx = vq;
1173 }
 1174 if (vx == NULL)
 1175 panic("missing alias");
 1176 if (vq == NULL)
 1177 vx->v_flag &= ~VALIASED;
 1178 vp->v_flag &= ~VALIASED;
1179 }
 1180 lf_purgelocks(&vp->v_speclockf);
 1181 free(vp->v_specinfo, M_VNODE, sizeof(struct specinfo));
 1182 vp->v_specinfo = NULL;
30
Null pointer value stored to field 'vu_specinfo'
1183 }
1184 /*
1185 * If it is on the freelist and not already at the head,
1186 * move it to the head of the list.
1187 */
1188 vp->v_type = VBAD;
1189
1190 /*
1191 * Move onto the free list, unless we were called from
1192 * getnewvnode and we're not on any free list
1193 */
1194 if (vp->v_usecount == 0 &&
31
Assuming field 'v_usecount' is not equal to 0
 1195 (vp->v_bioflag & VBIOONFREELIST)) {
 1196 int s;
 1197
 1198 s = splbio();
 1199
 1200 if (vp->v_holdcnt > 0)
 1201 panic("vgonel: not clean");
 1202
 1203 if (TAILQ_FIRST(&vnode_free_list) != vp) {
 1204 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
 1205 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
 1206 }
 1207 splx(s);
1208 }
1209}
1210
1211/*
1212 * Lookup a vnode by device number.
1213 */
1214int
1215vfinddev(dev_t dev, enum vtype type, struct vnode **vpp)
1216{
1217 struct vnode *vp;
1218 int rc =0;
1219
 1220 SLIST_FOREACH(vp, &speclisth[SPECHASH(dev)], v_specnext) {
 1221 if (dev != vp->v_rdev || type != vp->v_type)
1222 continue;
1223 *vpp = vp;
1224 rc = 1;
1225 break;
1226 }
1227 return (rc);
1228}
1229
1230/*
1231 * Revoke all the vnodes corresponding to the specified minor number
1232 * range (endpoints inclusive) of the specified major.
1233 */
1234void
1235vdevgone(int maj, int minl, int minh, enum vtype type)
1236{
1237 struct vnode *vp;
1238 int mn;
1239
1240 for (mn = minl; mn <= minh; mn++)
 1241 if (vfinddev(makedev(maj, mn), type, &vp))
 1242 VOP_REVOKE(vp, REVOKEALL);
1243}
1244
1245/*
1246 * Calculate the total number of references to a special device.
1247 */
1248int
1249vcount(struct vnode *vp)
1250{
1251 struct vnode *vq;
1252 int count;
1253
1254loop:
 1255 if ((vp->v_flag & VALIASED) == 0)
1
Assuming the condition is false
2
Taking false branch
35
Taking false branch
1256 return (vp->v_usecount);
1257 count = 0;
 1258 SLIST_FOREACH(vq, vp->v_hashchain, v_specnext) {
3
Assuming 'vq' is not equal to null
4
Loop condition is true. Entering loop body
36
Loop condition is true. Entering loop body
 1259 if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
5
Assuming 'vq->v_rdev' is equal to 'vp->v_rdev'
6
Assuming 'vq->v_type' is equal to 'vp->v_type'
7
Taking false branch
37
Access to field 'si_rdev' results in a dereference of a null pointer (loaded from field 'vu_specinfo')
1260 continue;
1261 /*
1262 * Alias, but not in use, so flush it out.
1263 */
1264 if (vq->v_usecount == 0 && vq != vp) {
8
Assuming field 'v_usecount' is equal to 0
9
Assuming 'vq' is not equal to 'vp'
10
Taking true branch
1265 vgone(vq);
11
Calling 'vgone'
33
Returning from 'vgone'
1266 goto loop;
34
Control jumps to line 1255
1267 }
1268 count += vq->v_usecount;
1269 }
1270 return (count);
1271}
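
On the analyzer's path, the vgone(vq) call clears vq's vu_specinfo (step 30), and the `goto loop` rescan is assumed to reach the same entry again, so the vq->v_rdev load through vu_specinfo trips step 37. Because the chain's sle_next link itself lives inside specinfo, a reclaimed entry can be neither counted nor stepped over, which is why vgonel() unlinks with SLIST_REMOVE() before freeing. A defensive variant that also satisfies the checker would bail out on a cleared entry (a hypothetical sketch, not a committed fix):

	/*
	 * Hypothetical guarded variant of vcount(). If a chain entry has
	 * lost its specinfo, the unlink-before-free invariant was violated
	 * and the walk cannot continue safely; fall back to the plain count.
	 */
	int
	vcount_guarded(struct vnode *vp)
	{
		struct vnode *vq;
		int count;

	loop:
		if ((vp->v_flag & VALIASED) == 0)
			return (vp->v_usecount);
		count = 0;
		SLIST_FOREACH(vq, vp->v_hashchain, v_specnext) {
			if (vq->v_specinfo == NULL)
				return (vp->v_usecount);
			if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
				continue;
			if (vq->v_usecount == 0 && vq != vp) {
				vgone(vq);
				goto loop;
			}
			count += vq->v_usecount;
		}
		return (count);
	}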
1272
 1273#if defined(DEBUG) || defined(DIAGNOSTIC)
1274/*
1275 * Print out a description of a vnode.
1276 */
1277static char *typename[] =
1278 { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
1279
1280void
1281vprint(char *label, struct vnode *vp)
1282{
1283 char buf[64];
1284
 1285 if (label != NULL)
 1286 printf("%s: ", label);
 1287 printf("%p, type %s, use %u, write %u, hold %u,",
 1288 vp, typename[vp->v_type], vp->v_usecount, vp->v_writecount,
 1289 vp->v_holdcnt);
 1290 buf[0] = '\0';
 1291 if (vp->v_flag & VROOT)
 1292 strlcat(buf, "|VROOT", sizeof buf);
 1293 if (vp->v_flag & VTEXT)
 1294 strlcat(buf, "|VTEXT", sizeof buf);
 1295 if (vp->v_flag & VSYSTEM)
 1296 strlcat(buf, "|VSYSTEM", sizeof buf);
 1297 if (vp->v_lflag & VXLOCK)
 1298 strlcat(buf, "|VXLOCK", sizeof buf);
 1299 if (vp->v_lflag & VXWANT)
 1300 strlcat(buf, "|VXWANT", sizeof buf);
 1301 if (vp->v_bioflag & VBIOWAIT)
 1302 strlcat(buf, "|VBIOWAIT", sizeof buf);
 1303 if (vp->v_bioflag & VBIOONFREELIST)
 1304 strlcat(buf, "|VBIOONFREELIST", sizeof buf);
 1305 if (vp->v_bioflag & VBIOONSYNCLIST)
 1306 strlcat(buf, "|VBIOONSYNCLIST", sizeof buf);
 1307 if (vp->v_flag & VALIASED)
 1308 strlcat(buf, "|VALIASED", sizeof buf);
 1309 if (buf[0] != '\0')
 1310 printf(" flags (%s)", &buf[1]);
 1311 if (vp->v_data == NULL) {
1312 printf("\n");
1313 } else {
1314 printf("\n\t");
1315 VOP_PRINT(vp);
1316 }
1317}
1318#endif /* DEBUG || DIAGNOSTIC */
1319
1320#ifdef DEBUG
1321/*
1322 * List all of the locked vnodes in the system.
1323 * Called when debugging the kernel.
1324 */
1325void
1326printlockedvnodes(void)
1327{
1328 struct mount *mp;
1329 struct vnode *vp;
1330
1331 printf("Locked vnodes\n");
1332
 1333 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
 1334 if (vfs_busy(mp, VB_READ|VB_NOWAIT))
 1335 continue;
 1336 TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
 1337 if (VOP_ISLOCKED(vp))
 1338 vprint(NULL, vp);
1339 }
1340 vfs_unbusy(mp);
1341 }
1342
1343}
1344#endif
1345
1346/*
1347 * Top level filesystem related information gathering.
1348 */
1349int
1350vfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
1351 size_t newlen, struct proc *p)
1352{
1353 struct vfsconf *vfsp, *tmpvfsp;
1354 int ret;
1355
1356 /* all sysctl names at this level are at least name and field */
1357 if (namelen < 2)
 1358 return (ENOTDIR); /* overloaded */
 1359
 1360 if (name[0] != VFS_GENERIC) {
 1361 vfsp = vfs_bytypenum(name[0]);
 1362 if (vfsp == NULL || vfsp->vfc_vfsops->vfs_sysctl == NULL)
 1363 return (EOPNOTSUPP);
 1364
 1365 return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
 1366 oldp, oldlenp, newp, newlen, p));
 1367 }
 1368
 1369 switch (name[1]) {
 1370 case VFS_MAXTYPENUM:
 1371 return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
 1372
 1373 case VFS_CONF:
 1374 if (namelen < 3)
 1375 return (ENOTDIR); /* overloaded */
 1376
 1377 vfsp = vfs_bytypenum(name[2]);
 1378 if (vfsp == NULL)
 1379 return (EOPNOTSUPP);
 1380
 1381 /* Make a copy, clear out kernel pointers */
 1382 tmpvfsp = malloc(sizeof(*tmpvfsp), M_TEMP, M_WAITOK|M_ZERO);
 1383 memcpy(tmpvfsp, vfsp, sizeof(*tmpvfsp));
 1384 tmpvfsp->vfc_vfsops = NULL;
 1385
 1386 ret = sysctl_rdstruct(oldp, oldlenp, newp, tmpvfsp,
 1387 sizeof(struct vfsconf));
 1388
 1389 free(tmpvfsp, M_TEMP, sizeof(*tmpvfsp));
 1390 return (ret);
 1391 case VFS_BCACHESTAT: /* buffer cache statistics */
 1392 ret = sysctl_rdstruct(oldp, oldlenp, newp, &bcstats,
 1393 sizeof(struct bcachestats));
 1394 return(ret);
 1395 }
 1396 return (EOPNOTSUPP);
1397}
1398
1399/*
1400 * Check to see if a filesystem is mounted on a block device.
1401 */
1402int
1403vfs_mountedon(struct vnode *vp)
1404{
1405 struct vnode *vq;
1406 int error = 0;
1407
 1408 if (vp->v_specmountpoint != NULL)
 1409 return (EBUSY);
 1410 if (vp->v_flag & VALIASED) {
 1411 SLIST_FOREACH(vq, vp->v_hashchain, v_specnext) {
 1412 if (vq->v_rdev != vp->v_rdev ||
 1413 vq->v_type != vp->v_type)
 1414 continue;
 1415 if (vq->v_specmountpoint != NULL) {
 1416 error = EBUSY;
1417 break;
1418 }
1419 }
1420 }
1421 return (error);
1422}
1423
 1424#ifdef NFSSERVER
1425/*
1426 * Build hash lists of net addresses and hang them off the mount point.
1427 * Called by vfs_export() to set up the lists of export addresses.
1428 */
1429int
1430vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
1431 struct export_args *argp)
1432{
1433 struct netcred *np;
1434 struct radix_node_head *rnh;
1435 int nplen, i;
1436 struct radix_node *rn;
1437 struct sockaddr *saddr, *smask = NULL((void *)0);
1438 int error;
1439
1440 if (argp->ex_addrlen == 0) {
 1441 if (mp->mnt_flag & MNT_DEFEXPORTED)
 1442 return (EPERM);
1443 np = &nep->ne_defexported;
1444 /* fill in the kernel's ucred from userspace's xucred */
1445 if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1446 return (error);
 1447 mp->mnt_flag |= MNT_DEFEXPORTED;
1448 goto finish;
1449 }
 1450 if (argp->ex_addrlen > MLEN || argp->ex_masklen > MLEN ||
 1451 argp->ex_addrlen < 0 || argp->ex_masklen < 0)
 1452 return (EINVAL);
 1453 nplen = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
 1454 np = (struct netcred *)malloc(nplen, M_NETADDR, M_WAITOK|M_ZERO);
1455 np->netc_len = nplen;
1456 saddr = (struct sockaddr *)(np + 1);
1457 error = copyin(argp->ex_addr, saddr, argp->ex_addrlen);
1458 if (error)
1459 goto out;
1460 if (saddr->sa_len > argp->ex_addrlen)
1461 saddr->sa_len = argp->ex_addrlen;
1462 if (argp->ex_masklen) {
1463 smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
1464 error = copyin(argp->ex_mask, smask, argp->ex_masklen);
1465 if (error)
1466 goto out;
1467 if (smask->sa_len > argp->ex_masklen)
1468 smask->sa_len = argp->ex_masklen;
1469 }
1470 /* fill in the kernel's ucred from userspace's xucred */
1471 if ((error = crfromxucred(&np->netc_anon, &argp->ex_anon)))
1472 goto out;
1473 i = saddr->sa_family;
1474 switch (i) {
 1475 case AF_INET:
 1476 if ((rnh = nep->ne_rtable_inet) == NULL) {
 1477 if (!rn_inithead((void **)&nep->ne_rtable_inet,
 1478 offsetof(struct sockaddr_in, sin_addr))) {
 1479 error = ENOBUFS;
1480 goto out;
1481 }
1482 rnh = nep->ne_rtable_inet;
1483 }
1484 break;
1485 default:
1486 error = EINVAL22;
1487 goto out;
1488 }
1489 rn = rn_addroute(saddr, smask, rnh, np->netc_rnodes, 0);
1490 if (rn == NULL((void *)0) || np != (struct netcred *)rn) { /* already exists */
1491 error = EPERM1;
1492 goto out;
1493 }
1494finish:
1495 np->netc_exflags = argp->ex_flags;
1496 return (0);
1497out:
1498 free(np, M_NETADDR49, np->netc_len);
1499 return (error);
1500}
1501
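The export_args consumed above is normally assembled by mountd(8) from /etc/exports. A hedged fragment showing the shape of one subnet export (field names from <sys/mount.h>; the addresses, anon uid, and surrounding setup are illustrative):

/*
 * Fragment: export a /24, squashing anonymous requests to xu.
 * mountd(8) builds the real thing; values here are illustrative.
 */
struct sockaddr_in addr = { .sin_len = sizeof(addr), .sin_family = AF_INET };
struct sockaddr_in mask = { .sin_len = sizeof(mask), .sin_family = AF_INET };
struct xucred xu = { .cr_uid = (uid_t)-2 };	/* "nobody"-style anon uid */
struct export_args ea = {
	.ex_flags = MNT_EXPORTED,
	.ex_anon = xu,
	.ex_addr = (struct sockaddr *)&addr,
	.ex_addrlen = sizeof(addr),
	.ex_mask = (struct sockaddr *)&mask,
	.ex_masklen = sizeof(mask),
};
/* ... fill addr.sin_addr / mask.sin_addr before handing ea to the kernel ... */
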
int
vfs_free_netcred(struct radix_node *rn, void *w, u_int id)
{
	struct radix_node_head *rnh = (struct radix_node_head *)w;
	struct netcred *np = (struct netcred *)rn;

	rn_delete(rn->rn_key, rn->rn_mask, rnh, NULL);
	free(np, M_NETADDR, np->netc_len);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
void
vfs_free_addrlist(struct netexport *nep)
{
	struct radix_node_head *rnh;

	if ((rnh = nep->ne_rtable_inet) != NULL) {
		rn_walktree(rnh, vfs_free_netcred, rnh);
		free(rnh, M_RTABLE, sizeof(*rnh));
		nep->ne_rtable_inet = NULL;
	}
}
#endif /* NFSSERVER */

int
vfs_export(struct mount *mp, struct netexport *nep, struct export_args *argp)
{
#ifdef NFSSERVER
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)) != 0)
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
#else
	return (ENOTSUP);
#endif /* NFSSERVER */
}

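vfs_export() is reached from a filesystem's mount routine when userland asks for an export update rather than a new mount. A hedged sketch of that dispatch (the ufsmount/um_export naming follows the ffs convention but is illustrative here):

/*
 * Sketch: inside a filesystem's VFS_MOUNT handler.  An update with
 * no device path means "change the export list only".
 */
if (mp->mnt_flag & MNT_UPDATE) {
	struct ufsmount *ump = VFSTOUFS(mp);	/* illustrative accessor */

	if (args.fspec == NULL)
		return (vfs_export(mp, &ump->um_export,
		    &args.export_info));
}
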
struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep, struct mbuf *nam)
{
#ifdef NFSSERVER
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			switch (saddr->sa_family) {
			case AF_INET:
				rnh = nep->ne_rtable_inet;
				break;
			default:
				rnh = NULL;
				break;
			}
			if (rnh != NULL)
				np = (struct netcred *)rn_match(saddr, rnh);
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
#else
	return (NULL);
#endif /* NFSSERVER */
}

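On the NFS server side the returned netcred gates every request and supplies the credentials anonymous accesses are squashed to. A hedged fragment of that check (simplified from the nfsrv pattern; names illustrative):

/*
 * Fragment: decide whether the peer in nam may access mp at all,
 * and with what effective credentials.
 */
struct netcred *np;

np = vfs_export_lookup(mp, nep, nam);
if (np == NULL)
	return (EACCES);		/* host not in the export list */
if (np->netc_exflags & MNT_EXRDONLY)
	rdonly = 1;			/* force read-only semantics */
cred = &np->netc_anon;			/* ucred built by crfromxucred() */
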
/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
    mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;

	/* User id 0 always gets read/write access. */
	if (cred->cr_uid == 0) {
		/* For VEXEC, at least one of the execute bits must be set. */
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return EACCES;
		return 0;
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check the groups. */
	if (groupmember(gid, cred)) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return (file_mode & mask) == mask ? 0 : EACCES;
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return (file_mode & mask) == mask ? 0 : EACCES;
}

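In practice each filesystem's access VOP boils down to one vaccess() call with the on-disk ownership triple. A hedged sketch of such a handler (examplefs_node and its n_* fields are invented for illustration; ufs passes its inode fields here):

/*
 * Sketch: a VOP_ACCESS implementation.  ALLPERMS masks the mode down
 * to its permission bits before vaccess() compares them.
 */
int
examplefs_access(void *v)
{
	struct vop_access_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct examplefs_node *np = vp->v_data;	/* illustrative */

	return (vaccess(vp->v_type, np->n_mode & ALLPERMS,
	    np->n_uid, np->n_gid, ap->a_mode, ap->a_cred));
}
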
int
vnoperm(struct vnode *vp)
{
	if (vp->v_flag & VROOT || vp->v_mount == NULL)
		return 0;

	return (vp->v_mount->mnt_flag & MNT_NOPERM);
}

struct rwlock vfs_stall_lock = RWLOCK_INITIALIZER("vfs_stall");
unsigned int vfs_stalling = 0;

int
vfs_stall(struct proc *p, int stall)
{
	struct mount *mp;
	int allerror = 0, error;

	if (stall) {
		atomic_inc_int(&vfs_stalling);
		rw_enter_write(&vfs_stall_lock);
	}

	/*
	 * The loop variable mp is protected by vfs_busy() so that it cannot
	 * be unmounted while VFS_SYNC() sleeps.  Traverse forward to keep the
	 * lock order consistent with dounmount().
	 */
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (stall) {
			error = vfs_busy(mp, VB_WRITE|VB_WAIT|VB_DUPOK);
			if (error) {
				printf("%s: busy\n", mp->mnt_stat.f_mntonname);
				allerror = error;
				continue;
			}
			uvm_vnp_sync(mp);
			error = VFS_SYNC(mp, MNT_WAIT, stall, p->p_ucred, p);
			if (error) {
				printf("%s: failed to sync\n",
				    mp->mnt_stat.f_mntonname);
				vfs_unbusy(mp);
				allerror = error;
				continue;
			}
			mp->mnt_flag |= MNT_STALLED;
		} else {
			if (mp->mnt_flag & MNT_STALLED) {
				vfs_unbusy(mp);
				mp->mnt_flag &= ~MNT_STALLED;
			}
		}
	}

	if (!stall) {
		rw_exit_write(&vfs_stall_lock);
		atomic_dec_int(&vfs_stalling);
	}

	return (allerror);
}

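The stall machinery exists for suspend/hibernate: every mount is synced and vfs_busy()ed before the machine sleeps, then released on resume. A hedged sketch of the bracket a suspend path would use (function name illustrative):

/*
 * Sketch: freeze all filesystems around a suspend-like operation.
 * vfs_stall(p, 1) syncs and busies every mount; vfs_stall(p, 0)
 * releases them again.
 */
int
example_suspend(struct proc *p)
{
	int error;

	if ((error = vfs_stall(p, 1)) != 0) {
		vfs_stall(p, 0);	/* undo a partial stall */
		return (error);
	}
	/* ... device suspend / image write happens here ... */
	vfs_stall(p, 0);
	return (0);
}
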
void
vfs_stall_barrier(void)
{
	if (__predict_false(vfs_stalling)) {
		rw_enter_read(&vfs_stall_lock);
		rw_exit_read(&vfs_stall_lock);
	}
}

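Hot VFS entry points call the barrier so no new filesystem I/O starts while a stall holds vfs_stall_lock; the read-enter/exit pair simply blocks until the staller lets go and is nearly free otherwise, since vfs_stalling is normally 0. A hedged sketch of a caller (illustrative):

/*
 * Sketch: the pattern at the top of file-I/O paths.
 */
int
example_vfs_entry(void)
{
	vfs_stall_barrier();	/* wait out any suspend-time stall */
	/* ... safe to start new filesystem I/O here ... */
	return (0);
}
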
/*
 * Unmount all file systems.
 * We traverse the list in reverse order under the assumption that doing so
 * will avoid needing to worry about dependencies.
 */
void
vfs_unmountall(void)
{
	struct mount *mp, *nmp;
	int allerror, error, again = 1;

 retry:
	allerror = 0;
	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, nmp) {
		if (vfs_busy(mp, VB_WRITE|VB_NOWAIT))
			continue;
		/* XXX Here is a race, the next pointer is not locked. */
		if ((error = dounmount(mp, MNT_FORCE, curproc)) != 0) {
			printf("unmount of %s failed with error %d\n",
			    mp->mnt_stat.f_mntonname, error);
			allerror = 1;
		}
	}

	if (allerror) {
		printf("WARNING: some file systems would not unmount\n");
		if (again) {
			printf("retrying\n");
			again = 0;
			goto retry;
		}
	}
}

/*
 * Sync and unmount file systems before shutting down.
 */
void
vfs_shutdown(struct proc *p)
{
#ifdef ACCOUNTING
	acct_shutdown();
#endif

	printf("syncing disks...");

	if (panicstr == NULL) {
		/* Sync before unmount, in case we hang on something. */
		sys_sync(p, NULL, NULL);
		vfs_unmountall();
	}

#if NSOFTRAID > 0
	sr_quiesce();
#endif

	if (vfs_syncwait(p, 1))
		printf(" giving up\n");
	else
		printf(" done\n");
}

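This is the hook the reboot path runs so "syncing disks... done" appears before the machine halts. A hedged sketch of where it sits (modeled on the traditional boot()/RB_NOSYNC convention; simplified):

/*
 * Sketch: the shutdown ordering.  RB_NOSYNC skips the sync, e.g.
 * when the disks are suspected to be wedged.
 */
void
example_boot(int howto, struct proc *p)
{
	if ((howto & RB_NOSYNC) == 0)
		vfs_shutdown(p);	/* sync + unmount, prints progress */
	/* ... run shutdown hooks, then halt or reset ... */
}
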
/*
 * perform sync() operation and wait for buffers to flush.
 */
int
vfs_syncwait(struct proc *p, int verbose)
{
	struct buf *bp;
	int iter, nbusy, dcount, s;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	sys_sync(p, NULL, NULL);

	/* Wait for sync to finish. */
	dcount = 10000;
	for (iter = 0; iter < 20; iter++) {
		nbusy = 0;
		LIST_FOREACH(bp, &bufhead, b_list) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_flags & B_DELWRI) {
				s = splbio();
				bremfree(bp);
				buf_acquire(bp);
				splx(s);
				nbusy++;
				bawrite(bp);
				if (dcount-- <= 0) {
					if (verbose)
						printf("softdep ");
					return 1;
				}
			}
		}
		if (nbusy == 0)
			break;
		if (verbose)
			printf("%d ", nbusy);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held())
			hold_count = __mp_release_all(&kernel_lock);
		else
			hold_count = 0;
#endif
		DELAY(40000 * iter);
#ifdef MULTIPROCESSOR
		if (hold_count)
			__mp_acquire_count(&kernel_lock, hold_count);
#endif
	}

	return nbusy;
}

/*
 * posix file system related system variables.
 */
int
fs_posix_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen, struct proc *p)
{
	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case FS_POSIX_SETUID:
		if (newp && securelevel > 0)
			return (EPERM);
		return (sysctl_int(oldp, oldlenp, newp, newlen, &suid_clear));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

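This implements the fs.posix.setuid knob (backed by suid_clear: whether an ownership change clears the set-ID bits), writable only below securelevel 1. A minimal userland read, assuming the CTL_FS/FS_POSIX/FS_POSIX_SETUID constants come from <sys/sysctl.h> (hedged):

#include <sys/param.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int mib[3] = { CTL_FS, FS_POSIX, FS_POSIX_SETUID };
	int val;
	size_t len = sizeof(val);

	if (sysctl(mib, 3, &val, &len, NULL, 0) == -1) {
		perror("sysctl");
		return 1;
	}
	printf("fs.posix.setuid = %d\n", val);
	return 0;
}
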
/*
 * file system related system variables.
 */
int
fs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	sysctlfn *fn;

	switch (name[0]) {
	case FS_POSIX:
		fn = fs_posix_sysctl;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
}

/*
 * Routines dealing with vnodes and buffers
 */

/*
 * Wait for all outstanding I/Os to complete
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
int
vwaitforio(struct vnode *vp, int slpflag, char *wmesg, uint64_t timeo)
{
	int error = 0;

	splassert(IPL_BIO);

	while (vp->v_numoutput) {
		vp->v_bioflag |= VBIOWAIT;
		error = tsleep_nsec(&vp->v_numoutput,
		    slpflag | (PRIBIO + 1), wmesg, timeo);
		if (error)
			break;
	}

	return (error);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 *
 * Manipulates v_numoutput. Must be called at splbio()
 */
void
vwakeup(struct vnode *vp)
{
	splassert(IPL_BIO);

	if (vp != NULL) {
		if (vp->v_numoutput-- == 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_bioflag & VBIOWAIT) && vp->v_numoutput == 0) {
			vp->v_bioflag &= ~VBIOWAIT;
			wakeup(&vp->v_numoutput);
		}
	}
}

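Together vwaitforio() and vwakeup() implement a counted sleep on v_numoutput: issuers bump the counter under splbio() before starting a write, completion drops it, and drainers sleep until it reaches zero. Hedged fragments of the three roles (simplified; the real issue and completion code lives in the buffer cache and biodone()):

	int s, error;

	/* Issue side, as in bwrite(): count the I/O before starting it. */
	s = splbio();
	bp->b_vp->v_numoutput++;
	splx(s);
	/* ... hand bp to the device driver ... */

	/* Completion side runs at IPL_BIO and drops the count. */
	vwakeup(bp->b_vp);

	/* A drainer waits until every write has come back. */
	s = splbio();
	error = vwaitforio(vp, 0, "drain", INFSLP);
	splx(s);
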
/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p,
    int slpflag, uint64_t slptimeo)
{
	struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;

#ifdef VFSLCKDEBUG
	if ((vp->v_flag & VLOCKSWORK) && !VOP_ISLOCKED(vp))
		panic("%s: vp isn't locked, vp %p", __func__, vp);
#endif

	if (flags & V_SAVE) {
		s = splbio();
		vwaitforio(vp, 0, "vinvalbuf", INFSLP);
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !LIST_EMPTY(&vp->v_dirtyblkhd))
				panic("%s: dirty bufs, vp %p", __func__, vp);
		}
		splx(s);
	}
loop:
	s = splbio();
	for (;;) {
		int count = 0;
		if ((blist = LIST_FIRST(&vp->v_cleanblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (blist == NULL &&
		    (blist = LIST_FIRST(&vp->v_dirtyblkhd)) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = LIST_NEXT(blist, b_vnbufs);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (flags & V_SAVEMETA && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep_nsec(bp, slpflag | (PRIBIO + 1),
				    "vinvalbuf", slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bremfree(bp);
			/*
			 * XXX Since there are no node locks for NFS, I believe
			 * there is a slight chance that a delayed write will
			 * occur while sleeping just above, so check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				buf_acquire(bp);
				splx(s);
				(void) VOP_BWRITE(bp);
				goto loop;
			}
			buf_acquire_nomap(bp);
			bp->b_flags |= B_INVAL;
			brelse(bp);
			count++;
			/*
			 * XXX Temporary workaround XXX
			 *
			 * If this is a gigantic vnode and we are
			 * trashing a ton of buffers, drop the lock
			 * and yield every so often. The longer term
			 * fix is to add a separate list for these
			 * invalid buffers so we don't have to do the
			 * work to free these here.
			 */
			if (count > 100) {
				splx(s);
				sched_pause(yield);
				goto loop;
			}
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (!LIST_EMPTY(&vp->v_dirtyblkhd) || !LIST_EMPTY(&vp->v_cleanblkhd)))
		panic("%s: flush failed, vp %p", __func__, vp);
	splx(s);
	return (0);
}

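vinvalbuf() is the teardown half of the vnode/buffer association: reclaim and revoke paths use it to empty both buffer lists before a vnode is recycled. A hedged fragment of the classic caller (modeled on the vclean() pattern; simplified):

/*
 * Fragment: when a vnode is being reclaimed, try to save dirty data
 * first (V_SAVE); if that fails, throw everything away.
 */
if (flags & DOCLOSE) {
	if (vinvalbuf(vp, V_SAVE, NOCRED, p, 0, INFSLP) != 0)
		vinvalbuf(vp, 0, NOCRED, p, 0, INFSLP);
}
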
void
vflushbuf(struct vnode *vp, int sync)
{
	struct buf *bp, *nbp;
	int s;

loop:
	s = splbio();
	LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		buf_acquire(bp);
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	vwaitforio(vp, 0, "vflushbuf", INFSLP);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		splx(s);
#ifdef DIAGNOSTIC
		vprint("vflushbuf: dirty", vp);
#endif
		goto loop;
	}
	splx(s);
}

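Where vinvalbuf() empties the lists, vflushbuf() only pushes the dirty ones out, which is what an fsync VOP wants. A hedged sketch (simplified from the ffs_fsync()-style pattern; the examplefs naming is illustrative):

/*
 * Sketch: an fsync-style VOP body.  sync != 0 makes vflushbuf()
 * wait until v_dirtyblkhd is empty.
 */
int
examplefs_fsync(void *v)
{
	struct vop_fsync_args *ap = v;
	struct vnode *vp = ap->a_vp;
	int wait = (ap->a_waitfor == MNT_WAIT);

	vflushbuf(vp, wait);
	/* ... then update and write the inode itself ... */
	return (0);
}
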
/*
 * Associate a buffer with a vnode.
 *
 * Manipulates buffer vnode queues. Must be called at splbio().
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	splassert(IPL_BIO);

	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;

	splassert(IPL_BIO);

	if ((vp = bp->b_vp) == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);
	if ((vp->v_bioflag & VBIOONSYNCLIST) &&
	    LIST_EMPTY(&vp->v_dirtyblkhd)) {
		vp->v_bioflag &= ~VBIOONSYNCLIST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = NULL;

	vdrop(vp);
}

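bgetvp()/brelvp() bracket a buffer's attachment to a vnode, with vhold()/vdrop() pinning the vnode while it still owns buffers. A hedged fragment of the lifecycle (simplified from what getblk()-style code does):

	int s;

	/* Attach: vhold(vp), bp joins v_cleanblkhd.  Requires IPL_BIO. */
	s = splbio();
	bgetvp(vp, bp);
	splx(s);

	/* ... fill bp->b_data, perhaps bwrite() or bdwrite() it ... */

	/* Detach: leaves the buffer queue, then vdrop(vp). */
	s = splbio();
	brelvp(bp);
	splx(s);
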
/*
 * Replaces the current vnode associated with the buffer, if any,
 * with a new vnode.
 *
 * If an output I/O is pending on the buffer, the old vnode
 * I/O count is adjusted.
 *
 * Ignores vnode buffer queues. Must be called at splbio().
 */
void
buf_replacevnode(struct buf *bp, struct vnode *newvp)
{
	struct vnode *oldvp = bp->b_vp;

	splassert(IPL_BIO);

	if (oldvp)
		brelvp(bp);

	if ((bp->b_flags & (B_READ | B_DONE)) == 0) {
		newvp->v_numoutput++;	/* put it on swapdev */
		vwakeup(oldvp);
	}

	bgetvp(newvp, bp);
	bufremvn(bp);
}

/*
 * Used to assign buffers to the appropriate clean or dirty list on
 * the vnode and to add newly dirty vnodes to the appropriate
 * filesystem syncer list.
 *
 * Manipulates vnode buffer queues. Must be called at splbio().
 */
void
reassignbuf(struct buf *bp)
{
	struct buflists *listheadp;
	int delay;
	struct vnode *vp = bp->b_vp;

	splassert(IPL_BIO);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
		bufremvn(bp);

	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &vp->v_cleanblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) &&
		    LIST_EMPTY(&vp->v_dirtyblkhd)) {
			vp->v_bioflag &= ~VBIOONSYNCLIST;
			LIST_REMOVE(vp, v_synclist);
		}
	} else {
		listheadp = &vp->v_dirtyblkhd;
		if ((vp->v_bioflag & VBIOONSYNCLIST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = syncdelay / 2;
				break;
			case VBLK:
				if (vp->v_specmountpoint != NULL) {
					delay = syncdelay / 3;
					break;
				}
				/* FALLTHROUGH */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

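reassignbuf() runs whenever a buffer's dirty state changes; that is how a vnode's first dirty buffer gets it scheduled with the syncer. A hedged fragment of the delayed-write side (simplified from the bdwrite() pattern):

	int s;

	/*
	 * Fragment: mark bp for delayed write.  Setting B_DELWRI and
	 * calling reassignbuf() moves bp to v_dirtyblkhd and, if this
	 * is the vnode's first dirty buffer, adds the vnode to the
	 * syncer worklist.
	 */
	s = splbio();
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp);
	}
	splx(s);
	brelse(bp);	/* illustrative: release without writing now */
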
/*
 * Check if vnode represents a disk device
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (0);

	return (1);
}

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>

void
vfs_buf_print(void *b, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct buf *bp = b;

	(*pr)(" vp %p lblkno 0x%llx blkno 0x%llx dev 0x%x\n"
	    " proc %p error %d flags %lb\n",
	    bp->b_vp, (int64_t)bp->b_lblkno, (int64_t)bp->b_blkno, bp->b_dev,
	    bp->b_proc, bp->b_error, bp->b_flags, B_BITS);

	(*pr)(" bufsize 0x%lx bcount 0x%lx resid 0x%lx\n"
	    " data %p saveaddr %p dep %p iodone %p\n",
	    bp->b_bufsize, bp->b_bcount, (long)bp->b_resid,
	    bp->b_data, bp->b_saveaddr,
	    LIST_FIRST(&bp->b_dep), bp->b_iodone);

	(*pr)(" dirty {off 0x%x end 0x%x} valid {off 0x%x end 0x%x}\n",
	    bp->b_dirtyoff, bp->b_dirtyend, bp->b_validoff, bp->b_validend);

#ifdef FFS_SOFTUPDATES
	if (full)
		softdep_print(bp, full, pr);
#endif
}

const char *vtypes[] = { VTYPE_NAMES };
const char *vtags[] = { VTAG_NAMES };

void
vfs_vnode_print(void *v, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct vnode *vp = v;

	(*pr)("tag %s(%d) type %s(%d) mount %p typedata %p\n",
	    (u_int)vp->v_tag >= nitems(vtags) ? "<unk>" : vtags[vp->v_tag],
	    vp->v_tag,
	    (u_int)vp->v_type >= nitems(vtypes) ? "<unk>" : vtypes[vp->v_type],
	    vp->v_type, vp->v_mount, vp->v_mountedhere);

	(*pr)("data %p usecount %d writecount %d holdcnt %d numoutput %d\n",
	    vp->v_data, vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt, vp->v_numoutput);

	/* uvm_object_printit(&vp->v_uobj, full, pr); */

	if (full) {
		struct buf *bp;

		(*pr)("clean bufs:\n");
		LIST_FOREACH(bp, &vp->v_cleanblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}

		(*pr)("dirty bufs:\n");
		LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
			(*pr)(" bp %p\n", bp);
			vfs_buf_print(bp, full, pr);
		}
	}
}

void
vfs_mount_print(struct mount *mp, int full,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct vfsconf *vfc = mp->mnt_vfc;
	struct vnode *vp;
	int cnt;

	(*pr)("flags %b\nvnodecovered %p syncer %p data %p\n",
	    mp->mnt_flag, MNT_BITS,
	    mp->mnt_vnodecovered, mp->mnt_syncer, mp->mnt_data);

	(*pr)("vfsconf: ops %p name \"%s\" num %d ref %u flags 0x%x\n",
	    vfc->vfc_vfsops, vfc->vfc_name, vfc->vfc_typenum,
	    vfc->vfc_refcount, vfc->vfc_flags);

	(*pr)("statvfs cache: bsize %x iosize %x\n"
	    "blocks %llu free %llu avail %lld\n",
	    mp->mnt_stat.f_bsize, mp->mnt_stat.f_iosize, mp->mnt_stat.f_blocks,
	    mp->mnt_stat.f_bfree, mp->mnt_stat.f_bavail);

	(*pr)(" files %llu ffiles %llu favail %lld\n", mp->mnt_stat.f_files,
	    mp->mnt_stat.f_ffree, mp->mnt_stat.f_favail);

	(*pr)(" f_fsidx {0x%x, 0x%x} owner %u ctime 0x%llx\n",
	    mp->mnt_stat.f_fsid.val[0], mp->mnt_stat.f_fsid.val[1],
	    mp->mnt_stat.f_owner, mp->mnt_stat.f_ctime);

	(*pr)(" syncwrites %llu asyncwrites = %llu\n",
	    mp->mnt_stat.f_syncwrites, mp->mnt_stat.f_asyncwrites);

	(*pr)(" syncreads %llu asyncreads = %llu\n",
	    mp->mnt_stat.f_syncreads, mp->mnt_stat.f_asyncreads);

	(*pr)(" fstype \"%s\" mnton \"%s\" mntfrom \"%s\" mntspec \"%s\"\n",
	    mp->mnt_stat.f_fstypename, mp->mnt_stat.f_mntonname,
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromspec);

	(*pr)("locked vnodes:");
	/* XXX would take mountlist lock, except ddb has no context */
	cnt = 0;
	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
		if (VOP_ISLOCKED(vp)) {
			if (cnt == 0)
				(*pr)("\n %p", vp);
			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
				(*pr)(",\n %p", vp);
			else
				(*pr)(", %p", vp);
			cnt++;
		}
	}
	(*pr)("\n");

	if (full) {
		(*pr)("all vnodes:");
		/* XXX would take mountlist lock, except ddb has no context */
		cnt = 0;
		TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (cnt == 0)
				(*pr)("\n %p", vp);
			else if ((cnt % (72 / (sizeof(void *) * 2 + 4))) == 0)
				(*pr)(",\n %p", vp);
			else
				(*pr)(", %p", vp);
			cnt++;
		}
		(*pr)("\n");
	}
}
#endif /* DDB */

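These three printers back the kernel debugger's inspection commands; ddb(4) dispatches to them from its "show" family. A hedged example session (command spellings assumed from ddb(4); the addresses are illustrative):

	ddb> show buf 0xffffffff812340c0	/* vfs_buf_print() */
	ddb> show vnode 0xffff80001234abc0	/* vfs_vnode_print() */
	ddb> show mount 0xffff800011223344	/* vfs_mount_print() */
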
void
copy_statfs_info(struct statfs *sbp, const struct mount *mp)
{
	const struct statfs *mbp;

	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);

	if (sbp == (mbp = &mp->mnt_stat))
		return;

	sbp->f_fsid = mbp->f_fsid;
	sbp->f_owner = mbp->f_owner;
	sbp->f_flags = mbp->f_flags;
	sbp->f_syncwrites = mbp->f_syncwrites;
	sbp->f_asyncwrites = mbp->f_asyncwrites;
	sbp->f_syncreads = mbp->f_syncreads;
	sbp->f_asyncreads = mbp->f_asyncreads;
	sbp->f_namemax = mbp->f_namemax;
	memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
	memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
	memcpy(sbp->f_mntfromspec, mp->mnt_stat.f_mntfromspec, MNAMELEN);
	memcpy(&sbp->mount_info, &mp->mnt_stat.mount_info,
	    sizeof(union mount_info));
}
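
Filesystems call copy_statfs_info() at the end of their VFS_STATFS handler so the generic fields (fsid, owner, names, I/O counters) come from the mount point without per-filesystem copying. A hedged sketch (simplified from the ffs_statfs()-style pattern; geometry numbers are illustrative):

/*
 * Sketch: a statfs implementation fills in the fs-specific block and
 * inode counts, then lets copy_statfs_info() supply the rest.
 */
int
examplefs_statfs(struct mount *mp, struct statfs *sbp, struct proc *p)
{
	sbp->f_bsize = 512;		/* illustrative geometry */
	sbp->f_iosize = 64 * 1024;
	sbp->f_blocks = sbp->f_bfree = sbp->f_bavail = 0;
	sbp->f_files = sbp->f_ffree = sbp->f_favail = 0;
	copy_statfs_info(sbp, mp);
	return (0);
}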