Bug Summary

File: kern/vfs_lockf.c
Warning: line 453, column 5
Value stored to 'lock' is never read
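
In this build LOCKF_DEBUG is not defined, so the LFPRINT() macro that would otherwise print 'lock' later in lf_setlock() expands to nothing; the assignment at line 453 therefore has no subsequent reader and the dead-store checker reports it. A minimal, self-contained sketch of the flagged pattern, with illustrative names that are not taken from the kernel source, which reproduces the same "Value stored to ... is never read" diagnostic under clang --analyze:

	/* Dead-store sketch: the value written to 'req' is never read
	 * again before the function returns. */
	struct range { int type; };

	static int
	merge(struct range *req, struct range *overlap)
	{
		if (overlap->type == req->type) {
			req = overlap;	/* value stored to 'req' is never read */
			return (0);
		}
		overlap->type = req->type;
		return (1);
	}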

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name vfs_lockf.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/kern/vfs_lockf.c
1/* $OpenBSD: vfs_lockf.c,v 1.45 2019/12/02 15:02:32 visa Exp $ */
2/* $NetBSD: vfs_lockf.c,v 1.7 1996/02/04 02:18:21 christos Exp $ */
3
4/*
5 * Copyright (c) 1982, 1986, 1989, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * Scooter Morris at Genentech Inc.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
36 */
37
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/kernel.h>
41#include <sys/proc.h>
42#include <sys/vnode.h>
43#include <sys/pool.h>
44#include <sys/fcntl.h>
45#include <sys/lockf.h>
46#include <sys/rwlock.h>
47#include <sys/unistd.h>
48
49/*
50 * The lockf structure is a kernel structure which contains the information
51 * associated with a byte range lock. The lockf structures are linked into
52 * the inode structure. Locks are sorted by the starting byte of the lock for
53 * efficiency.
54 */
55TAILQ_HEAD(locklist, lockf);
56
57struct lockf {
58 short lf_flags; /* Lock semantics: F_POSIX, F_FLOCK, F_WAIT */
59 short lf_type; /* Lock type: F_RDLCK, F_WRLCK */
60 off_t lf_start; /* The byte # of the start of the lock */
61 off_t lf_end; /* The byte # of the end of the lock (-1=EOF)*/
62 caddr_t lf_id; /* The id of the resource holding the lock */
63 struct lockf_state *lf_state; /* State associated with the lock */
64 TAILQ_ENTRY(lockf) lf_entry;
65 struct lockf *lf_blk; /* The lock that blocks us */
66 struct locklist lf_blkhd; /* The list of blocked locks */
67 TAILQ_ENTRY(lockf) lf_block; /* A request waiting for a lock */
68 uid_t lf_uid; /* User ID responsible */
69 pid_t lf_pid; /* POSIX - owner pid */
70};
71
72struct lockf_state {
73 TAILQ_HEAD(, lockf) ls_locks; /* list of active locks */
74 TAILQ_HEAD(, lockf) ls_pending; /* list of pending locks */
75 struct lockf_state **ls_owner; /* owner */
76 int ls_refs; /* reference counter */
77};
78
79struct pool lockf_state_pool;
80struct pool lockf_pool;
81
82#define SELF 0x1
83#define OTHERS 0x2
84
85#ifdef LOCKF_DEBUG
86
87#define DEBUG_SETLOCK 0x01
88#define DEBUG_CLEARLOCK 0x02
89#define DEBUG_GETLOCK 0x04
90#define DEBUG_FINDOVR 0x08
91#define DEBUG_SPLIT 0x10
92#define DEBUG_WAKELOCK 0x20
93#define DEBUG_LINK 0x40
94
95int lockf_debug = DEBUG_SETLOCK|DEBUG_CLEARLOCK|DEBUG_WAKELOCK;
96
97void lf_print(const char *, struct lockf *);
98void lf_printlist(const char *, struct lockf *);
99
100#define DPRINTF(args, level) if (lockf_debug & (level)) printf args
101#define LFPRINT(args, level) if (lockf_debug & (level)) lf_print args
102#else
103#define DPRINTF(args, level)
104#define LFPRINT(args, level)
105#endif
106
107struct lockf *lf_alloc(uid_t, int);
108void lf_free(struct lockf *);
109int lf_clearlock(struct lockf *);
110int lf_findoverlap(struct lockf *, struct lockf *, int, struct lockf **);
111struct lockf *lf_getblock(struct lockf *, struct lockf *);
112int lf_getlock(struct lockf *, struct flock *);
113int lf_setlock(struct lockf *);
114void lf_split(struct lockf *, struct lockf *);
115void lf_wakelock(struct lockf *, int);
116int lf_deadlock(struct lockf *);
117void ls_ref(struct lockf_state *);
118void ls_rele(struct lockf_state *);
119
120/*
121 * Serializes access to each instance of struct lockf and struct lockf_state
122 * and each pointer from a vnode to struct lockf_state.
123 */
124struct rwlock lockf_lock = RWLOCK_INITIALIZER("lockflk");
125
126void
127lf_init(void)
128{
129 pool_init(&lockf_state_pool, sizeof(struct lockf_state), 0, IPL_NONE0x0,
130 PR_WAITOK0x0001 | PR_RWLOCK0x0010, "lockfspl", NULL((void *)0));
131 pool_init(&lockf_pool, sizeof(struct lockf), 0, IPL_NONE0x0,
132 PR_WAITOK0x0001 | PR_RWLOCK0x0010, "lockfpl", NULL((void *)0));
133}
134
135void
136ls_ref(struct lockf_state *ls)
137{
138 rw_assert_wrlock(&lockf_lock);
139
140 ls->ls_refs++;
141}
142
143void
144ls_rele(struct lockf_state *ls)
145{
146 rw_assert_wrlock(&lockf_lock);
147
148 if (--ls->ls_refs > 0)
149 return;
150
151#ifdef LOCKF_DIAGNOSTIC
152 KASSERT(TAILQ_EMPTY(&ls->ls_locks));
153 KASSERT(TAILQ_EMPTY(&ls->ls_pending));
154#endif
155
156 *ls->ls_owner = NULL((void *)0);
157 pool_put(&lockf_state_pool, ls);
158}
159
160/*
161 * We enforce a limit on locks by uid, so that a single user cannot
162 * run the kernel out of memory. For now, the limit is pretty coarse.
163 * There is no limit on root.
164 *
165 * Splitting a lock will always succeed, regardless of current allocations.
166 * If you're slightly above the limit, we still have to permit an allocation
167 * so that the unlock can succeed. If the unlocking causes too many splits,
168 * however, you're totally cutoff.
169 */
170int maxlocksperuid = 1024;
171
172/*
173 * 3 options for allowfail.
174 * 0 - always allocate. 1 - cutoff at limit. 2 - cutoff at double limit.
175 */
176struct lockf *
177lf_alloc(uid_t uid, int allowfail)
178{
179 struct uidinfo *uip;
180 struct lockf *lock;
181
182 uip = uid_find(uid);
183 if (uid && allowfail && uip->ui_lockcnt >
184 (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) {
185 uid_release(uip);
186 return (NULL((void *)0));
187 }
188 uip->ui_lockcnt++;
189 uid_release(uip);
190 lock = pool_get(&lockf_pool, PR_WAITOK0x0001);
191 lock->lf_uid = uid;
192 return (lock);
193}
194
195void
196lf_free(struct lockf *lock)
197{
198 struct uidinfo *uip;
199
200 rw_assert_wrlock(&lockf_lock);
201
202 LFPRINT(("lf_free", lock), DEBUG_LINK);
203
204#ifdef LOCKF_DIAGNOSTIC
205 KASSERT(TAILQ_EMPTY(&lock->lf_blkhd));
206#endif /* LOCKF_DIAGNOSTIC */
207
208 ls_rele(lock->lf_state);
209
210 uip = uid_find(lock->lf_uid);
211 uip->ui_lockcnt--;
212 uid_release(uip);
213 pool_put(&lockf_pool, lock);
214}
215
216
217/*
218 * Do an advisory lock operation.
219 */
220int
221lf_advlock(struct lockf_state **state, off_t size, caddr_t id, int op,
222 struct flock *fl, int flags)
223{
224 struct proc *p = curproc;
225 struct lockf_state *ls;
226 struct lockf *lock;
227 off_t start, end;
228 int error = 0;
229
230 /*
231 * Convert the flock structure into a start and end.
232 */
233 switch (fl->l_whence) {
234 case SEEK_SET0:
235 case SEEK_CUR1:
236 /*
237 * Caller is responsible for adding any necessary offset
238 * when SEEK_CUR is used.
239 */
240 start = fl->l_start;
241 break;
242 case SEEK_END2:
243 start = size + fl->l_start;
244 break;
245 default:
246 return (EINVAL22);
247 }
248 if (start < 0)
249 return (EINVAL22);
250 if (fl->l_len > 0) {
251 if (fl->l_len - 1 > LLONG_MAX0x7fffffffffffffffLL - start)
252 return (EOVERFLOW87);
253 end = start + (fl->l_len - 1);
254 } else if (fl->l_len < 0) {
255 if (fl->l_start + fl->l_len < 0)
256 return (EINVAL22);
257 end = fl->l_start - 1;
258 start += fl->l_len;
259 } else {
260 end = -1;
261 }
262
263 rw_enter_write(&lockf_lock);
264 ls = *state;
265
266 /*
267 * Avoid the common case of unlocking when inode has no locks.
268 */
269 if (ls == NULL((void *)0) && op != F_SETLK8) {
270 fl->l_type = F_UNLCK2;
271 goto out;
272 }
273
274 if (ls == NULL((void *)0)) {
275 ls = pool_get(&lockf_state_pool, PR_WAITOK0x0001 | PR_ZERO0x0008);
276 ls->ls_owner = state;
277 TAILQ_INIT(&ls->ls_locks);
278 TAILQ_INIT(&ls->ls_pending);
279 *state = ls;
280 }
281 ls_ref(ls);
282
283 lock = lf_alloc(p->p_ucred->cr_uid, op == F_SETLK8 ? 1 : 2);
284 if (!lock) {
285 ls_rele(ls);
286 error = ENOLCK77;
287 goto out;
288 }
289 lock->lf_flags = flags;
290 lock->lf_type = fl->l_type;
291 lock->lf_start = start;
292 lock->lf_end = end;
293 lock->lf_id = id;
294 lock->lf_state = ls;
295 lock->lf_blk = NULL((void *)0);
296 lock->lf_pid = (flags & F_POSIX0x040) ? p->p_p->ps_pid : -1;
297 TAILQ_INIT(&lock->lf_blkhd);
298
299 switch (op) {
300 case F_SETLK8:
301 error = lf_setlock(lock);
302 break;
303 case F_UNLCK2:
304 error = lf_clearlock(lock);
305 lf_free(lock);
306 break;
307 case F_GETLK7:
308 error = lf_getlock(lock, fl);
309 lf_free(lock);
310 break;
311 default:
312 lf_free(lock);
313 error = EINVAL22;
314 break;
315 }
316
317out:
318 rw_exit_write(&lockf_lock);
319 return (error);
320}
321
322/*
323 * Set a byte-range lock.
324 */
325int
326lf_setlock(struct lockf *lock)
327{
328 struct lockf *block;
329 struct lockf *overlap, *ltmp;
330 int ovcase, priority, needtolink, error;
331
332 rw_assert_wrlock(&lockf_lock);
333
334 LFPRINT(("lf_setlock", lock), DEBUG_SETLOCK);
335
336 priority = PLOCK36;
337 if (lock->lf_type == F_WRLCK3)
338 priority += 4;
339 priority |= PCATCH0x100;
340 /*
341 * Scan lock list for this file looking for locks that would block us.
342 */
343 for (;;) {
344 block = lf_getblock(TAILQ_FIRST(&lock->lf_state->ls_locks),
345 lock);
346 if (block == NULL((void *)0))
347 break;
348
349 if ((lock->lf_flags & F_WAIT0x010) == 0) {
350 lf_free(lock);
351 return (EAGAIN35);
352 }
353
354 /*
355 * Lock is blocked, check for deadlock before proceeding.
356 * Note: flock style locks cover the whole file, there is no
357 * chance for deadlock.
358 */
359 if ((lock->lf_flags & F_POSIX0x040) && lf_deadlock(lock)) {
360 lf_free(lock);
361 return (EDEADLK11);
362 }
363
364 /*
365 * For flock type locks, we must first remove
366 * any shared locks that we hold before we sleep
367 * waiting for an exclusive lock.
368 */
369 if ((lock->lf_flags & F_FLOCK0x020) && lock->lf_type == F_WRLCK3) {
370 lock->lf_type = F_UNLCK2;
371 (void)lf_clearlock(lock);
372 lock->lf_type = F_WRLCK3;
373 }
374 /*
375 * Add our lock to the blocked list and sleep until we're free.
376 * Remember who blocked us (for deadlock detection).
377 */
378 lock->lf_blk = block;
379 LFPRINT(("lf_setlock", lock), DEBUG_SETLOCK);
380 LFPRINT(("lf_setlock: blocking on", block), DEBUG_SETLOCK);
381 TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
382 TAILQ_INSERT_TAIL(&lock->lf_state->ls_pending, lock, lf_entry);
383 error = rwsleep_nsec(lock, &lockf_lock, priority, "lockf",
384 INFSLP);
385 TAILQ_REMOVE(&lock->lf_state->ls_pending, lock, lf_entry);
386 wakeup_one(lock->lf_state);
387 if (lock->lf_blk != NULL) {
388 TAILQ_REMOVE(&lock->lf_blk->lf_blkhd, lock, lf_block);
389 lock->lf_blk = NULL;
390 }
391 if (error) {
392 lf_free(lock);
393 return (error);
394 }
395 if (lock->lf_flags & F_INTR0x080) {
396 lf_free(lock);
397 return (EINTR4);
398 }
399 }
400 /*
401 * No blocks!! Add the lock. Note that we will
402 * downgrade or upgrade any overlapping locks this
403 * process already owns.
404 *
405 * Skip over locks owned by other processes.
406 * Handle any locks that overlap and are owned by ourselves.
407 */
408 block = TAILQ_FIRST(&lock->lf_state->ls_locks);
409 overlap = NULL;
410 needtolink = 1;
411 for (;;) {
412 ovcase = lf_findoverlap(block, lock, SELF0x1, &overlap);
413 if (ovcase)
414 block = TAILQ_NEXT(overlap, lf_entry);
415 /*
416 * Six cases:
417 * 0) no overlap
418 * 1) overlap == lock
419 * 2) overlap contains lock
420 * 3) lock contains overlap
421 * 4) overlap starts before lock
422 * 5) overlap ends after lock
423 */
424 switch (ovcase) {
425 case 0: /* no overlap */
426 if (needtolink) {
427 if (overlap) /* insert before overlap */
428 TAILQ_INSERT_BEFORE(overlap, lock,
429 lf_entry);
430 else /* first or last lock in list */
431 TAILQ_INSERT_TAIL(&lock->lf_state->ls_locks,
432 lock, lf_entry);
433 }
434 break;
435 case 1: /* overlap == lock */
436 /*
437 * If downgrading lock, others may be
438 * able to acquire it.
439 */
440 if (lock->lf_type == F_RDLCK1 &&
441 overlap->lf_type == F_WRLCK3)
442 lf_wakelock(overlap, 0);
443 overlap->lf_type = lock->lf_type;
444 lf_free(lock);
445 lock = overlap; /* for debug output below */
446 break;
447 case 2: /* overlap contains lock */
448 /*
449 * Check for common starting point and different types.
450 */
451 if (overlap->lf_type == lock->lf_type) {
452 lf_free(lock);
453 lock = overlap; /* for debug output below */
Value stored to 'lock' is never read
454 break;
455 }
456 if (overlap->lf_start == lock->lf_start) {
457 if (!needtolink)
458 TAILQ_REMOVE(&lock->lf_state->ls_locks,
459 lock, lf_entry);
460 TAILQ_INSERT_BEFORE(overlap, lock, lf_entry);
461 overlap->lf_start = lock->lf_end + 1;
462 } else
463 lf_split(overlap, lock);
464 lf_wakelock(overlap, 0);
465 break;
466 case 3: /* lock contains overlap */
467 /*
468 * If downgrading lock, others may be able to
469 * acquire it, otherwise take the list.
470 */
471 if (lock->lf_type == F_RDLCK1 &&
472 overlap->lf_type == F_WRLCK3) {
473 lf_wakelock(overlap, 0);
474 } else {
475 while ((ltmp =
476 TAILQ_FIRST(&overlap->lf_blkhd))) {
477 TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
478 lf_block);
479 ltmp->lf_blk = lock;
480 TAILQ_INSERT_TAIL(&lock->lf_blkhd,
481 ltmp, lf_block);
482 }
483 }
484 /*
485 * Add the new lock if necessary and delete the overlap.
486 */
487 if (needtolink) {
488 TAILQ_INSERT_BEFORE(overlap, lock, lf_entry);
489 needtolink = 0;
490 }
491 TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap, lf_entry);
492 lf_free(overlap);
493 continue;
494 case 4: /* overlap starts before lock */
495 /*
496 * Add lock after overlap on the list.
497 */
498 if (!needtolink)
499 TAILQ_REMOVE(&lock->lf_state->ls_locks, lock,
500 lf_entry);
501 TAILQ_INSERT_AFTER(&lock->lf_state->ls_locks, overlap,
502 lock, lf_entry);
503 overlap->lf_end = lock->lf_start - 1;
504 lf_wakelock(overlap, 0);
505 needtolink = 0;
506 continue;
507 case 5: /* overlap ends after lock */
508 /*
509 * Add the new lock before overlap.
510 */
511 if (needtolink)
512 TAILQ_INSERT_BEFORE(overlap, lock, lf_entry);
513 overlap->lf_start = lock->lf_end + 1;
514 lf_wakelock(overlap, 0);
515 break;
516 }
517 break;
518 }
519 LFPRINT(("lf_setlock: got the lock", lock), DEBUG_SETLOCK);
520 return (0);
521}
522
523/*
524 * Remove a byte-range lock on an inode.
525 *
526 * Generally, find the lock (or an overlap to that lock)
527 * and remove it (or shrink it), then wakeup anyone we can.
528 */
529int
530lf_clearlock(struct lockf *lock)
531{
532 struct lockf *lf, *overlap;
533 int ovcase;
534
535 rw_assert_wrlock(&lockf_lock);
536
537 lf = TAILQ_FIRST(&lock->lf_state->ls_locks);
538 if (lf == NULL((void *)0))
539 return (0);
540
541 LFPRINT(("lf_clearlock", lock), DEBUG_CLEARLOCK);
542 while ((ovcase = lf_findoverlap(lf, lock, SELF0x1, &overlap))) {
543 lf_wakelock(overlap, 0);
544
545 switch (ovcase) {
546 case 1: /* overlap == lock */
547 TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap,
548 lf_entry);
549 lf_free(overlap);
550 break;
551 case 2: /* overlap contains lock: split it */
552 if (overlap->lf_start == lock->lf_start) {
553 overlap->lf_start = lock->lf_end + 1;
554 break;
555 }
556 lf_split(overlap, lock);
557 /*
558 * The lock is now part of the list, lf_clearlock() must
559 * ensure that the lock remains detached from the list.
560 */
561 TAILQ_REMOVE(&lock->lf_state->ls_locks, lock, lf_entry);
562 break;
563 case 3: /* lock contains overlap */
564 lf = TAILQ_NEXT(overlap, lf_entry);
565 TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap,
566 lf_entry);
567 lf_free(overlap);
568 continue;
569 case 4: /* overlap starts before lock */
570 overlap->lf_end = lock->lf_start - 1;
571 lf = TAILQ_NEXT(overlap, lf_entry);
572 continue;
573 case 5: /* overlap ends after lock */
574 overlap->lf_start = lock->lf_end + 1;
575 break;
576 }
577 break;
578 }
579 return (0);
580}
581
582/*
583 * Check whether there is a blocking lock,
584 * and if so return its process identifier.
585 */
586int
587lf_getlock(struct lockf *lock, struct flock *fl)
588{
589 struct lockf *block, *lf;
590
591 rw_assert_wrlock(&lockf_lock);
592
593 LFPRINT(("lf_getlock", lock), DEBUG_CLEARLOCK);
594
595 lf = TAILQ_FIRST(&lock->lf_state->ls_locks);
596 if ((block = lf_getblock(lf, lock)) != NULL((void *)0)) {
597 fl->l_type = block->lf_type;
598 fl->l_whence = SEEK_SET0;
599 fl->l_start = block->lf_start;
600 if (block->lf_end == -1)
601 fl->l_len = 0;
602 else
603 fl->l_len = block->lf_end - block->lf_start + 1;
604 fl->l_pid = block->lf_pid;
605 } else {
606 fl->l_type = F_UNLCK2;
607 }
608 return (0);
609}
610
611/*
612 * Walk the list of locks for an inode and
613 * return the first blocking lock.
614 */
615struct lockf *
616lf_getblock(struct lockf *lf, struct lockf *lock)
617{
618 struct lockf *overlap;
619
620 rw_assert_wrlock(&lockf_lock);
621
622 while (lf_findoverlap(lf, lock, OTHERS0x2, &overlap) != 0) {
623 /*
624 * We've found an overlap, see if it blocks us
625 */
626 if ((lock->lf_type == F_WRLCK3 || overlap->lf_type == F_WRLCK3))
627 return (overlap);
628 /*
629 * Nope, point to the next one on the list and
630 * see if it blocks us
631 */
632 lf = TAILQ_NEXT(overlap, lf_entry);
633 }
634 return (NULL((void *)0));
635}
636
637/*
638 * Walk the list of locks for an inode to
639 * find an overlapping lock (if any).
640 *
641 * NOTE: this returns only the FIRST overlapping lock. There
642 * may be more than one.
643 */
644int
645lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
646 struct lockf **overlap)
647{
648 off_t start, end;
649
650 rw_assert_wrlock(&lockf_lock);
651
652 LFPRINT(("lf_findoverlap: looking for overlap in", lock), DEBUG_FINDOVR);
653
654 *overlap = lf;
655 start = lock->lf_start;
656 end = lock->lf_end;
657 while (lf != NULL((void *)0)) {
658 if (((type & SELF0x1) && lf->lf_id != lock->lf_id) ||
659 ((type & OTHERS0x2) && lf->lf_id == lock->lf_id)) {
660 *overlap = lf = TAILQ_NEXT(lf, lf_entry);
661 continue;
662 }
663 LFPRINT(("\tchecking", lf), DEBUG_FINDOVR);
664 /*
665 * OK, check for overlap
666 *
667 * Six cases:
668 * 0) no overlap
669 * 1) overlap == lock
670 * 2) overlap contains lock
671 * 3) lock contains overlap
672 * 4) overlap starts before lock
673 * 5) overlap ends after lock
674 */
675
676 /* Case 0 */
677 if ((lf->lf_end != -1 && start > lf->lf_end) ||
678 (end != -1 && lf->lf_start > end)) {
679 DPRINTF(("no overlap\n"), DEBUG_FINDOVR);
680 if ((type & SELF0x1) && end != -1 && lf->lf_start > end)
681 return (0);
682 *overlap = lf = TAILQ_NEXT(lf, lf_entry);
683 continue;
684 }
685 /* Case 1 */
686 if ((lf->lf_start == start) && (lf->lf_end == end)) {
687 DPRINTF(("overlap == lock\n"), DEBUG_FINDOVR);
688 return (1);
689 }
690 /* Case 2 */
691 if ((lf->lf_start <= start) &&
692 (lf->lf_end == -1 || (end != -1 && lf->lf_end >= end))) {
693 DPRINTF(("overlap contains lock\n"), DEBUG_FINDOVR);
694 return (2);
695 }
696 /* Case 3 */
697 if (start <= lf->lf_start &&
698 (end == -1 || (lf->lf_end != -1 && end >= lf->lf_end))) {
699 DPRINTF(("lock contains overlap\n"), DEBUG_FINDOVR);
700 return (3);
701 }
702 /* Case 4 */
703 if ((lf->lf_start < start) &&
704 ((lf->lf_end >= start) || (lf->lf_end == -1))) {
705 DPRINTF(("overlap starts before lock\n"),
706 DEBUG_FINDOVR);
707 return (4);
708 }
709 /* Case 5 */
710 if ((lf->lf_start > start) && (end != -1) &&
711 ((lf->lf_end > end) || (lf->lf_end == -1))) {
712 DPRINTF(("overlap ends after lock\n"), DEBUG_FINDOVR);
713 return (5);
714 }
715 panic("lf_findoverlap: default");
716 }
717 return (0);
718}
719
720/*
721 * Purge all locks associated with the given lock state.
722 */
723void
724lf_purgelocks(struct lockf_state **state)
725{
726 struct lockf_state *ls;
727 struct lockf *lock;
728
729 rw_enter_write(&lockf_lock);
730
731 ls = *state;
732 if (ls == NULL((void *)0))
733 goto out;
734
735 ls_ref(ls);
736
737 /* Interrupt blocked locks and wait for all of them to finish. */
738 TAILQ_FOREACH(lock, &ls->ls_locks, lf_entry) {
739 LFPRINT(("lf_purgelocks: wakeup", lock), DEBUG_SETLOCK);
740 lf_wakelock(lock, F_INTR0x080);
741 }
742 while (!TAILQ_EMPTY(&ls->ls_pending))
743 rwsleep_nsec(ls, &lockf_lock, PLOCK, "lockfp", INFSLP);
744
745 /*
746 * Any remaining locks cannot block other locks at this point and can
747 * safely be removed.
748 */
749 while ((lock = TAILQ_FIRST(&ls->ls_locks))) {
750 TAILQ_REMOVE(&ls->ls_locks, lock, lf_entry);
751 lf_free(lock);
752 }
753
754 /* This is the last expected thread to hold a lock state reference. */
755#ifdef LOCKF_DIAGNOSTIC
756 KASSERT(ls->ls_refs == 1);
757#endif
758 ls_rele(ls);
759
760out:
761 rw_exit_write(&lockf_lock);
762}
763
764/*
765 * Split a lock and a contained region into
766 * two or three locks as necessary.
767 */
768void
769lf_split(struct lockf *lock1, struct lockf *lock2)
770{
771 struct lockf *splitlock;
772
773 rw_assert_wrlock(&lockf_lock);
774
775 LFPRINT(("lf_split", lock1), DEBUG_SPLIT);
776 LFPRINT(("splitting from", lock2), DEBUG_SPLIT);
777
778 /*
779 * Check to see if splitting into only two pieces.
780 */
781 if (lock1->lf_start == lock2->lf_start) {
782 lock1->lf_start = lock2->lf_end + 1;
783 TAILQ_INSERT_BEFORE(lock1, lock2, lf_entry);
784 return;
785 }
786 if (lock1->lf_end == lock2->lf_end) {
787 lock1->lf_end = lock2->lf_start - 1;
788 TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock1, lock2,
789 lf_entry);
790 return;
791 }
792 /*
793 * Make a new lock consisting of the last part of
794 * the encompassing lock
795 */
796 splitlock = lf_alloc(lock1->lf_uid, 0);
797 splitlock->lf_flags = lock1->lf_flags;
798 splitlock->lf_type = lock1->lf_type;
799 splitlock->lf_start = lock2->lf_end + 1;
800 splitlock->lf_end = lock1->lf_end;
801 splitlock->lf_id = lock1->lf_id;
802 splitlock->lf_state = lock1->lf_state;
803 splitlock->lf_blk = NULL((void *)0);
804 splitlock->lf_pid = lock1->lf_pid;
805 TAILQ_INIT(&splitlock->lf_blkhd);
806 ls_ref(splitlock->lf_state);
807 lock1->lf_end = lock2->lf_start - 1;
808
809 TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock1, lock2, lf_entry);
810 TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock2, splitlock,
811 lf_entry);
812}
813
814/*
815 * Wakeup a blocklist
816 */
817void
818lf_wakelock(struct lockf *lock, int flags)
819{
820 struct lockf *wakelock;
821
822 rw_assert_wrlock(&lockf_lock);
823
824 while ((wakelock = TAILQ_FIRST(&lock->lf_blkhd))) {
825 TAILQ_REMOVE(&lock->lf_blkhd, wakelock, lf_block);
826 wakelock->lf_blk = NULL;
827 wakelock->lf_flags |= flags;
828 wakeup_one(wakelock);
829 }
830}
831
832/*
833 * Returns non-zero if the given lock would cause a deadlock.
834 */
835int
836lf_deadlock(struct lockf *lock)
837{
838 struct lockf *block, *lf, *pending;
839
840 lf = TAILQ_FIRST(&lock->lf_state->ls_locks);
841 for (; (block = lf_getblock(lf, lock)) != NULL;
842 lf = TAILQ_NEXT(block, lf_entry)) {
843 if ((block->lf_flags & F_POSIX0x040) == 0)
844 continue;
845
846 TAILQ_FOREACH(pending, &lock->lf_state->ls_pending, lf_entry) {
847 if (pending->lf_blk == NULL((void *)0))
848 continue; /* lock already unblocked */
849
850 if (pending->lf_pid == block->lf_pid &&
851 pending->lf_blk->lf_pid == lock->lf_pid)
852 return (1);
853 }
854 }
855
856 return (0);
857}
858
859#ifdef LOCKF_DEBUG
860/*
861 * Print out a lock.
862 */
863void
864lf_print(const char *tag, struct lockf *lock)
865{
866 struct lockf *block;
867
868 if (tag)
869 printf("%s: ", tag);
870 printf("lock %p", lock);
871 if (lock == NULL((void *)0)) {
872 printf("\n");
873 return;
874 }
875 printf(", %s %p %s, start %lld, end %lld",
876 lock->lf_flags & F_POSIX0x040 ? "posix" : "flock",
877 lock->lf_id,
878 lock->lf_type == F_RDLCK1 ? "shared" :
879 lock->lf_type == F_WRLCK3 ? "exclusive" :
880 lock->lf_type == F_UNLCK2 ? "unlock" :
881 "unknown", lock->lf_start, lock->lf_end);
882 printf(", next %p, state %p",
883 TAILQ_NEXT(lock, lf_entry), lock->lf_state);
884 block = TAILQ_FIRST(&lock->lf_blkhd);
885 if (block)
886 printf(", block");
887 TAILQ_FOREACH(block, &lock->lf_blkhd, lf_block)
888 printf(" %p,", block);
889 printf("\n");
890}
891
892void
893lf_printlist(const char *tag, struct lockf *lock)
894{
895 struct lockf *lf;
896
897 printf("%s: Lock list:\n", tag);
898 TAILQ_FOREACH(lf, &lock->lf_state->ls_locks, lf_entry) {
899 if (lock == lf)
900 printf(" * ");
901 else
902 printf(" ");
903 lf_print(NULL((void *)0), lf);
904 }
905}
906#endif /* LOCKF_DEBUG */