File: kern/vfs_lockf.c
Warning: line 453, column 5: Value stored to 'lock' is never read
1 | /* $OpenBSD: vfs_lockf.c,v 1.50 2022/08/14 01:58:28 jsg Exp $ */ |
2 | /* $NetBSD: vfs_lockf.c,v 1.7 1996/02/04 02:18:21 christos Exp $ */ |
3 | |
4 | /* |
5 | * Copyright (c) 1982, 1986, 1989, 1993 |
6 | * The Regents of the University of California. All rights reserved. |
7 | * |
8 | * This code is derived from software contributed to Berkeley by |
9 | * Scooter Morris at Genentech Inc. |
10 | * |
11 | * Redistribution and use in source and binary forms, with or without |
12 | * modification, are permitted provided that the following conditions |
13 | * are met: |
14 | * 1. Redistributions of source code must retain the above copyright |
15 | * notice, this list of conditions and the following disclaimer. |
16 | * 2. Redistributions in binary form must reproduce the above copyright |
17 | * notice, this list of conditions and the following disclaimer in the |
18 | * documentation and/or other materials provided with the distribution. |
19 | * 3. Neither the name of the University nor the names of its contributors |
20 | * may be used to endorse or promote products derived from this software |
21 | * without specific prior written permission. |
22 | * |
23 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
24 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
25 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
26 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
27 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
28 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
29 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
30 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
31 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
32 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
33 | * SUCH DAMAGE. |
34 | * |
35 | * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94 |
36 | */ |
37 | |
38 | #include <sys/param.h> |
39 | #include <sys/systm.h> |
40 | #include <sys/proc.h> |
41 | #include <sys/pool.h> |
42 | #include <sys/fcntl.h> |
43 | #include <sys/lockf.h> |
44 | #include <sys/rwlock.h> |
45 | #include <sys/unistd.h> |
46 | |
47 | /* |
48 | * The lockf structure is a kernel structure which contains the information |
49 | * associated with a byte range lock. The lockf structures are linked into |
50 | * the inode structure. Locks are sorted by the starting byte of the lock for |
51 | * efficiency. |
52 | */ |
53 | TAILQ_HEAD(locklist, lockf); |
54 | |
55 | struct lockf { |
56 | short lf_flags; /* Lock semantics: F_POSIX, F_FLOCK, F_WAIT */ |
57 | short lf_type; /* Lock type: F_RDLCK, F_WRLCK */ |
58 | off_t lf_start; /* The byte # of the start of the lock */ |
59 | off_t lf_end; /* The byte # of the end of the lock (-1=EOF)*/ |
60 | caddr_t lf_id; /* The id of the resource holding the lock */ |
61 | struct lockf_state *lf_state; /* State associated with the lock */ |
62 | TAILQ_ENTRY(lockf) lf_entry; |
63 | struct lockf *lf_blk; /* The lock that blocks us */ |
64 | struct locklist lf_blkhd; /* The list of blocked locks */ |
65 | TAILQ_ENTRY(lockf) lf_block; /* A request waiting for a lock */ |
66 | uid_t lf_uid; /* User ID responsible */ |
67 | pid_t lf_pid; /* POSIX - owner pid */ |
68 | }; |
69 | |
70 | struct lockf_state { |
71 | TAILQ_HEAD(, lockf) ls_locks; /* list of active locks */ |
72 | TAILQ_HEAD(, lockf) ls_pending; /* list of pending locks */ |
73 | struct lockf_state **ls_owner; /* owner */ |
74 | int ls_refs; /* reference counter */ |
75 | }; |
76 | |
77 | struct pool lockf_state_pool; |
78 | struct pool lockf_pool; |
79 | |
80 | #define SELF 0x1 |
81 | #define OTHERS 0x2 |
82 | |
83 | #ifdef LOCKF_DEBUG |
84 | |
85 | #define DEBUG_SETLOCK 0x01 |
86 | #define DEBUG_CLEARLOCK 0x02 |
87 | #define DEBUG_GETLOCK 0x04 |
88 | #define DEBUG_FINDOVR 0x08 |
89 | #define DEBUG_SPLIT 0x10 |
90 | #define DEBUG_WAKELOCK 0x20 |
91 | #define DEBUG_LINK 0x40 |
92 | |
93 | int lockf_debug = DEBUG_SETLOCK|DEBUG_CLEARLOCK|DEBUG_WAKELOCK; |
94 | |
95 | void lf_print(const char *, struct lockf *); |
96 | void lf_printlist(const char *, struct lockf *); |
97 | |
98 | #define DPRINTF(args, level) if (lockf_debug & (level)) printf args |
99 | #define LFPRINT(args, level) if (lockf_debug & (level)) lf_print args |
100 | #else |
101 | #define DPRINTF(args, level) |
102 | #define LFPRINT(args, level) |
103 | #endif |
104 | |
105 | struct lockf *lf_alloc(uid_t, int); |
106 | void lf_free(struct lockf *); |
107 | int lf_clearlock(struct lockf *); |
108 | int lf_findoverlap(struct lockf *, struct lockf *, int, struct lockf **); |
109 | struct lockf *lf_getblock(struct lockf *, struct lockf *); |
110 | int lf_getlock(struct lockf *, struct flock *); |
111 | int lf_setlock(struct lockf *); |
112 | void lf_split(struct lockf *, struct lockf *); |
113 | void lf_wakelock(struct lockf *, int); |
114 | int lf_deadlock(struct lockf *); |
115 | void ls_ref(struct lockf_state *); |
116 | void ls_rele(struct lockf_state *); |
117 | |
118 | /* |
119 | * Serializes access to each instance of struct lockf and struct lockf_state |
120 | * and each pointer from a vnode to struct lockf_state. |
121 | */ |
122 | struct rwlock lockf_lock = RWLOCK_INITIALIZER("lockflk"); |
123 | |
124 | void |
125 | lf_init(void) |
126 | { |
127 | pool_init(&lockf_state_pool, sizeof(struct lockf_state), 0, IPL_NONE, |
128 | PR_WAITOK | PR_RWLOCK, "lockfspl", NULL); |
129 | pool_init(&lockf_pool, sizeof(struct lockf), 0, IPL_NONE, |
130 | PR_WAITOK | PR_RWLOCK, "lockfpl", NULL); |
131 | } |
132 | |
133 | void |
134 | ls_ref(struct lockf_state *ls) |
135 | { |
136 | rw_assert_wrlock(&lockf_lock); |
137 | |
138 | ls->ls_refs++; |
139 | } |
140 | |
141 | void |
142 | ls_rele(struct lockf_state *ls) |
143 | { |
144 | rw_assert_wrlock(&lockf_lock); |
145 | |
146 | if (--ls->ls_refs > 0) |
147 | return; |
148 | |
149 | KASSERT(TAILQ_EMPTY(&ls->ls_locks)); |
150 | KASSERT(TAILQ_EMPTY(&ls->ls_pending)); |
151 | |
152 | *ls->ls_owner = NULL; |
153 | pool_put(&lockf_state_pool, ls); |
154 | } |
155 | |
156 | /* |
157 | * We enforce a limit on locks by uid, so that a single user cannot |
158 | * run the kernel out of memory. For now, the limit is pretty coarse. |
159 | * There is no limit on root. |
160 | * |
161 | * Splitting a lock will always succeed, regardless of current allocations. |
162 | * If you're slightly above the limit, we still have to permit an allocation |
163 | * so that the unlock can succeed. If the unlocking causes too many splits, |
164 | * however, you're totally cutoff. |
165 | */ |
166 | int maxlocksperuid = 1024; |
167 | |
168 | /* |
169 | * 3 options for allowfail. |
170 | * 0 - always allocate. 1 - cutoff at limit. 2 - cutoff at double limit. |
171 | */ |
172 | struct lockf * |
173 | lf_alloc(uid_t uid, int allowfail) |
174 | { |
175 | struct uidinfo *uip; |
176 | struct lockf *lock; |
177 | |
178 | uip = uid_find(uid); |
179 | if (uid && allowfail && uip->ui_lockcnt > |
180 | (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) { |
181 | uid_release(uip); |
182 | return (NULL); |
183 | } |
184 | uip->ui_lockcnt++; |
185 | uid_release(uip); |
186 | lock = pool_get(&lockf_pool, PR_WAITOK); |
187 | lock->lf_uid = uid; |
188 | return (lock); |
189 | } |
190 | |
191 | void |
192 | lf_free(struct lockf *lock) |
193 | { |
194 | struct uidinfo *uip; |
195 | |
196 | rw_assert_wrlock(&lockf_lock); |
197 | |
198 | LFPRINT(("lf_free", lock), DEBUG_LINK); |
199 | |
200 | KASSERT(TAILQ_EMPTY(&lock->lf_blkhd)); |
201 | |
202 | ls_rele(lock->lf_state); |
203 | |
204 | uip = uid_find(lock->lf_uid); |
205 | uip->ui_lockcnt--; |
206 | uid_release(uip); |
207 | pool_put(&lockf_pool, lock); |
208 | } |
209 | |
210 | |
211 | /* |
212 | * Do an advisory lock operation. |
213 | */ |
214 | int |
215 | lf_advlock(struct lockf_state **state, off_t size, caddr_t id, int op, |
216 | struct flock *fl, int flags) |
217 | { |
218 | struct proc *p = curproc; |
219 | struct lockf_state *ls; |
220 | struct lockf *lock; |
221 | off_t start, end; |
222 | int error = 0; |
223 | |
224 | /* |
225 | * Convert the flock structure into a start and end. |
226 | */ |
227 | switch (fl->l_whence) { |
228 | case SEEK_SET: |
229 | case SEEK_CUR: |
230 | /* |
231 | * Caller is responsible for adding any necessary offset |
232 | * when SEEK_CUR is used. |
233 | */ |
234 | start = fl->l_start; |
235 | break; |
236 | case SEEK_END: |
237 | start = size + fl->l_start; |
238 | break; |
239 | default: |
240 | return (EINVAL); |
241 | } |
242 | if (start < 0) |
243 | return (EINVAL); |
244 | if (fl->l_len > 0) { |
245 | if (fl->l_len - 1 > LLONG_MAX - start) |
246 | return (EOVERFLOW); |
247 | end = start + (fl->l_len - 1); |
248 | /* Avoid ambiguity at the end of the range. */ |
249 | if (end == LLONG_MAX) |
250 | end = -1; |
251 | } else if (fl->l_len < 0) { |
252 | if (start + fl->l_len < 0) |
253 | return (EINVAL); |
254 | end = start - 1; |
255 | start += fl->l_len; |
256 | } else { |
257 | end = -1; |
258 | } |
259 | |
260 | rw_enter_write(&lockf_lock); |
261 | ls = *state; |
262 | |
263 | /* |
264 | * Avoid the common case of unlocking when inode has no locks. |
265 | */ |
266 | if (ls == NULL && op != F_SETLK) { |
267 | fl->l_type = F_UNLCK; |
268 | goto out; |
269 | } |
270 | |
271 | if (ls == NULL) { |
272 | ls = pool_get(&lockf_state_pool, PR_WAITOK | PR_ZERO); |
273 | ls->ls_owner = state; |
274 | TAILQ_INIT(&ls->ls_locks); |
275 | TAILQ_INIT(&ls->ls_pending); |
276 | *state = ls; |
277 | } |
278 | ls_ref(ls); |
279 | |
280 | lock = lf_alloc(p->p_ucred->cr_uid, op == F_SETLK ? 1 : 2); |
281 | if (!lock) { |
282 | ls_rele(ls); |
283 | error = ENOLCK; |
284 | goto out; |
285 | } |
286 | lock->lf_flags = flags; |
287 | lock->lf_type = fl->l_type; |
288 | lock->lf_start = start; |
289 | lock->lf_end = end; |
290 | lock->lf_id = id; |
291 | lock->lf_state = ls; |
292 | lock->lf_blk = NULL; |
293 | lock->lf_pid = (flags & F_POSIX) ? p->p_p->ps_pid : -1; |
294 | TAILQ_INIT(&lock->lf_blkhd); |
295 | |
296 | switch (op) { |
297 | case F_SETLK: |
298 | error = lf_setlock(lock); |
299 | break; |
300 | case F_UNLCK: |
301 | error = lf_clearlock(lock); |
302 | lf_free(lock); |
303 | break; |
304 | case F_GETLK: |
305 | error = lf_getlock(lock, fl); |
306 | lf_free(lock); |
307 | break; |
308 | default: |
309 | lf_free(lock); |
310 | error = EINVAL; |
311 | break; |
312 | } |
313 | |
314 | out: |
315 | rw_exit_write(&lockf_lock); |
316 | return (error); |
317 | } |
318 | |
319 | /* |
320 | * Set a byte-range lock. |
321 | */ |
322 | int |
323 | lf_setlock(struct lockf *lock) |
324 | { |
325 | struct lockf *block; |
326 | struct lockf *overlap, *ltmp; |
327 | int ovcase, priority, needtolink, error; |
328 | |
329 | rw_assert_wrlock(&lockf_lock); |
330 | |
331 | LFPRINT(("lf_setlock", lock), DEBUG_SETLOCK); |
332 | |
333 | priority = PLOCK; |
334 | if (lock->lf_type == F_WRLCK) |
335 | priority += 4; |
336 | priority |= PCATCH; |
337 | /* |
338 | * Scan lock list for this file looking for locks that would block us. |
339 | */ |
340 | for (;;) { |
341 | block = lf_getblock(TAILQ_FIRST(&lock->lf_state->ls_locks), |
342 | lock); |
343 | if (block == NULL) |
344 | break; |
345 | |
346 | if ((lock->lf_flags & F_WAIT) == 0) { |
347 | lf_free(lock); |
348 | return (EAGAIN); |
349 | } |
350 | |
351 | /* |
352 | * Lock is blocked, check for deadlock before proceeding. |
353 | * Note: flock style locks cover the whole file, there is no |
354 | * chance for deadlock. |
355 | */ |
356 | if ((lock->lf_flags & F_POSIX) && lf_deadlock(lock)) { |
357 | lf_free(lock); |
358 | return (EDEADLK); |
359 | } |
360 | |
361 | /* |
362 | * For flock type locks, we must first remove |
363 | * any shared locks that we hold before we sleep |
364 | * waiting for an exclusive lock. |
365 | */ |
366 | if ((lock->lf_flags & F_FLOCK) && lock->lf_type == F_WRLCK) { |
367 | lock->lf_type = F_UNLCK; |
368 | (void)lf_clearlock(lock); |
369 | lock->lf_type = F_WRLCK; |
370 | } |
371 | /* |
372 | * Add our lock to the blocked list and sleep until we're free. |
373 | * Remember who blocked us (for deadlock detection). |
374 | */ |
375 | lock->lf_blk = block; |
376 | LFPRINT(("lf_setlock", lock), DEBUG_SETLOCK); |
377 | LFPRINT(("lf_setlock: blocking on", block), DEBUG_SETLOCK); |
378 | TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block); |
379 | TAILQ_INSERT_TAIL(&lock->lf_state->ls_pending, lock, lf_entry); |
380 | error = rwsleep_nsec(lock, &lockf_lock, priority, "lockf", |
381 | INFSLP); |
382 | TAILQ_REMOVE(&lock->lf_state->ls_pending, lock, lf_entry); |
383 | wakeup_one(lock->lf_state); |
384 | if (lock->lf_blk != NULL) { |
385 | TAILQ_REMOVE(&lock->lf_blk->lf_blkhd, lock, lf_block); |
386 | lock->lf_blk = NULL; |
387 | } |
388 | if (error) { |
389 | lf_free(lock); |
390 | return (error); |
391 | } |
392 | if (lock->lf_flags & F_INTR) { |
393 | lf_free(lock); |
394 | return (EINTR); |
395 | } |
396 | } |
397 | /* |
398 | * No blocks!! Add the lock. Note that we will |
399 | * downgrade or upgrade any overlapping locks this |
400 | * process already owns. |
401 | * |
402 | * Skip over locks owned by other processes. |
403 | * Handle any locks that overlap and are owned by ourselves. |
404 | */ |
405 | block = TAILQ_FIRST(&lock->lf_state->ls_locks); |
406 | overlap = NULL; |
407 | needtolink = 1; |
408 | for (;;) { |
409 | ovcase = lf_findoverlap(block, lock, SELF, &overlap); |
410 | if (ovcase) |
411 | block = TAILQ_NEXT(overlap, lf_entry); |
412 | /* |
413 | * Six cases: |
414 | * 0) no overlap |
415 | * 1) overlap == lock |
416 | * 2) overlap contains lock |
417 | * 3) lock contains overlap |
418 | * 4) overlap starts before lock |
419 | * 5) overlap ends after lock |
420 | */ |
421 | switch (ovcase) { |
422 | case 0: /* no overlap */ |
423 | if (needtolink) { |
424 | if (overlap) /* insert before overlap */ |
425 | TAILQ_INSERT_BEFORE(overlap, lock, |
426 | lf_entry); |
427 | else /* first or last lock in list */ |
428 | TAILQ_INSERT_TAIL(&lock->lf_state->ls_locks, |
429 | lock, lf_entry); |
430 | } |
431 | break; |
432 | case 1: /* overlap == lock */ |
433 | /* |
434 | * If downgrading lock, others may be |
435 | * able to acquire it. |
436 | */ |
437 | if (lock->lf_type == F_RDLCK && |
438 | overlap->lf_type == F_WRLCK) |
439 | lf_wakelock(overlap, 0); |
440 | overlap->lf_type = lock->lf_type; |
441 | lf_free(lock); |
442 | lock = overlap; /* for debug output below */ |
443 | break; |
444 | case 2: /* overlap contains lock */ |
445 | /* |
446 | * Check for common starting point and different types. |
447 | */ |
448 | if (overlap->lf_type == lock->lf_type) { |
449 | if (!needtolink) |
450 | TAILQ_REMOVE(&lock->lf_state->ls_locks, |
451 | lock, lf_entry); |
452 | lf_free(lock); |
453 | lock = overlap; /* for debug output below */ |
Value stored to 'lock' is never read
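Note: after this assignment, `lock` is read again only by the LFPRINT("lf_setlock: got the lock", lock) call at line 519. When LOCKF_DEBUG is not defined, LFPRINT expands to nothing (lines 100-103), so in non-debug builds the store is indeed dead, which appears to be what the analyzer is reporting here.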
454 | break; |
455 | } |
456 | if (overlap->lf_start == lock->lf_start) { |
457 | if (!needtolink) |
458 | TAILQ_REMOVE(&lock->lf_state->ls_locks, |
459 | lock, lf_entry); |
460 | TAILQ_INSERT_BEFORE(overlap, lock, lf_entry); |
461 | overlap->lf_start = lock->lf_end + 1; |
462 | } else |
463 | lf_split(overlap, lock); |
464 | lf_wakelock(overlap, 0); |
465 | break; |
466 | case 3: /* lock contains overlap */ |
467 | /* |
468 | * If downgrading lock, others may be able to |
469 | * acquire it, otherwise take the list. |
470 | */ |
471 | if (lock->lf_type == F_RDLCK && |
472 | overlap->lf_type == F_WRLCK) { |
473 | lf_wakelock(overlap, 0); |
474 | } else { |
475 | while ((ltmp = |
476 | TAILQ_FIRST(&overlap->lf_blkhd))) { |
477 | TAILQ_REMOVE(&overlap->lf_blkhd, ltmp, |
478 | lf_block); |
479 | ltmp->lf_blk = lock; |
480 | TAILQ_INSERT_TAIL(&lock->lf_blkhd, |
481 | ltmp, lf_block); |
482 | } |
483 | } |
484 | /* |
485 | * Add the new lock if necessary and delete the overlap. |
486 | */ |
487 | if (needtolink) { |
488 | TAILQ_INSERT_BEFORE(overlap, lock, lf_entry); |
489 | needtolink = 0; |
490 | } |
491 | TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap, lf_entry); |
492 | lf_free(overlap); |
493 | continue; |
494 | case 4: /* overlap starts before lock */ |
495 | /* |
496 | * Add lock after overlap on the list. |
497 | */ |
498 | if (!needtolink) |
499 | TAILQ_REMOVE(&lock->lf_state->ls_locks, lock, |
500 | lf_entry); |
501 | TAILQ_INSERT_AFTER(&lock->lf_state->ls_locks, overlap, |
502 | lock, lf_entry); |
503 | overlap->lf_end = lock->lf_start - 1; |
504 | lf_wakelock(overlap, 0); |
505 | needtolink = 0; |
506 | continue; |
507 | case 5: /* overlap ends after lock */ |
508 | /* |
509 | * Add the new lock before overlap. |
510 | */ |
511 | if (needtolink) |
512 | TAILQ_INSERT_BEFORE(overlap, lock, lf_entry); |
513 | overlap->lf_start = lock->lf_end + 1; |
514 | lf_wakelock(overlap, 0); |
515 | break; |
516 | } |
517 | break; |
518 | } |
519 | LFPRINT(("lf_setlock: got the lock", lock), DEBUG_SETLOCK); |
520 | return (0); |
521 | } |
522 | |
523 | /* |
524 | * Remove a byte-range lock on an inode. |
525 | * |
526 | * Generally, find the lock (or an overlap to that lock) |
527 | * and remove it (or shrink it), then wakeup anyone we can. |
528 | */ |
529 | int |
530 | lf_clearlock(struct lockf *lock) |
531 | { |
532 | struct lockf *lf, *overlap; |
533 | int ovcase; |
534 | |
535 | rw_assert_wrlock(&lockf_lock); |
536 | |
537 | lf = TAILQ_FIRST(&lock->lf_state->ls_locks); |
538 | if (lf == NULL) |
539 | return (0); |
540 | |
541 | LFPRINT(("lf_clearlock", lock), DEBUG_CLEARLOCK); |
542 | while ((ovcase = lf_findoverlap(lf, lock, SELF, &overlap))) { |
543 | lf_wakelock(overlap, 0); |
544 | |
545 | switch (ovcase) { |
546 | case 1: /* overlap == lock */ |
547 | TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap, |
548 | lf_entry); |
549 | lf_free(overlap); |
550 | break; |
551 | case 2: /* overlap contains lock: split it */ |
552 | if (overlap->lf_start == lock->lf_start) { |
553 | overlap->lf_start = lock->lf_end + 1; |
554 | break; |
555 | } |
556 | lf_split(overlap, lock); |
557 | /* |
558 | * The lock is now part of the list, lf_clearlock() must |
559 | * ensure that the lock remains detached from the list. |
560 | */ |
561 | TAILQ_REMOVE(&lock->lf_state->ls_locks, lock, lf_entry); |
562 | break; |
563 | case 3: /* lock contains overlap */ |
564 | lf = TAILQ_NEXT(overlap, lf_entry); |
565 | TAILQ_REMOVE(&lock->lf_state->ls_locks, overlap, |
566 | lf_entry); |
567 | lf_free(overlap); |
568 | continue; |
569 | case 4: /* overlap starts before lock */ |
570 | overlap->lf_end = lock->lf_start - 1; |
571 | lf = TAILQ_NEXT(overlap, lf_entry); |
572 | continue; |
573 | case 5: /* overlap ends after lock */ |
574 | overlap->lf_start = lock->lf_end + 1; |
575 | break; |
576 | } |
577 | break; |
578 | } |
579 | return (0); |
580 | } |
581 | |
582 | /* |
583 | * Check whether there is a blocking lock, |
584 | * and if so return its process identifier. |
585 | */ |
586 | int |
587 | lf_getlock(struct lockf *lock, struct flock *fl) |
588 | { |
589 | struct lockf *block, *lf; |
590 | |
591 | rw_assert_wrlock(&lockf_lock); |
592 | |
593 | LFPRINT(("lf_getlock", lock), DEBUG_CLEARLOCK); |
594 | |
595 | lf = TAILQ_FIRST(&lock->lf_state->ls_locks); |
596 | if ((block = lf_getblock(lf, lock)) != NULL) { |
597 | fl->l_type = block->lf_type; |
598 | fl->l_whence = SEEK_SET; |
599 | fl->l_start = block->lf_start; |
600 | if (block->lf_end == -1) |
601 | fl->l_len = 0; |
602 | else |
603 | fl->l_len = block->lf_end - block->lf_start + 1; |
604 | fl->l_pid = block->lf_pid; |
605 | } else { |
606 | fl->l_type = F_UNLCK; |
607 | } |
608 | return (0); |
609 | } |
610 | |
611 | /* |
612 | * Walk the list of locks for an inode and |
613 | * return the first blocking lock. |
614 | */ |
615 | struct lockf * |
616 | lf_getblock(struct lockf *lf, struct lockf *lock) |
617 | { |
618 | struct lockf *overlap; |
619 | |
620 | rw_assert_wrlock(&lockf_lock); |
621 | |
622 | while (lf_findoverlap(lf, lock, OTHERS, &overlap) != 0) { |
623 | /* |
624 | * We've found an overlap, see if it blocks us |
625 | */ |
626 | if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)) |
627 | return (overlap); |
628 | /* |
629 | * Nope, point to the next one on the list and |
630 | * see if it blocks us |
631 | */ |
632 | lf = TAILQ_NEXT(overlap, lf_entry); |
633 | } |
634 | return (NULL); |
635 | } |
636 | |
637 | /* |
638 | * Walk the list of locks for an inode to |
639 | * find an overlapping lock (if any). |
640 | * |
641 | * NOTE: this returns only the FIRST overlapping lock. There |
642 | * may be more than one. |
643 | */ |
644 | int |
645 | lf_findoverlap(struct lockf *lf, struct lockf *lock, int type, |
646 | struct lockf **overlap) |
647 | { |
648 | off_t start, end; |
649 | |
650 | rw_assert_wrlock(&lockf_lock); |
651 | |
652 | LFPRINT(("lf_findoverlap: looking for overlap in", lock), DEBUG_FINDOVR); |
653 | |
654 | *overlap = lf; |
655 | start = lock->lf_start; |
656 | end = lock->lf_end; |
657 | while (lf != NULL) { |
658 | if (((type & SELF) && lf->lf_id != lock->lf_id) || |
659 | ((type & OTHERS) && lf->lf_id == lock->lf_id)) { |
660 | *overlap = lf = TAILQ_NEXT(lf, lf_entry); |
661 | continue; |
662 | } |
663 | LFPRINT(("\tchecking", lf), DEBUG_FINDOVR); |
664 | /* |
665 | * OK, check for overlap |
666 | * |
667 | * Six cases: |
668 | * 0) no overlap |
669 | * 1) overlap == lock |
670 | * 2) overlap contains lock |
671 | * 3) lock contains overlap |
672 | * 4) overlap starts before lock |
673 | * 5) overlap ends after lock |
674 | */ |
675 | |
676 | /* Case 0 */ |
677 | if ((lf->lf_end != -1 && start > lf->lf_end) || |
678 | (end != -1 && lf->lf_start > end)) { |
679 | DPRINTF(("no overlap\n"), DEBUG_FINDOVR); |
680 | if ((type & SELF) && end != -1 && lf->lf_start > end) |
681 | return (0); |
682 | *overlap = lf = TAILQ_NEXT(lf, lf_entry); |
683 | continue; |
684 | } |
685 | /* Case 1 */ |
686 | if ((lf->lf_start == start) && (lf->lf_end == end)) { |
687 | DPRINTF(("overlap == lock\n"), DEBUG_FINDOVR); |
688 | return (1); |
689 | } |
690 | /* Case 2 */ |
691 | if ((lf->lf_start <= start) && |
692 | (lf->lf_end == -1 || (end != -1 && lf->lf_end >= end))) { |
693 | DPRINTF(("overlap contains lock\n"), DEBUG_FINDOVR); |
694 | return (2); |
695 | } |
696 | /* Case 3 */ |
697 | if (start <= lf->lf_start && |
698 | (end == -1 || (lf->lf_end != -1 && end >= lf->lf_end))) { |
699 | DPRINTF(("lock contains overlap\n"), DEBUG_FINDOVR); |
700 | return (3); |
701 | } |
702 | /* Case 4 */ |
703 | if ((lf->lf_start < start) && |
704 | ((lf->lf_end >= start) || (lf->lf_end == -1))) { |
705 | DPRINTF(("overlap starts before lock\n"), |
706 | DEBUG_FINDOVR); |
707 | return (4); |
708 | } |
709 | /* Case 5 */ |
710 | if ((lf->lf_start > start) && (end != -1) && |
711 | ((lf->lf_end > end) || (lf->lf_end == -1))) { |
712 | DPRINTF(("overlap ends after lock\n"), DEBUG_FINDOVR); |
713 | return (5); |
714 | } |
715 | panic("lf_findoverlap: default"); |
716 | } |
717 | return (0); |
718 | } |
719 | |
720 | /* |
721 | * Purge all locks associated with the given lock state. |
722 | */ |
723 | void |
724 | lf_purgelocks(struct lockf_state **state) |
725 | { |
726 | struct lockf_state *ls; |
727 | struct lockf *lock; |
728 | |
729 | rw_enter_write(&lockf_lock); |
730 | |
731 | ls = *state; |
732 | if (ls == NULL) |
733 | goto out; |
734 | |
735 | ls_ref(ls); |
736 | |
737 | /* Interrupt blocked locks and wait for all of them to finish. */ |
738 | TAILQ_FOREACH(lock, &ls->ls_locks, lf_entry) { |
739 | LFPRINT(("lf_purgelocks: wakeup", lock), DEBUG_SETLOCK); |
740 | lf_wakelock(lock, F_INTR); |
741 | } |
742 | while (!TAILQ_EMPTY(&ls->ls_pending)) |
743 | rwsleep_nsec(ls, &lockf_lock, PLOCK, "lockfp", INFSLP); |
744 | |
745 | /* |
746 | * Any remaining locks cannot block other locks at this point and can |
747 | * safely be removed. |
748 | */ |
749 | while ((lock = TAILQ_FIRST(&ls->ls_locks))) { |
750 | TAILQ_REMOVE(&ls->ls_locks, lock, lf_entry); |
751 | lf_free(lock); |
752 | } |
753 | |
754 | /* This is the last expected thread to hold a lock state reference. */ |
755 | KASSERT(ls->ls_refs == 1); |
756 | ls_rele(ls); |
757 | |
758 | out: |
759 | rw_exit_write(&lockf_lock); |
760 | } |
761 | |
762 | /* |
763 | * Split a lock and a contained region into |
764 | * two or three locks as necessary. |
765 | */ |
766 | void |
767 | lf_split(struct lockf *lock1, struct lockf *lock2) |
768 | { |
769 | struct lockf *splitlock; |
770 | |
771 | rw_assert_wrlock(&lockf_lock); |
772 | |
773 | LFPRINT(("lf_split", lock1), DEBUG_SPLIT); |
774 | LFPRINT(("splitting from", lock2), DEBUG_SPLIT); |
775 | |
776 | /* |
777 | * Check to see if splitting into only two pieces. |
778 | */ |
779 | if (lock1->lf_start == lock2->lf_start) { |
780 | lock1->lf_start = lock2->lf_end + 1; |
781 | TAILQ_INSERT_BEFORE(lock1, lock2, lf_entry); |
782 | return; |
783 | } |
784 | if (lock1->lf_end == lock2->lf_end) { |
785 | lock1->lf_end = lock2->lf_start - 1; |
786 | TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock1, lock2, |
787 | lf_entry); |
788 | return; |
789 | } |
790 | /* |
791 | * Make a new lock consisting of the last part of |
792 | * the encompassing lock |
793 | */ |
794 | splitlock = lf_alloc(lock1->lf_uid, 0); |
795 | splitlock->lf_flags = lock1->lf_flags; |
796 | splitlock->lf_type = lock1->lf_type; |
797 | splitlock->lf_start = lock2->lf_end + 1; |
798 | splitlock->lf_end = lock1->lf_end; |
799 | splitlock->lf_id = lock1->lf_id; |
800 | splitlock->lf_state = lock1->lf_state; |
801 | splitlock->lf_blk = NULL; |
802 | splitlock->lf_pid = lock1->lf_pid; |
803 | TAILQ_INIT(&splitlock->lf_blkhd); |
804 | ls_ref(splitlock->lf_state); |
805 | lock1->lf_end = lock2->lf_start - 1; |
806 | |
807 | TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock1, lock2, lf_entry); |
808 | TAILQ_INSERT_AFTER(&lock1->lf_state->ls_locks, lock2, splitlock, |
809 | lf_entry); |
810 | } |
811 | |
812 | /* |
813 | * Wakeup a blocklist |
814 | */ |
815 | void |
816 | lf_wakelock(struct lockf *lock, int flags) |
817 | { |
818 | struct lockf *wakelock; |
819 | |
820 | rw_assert_wrlock(&lockf_lock); |
821 | |
822 | while ((wakelock = TAILQ_FIRST(&lock->lf_blkhd))) { |
823 | TAILQ_REMOVE(&lock->lf_blkhd, wakelock, lf_block); |
824 | wakelock->lf_blk = NULL; |
825 | wakelock->lf_flags |= flags; |
826 | wakeup_one(wakelock); |
827 | } |
828 | } |
829 | |
830 | /* |
831 | * Returns non-zero if the given lock would cause a deadlock. |
832 | */ |
833 | int |
834 | lf_deadlock(struct lockf *lock) |
835 | { |
836 | struct lockf *block, *lf, *pending; |
837 | |
838 | lf = TAILQ_FIRST(&lock->lf_state->ls_locks); |
839 | for (; (block = lf_getblock(lf, lock)) != NULL; |
840 | lf = TAILQ_NEXT(block, lf_entry)) { |
841 | if ((block->lf_flags & F_POSIX) == 0) |
842 | continue; |
843 | |
844 | TAILQ_FOREACH(pending, &lock->lf_state->ls_pending, lf_entry) { |
845 | if (pending->lf_blk == NULL) |
846 | continue; /* lock already unblocked */ |
847 | |
848 | if (pending->lf_pid == block->lf_pid && |
849 | pending->lf_blk->lf_pid == lock->lf_pid) |
850 | return (1); |
851 | } |
852 | } |
853 | |
854 | return (0); |
855 | } |
856 | |
857 | #ifdef LOCKF_DEBUG |
858 | /* |
859 | * Print out a lock. |
860 | */ |
861 | void |
862 | lf_print(const char *tag, struct lockf *lock) |
863 | { |
864 | struct lockf *block; |
865 | |
866 | if (tag) |
867 | printf("%s: ", tag); |
868 | printf("lock %p", lock); |
869 | if (lock == NULL) { |
870 | printf("\n"); |
871 | return; |
872 | } |
873 | printf(", %s %p %s, start %lld, end %lld", |
874 | lock->lf_flags & F_POSIX ? "posix" : "flock", |
875 | lock->lf_id, |
876 | lock->lf_type == F_RDLCK ? "shared" : |
877 | lock->lf_type == F_WRLCK ? "exclusive" : |
878 | lock->lf_type == F_UNLCK ? "unlock" : |
879 | "unknown", lock->lf_start, lock->lf_end); |
880 | printf(", next %p, state %p", |
881 | TAILQ_NEXT(lock, lf_entry), lock->lf_state); |
882 | block = TAILQ_FIRST(&lock->lf_blkhd); |
883 | if (block) |
884 | printf(", block"); |
885 | TAILQ_FOREACH(block, &lock->lf_blkhd, lf_block) |
886 | printf(" %p,", block); |
887 | printf("\n"); |
888 | } |
889 | |
890 | void |
891 | lf_printlist(const char *tag, struct lockf *lock) |
892 | { |
893 | struct lockf *lf; |
894 | |
895 | printf("%s: Lock list:\n", tag); |
896 | TAILQ_FOREACH(lf, &lock->lf_state->ls_locks, lf_entry) { |
897 | if (lock == lf) |
898 | printf(" * "); |
899 | else |
900 | printf(" "); |
901 | lf_print(NULL, lf); |
902 | } |
903 | } |
904 | #endif /* LOCKF_DEBUG */ |