File: | kern/vfs_bio.c |
Warning: | line 269, column 2 | Value stored to 'npages' is never read |
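The flagged dead store sits in bufadjust() (source lines 253-289 below): npages is assigned the difference bcstats.dmapages - targetpages, but the value is never used again; the branch that follows recomputes the same expression inline. Below is a minimal sketch of the flagged pattern together with one possible cleanup. All names are taken from the listing; the cleanup itself is only an illustration of how the dead store could go away, not the change actually committed upstream.

	/* As reported: 'npages' is stored here ... */
	npages = bcstats.dmapages - targetpages;

	/* ... but never read; the same difference is recomputed below. */
	if (bcstats.dmapages > targetpages)
		(void) bufcache_recover_dmapages(0,
		    bcstats.dmapages - targetpages);

	/*
	 * Illustrative cleanup (an assumption, not the upstream fix):
	 * keep the computed value and use it, so the store is no longer dead.
	 */
	npages = bcstats.dmapages - targetpages;
	if (npages > 0)
		(void) bufcache_recover_dmapages(0, npages);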
1 | /* $OpenBSD: vfs_bio.c,v 1.208 2021/12/12 09:14:59 visa Exp $ */ |
2 | /* $NetBSD: vfs_bio.c,v 1.44 1996/06/11 11:15:36 pk Exp $ */ |
3 | |
4 | /* |
5 | * Copyright (c) 1994 Christopher G. Demetriou |
6 | * Copyright (c) 1982, 1986, 1989, 1993 |
7 | * The Regents of the University of California. All rights reserved. |
8 | * (c) UNIX System Laboratories, Inc. |
9 | * All or some portions of this file are derived from material licensed |
10 | * to the University of California by American Telephone and Telegraph |
11 | * Co. or Unix System Laboratories, Inc. and are reproduced herein with |
12 | * the permission of UNIX System Laboratories, Inc. |
13 | * |
14 | * Redistribution and use in source and binary forms, with or without |
15 | * modification, are permitted provided that the following conditions |
16 | * are met: |
17 | * 1. Redistributions of source code must retain the above copyright |
18 | * notice, this list of conditions and the following disclaimer. |
19 | * 2. Redistributions in binary form must reproduce the above copyright |
20 | * notice, this list of conditions and the following disclaimer in the |
21 | * documentation and/or other materials provided with the distribution. |
22 | * 3. Neither the name of the University nor the names of its contributors |
23 | * may be used to endorse or promote products derived from this software |
24 | * without specific prior written permission. |
25 | * |
26 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
27 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
28 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
29 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
30 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
31 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
32 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
33 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
34 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
35 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
36 | * SUCH DAMAGE. |
37 | * |
38 | * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94 |
39 | */ |
40 | |
41 | /* |
42 | * Some references: |
43 | * Bach: The Design of the UNIX Operating System (Prentice Hall, 1986) |
44 | * Leffler, et al.: The Design and Implementation of the 4.3BSD |
45 | * UNIX Operating System (Addison-Wesley, 1989) |
46 | */ |
47 | |
48 | #include <sys/param.h> |
49 | #include <sys/systm.h> |
50 | #include <sys/proc.h> |
51 | #include <sys/buf.h> |
52 | #include <sys/vnode.h> |
53 | #include <sys/mount.h> |
54 | #include <sys/malloc.h> |
55 | #include <sys/pool.h> |
56 | #include <sys/resourcevar.h> |
57 | #include <sys/conf.h> |
58 | #include <sys/kernel.h> |
59 | #include <sys/specdev.h> |
60 | #include <sys/tracepoint.h> |
61 | #include <uvm/uvm_extern.h> |
62 | |
63 | /* XXX Should really be in buf.h, but for uvm_constraint_range.. */ |
64 | int buf_realloc_pages(struct buf *, struct uvm_constraint_range *, int); |
65 | |
66 | struct uvm_constraint_range high_constraint; |
67 | int fliphigh; |
68 | |
69 | int nobuffers; |
70 | int needbuffer; |
71 | struct bio_ops bioops; |
72 | |
73 | /* private bufcache functions */ |
74 | void bufcache_init(void); |
75 | void bufcache_adjust(void); |
76 | struct buf *bufcache_gethighcleanbuf(void); |
77 | struct buf *bufcache_getdmacleanbuf(void); |
78 | |
79 | /* |
80 | * Buffer pool for I/O buffers. |
81 | */ |
82 | struct pool bufpool; |
83 | struct bufhead bufhead = LIST_HEAD_INITIALIZER(bufhead){ ((void *)0) }; |
84 | void buf_put(struct buf *); |
85 | |
86 | struct buf *bio_doread(struct vnode *, daddr_t, int, int); |
87 | struct buf *buf_get(struct vnode *, daddr_t, size_t); |
88 | void bread_cluster_callback(struct buf *); |
89 | int64_t bufcache_recover_dmapages(int discard, int64_t howmany); |
90 | |
91 | struct bcachestats bcstats; /* counters */ |
92 | long lodirtypages; /* dirty page count low water mark */ |
93 | long hidirtypages; /* dirty page count high water mark */ |
94 | long targetpages; /* target number of pages for cache size */ |
95 | long buflowpages; /* smallest size cache allowed */ |
96 | long bufhighpages; /* largest size cache allowed */ |
97 | long bufbackpages; /* minimum number of pages we shrink when asked to */ |
98 | |
99 | vsize_t bufkvm; |
100 | |
101 | struct proc *cleanerproc; |
102 | int bd_req; /* Sleep point for cleaner daemon. */ |
103 | |
104 | #define NUM_CACHES2 2 |
105 | #define DMA_CACHE0 0 |
106 | struct bufcache cleancache[NUM_CACHES2]; |
107 | struct bufqueue dirtyqueue; |
108 | |
109 | void |
110 | buf_put(struct buf *bp) |
111 | { |
112 | splassert(IPL_BIO)do { if (splassert_ctl > 0) { splassert_check(0x6, __func__ ); } } while (0); |
113 | |
114 | #ifdef DIAGNOSTIC1 |
115 | if (bp->b_pobj != NULL((void *)0)) |
116 | KASSERT(bp->b_bufsize > 0)((bp->b_bufsize > 0) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/kern/vfs_bio.c", 116, "bp->b_bufsize > 0" )); |
117 | if (ISSET(bp->b_flags, B_DELWRI)((bp->b_flags) & (0x00000080))) |
118 | panic("buf_put: releasing dirty buffer"); |
119 | if (bp->b_freelist.tqe_next != NOLIST((struct buf *)0x87654321) && |
120 | bp->b_freelist.tqe_next != (void *)-1) |
121 | panic("buf_put: still on the free list"); |
122 | if (bp->b_vnbufs.le_next != NOLIST((struct buf *)0x87654321) && |
123 | bp->b_vnbufs.le_next != (void *)-1) |
124 | panic("buf_put: still on the vnode list"); |
125 | if (!LIST_EMPTY(&bp->b_dep)(((&bp->b_dep)->lh_first) == ((void *)0))) |
126 | panic("buf_put: b_dep is not empty"); |
127 | #endif |
128 | |
129 | LIST_REMOVE(bp, b_list)do { if ((bp)->b_list.le_next != ((void *)0)) (bp)->b_list .le_next->b_list.le_prev = (bp)->b_list.le_prev; *(bp)-> b_list.le_prev = (bp)->b_list.le_next; ((bp)->b_list.le_prev ) = ((void *)-1); ((bp)->b_list.le_next) = ((void *)-1); } while (0); |
130 | bcstats.numbufs--; |
131 | |
132 | if (buf_dealloc_mem(bp) != 0) |
133 | return; |
134 | pool_put(&bufpool, bp); |
135 | } |
136 | |
137 | /* |
138 | * Initialize buffers and hash links for buffers. |
139 | */ |
140 | void |
141 | bufinit(void) |
142 | { |
143 | u_int64_t dmapages; |
144 | u_int64_t highpages; |
145 | |
146 | dmapages = uvm_pagecount(&dma_constraint); |
147 | /* take away a guess at how much of this the kernel will consume */ |
148 | dmapages -= (atop(physmem)((physmem) >> 12) - atop(uvmexp.free)((uvmexp.free) >> 12)); |
149 | |
150 | /* See if we have memory above the dma accessible region. */ |
151 | high_constraint.ucr_low = dma_constraint.ucr_high; |
152 | high_constraint.ucr_high = no_constraint.ucr_high; |
153 | if (high_constraint.ucr_low != high_constraint.ucr_high) |
154 | high_constraint.ucr_low++; |
155 | highpages = uvm_pagecount(&high_constraint); |
156 | |
157 | /* |
158 | * Do we have any significant amount of high memory above |
159 | * the DMA region? If so, enable moving buffers there; if not, |
160 | * don't bother. |
161 | */ |
162 | if (highpages > dmapages / 4) |
163 | fliphigh = 1; |
164 | else |
165 | fliphigh = 0; |
166 | |
167 | /* |
168 | * If MD code doesn't say otherwise, use up to 10% of DMA'able |
169 | * memory for buffers. |
170 | */ |
171 | if (bufcachepercent == 0) |
172 | bufcachepercent = 10; |
173 | |
174 | /* |
175 | * XXX these values and their same use in kern_sysctl |
176 | * need to move into buf.h |
177 | */ |
178 | KASSERT(bufcachepercent <= 90)((bufcachepercent <= 90) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/kern/vfs_bio.c", 178, "bufcachepercent <= 90" )); |
179 | KASSERT(bufcachepercent >= 5)((bufcachepercent >= 5) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/kern/vfs_bio.c", 179, "bufcachepercent >= 5" )); |
180 | if (bufpages == 0) |
181 | bufpages = dmapages * bufcachepercent / 100; |
182 | if (bufpages < BCACHE_MIN((4 * (64 * 1024) / (1 << 12)) * 2)) |
183 | bufpages = BCACHE_MIN((4 * (64 * 1024) / (1 << 12)) * 2); |
184 | KASSERT(bufpages < dmapages)((bufpages < dmapages) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/kern/vfs_bio.c", 184, "bufpages < dmapages" )); |
185 | |
186 | bufhighpages = bufpages; |
187 | |
188 | /* |
189 | * Set the base backoff level for the buffer cache. We will |
190 | * not allow uvm to steal back more than this number of pages. |
191 | */ |
192 | buflowpages = dmapages * 5 / 100; |
193 | if (buflowpages < BCACHE_MIN((4 * (64 * 1024) / (1 << 12)) * 2)) |
194 | buflowpages = BCACHE_MIN((4 * (64 * 1024) / (1 << 12)) * 2); |
195 | |
196 | /* |
197 | * set bufbackpages to 100 pages, or 10 percent of the low water mark |
198 | * if we don't have that many pages. |
199 | */ |
200 | |
201 | bufbackpages = buflowpages * 10 / 100; |
202 | if (bufbackpages > 100) |
203 | bufbackpages = 100; |
204 | |
205 | /* |
206 | * If the MD code does not say otherwise, reserve 10% of kva |
207 | * space for mapping buffers. |
208 | */ |
209 | if (bufkvm == 0) |
210 | bufkvm = VM_KERNEL_SPACE_SIZE(0xffff800100000000 - 0xffff800000000000) / 10; |
211 | |
212 | /* |
213 | * Don't use more than twice the amount of bufpages for mappings. |
214 | * It's twice since we map things sparsely. |
215 | */ |
216 | if (bufkvm > bufpages * PAGE_SIZE(1 << 12)) |
217 | bufkvm = bufpages * PAGE_SIZE(1 << 12); |
218 | /* |
219 | * Round bufkvm to MAXPHYS because we allocate chunks of va space |
220 | * in MAXPHYS chunks. |
221 | */ |
222 | bufkvm &= ~(MAXPHYS(64 * 1024) - 1); |
223 | |
224 | pool_init(&bufpool, sizeof(struct buf), 0, IPL_BIO0x6, 0, "bufpl", NULL((void *)0)); |
225 | |
226 | bufcache_init(); |
227 | |
228 | /* |
229 | * hmm - bufkvm is an argument because it's static, while |
230 | * bufpages is global because it can change while running. |
231 | */ |
232 | buf_mem_init(bufkvm); |
233 | |
234 | /* |
235 | * Set the dirty page high water mark to be less than the low |
236 | * water mark for pages in the buffer cache. This ensures we |
237 | * can always back off by throwing away clean pages, and give |
238 | * ourselves a chance to write out the dirty pages eventually. |
239 | */ |
240 | hidirtypages = (buflowpages / 4) * 3; |
241 | lodirtypages = buflowpages / 2; |
242 | |
243 | /* |
244 | * We are allowed to use up to the reserve. |
245 | */ |
246 | targetpages = bufpages - RESERVE_PAGES(4 * (64 * 1024) / (1 << 12)); |
247 | } |
248 | |
249 | /* |
250 | * Change cachepct |
251 | */ |
252 | void |
253 | bufadjust(int newbufpages) |
254 | { |
255 | int s; |
256 | int64_t npages; |
257 | |
258 | if (newbufpages < buflowpages) |
259 | newbufpages = buflowpages; |
260 | |
261 | s = splbio()splraise(0x6); |
262 | bufpages = newbufpages; |
263 | |
264 | /* |
265 | * We are allowed to use up to the reserve |
266 | */ |
267 | targetpages = bufpages - RESERVE_PAGES(4 * (64 * 1024) / (1 << 12)); |
268 | |
269 | npages = bcstats.dmapages - targetpages; |
Value stored to 'npages' is never read | |
270 | |
271 | /* |
272 | * Shrinking the cache happens here only if someone has manually |
273 | * adjusted bufcachepercent - or the pagedaemon has told us |
274 | * to give back memory *now* - so we give it all back. |
275 | */ |
276 | if (bcstats.dmapages > targetpages) |
277 | (void) bufcache_recover_dmapages(0, bcstats.dmapages - targetpages); |
278 | bufcache_adjust(); |
279 | |
280 | /* |
281 | * Wake up the cleaner if we have lots of dirty pages, |
282 | * or if we are getting low on buffer cache kva. |
283 | */ |
284 | if ((UNCLEAN_PAGES(bcstats.numbufpages - bcstats.numcleanpages) >= hidirtypages) || |
285 | bcstats.kvaslots_avail <= 2 * RESERVE_SLOTS4) |
286 | wakeup(&bd_req); |
287 | |
288 | splx(s)spllower(s); |
289 | } |
290 | |
291 | /* |
292 | * Make the buffer cache back off from cachepct. |
293 | */ |
294 | int |
295 | bufbackoff(struct uvm_constraint_range *range, long size) |
296 | { |
297 | /* |
298 | * Back off "size" buffer cache pages. Called by the page |
299 | * daemon to consume buffer cache pages rather than scanning. |
300 | * |
301 | * It returns 0 to the pagedaemon to indicate that it has |
302 | * succeeded in freeing enough pages. It returns -1 to |
303 | * indicate that it could not and the pagedaemon should take |
304 | * other measures. |
305 | * |
306 | */ |
307 | long pdelta, oldbufpages; |
308 | |
309 | /* |
310 | * If we will accept high memory for this backoff |
311 | * try to steal it from the high memory buffer cache. |
312 | */ |
313 | if (range != NULL((void *)0) && range->ucr_high > dma_constraint.ucr_high) { |
314 | struct buf *bp; |
315 | int64_t start = bcstats.numbufpages, recovered = 0; |
316 | int s = splbio()splraise(0x6); |
317 | |
318 | while ((recovered < size) && |
319 | (bp = bufcache_gethighcleanbuf())) { |
320 | bufcache_take(bp); |
321 | if (bp->b_vp) { |
322 | RBT_REMOVE(buf_rb_bufs,buf_rb_bufs_RBT_REMOVE(&bp->b_vp->v_bufs_tree, bp) |
323 | &bp->b_vp->v_bufs_tree, bp)buf_rb_bufs_RBT_REMOVE(&bp->b_vp->v_bufs_tree, bp); |
324 | brelvp(bp); |
325 | } |
326 | buf_put(bp); |
327 | recovered = start - bcstats.numbufpages; |
328 | } |
329 | bufcache_adjust(); |
330 | splx(s)spllower(s); |
331 | |
332 | /* If we got enough, return success */ |
333 | if (recovered >= size) |
334 | return 0; |
335 | |
336 | /* |
337 | * If we needed only memory above DMA, |
338 | * return failure |
339 | */ |
340 | if (range->ucr_low > dma_constraint.ucr_high) |
341 | return -1; |
342 | |
343 | /* Otherwise get the rest from DMA */ |
344 | size -= recovered; |
345 | } |
346 | |
347 | /* |
348 | * XXX Otherwise do the dma memory cache dance. this needs |
349 | * refactoring later to get rid of 'bufpages' |
350 | */ |
351 | |
352 | /* |
353 | * Back off by at least bufbackpages. If the page daemon gave us |
354 | * a larger size, back off by that much. |
355 | */ |
356 | pdelta = (size > bufbackpages) ? size : bufbackpages; |
357 | |
358 | if (bufpages <= buflowpages) |
359 | return(-1); |
360 | if (bufpages - pdelta < buflowpages) |
361 | pdelta = bufpages - buflowpages; |
362 | oldbufpages = bufpages; |
363 | bufadjust(bufpages - pdelta); |
364 | if (oldbufpages - bufpages < size) |
365 | return (-1); /* we did not free what we were asked */ |
366 | else |
367 | return(0); |
368 | } |
369 | |
370 | |
371 | /* |
372 | * Opportunistically flip a buffer into high memory. Will move the buffer |
373 | * if memory is available without sleeping, and return 0, otherwise will |
374 | * fail and return -1 with the buffer unchanged. |
375 | */ |
376 | |
377 | int |
378 | buf_flip_high(struct buf *bp) |
379 | { |
380 | int s; |
381 | int ret = -1; |
382 | |
383 | KASSERT(ISSET(bp->b_flags, B_BC))((((bp->b_flags) & (0x02000000))) ? (void)0 : __assert ("diagnostic ", "/usr/src/sys/kern/vfs_bio.c", 383, "ISSET(bp->b_flags, B_BC)" )); |
384 | KASSERT(ISSET(bp->b_flags, B_DMA))((((bp->b_flags) & (0x04000000))) ? (void)0 : __assert ("diagnostic ", "/usr/src/sys/kern/vfs_bio.c", 384, "ISSET(bp->b_flags, B_DMA)" )); |
385 | KASSERT(bp->cache == DMA_CACHE)((bp->cache == 0) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/kern/vfs_bio.c" , 385, "bp->cache == DMA_CACHE")); |
386 | KASSERT(fliphigh)((fliphigh) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/kern/vfs_bio.c" , 386, "fliphigh")); |
387 | |
388 | /* Attempt to move the buffer to high memory if we can */ |
389 | s = splbio()splraise(0x6); |
390 | if (buf_realloc_pages(bp, &high_constraint, UVM_PLA_NOWAIT0x0002) == 0) { |
391 | KASSERT(!ISSET(bp->b_flags, B_DMA))((!((bp->b_flags) & (0x04000000))) ? (void)0 : __assert ("diagnostic ", "/usr/src/sys/kern/vfs_bio.c", 391, "!ISSET(bp->b_flags, B_DMA)" )); |
392 | bcstats.highflips++; |
393 | ret = 0; |
394 | } else |
395 | bcstats.highflops++; |
396 | splx(s)spllower(s); |
397 | |
398 | return ret; |
399 | } |
400 | |
401 | /* |
402 | * Flip a buffer to dma reachable memory, when we need it there for |
403 | * I/O. This can sleep since it will wait for memory allocation in the |
404 | * DMA reachable area since we have to have the buffer there to proceed. |
405 | */ |
406 | void |
407 | buf_flip_dma(struct buf *bp) |
408 | { |
409 | KASSERT(ISSET(bp->b_flags, B_BC))((((bp->b_flags) & (0x02000000))) ? (void)0 : __assert ("diagnostic ", "/usr/src/sys/kern/vfs_bio.c", 409, "ISSET(bp->b_flags, B_BC)" )); |
410 | KASSERT(ISSET(bp->b_flags, B_BUSY))((((bp->b_flags) & (0x00000010))) ? (void)0 : __assert ("diagnostic ", "/usr/src/sys/kern/vfs_bio.c", 410, "ISSET(bp->b_flags, B_BUSY)" )); |
411 | KASSERT(bp->cache < NUM_CACHES)((bp->cache < 2) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/kern/vfs_bio.c" , 411, "bp->cache < NUM_CACHES")); |
412 | |
413 | if (!ISSET(bp->b_flags, B_DMA)((bp->b_flags) & (0x04000000))) { |
414 | int s = splbio()splraise(0x6); |
415 | |
416 | /* move buf to dma reachable memory */ |
417 | (void) buf_realloc_pages(bp, &dma_constraint, UVM_PLA_WAITOK0x0001); |
418 | KASSERT(ISSET(bp->b_flags, B_DMA))((((bp->b_flags) & (0x04000000))) ? (void)0 : __assert ("diagnostic ", "/usr/src/sys/kern/vfs_bio.c", 418, "ISSET(bp->b_flags, B_DMA)" )); |
419 | bcstats.dmaflips++; |
420 | splx(s)spllower(s); |
421 | } |
422 | |
423 | if (bp->cache > DMA_CACHE0) { |
424 | CLR(bp->b_flags, B_COLD)((bp->b_flags) &= ~(0x01000000)); |
425 | CLR(bp->b_flags, B_WARM)((bp->b_flags) &= ~(0x00800000)); |
426 | bp->cache = DMA_CACHE0; |
427 | } |
428 | } |
429 | |
430 | struct buf * |
431 | bio_doread(struct vnode *vp, daddr_t blkno, int size, int async) |
432 | { |
433 | struct buf *bp; |
434 | struct mount *mp; |
435 | |
436 | bp = getblk(vp, blkno, size, 0, INFSLP0xffffffffffffffffULL); |
437 | |
438 | /* |
439 | * If buffer does not have valid data, start a read. |
440 | * Note that if buffer is B_INVAL, getblk() won't return it. |
441 | * Therefore, it's valid if its I/O has completed or been delayed. |
442 | */ |
443 | if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))((bp->b_flags) & ((0x00000100 | 0x00000080)))) { |
444 | SET(bp->b_flags, B_READ | async)((bp->b_flags) |= (0x00008000 | async)); |
445 | bcstats.pendingreads++; |
446 | bcstats.numreads++; |
447 | VOP_STRATEGY(bp->b_vp, bp); |
448 | /* Pay for the read. */ |
449 | curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_ru.ru_inblock++; /* XXX */ |
450 | } else if (async) { |
451 | brelse(bp); |
452 | } |
453 | |
454 | mp = vp->v_type == VBLK ? vp->v_specmountpointv_un.vu_specinfo->si_mountpoint : vp->v_mount; |
455 | |
456 | /* |
457 | * Collect statistics on synchronous and asynchronous reads. |
458 | * Reads from block devices are charged to their associated |
459 | * filesystem (if any). |
460 | */ |
461 | if (mp != NULL((void *)0)) { |
462 | if (async == 0) |
463 | mp->mnt_stat.f_syncreads++; |
464 | else |
465 | mp->mnt_stat.f_asyncreads++; |
466 | } |
467 | |
468 | return (bp); |
469 | } |
470 | |
471 | /* |
472 | * Read a disk block. |
473 | * This algorithm described in Bach (p.54). |
474 | */ |
475 | int |
476 | bread(struct vnode *vp, daddr_t blkno, int size, struct buf **bpp) |
477 | { |
478 | struct buf *bp; |
479 | |
480 | /* Get buffer for block. */ |
481 | bp = *bpp = bio_doread(vp, blkno, size, 0); |
482 | |
483 | /* Wait for the read to complete, and return result. */ |
484 | return (biowait(bp)); |
485 | } |
486 | |
487 | /* |
488 | * Read-ahead multiple disk blocks. The first is sync, the rest async. |
489 | * Trivial modification to the breada algorithm presented in Bach (p.55). |
490 | */ |
491 | int |
492 | breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t rablks[], |
493 | int rasizes[], int nrablks, struct buf **bpp) |
494 | { |
495 | struct buf *bp; |
496 | int i; |
497 | |
498 | bp = *bpp = bio_doread(vp, blkno, size, 0); |
499 | |
500 | /* |
501 | * For each of the read-ahead blocks, start a read, if necessary. |
502 | */ |
503 | for (i = 0; i < nrablks; i++) { |
504 | /* If it's in the cache, just go on to next one. */ |
505 | if (incore(vp, rablks[i])) |
506 | continue; |
507 | |
508 | /* Get a buffer for the read-ahead block */ |
509 | (void) bio_doread(vp, rablks[i], rasizes[i], B_ASYNC0x00000004); |
510 | } |
511 | |
512 | /* Otherwise, we had to start a read for it; wait until it's valid. */ |
513 | return (biowait(bp)); |
514 | } |
515 | |
516 | /* |
517 | * Called from interrupt context. |
518 | */ |
519 | void |
520 | bread_cluster_callback(struct buf *bp) |
521 | { |
522 | struct buf **xbpp = bp->b_saveaddr; |
523 | int i; |
524 | |
525 | if (xbpp[1] != NULL((void *)0)) { |
526 | size_t newsize = xbpp[1]->b_bufsize; |
527 | |
528 | /* |
529 | * Shrink this buffer's mapping to only cover its part of |
530 | * the total I/O. |
531 | */ |
532 | buf_fix_mapping(bp, newsize); |
533 | bp->b_bcount = newsize; |
534 | } |
535 | |
536 | /* Invalidate read-ahead buffers if read short */ |
537 | if (bp->b_resid > 0) { |
538 | for (i = 1; xbpp[i] != NULL((void *)0); i++) |
539 | continue; |
540 | for (i = i - 1; i != 0; i--) { |
541 | if (xbpp[i]->b_bufsize <= bp->b_resid) { |
542 | bp->b_resid -= xbpp[i]->b_bufsize; |
543 | SET(xbpp[i]->b_flags, B_INVAL)((xbpp[i]->b_flags) |= (0x00000800)); |
544 | } else if (bp->b_resid > 0) { |
545 | bp->b_resid = 0; |
546 | SET(xbpp[i]->b_flags, B_INVAL)((xbpp[i]->b_flags) |= (0x00000800)); |
547 | } else |
548 | break; |
549 | } |
550 | } |
551 | |
552 | for (i = 1; xbpp[i] != NULL((void *)0); i++) { |
553 | if (ISSET(bp->b_flags, B_ERROR)((bp->b_flags) & (0x00000400))) |
554 | SET(xbpp[i]->b_flags, B_INVAL | B_ERROR)((xbpp[i]->b_flags) |= (0x00000800 | 0x00000400)); |
555 | /* |
556 | * Move the pages from the master buffer's uvm object |
557 | * into the individual buffer's uvm objects. |
558 | */ |
559 | struct uvm_object *newobj = &xbpp[i]->b_uobj; |
560 | struct uvm_object *oldobj = &bp->b_uobj; |
561 | int page; |
562 | |
563 | uvm_obj_init(newobj, &bufcache_pager, 1); |
564 | for (page = 0; page < atop(xbpp[i]->b_bufsize)((xbpp[i]->b_bufsize) >> 12); page++) { |
565 | struct vm_page *pg = uvm_pagelookup(oldobj, |
566 | xbpp[i]->b_poffs + ptoa(page)((paddr_t)(page) << 12)); |
567 | KASSERT(pg != NULL)((pg != ((void *)0)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/kern/vfs_bio.c" , 567, "pg != NULL")); |
568 | KASSERT(pg->wire_count == 1)((pg->wire_count == 1) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/kern/vfs_bio.c", 568, "pg->wire_count == 1" )); |
569 | uvm_pagerealloc(pg, newobj, xbpp[i]->b_poffs + ptoa(page)((paddr_t)(page) << 12)); |
570 | } |
571 | xbpp[i]->b_pobj = newobj; |
572 | |
573 | biodone(xbpp[i]); |
574 | } |
575 | |
576 | free(xbpp, M_TEMP127, (i + 1) * sizeof(*xbpp)); |
577 | |
578 | if (ISSET(bp->b_flags, B_ASYNC)((bp->b_flags) & (0x00000004))) { |
579 | brelse(bp); |
580 | } else { |
581 | CLR(bp->b_flags, B_WANTED)((bp->b_flags) &= ~(0x00010000)); |
582 | wakeup(bp); |
583 | } |
584 | } |
585 | |
586 | /* |
587 | * Read-ahead multiple disk blocks, but make sure only one (big) I/O |
588 | * request is sent to the disk. |
589 | * XXX This should probably be dropped and breadn should instead be optimized |
590 | * XXX to do fewer I/O requests. |
591 | */ |
592 | int |
593 | bread_cluster(struct vnode *vp, daddr_t blkno, int size, struct buf **rbpp) |
594 | { |
595 | struct buf *bp, **xbpp; |
596 | int howmany, maxra, i, inc; |
597 | daddr_t sblkno; |
598 | |
599 | *rbpp = bio_doread(vp, blkno, size, 0); |
600 | |
601 | /* |
602 | * If the buffer is in the cache skip any I/O operation. |
603 | */ |
604 | if (ISSET((*rbpp)->b_flags, B_CACHE)(((*rbpp)->b_flags) & (0x00000020))) |
605 | goto out; |
606 | |
607 | if (size != round_page(size)(((size) + ((1 << 12) - 1)) & ~((1 << 12) - 1 ))) |
608 | goto out; |
609 | |
610 | if (VOP_BMAP(vp, blkno + 1, NULL((void *)0), &sblkno, &maxra)) |
611 | goto out; |
612 | |
613 | maxra++; |
614 | if (sblkno == -1 || maxra < 2) |
615 | goto out; |
616 | |
617 | howmany = MAXPHYS(64 * 1024) / size; |
618 | if (howmany > maxra) |
619 | howmany = maxra; |
620 | |
621 | xbpp = mallocarray(howmany + 1, sizeof(*xbpp), M_TEMP127, M_NOWAIT0x0002); |
622 | if (xbpp == NULL((void *)0)) |
623 | goto out; |
624 | |
625 | for (i = howmany - 1; i >= 0; i--) { |
626 | size_t sz; |
627 | |
628 | /* |
629 | * First buffer allocates big enough size to cover what |
630 | * all the other buffers need. |
631 | */ |
632 | sz = i == 0 ? howmany * size : 0; |
633 | |
634 | xbpp[i] = buf_get(vp, blkno + i + 1, sz); |
635 | if (xbpp[i] == NULL((void *)0)) { |
636 | for (++i; i < howmany; i++) { |
637 | SET(xbpp[i]->b_flags, B_INVAL)((xbpp[i]->b_flags) |= (0x00000800)); |
638 | brelse(xbpp[i]); |
639 | } |
640 | free(xbpp, M_TEMP127, (howmany + 1) * sizeof(*xbpp)); |
641 | goto out; |
642 | } |
643 | } |
644 | |
645 | bp = xbpp[0]; |
646 | |
647 | xbpp[howmany] = NULL((void *)0); |
648 | |
649 | inc = btodb(size)((size) >> 9); |
650 | |
651 | for (i = 1; i < howmany; i++) { |
652 | bcstats.pendingreads++; |
653 | bcstats.numreads++; |
654 | /* |
655 | * We set B_DMA here because bp above will be B_DMA, |
656 | * and we are playing buffer slice-n-dice games from |
657 | * the memory allocated in bp. |
658 | */ |
659 | SET(xbpp[i]->b_flags, B_DMA | B_READ | B_ASYNC)((xbpp[i]->b_flags) |= (0x04000000 | 0x00008000 | 0x00000004 )); |
660 | xbpp[i]->b_blkno = sblkno + (i * inc); |
661 | xbpp[i]->b_bufsize = xbpp[i]->b_bcount = size; |
662 | xbpp[i]->b_data = NULL((void *)0); |
663 | xbpp[i]->b_pobj = bp->b_pobj; |
664 | xbpp[i]->b_poffs = bp->b_poffs + (i * size); |
665 | } |
666 | |
667 | KASSERT(bp->b_lblkno == blkno + 1)((bp->b_lblkno == blkno + 1) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/kern/vfs_bio.c", 667, "bp->b_lblkno == blkno + 1" )); |
668 | KASSERT(bp->b_vp == vp)((bp->b_vp == vp) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/kern/vfs_bio.c" , 668, "bp->b_vp == vp")); |
669 | |
670 | bp->b_blkno = sblkno; |
671 | SET(bp->b_flags, B_READ | B_ASYNC | B_CALL)((bp->b_flags) |= (0x00008000 | 0x00000004 | 0x00000040)); |
672 | |
673 | bp->b_saveaddr = (void *)xbpp; |
674 | bp->b_iodone = bread_cluster_callback; |
675 | |
676 | bcstats.pendingreads++; |
677 | bcstats.numreads++; |
678 | VOP_STRATEGY(bp->b_vp, bp); |
679 | curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_ru.ru_inblock++; |
680 | |
681 | out: |
682 | return (biowait(*rbpp)); |
683 | } |
684 | |
685 | /* |
686 | * Block write. Described in Bach (p.56) |
687 | */ |
688 | int |
689 | bwrite(struct buf *bp) |
690 | { |
691 | int rv, async, wasdelayed, s; |
692 | struct vnode *vp; |
693 | struct mount *mp; |
694 | |
695 | vp = bp->b_vp; |
696 | if (vp != NULL((void *)0)) |
697 | mp = vp->v_type == VBLK? vp->v_specmountpointv_un.vu_specinfo->si_mountpoint : vp->v_mount; |
698 | else |
699 | mp = NULL((void *)0); |
700 | |
701 | /* |
702 | * Remember buffer type, to switch on it later. If the write was |
703 | * synchronous, but the file system was mounted with MNT_ASYNC, |
704 | * convert it to a delayed write. |
705 | * XXX note that this relies on delayed tape writes being converted |
706 | * to async, not sync writes (which is safe, but ugly). |
707 | */ |
708 | async = ISSET(bp->b_flags, B_ASYNC)((bp->b_flags) & (0x00000004)); |
709 | if (!async && mp && ISSET(mp->mnt_flag, MNT_ASYNC)((mp->mnt_flag) & (0x00000040))) { |
710 | /* |
711 | * Don't convert writes from VND on async filesystems |
712 | * that already have delayed writes in the upper layer. |
713 | */ |
714 | if (!ISSET(bp->b_flags, B_NOCACHE)((bp->b_flags) & (0x00001000))) { |
715 | bdwrite(bp); |
716 | return (0); |
717 | } |
718 | } |
719 | |
720 | /* |
721 | * Collect statistics on synchronous and asynchronous writes. |
722 | * Writes to block devices are charged to their associated |
723 | * filesystem (if any). |
724 | */ |
725 | if (mp != NULL((void *)0)) { |
726 | if (async) |
727 | mp->mnt_stat.f_asyncwrites++; |
728 | else |
729 | mp->mnt_stat.f_syncwrites++; |
730 | } |
731 | bcstats.pendingwrites++; |
732 | bcstats.numwrites++; |
733 | |
734 | wasdelayed = ISSET(bp->b_flags, B_DELWRI)((bp->b_flags) & (0x00000080)); |
735 | CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI))((bp->b_flags) &= ~((0x00008000 | 0x00000100 | 0x00000400 | 0x00000080))); |
736 | |
737 | s = splbio()splraise(0x6); |
738 | |
739 | /* |
740 | * If not synchronous, pay for the I/O operation and make |
741 | * sure the buf is on the correct vnode queue. We have |
742 | * to do this now, because if we don't, the vnode may not |
743 | * be properly notified that its I/O has completed. |
744 | */ |
745 | if (wasdelayed) { |
746 | reassignbuf(bp); |
747 | } else |
748 | curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_ru.ru_oublock++; |
749 | |
750 | |
751 | /* Initiate disk write. Make sure the appropriate party is charged. */ |
752 | bp->b_vp->v_numoutput++; |
753 | splx(s)spllower(s); |
754 | buf_flip_dma(bp); |
755 | SET(bp->b_flags, B_WRITEINPROG)((bp->b_flags) |= (0x00020000)); |
756 | VOP_STRATEGY(bp->b_vp, bp); |
757 | |
758 | /* |
759 | * If the queue is above the high water mark, wait till |
760 | * the number of outstanding write bufs drops below the low |
761 | * water mark. |
762 | */ |
763 | if (bp->b_bq) |
764 | bufq_wait(bp->b_bq); |
765 | |
766 | if (async) |
767 | return (0); |
768 | |
769 | /* |
770 | * If I/O was synchronous, wait for it to complete. |
771 | */ |
772 | rv = biowait(bp); |
773 | |
774 | /* Release the buffer. */ |
775 | brelse(bp); |
776 | |
777 | return (rv); |
778 | } |
779 | |
780 | |
781 | /* |
782 | * Delayed write. |
783 | * |
784 | * The buffer is marked dirty, but is not queued for I/O. |
785 | * This routine should be used when the buffer is expected |
786 | * to be modified again soon, typically a small write that |
787 | * partially fills a buffer. |
788 | * |
789 | * NB: magnetic tapes cannot be delayed; they must be |
790 | * written in the order that the writes are requested. |
791 | * |
792 | * Described in Leffler, et al. (pp. 208-213). |
793 | */ |
794 | void |
795 | bdwrite(struct buf *bp) |
796 | { |
797 | int s; |
798 | |
799 | /* |
800 | * If the block hasn't been seen before: |
801 | * (1) Mark it as having been seen, |
802 | * (2) Charge for the write. |
803 | * (3) Make sure it's on its vnode's correct block list, |
804 | * (4) If a buffer is rewritten, move it to end of dirty list |
805 | */ |
806 | if (!ISSET(bp->b_flags, B_DELWRI)((bp->b_flags) & (0x00000080))) { |
807 | SET(bp->b_flags, B_DELWRI)((bp->b_flags) |= (0x00000080)); |
808 | s = splbio()splraise(0x6); |
809 | buf_flip_dma(bp); |
810 | reassignbuf(bp); |
811 | splx(s)spllower(s); |
812 | curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_ru.ru_oublock++; /* XXX */ |
813 | } |
814 | |
815 | /* The "write" is done, so mark and release the buffer. */ |
816 | CLR(bp->b_flags, B_NEEDCOMMIT)((bp->b_flags) &= ~(0x00000002)); |
817 | CLR(bp->b_flags, B_NOCACHE)((bp->b_flags) &= ~(0x00001000)); /* Must cache delayed writes */ |
818 | SET(bp->b_flags, B_DONE)((bp->b_flags) |= (0x00000100)); |
819 | brelse(bp); |
820 | } |
821 | |
822 | /* |
823 | * Asynchronous block write; just an asynchronous bwrite(). |
824 | */ |
825 | void |
826 | bawrite(struct buf *bp) |
827 | { |
828 | |
829 | SET(bp->b_flags, B_ASYNC)((bp->b_flags) |= (0x00000004)); |
830 | VOP_BWRITE(bp); |
831 | } |
832 | |
833 | /* |
834 | * Must be called at splbio() |
835 | */ |
836 | void |
837 | buf_dirty(struct buf *bp) |
838 | { |
839 | splassert(IPL_BIO)do { if (splassert_ctl > 0) { splassert_check(0x6, __func__ ); } } while (0); |
840 | |
841 | #ifdef DIAGNOSTIC1 |
842 | if (!ISSET(bp->b_flags, B_BUSY)((bp->b_flags) & (0x00000010))) |
843 | panic("Trying to dirty buffer on freelist!"); |
844 | #endif |
845 | |
846 | if (ISSET(bp->b_flags, B_DELWRI)((bp->b_flags) & (0x00000080)) == 0) { |
847 | SET(bp->b_flags, B_DELWRI)((bp->b_flags) |= (0x00000080)); |
848 | buf_flip_dma(bp); |
849 | reassignbuf(bp); |
850 | } |
851 | } |
852 | |
853 | /* |
854 | * Must be called at splbio() |
855 | */ |
856 | void |
857 | buf_undirty(struct buf *bp) |
858 | { |
859 | splassert(IPL_BIO)do { if (splassert_ctl > 0) { splassert_check(0x6, __func__ ); } } while (0); |
860 | |
861 | #ifdef DIAGNOSTIC1 |
862 | if (!ISSET(bp->b_flags, B_BUSY)((bp->b_flags) & (0x00000010))) |
863 | panic("Trying to undirty buffer on freelist!"); |
864 | #endif |
865 | if (ISSET(bp->b_flags, B_DELWRI)((bp->b_flags) & (0x00000080))) { |
866 | CLR(bp->b_flags, B_DELWRI)((bp->b_flags) &= ~(0x00000080)); |
867 | reassignbuf(bp); |
868 | } |
869 | } |
870 | |
871 | /* |
872 | * Release a buffer on to the free lists. |
873 | * Described in Bach (p. 46). |
874 | */ |
875 | void |
876 | brelse(struct buf *bp) |
877 | { |
878 | int s; |
879 | |
880 | s = splbio()splraise(0x6); |
881 | |
882 | if (bp->b_data != NULL((void *)0)) |
883 | KASSERT(bp->b_bufsize > 0)((bp->b_bufsize > 0) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/kern/vfs_bio.c", 883, "bp->b_bufsize > 0" )); |
884 | |
885 | /* |
886 | * softdep is basically incompatible with not caching buffers |
887 | * that have dependencies, so this buffer must be cached |
888 | */ |
889 | if (LIST_FIRST(&bp->b_dep)((&bp->b_dep)->lh_first) != NULL((void *)0)) |
890 | CLR(bp->b_flags, B_NOCACHE)((bp->b_flags) &= ~(0x00001000)); |
891 | |
892 | /* |
893 | * Determine which queue the buffer should be on, then put it there. |
894 | */ |
895 | |
896 | /* If it's not cacheable, or an error, mark it invalid. */ |
897 | if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR))((bp->b_flags) & ((0x00001000|0x00000400)))) |
898 | SET(bp->b_flags, B_INVAL)((bp->b_flags) |= (0x00000800)); |
899 | /* If it's a write error, also mark the vnode as damaged. */ |
900 | if (ISSET(bp->b_flags, B_ERROR)((bp->b_flags) & (0x00000400)) && !ISSET(bp->b_flags, B_READ)((bp->b_flags) & (0x00008000))) { |
901 | if (bp->b_vp && bp->b_vp->v_type == VREG) |
902 | SET(bp->b_vp->v_bioflag, VBIOERROR)((bp->b_vp->v_bioflag) |= (0x0008)); |
903 | } |
904 | |
905 | if (ISSET(bp->b_flags, B_INVAL)((bp->b_flags) & (0x00000800))) { |
906 | /* |
907 | * If the buffer is invalid, free it now rather than leaving |
908 | * it in a queue and wasting memory. |
909 | */ |
910 | if (LIST_FIRST(&bp->b_dep)((&bp->b_dep)->lh_first) != NULL((void *)0)) |
911 | buf_deallocate(bp); |
912 | |
913 | if (ISSET(bp->b_flags, B_DELWRI)((bp->b_flags) & (0x00000080))) { |
914 | CLR(bp->b_flags, B_DELWRI)((bp->b_flags) &= ~(0x00000080)); |
915 | } |
916 | |
917 | if (bp->b_vp) { |
918 | RBT_REMOVE(buf_rb_bufs, &bp->b_vp->v_bufs_tree, bp)buf_rb_bufs_RBT_REMOVE(&bp->b_vp->v_bufs_tree, bp); |
919 | brelvp(bp); |
920 | } |
921 | bp->b_vp = NULL((void *)0); |
922 | |
923 | /* |
924 | * Wake up any processes waiting for _this_ buffer to |
925 | * become free. They are not allowed to grab it |
926 | * since it will be freed. But the only sleeper is |
927 | * getblk and it will restart the operation after |
928 | * sleep. |
929 | */ |
930 | if (ISSET(bp->b_flags, B_WANTED)((bp->b_flags) & (0x00010000))) { |
931 | CLR(bp->b_flags, B_WANTED)((bp->b_flags) &= ~(0x00010000)); |
932 | wakeup(bp); |
933 | } |
934 | buf_put(bp); |
935 | } else { |
936 | /* |
937 | * It has valid data. Put it on the end of the appropriate |
938 | * queue, so that it'll stick around for as long as possible. |
939 | */ |
940 | bufcache_release(bp); |
941 | |
942 | /* Unlock the buffer. */ |
943 | CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE | B_DEFERRED))((bp->b_flags) &= ~((0x00000001 | 0x00000004 | 0x00001000 | 0x00080000))); |
944 | buf_release(bp); |
945 | |
946 | /* Wake up any processes waiting for _this_ buffer to |
947 | * become free. */ |
948 | if (ISSET(bp->b_flags, B_WANTED)((bp->b_flags) & (0x00010000))) { |
949 | CLR(bp->b_flags, B_WANTED)((bp->b_flags) &= ~(0x00010000)); |
950 | wakeup(bp); |
951 | } |
952 | |
953 | if (bcstats.dmapages > targetpages) |
954 | (void) bufcache_recover_dmapages(0, |
955 | bcstats.dmapages - targetpages); |
956 | bufcache_adjust(); |
957 | } |
958 | |
959 | /* Wake up syncer and cleaner processes waiting for buffers. */ |
960 | if (nobuffers) { |
961 | nobuffers = 0; |
962 | wakeup(&nobuffers); |
963 | } |
964 | |
965 | /* Wake up any processes waiting for any buffer to become free. */ |
966 | if (needbuffer && bcstats.dmapages < targetpages && |
967 | bcstats.kvaslots_avail > RESERVE_SLOTS4) { |
968 | needbuffer = 0; |
969 | wakeup(&needbuffer); |
970 | } |
971 | |
972 | splx(s)spllower(s); |
973 | } |
974 | |
975 | /* |
976 | * Determine if a block is in the cache. Just look on what would be its hash |
977 | * chain. If it's there, return a pointer to it, unless it's marked invalid. |
978 | */ |
979 | struct buf * |
980 | incore(struct vnode *vp, daddr_t blkno) |
981 | { |
982 | struct buf *bp; |
983 | struct buf b; |
984 | int s; |
985 | |
986 | s = splbio()splraise(0x6); |
987 | |
988 | /* Search buf lookup tree */ |
989 | b.b_lblkno = blkno; |
990 | bp = RBT_FIND(buf_rb_bufs, &vp->v_bufs_tree, &b)buf_rb_bufs_RBT_FIND(&vp->v_bufs_tree, &b); |
991 | if (bp != NULL((void *)0) && ISSET(bp->b_flags, B_INVAL)((bp->b_flags) & (0x00000800))) |
992 | bp = NULL((void *)0); |
993 | |
994 | splx(s)spllower(s); |
995 | return (bp); |
996 | } |
997 | |
998 | /* |
999 | * Get a block of requested size that is associated with |
1000 | * a given vnode and block offset. If it is found in the |
1001 | * block cache, mark it as having been found, make it busy |
1002 | * and return it. Otherwise, return an empty block of the |
1003 | * correct size. It is up to the caller to ensure that the |
1004 | * cached blocks are of the correct size. |
1005 | */ |
1006 | struct buf * |
1007 | getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, |
1008 | uint64_t slptimeo) |
1009 | { |
1010 | struct buf *bp; |
1011 | struct buf b; |
1012 | int s, error; |
1013 | |
1014 | /* |
1015 | * XXX |
1016 | * The following is an inlined version of 'incore()', but with |
1017 | * the 'invalid' test moved to after the 'busy' test. It's |
1018 | * necessary because there are some cases in which the NFS |
1019 | * code sets B_INVAL prior to writing data to the server, but |
1020 | * in which the buffers actually contain valid data. In this |
1021 | * case, we can't allow the system to allocate a new buffer for |
1022 | * the block until the write is finished. |
1023 | */ |
1024 | start: |
1025 | s = splbio()splraise(0x6); |
1026 | b.b_lblkno = blkno; |
1027 | bp = RBT_FIND(buf_rb_bufs, &vp->v_bufs_tree, &b)buf_rb_bufs_RBT_FIND(&vp->v_bufs_tree, &b); |
1028 | if (bp != NULL((void *)0)) { |
1029 | if (ISSET(bp->b_flags, B_BUSY)((bp->b_flags) & (0x00000010))) { |
1030 | SET(bp->b_flags, B_WANTED)((bp->b_flags) |= (0x00010000)); |
1031 | error = tsleep_nsec(bp, slpflag | (PRIBIO16 + 1), |
1032 | "getblk", slptimeo); |
1033 | splx(s)spllower(s); |
1034 | if (error) |
1035 | return (NULL((void *)0)); |
1036 | goto start; |
1037 | } |
1038 | |
1039 | if (!ISSET(bp->b_flags, B_INVAL)((bp->b_flags) & (0x00000800))) { |
1040 | bcstats.cachehits++; |
1041 | SET(bp->b_flags, B_CACHE)((bp->b_flags) |= (0x00000020)); |
1042 | bufcache_take(bp); |
1043 | buf_acquire(bp); |
1044 | splx(s)spllower(s); |
1045 | return (bp); |
1046 | } |
1047 | } |
1048 | splx(s)spllower(s); |
1049 | |
1050 | if ((bp = buf_get(vp, blkno, size)) == NULL((void *)0)) |
1051 | goto start; |
1052 | |
1053 | return (bp); |
1054 | } |
1055 | |
1056 | /* |
1057 | * Get an empty, disassociated buffer of given size. |
1058 | */ |
1059 | struct buf * |
1060 | geteblk(size_t size) |
1061 | { |
1062 | struct buf *bp; |
1063 | |
1064 | while ((bp = buf_get(NULL((void *)0), 0, size)) == NULL((void *)0)) |
1065 | continue; |
1066 | |
1067 | return (bp); |
1068 | } |
1069 | |
1070 | /* |
1071 | * Allocate a buffer. |
1072 | * If vp is given, put it into the buffer cache for that vnode. |
1073 | * If size != 0, allocate memory and call buf_map(). |
1074 | * If there is already a buffer for the given vnode/blkno, return NULL. |
1075 | */ |
1076 | struct buf * |
1077 | buf_get(struct vnode *vp, daddr_t blkno, size_t size) |
1078 | { |
1079 | struct buf *bp; |
1080 | int poolwait = size == 0 ? PR_NOWAIT0x0002 : PR_WAITOK0x0001; |
1081 | int npages; |
1082 | int s; |
1083 | |
1084 | s = splbio()splraise(0x6); |
1085 | if (size) { |
1086 | /* |
1087 | * Wake up the cleaner if we have lots of dirty pages, |
1088 | * or if we are getting low on buffer cache kva. |
1089 | */ |
1090 | if (UNCLEAN_PAGES(bcstats.numbufpages - bcstats.numcleanpages) >= hidirtypages || |
1091 | bcstats.kvaslots_avail <= 2 * RESERVE_SLOTS4) |
1092 | wakeup(&bd_req); |
1093 | |
1094 | npages = atop(round_page(size))(((((size) + ((1 << 12) - 1)) & ~((1 << 12) - 1))) >> 12); |
1095 | |
1096 | /* |
1097 | * if our cache has been previously shrunk, |
1098 | * allow it to grow again with use up to |
1099 | * bufhighpages (cachepercent) |
1100 | */ |
1101 | if (bufpages < bufhighpages) |
1102 | bufadjust(bufhighpages); |
1103 | |
1104 | /* |
1105 | * If we would go over the page target with our |
1106 | * new allocation, free enough buffers first |
1107 | * to stay at the target with our new allocation. |
1108 | */ |
1109 | if (bcstats.dmapages + npages > targetpages) { |
1110 | (void) bufcache_recover_dmapages(0, npages); |
1111 | bufcache_adjust(); |
1112 | } |
1113 | |
1114 | /* |
1115 | * If we get here, we tried to free the world down |
1116 | * above, and couldn't get down - Wake the cleaner |
1117 | * and wait for it to push some buffers out. |
1118 | */ |
1119 | if ((bcstats.dmapages + npages > targetpages || |
1120 | bcstats.kvaslots_avail <= RESERVE_SLOTS4) && |
1121 | curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc != syncerproc && curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc != cleanerproc) { |
1122 | wakeup(&bd_req); |
1123 | needbuffer++; |
1124 | tsleep_nsec(&needbuffer, PRIBIO16, "needbuffer", INFSLP0xffffffffffffffffULL); |
1125 | splx(s)spllower(s); |
1126 | return (NULL((void *)0)); |
1127 | } |
1128 | if (bcstats.dmapages + npages > bufpages) { |
1129 | /* cleaner or syncer */ |
1130 | nobuffers = 1; |
1131 | tsleep_nsec(&nobuffers, PRIBIO16, "nobuffers", INFSLP0xffffffffffffffffULL); |
1132 | splx(s)spllower(s); |
1133 | return (NULL((void *)0)); |
1134 | } |
1135 | } |
1136 | |
1137 | bp = pool_get(&bufpool, poolwait|PR_ZERO0x0008); |
1138 | |
1139 | if (bp == NULL((void *)0)) { |
1140 | splx(s)spllower(s); |
1141 | return (NULL((void *)0)); |
1142 | } |
1143 | |
1144 | bp->b_freelist.tqe_next = NOLIST((struct buf *)0x87654321); |
1145 | bp->b_dev = NODEV(dev_t)(-1); |
1146 | LIST_INIT(&bp->b_dep)do { ((&bp->b_dep)->lh_first) = ((void *)0); } while (0); |
1147 | bp->b_bcount = size; |
1148 | |
1149 | buf_acquire_nomap(bp); |
1150 | |
1151 | if (vp != NULL((void *)0)) { |
1152 | /* |
1153 | * We insert the buffer into the hash with B_BUSY set |
1154 | * while we allocate pages for it. This way any getblk |
1155 | * that happens while we allocate pages will wait for |
1156 | * this buffer instead of starting its own buf_get. |
1157 | * |
1158 | * But first, we check if someone beat us to it. |
1159 | */ |
1160 | if (incore(vp, blkno)) { |
1161 | pool_put(&bufpool, bp); |
1162 | splx(s)spllower(s); |
1163 | return (NULL((void *)0)); |
1164 | } |
1165 | |
1166 | bp->b_blkno = bp->b_lblkno = blkno; |
1167 | bgetvp(vp, bp); |
1168 | if (RBT_INSERT(buf_rb_bufs, &vp->v_bufs_tree, bp)buf_rb_bufs_RBT_INSERT(&vp->v_bufs_tree, bp)) |
1169 | panic("buf_get: dup lblk vp %p bp %p", vp, bp); |
1170 | } else { |
1171 | bp->b_vnbufs.le_next = NOLIST((struct buf *)0x87654321); |
1172 | SET(bp->b_flags, B_INVAL)((bp->b_flags) |= (0x00000800)); |
1173 | bp->b_vp = NULL((void *)0); |
1174 | } |
1175 | |
1176 | LIST_INSERT_HEAD(&bufhead, bp, b_list)do { if (((bp)->b_list.le_next = (&bufhead)->lh_first ) != ((void *)0)) (&bufhead)->lh_first->b_list.le_prev = &(bp)->b_list.le_next; (&bufhead)->lh_first = (bp); (bp)->b_list.le_prev = &(&bufhead)->lh_first ; } while (0); |
1177 | bcstats.numbufs++; |
1178 | |
1179 | if (size) { |
1180 | buf_alloc_pages(bp, round_page(size)(((size) + ((1 << 12) - 1)) & ~((1 << 12) - 1 ))); |
1181 | KASSERT(ISSET(bp->b_flags, B_DMA))((((bp->b_flags) & (0x04000000))) ? (void)0 : __assert ("diagnostic ", "/usr/src/sys/kern/vfs_bio.c", 1181, "ISSET(bp->b_flags, B_DMA)" )); |
1182 | buf_map(bp); |
1183 | } |
1184 | |
1185 | SET(bp->b_flags, B_BC)((bp->b_flags) |= (0x02000000)); |
1186 | splx(s)spllower(s); |
1187 | |
1188 | return (bp); |
1189 | } |
1190 | |
1191 | /* |
1192 | * Buffer cleaning daemon. |
1193 | */ |
1194 | void |
1195 | buf_daemon(void *arg) |
1196 | { |
1197 | struct buf *bp = NULL((void *)0); |
1198 | int s, pushed = 0; |
1199 | |
1200 | s = splbio()splraise(0x6); |
1201 | for (;;) { |
1202 | if (bp == NULL((void *)0) || (pushed >= 16 && |
1203 | UNCLEAN_PAGES(bcstats.numbufpages - bcstats.numcleanpages) < hidirtypages && |
1204 | bcstats.kvaslots_avail > 2 * RESERVE_SLOTS4)){ |
1205 | pushed = 0; |
1206 | /* |
1207 | * Wake up anyone who was waiting for buffers |
1208 | * to be released. |
1209 | */ |
1210 | if (needbuffer) { |
1211 | needbuffer = 0; |
1212 | wakeup(&needbuffer); |
1213 | } |
1214 | tsleep_nsec(&bd_req, PRIBIO16 - 7, "cleaner", INFSLP0xffffffffffffffffULL); |
1215 | } |
1216 | |
1217 | while ((bp = bufcache_getdirtybuf())) { |
1218 | TRACEPOINT(vfs, cleaner, bp->b_flags, pushed,do { extern struct dt_probe (dt_static_vfs_cleaner); struct dt_probe *dtp = &(dt_static_vfs_cleaner); struct dt_provider *dtpv = dtp->dtp_prov; if (__builtin_expect(((dt_tracing) != 0) , 0) && __builtin_expect(((dtp->dtp_recording) != 0 ), 0)) { dtpv->dtpv_enter(dtpv, dtp, bp->b_flags, pushed , lodirtypages, hidirtypages); } } while (0) |
1219 | lodirtypages, hidirtypages)do { extern struct dt_probe (dt_static_vfs_cleaner); struct dt_probe *dtp = &(dt_static_vfs_cleaner); struct dt_provider *dtpv = dtp->dtp_prov; if (__builtin_expect(((dt_tracing) != 0) , 0) && __builtin_expect(((dtp->dtp_recording) != 0 ), 0)) { dtpv->dtpv_enter(dtpv, dtp, bp->b_flags, pushed , lodirtypages, hidirtypages); } } while (0); |
1220 | |
1221 | if (UNCLEAN_PAGES(bcstats.numbufpages - bcstats.numcleanpages) < lodirtypages && |
1222 | bcstats.kvaslots_avail > 2 * RESERVE_SLOTS4 && |
1223 | pushed >= 16) |
1224 | break; |
1225 | |
1226 | bufcache_take(bp); |
1227 | buf_acquire(bp); |
1228 | splx(s)spllower(s); |
1229 | |
1230 | if (ISSET(bp->b_flags, B_INVAL)((bp->b_flags) & (0x00000800))) { |
1231 | brelse(bp); |
1232 | s = splbio()splraise(0x6); |
1233 | continue; |
1234 | } |
1235 | #ifdef DIAGNOSTIC1 |
1236 | if (!ISSET(bp->b_flags, B_DELWRI)((bp->b_flags) & (0x00000080))) |
1237 | panic("Clean buffer on dirty queue"); |
1238 | #endif |
1239 | if (LIST_FIRST(&bp->b_dep)((&bp->b_dep)->lh_first) != NULL((void *)0) && |
1240 | !ISSET(bp->b_flags, B_DEFERRED)((bp->b_flags) & (0x00080000)) && |
1241 | buf_countdeps(bp, 0, 0)) { |
1242 | SET(bp->b_flags, B_DEFERRED)((bp->b_flags) |= (0x00080000)); |
1243 | s = splbio()splraise(0x6); |
1244 | bufcache_release(bp); |
1245 | buf_release(bp); |
1246 | continue; |
1247 | } |
1248 | |
1249 | bawrite(bp); |
1250 | pushed++; |
1251 | |
1252 | sched_pause(yield)do { if (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self ))); __ci;})->ci_schedstate.spc_schedflags & 0x0002) yield (); } while (0); |
1253 | |
1254 | s = splbio()splraise(0x6); |
1255 | } |
1256 | } |
1257 | } |
1258 | |
1259 | /* |
1260 | * Wait for operations on the buffer to complete. |
1261 | * When they do, extract and return the I/O's error value. |
1262 | */ |
1263 | int |
1264 | biowait(struct buf *bp) |
1265 | { |
1266 | int s; |
1267 | |
1268 | KASSERT(!(bp->b_flags & B_ASYNC))((!(bp->b_flags & 0x00000004)) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/kern/vfs_bio.c", 1268, "!(bp->b_flags & B_ASYNC)" )); |
1269 | |
1270 | s = splbio()splraise(0x6); |
1271 | while (!ISSET(bp->b_flags, B_DONE)((bp->b_flags) & (0x00000100))) |
1272 | tsleep_nsec(bp, PRIBIO16 + 1, "biowait", INFSLP0xffffffffffffffffULL); |
1273 | splx(s)spllower(s); |
1274 | |
1275 | /* check for interruption of I/O (e.g. via NFS), then errors. */ |
1276 | if (ISSET(bp->b_flags, B_EINTR)((bp->b_flags) & (0x00000200))) { |
1277 | CLR(bp->b_flags, B_EINTR)((bp->b_flags) &= ~(0x00000200)); |
1278 | return (EINTR4); |
1279 | } |
1280 | |
1281 | if (ISSET(bp->b_flags, B_ERROR)((bp->b_flags) & (0x00000400))) |
1282 | return (bp->b_error ? bp->b_error : EIO5); |
1283 | else |
1284 | return (0); |
1285 | } |
1286 | |
1287 | /* |
1288 | * Mark I/O complete on a buffer. |
1289 | * |
1290 | * If a callback has been requested, e.g. the pageout |
1291 | * daemon, do so. Otherwise, awaken waiting processes. |
1292 | * |
1293 | * [ Leffler, et al., says on p.247: |
1294 | * "This routine wakes up the blocked process, frees the buffer |
1295 | * for an asynchronous write, or, for a request by the pagedaemon |
1296 | * process, invokes a procedure specified in the buffer structure" ] |
1297 | * |
1298 | * In real life, the pagedaemon (or other system processes) wants |
1299 | * to do async stuff to, and doesn't want the buffer brelse()'d. |
1300 | * (for swap pager, that puts swap buffers on the free lists (!!!), |
1301 | * for the vn device, that puts malloc'd buffers on the free lists!) |
1302 | * |
1303 | * Must be called at splbio(). |
1304 | */ |
1305 | void |
1306 | biodone(struct buf *bp) |
1307 | { |
1308 | splassert(IPL_BIO)do { if (splassert_ctl > 0) { splassert_check(0x6, __func__ ); } } while (0); |
1309 | |
1310 | if (ISSET(bp->b_flags, B_DONE)((bp->b_flags) & (0x00000100))) |
1311 | panic("biodone already"); |
1312 | SET(bp->b_flags, B_DONE)((bp->b_flags) |= (0x00000100)); /* note that it's done */ |
1313 | |
1314 | if (bp->b_bq) |
1315 | bufq_done(bp->b_bq, bp); |
1316 | |
1317 | if (LIST_FIRST(&bp->b_dep)((&bp->b_dep)->lh_first) != NULL((void *)0)) |
1318 | buf_complete(bp); |
1319 | |
1320 | if (!ISSET(bp->b_flags, B_READ)((bp->b_flags) & (0x00008000))) { |
1321 | CLR(bp->b_flags, B_WRITEINPROG)((bp->b_flags) &= ~(0x00020000)); |
1322 | vwakeup(bp->b_vp); |
1323 | } |
1324 | if (bcstats.numbufs && |
1325 | (!(ISSET(bp->b_flags, B_RAW)((bp->b_flags) & (0x00004000)) || ISSET(bp->b_flags, B_PHYS)((bp->b_flags) & (0x00002000))))) { |
1326 | if (!ISSET(bp->b_flags, B_READ)((bp->b_flags) & (0x00008000))) { |
1327 | bcstats.pendingwrites--; |
1328 | } else |
1329 | bcstats.pendingreads--; |
1330 | } |
1331 | if (ISSET(bp->b_flags, B_CALL)((bp->b_flags) & (0x00000040))) { /* if necessary, call out */ |
1332 | CLR(bp->b_flags, B_CALL)((bp->b_flags) &= ~(0x00000040)); /* but note callout done */ |
1333 | (*bp->b_iodone)(bp); |
1334 | } else { |
1335 | if (ISSET(bp->b_flags, B_ASYNC)((bp->b_flags) & (0x00000004))) {/* if async, release it */ |
1336 | brelse(bp); |
1337 | } else { /* or just wakeup the buffer */ |
1338 | CLR(bp->b_flags, B_WANTED)((bp->b_flags) &= ~(0x00010000)); |
1339 | wakeup(bp); |
1340 | } |
1341 | } |
1342 | } |
1343 | |
1344 | #ifdef DDB1 |
1345 | void bcstats_print(int (*)(const char *, ...) |
1346 | __attribute__((__format__(__kprintf__,1,2)))); |
1347 | /* |
1348 | * bcstats_print: ddb hook to print interesting buffer cache counters |
1349 | */ |
1350 | void |
1351 | bcstats_print( |
1352 | int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2)))) |
1353 | { |
1354 | (*pr)("Current Buffer Cache status:\n"); |
1355 | (*pr)("numbufs %lld busymapped %lld, delwri %lld\n", |
1356 | bcstats.numbufs, bcstats.busymapped, bcstats.delwribufs); |
1357 | (*pr)("kvaslots %lld avail kva slots %lld\n", |
1358 | bcstats.kvaslots, bcstats.kvaslots_avail); |
1359 | (*pr)("bufpages %lld, dmapages %lld, dirtypages %lld\n", |
1360 | bcstats.numbufpages, bcstats.dmapages, bcstats.numdirtypages); |
1361 | (*pr)("pendingreads %lld, pendingwrites %lld\n", |
1362 | bcstats.pendingreads, bcstats.pendingwrites); |
1363 | (*pr)("highflips %lld, highflops %lld, dmaflips %lld\n", |
1364 | bcstats.highflips, bcstats.highflops, bcstats.dmaflips); |
1365 | } |
1366 | #endif |
1367 | |
1368 | void |
1369 | buf_adjcnt(struct buf *bp, long ncount) |
1370 | { |
1371 | KASSERT(ncount <= bp->b_bufsize)((ncount <= bp->b_bufsize) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/kern/vfs_bio.c", 1371, "ncount <= bp->b_bufsize" )); |
1372 | bp->b_bcount = ncount; |
1373 | } |
1374 | |
1375 | /* bufcache freelist code below */ |
1376 | /* |
1377 | * Copyright (c) 2014 Ted Unangst <tedu@openbsd.org> |
1378 | * |
1379 | * Permission to use, copy, modify, and distribute this software for any |
1380 | * purpose with or without fee is hereby granted, provided that the above |
1381 | * copyright notice and this permission notice appear in all copies. |
1382 | * |
1383 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
1384 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
1385 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
1386 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
1387 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
1388 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
1389 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
1390 | */ |
1391 | |
1392 | /* |
1393 | * The code below implements a variant of the 2Q buffer cache algorithm by |
1394 | * Johnson and Shasha. |
1395 | * |
1396 | * General Outline |
1397 | * We divide the buffer cache into three working sets: current, previous, |
1398 | * and long term. Each list is itself LRU and buffers get promoted and moved |
1399 | * around between them. A buffer starts its life in the current working set. |
1400 | * As time passes and newer buffers push it out, it will turn into the previous |
1401 | * working set and is subject to recycling. But if it's accessed again from |
1402 | * the previous working set, that's an indication that it's actually in the |
1403 | * long term working set, so we promote it there. The separation of current |
1404 | * and previous working sets prevents us from promoting a buffer that's only |
1405 | * temporarily hot to the long term cache. |
1406 | * |
1407 | * The objective is to provide scan resistance by making the long term |
1408 | * working set ineligible for immediate recycling, even as the current |
1409 | * working set is rapidly turned over. |
1410 | * |
1411 | * Implementation |
1412 | * The code below identifies the current, previous, and long term sets as |
1413 | * hotqueue, coldqueue, and warmqueue. The hot and warm queues are capped at |
1414 | * 1/3 of the total clean pages, after which point they start pushing their |
1415 | * oldest buffers into coldqueue. |
1416 | * A buf always starts out with neither WARM nor COLD flags set (implying HOT). |
1417 | * When released, it will be returned to the tail of the hotqueue list. |
1418 | * When the hotqueue gets too large, the oldest hot buf will be moved to the |
1419 | * coldqueue, with the B_COLD flag set. When a cold buf is released, we set |
1420 | * the B_WARM flag and put it onto the warmqueue. Warm bufs are also |
1421 | * directly returned to the end of the warmqueue. As with the hotqueue, when |
1422 | * the warmqueue grows too large, B_WARM bufs are moved onto the coldqueue. |
1423 | * |
1424 | * Note that this design does still support large working sets, greater |
1425 | * than the cap of hotqueue or warmqueue would imply. The coldqueue is still |
1426 | * cached and has no maximum length. The hot and warm queues form a Y feeding |
1427 | * into the coldqueue. Moving bufs between queues is constant time, so this |
1428 | * design decays to one long warm->cold queue. |
1429 | * |
1430 | * In the 2Q paper, hotqueue and coldqueue are A1in and A1out. The warmqueue |
1431 | * is Am. We always cache pages, as opposed to pointers to pages for A1. |
1432 | * |
1433 | * This implementation adds support for multiple 2q caches. |
1434 | * |
1435 | * If we have more than one 2q cache, as bufs fall off the cold queue |
1436 | * for recycling, bufs that have been warm before (which retain the |
1437 | * B_WARM flag in addition to B_COLD) can be put into the hot queue of |
1438 | * a second level 2Q cache. Buffers that are only B_COLD are |
1439 | * recycled. Bufs falling off the last cache's cold queue are always |
1440 | * recycled. |
1441 | * |
1442 | */ |
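/*
 * A minimal, self-contained sketch of the single-cache hot/cold/warm
 * movement described above.  It is illustrative only: the struct tqbuf,
 * the tq_*() names, and the fixed limits are made up for this example,
 * and the real code below additionally tracks pages, B_* flags and
 * multiple caches.  Kept under "#if 0" so it is never compiled.
 */
#if 0
#include <sys/queue.h>
#include <stddef.h>

enum tqstate { TQ_HOT, TQ_COLD, TQ_WARM };

struct tqbuf {
	TAILQ_ENTRY(tqbuf)	tq_link;
	enum tqstate		tq_state;
};

TAILQ_HEAD(tqlist, tqbuf);
static struct tqlist hotq = TAILQ_HEAD_INITIALIZER(hotq);
static struct tqlist coldq = TAILQ_HEAD_INITIALIZER(coldq);
static struct tqlist warmq = TAILQ_HEAD_INITIALIZER(warmq);
static int hotlen, warmlen;
static const int hot_limit = 4, warm_limit = 8;	/* made-up limits */

/* Demote the oldest buffers of an over-full hot or warm queue. */
static void
tq_chill(struct tqlist *q, int *len, int limit)
{
	struct tqbuf *b;

	while (*len > limit && (b = TAILQ_FIRST(q)) != NULL) {
		TAILQ_REMOVE(q, b, tq_link);
		(*len)--;
		b->tq_state = TQ_COLD;			/* like B_COLD */
		TAILQ_INSERT_TAIL(&coldq, b, tq_link);
	}
}

/*
 * A buffer is released back to the cache: apply the 2Q rules.  Assumes
 * "b" has already been taken off whatever queue it was on, as
 * bufcache_take() does for the real code.
 */
static void
tq_release(struct tqbuf *b)
{
	if (b->tq_state == TQ_HOT) {
		/* Still in the current working set. */
		TAILQ_INSERT_TAIL(&hotq, b, tq_link);
		hotlen++;
	} else {
		/* Reused after going cold: promote to the long term set. */
		b->tq_state = TQ_WARM;			/* like B_WARM */
		TAILQ_INSERT_TAIL(&warmq, b, tq_link);
		warmlen++;
	}
	tq_chill(&hotq, &hotlen, hot_limit);
	tq_chill(&warmq, &warmlen, warm_limit);
}

/* Recycling prefers the cold queue, then warm, then hot. */
static struct tqbuf *
tq_reclaim(void)
{
	struct tqbuf *b;

	if ((b = TAILQ_FIRST(&coldq)) == NULL &&
	    (b = TAILQ_FIRST(&warmq)) == NULL)
		b = TAILQ_FIRST(&hotq);
	return (b);
}
#endif	/* illustrative sketch only */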
1443 | |
1444 | /* |
1445 | * This function is called when a hot or warm queue may have exceeded its |
1446 | * size limit. It will move a buf to the coldqueue. |
1447 | */ |
1448 | int chillbufs(struct bufcache *cache, struct bufqueue *queue, |
1449 |     int64_t *queuepages); |
1450 | |
1451 | void |
1452 | bufcache_init(void) |
1453 | { |
1454 | int i; |
1455 | |
1456 | for (i = 0; i < NUM_CACHES; i++) { |
1457 | TAILQ_INIT(&cleancache[i].hotqueue); |
1458 | TAILQ_INIT(&cleancache[i].coldqueue); |
1459 | TAILQ_INIT(&cleancache[i].warmqueue); |
1460 | } |
1461 | TAILQ_INIT(&dirtyqueue); |
1462 | } |
1463 | |
1464 | /* |
1465 | * if the buffer caches have shrunk, we may need to rebalance our queues. |
1466 | */ |
1467 | void |
1468 | bufcache_adjust(void) |
1469 | { |
1470 | int i; |
1471 | |
1472 | for (i = 0; i < NUM_CACHES; i++) { |
1473 | while (chillbufs(&cleancache[i], &cleancache[i].warmqueue, |
1474 | &cleancache[i].warmbufpages) || |
1475 | chillbufs(&cleancache[i], &cleancache[i].hotqueue, |
1476 | &cleancache[i].hotbufpages)) |
1477 | continue; |
1478 | } |
1479 | } |
1480 | |
1481 | /* |
1482 | * Get a clean buffer from the cache. If "discard" is set, do not promote |
1483 | * previously warm buffers as normal, because we are tossing everything |
1484 | * away, e.g. when preparing for hibernation. |
1485 | */ |
1486 | struct buf * |
1487 | bufcache_getcleanbuf(int cachenum, int discard) |
1488 | { |
1489 | struct buf *bp = NULL; |
1490 | struct bufcache *cache = &cleancache[cachenum]; |
1491 | struct bufqueue * queue; |
1492 | |
1493 | splassert(IPL_BIO); |
1494 | |
1495 | /* try cold queue */ |
1496 | while ((bp = TAILQ_FIRST(&cache->coldqueue)) || |
1497 | (bp = TAILQ_FIRST(&cache->warmqueue)) || |
1498 | (bp = TAILQ_FIRST(&cache->hotqueue))) { |
1499 | int64_t pages = atop(bp->b_bufsize); |
1500 | struct bufcache *newcache; |
1501 | |
1502 | if (discard || cachenum >= NUM_CACHES - 1) { |
1503 | /* Victim selected, give it up */ |
1504 | return bp; |
1505 | } |
1506 | KASSERT(bp->cache == cachenum); |
1507 | |
1508 | /* |
1509 | * If this buffer was warm before, move it to |
1510 | * the hot queue in the next cache |
1511 | */ |
1512 | |
1513 | if (fliphigh) { |
1514 | /* |
1515 | * If we are in the DMA cache, try to flip the |
1516 | * buffer up high to move it on to the other |
1517 | * caches. If we can't move the buffer to high |
1518 | * memory without sleeping, we give it up and |
1519 | * return it rather than fight for more memory |
1520 | * against non-buffer-cache competitors. |
1521 | */ |
1522 | SET(bp->b_flags, B_BUSY); |
1523 | if (bp->cache == 0 && buf_flip_high(bp) == -1) { |
1524 | CLR(bp->b_flags, B_BUSY); |
1525 | return bp; |
1526 | } |
1527 | CLR(bp->b_flags, B_BUSY); |
1528 | } |
1529 | |
1530 | /* Move the buffer to the hot queue in the next cache */ |
1531 | if (ISSET(bp->b_flags, B_COLD)) { |
1532 | queue = &cache->coldqueue; |
1533 | } else if (ISSET(bp->b_flags, B_WARM)) { |
1534 | queue = &cache->warmqueue; |
1535 | cache->warmbufpages -= pages; |
1536 | } else { |
1537 | queue = &cache->hotqueue; |
1538 | cache->hotbufpages -= pages; |
1539 | } |
1540 | TAILQ_REMOVE(queue, bp, b_freelist); |
1541 | cache->cachepages -= pages; |
1542 | CLR(bp->b_flags, B_WARM); |
1543 | CLR(bp->b_flags, B_COLD); |
1544 | bp->cache++; |
1545 | newcache = &cleancache[bp->cache]; |
1546 | newcache->cachepages += pages; |
1547 | newcache->hotbufpages += pages; |
1548 | chillbufs(newcache, &newcache->hotqueue, |
1549 | &newcache->hotbufpages); |
1550 | TAILQ_INSERT_TAIL(&newcache->hotqueue, bp, b_freelist); |
1551 | } |
1552 | return bp; |
1553 | } |
1554 | |
1555 | |
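/*
 * Remove a buffer from the cache queues, detach it from its vnode
 * (if any), and free it.
 */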
1556 | void |
1557 | discard_buffer(struct buf *bp) { |
1558 | bufcache_take(bp); |
1559 | if (bp->b_vp) { |
1560 | RBT_REMOVE(buf_rb_bufs, |
1561 | &bp->b_vp->v_bufs_tree, bp); |
1562 | brelvp(bp); |
1563 | } |
1564 | buf_put(bp); |
1565 | } |
1566 | |
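/*
 * Recover up to "howmany" pages of DMA-reachable memory from the DMA
 * cache, either by discarding buffers or, when flipping is enabled,
 * by migrating them to the high-memory caches.
 */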
1567 | int64_t |
1568 | bufcache_recover_dmapages(int discard, int64_t howmany) |
1569 | { |
1570 | struct buf *bp = NULL; |
1571 | struct bufcache *cache = &cleancache[DMA_CACHE]; |
1572 | struct bufqueue * queue; |
1573 | int64_t recovered = 0; |
1574 | |
1575 | splassert(IPL_BIO); |
1576 | |
1577 | while ((recovered < howmany) && |
1578 | ((bp = TAILQ_FIRST(&cache->coldqueue)) || |
1579 | (bp = TAILQ_FIRST(&cache->warmqueue)) || |
1580 | (bp = TAILQ_FIRST(&cache->hotqueue)))) { |
1581 | int64_t pages = atop(bp->b_bufsize); |
1582 | struct bufcache *newcache; |
1583 | |
1584 | if (discard || DMA_CACHE >= NUM_CACHES - 1) { |
1585 | discard_buffer(bp); |
1586 | continue; |
1587 | } |
1588 | KASSERT(bp->cache == DMA_CACHE); |
1589 | |
1590 | /* |
1591 | * If this buffer was warm before, move it to |
1592 | * the hot queue in the next cache |
1593 | */ |
1594 | |
1595 | /* |
1596 | * One way or another, the pages for this |
1597 | * buffer are leaving DMA memory |
1598 | */ |
1599 | recovered += pages; |
1600 | |
1601 | if (!fliphigh) { |
1602 | discard_buffer(bp); |
1603 | continue; |
1604 | } |
1605 | |
1606 | /* |
1607 | * If we are in the DMA cache, try to flip the |
1608 | * buffer up high to move it on to the other |
1609 | * caches. If we can't move the buffer to high |
1610 | * memory without sleeping, we give it up |
1611 | * now rather than fight for more memory |
1612 | * against non-buffer-cache competitors. |
1613 | */ |
1614 | SET(bp->b_flags, B_BUSY); |
1615 | if (bp->cache == 0 && buf_flip_high(bp) == -1) { |
1616 | CLR(bp->b_flags, B_BUSY); |
1617 | discard_buffer(bp); |
1618 | continue; |
1619 | } |
1620 | CLR(bp->b_flags, B_BUSY); |
1621 | |
1622 | /* |
1623 | * Move the buffer to the hot queue in the next cache |
1624 | */ |
1625 | if (ISSET(bp->b_flags, B_COLD)) { |
1626 | queue = &cache->coldqueue; |
1627 | } else if (ISSET(bp->b_flags, B_WARM)) { |
1628 | queue = &cache->warmqueue; |
1629 | cache->warmbufpages -= pages; |
1630 | } else { |
1631 | queue = &cache->hotqueue; |
1632 | cache->hotbufpages -= pages; |
1633 | } |
1634 | TAILQ_REMOVE(queue, bp, b_freelist); |
1635 | cache->cachepages -= pages; |
1636 | CLR(bp->b_flags, B_WARM); |
1637 | CLR(bp->b_flags, B_COLD); |
1638 | bp->cache++; |
1639 | newcache = &cleancache[bp->cache]; |
1640 | newcache->cachepages += pages; |
1641 | newcache->hotbufpages += pages; |
1642 | chillbufs(newcache, &newcache->hotqueue, |
1643 | &newcache->hotbufpages); |
1644 | TAILQ_INSERT_TAIL(&newcache->hotqueue, bp, b_freelist); |
1645 | } |
1646 | return recovered; |
1647 | } |
1648 | |
1649 | struct buf * |
1650 | bufcache_getcleanbuf_range(int start, int end, int discard) |
1651 | { |
1652 | int i, j = start, q = end; |
1653 | struct buf *bp = NULL; |
1654 | |
1655 | /* |
1656 | * XXX in theory we could promote warm buffers into a previous queue |
1657 | * so in the pathological case where we go through all the caches |
1658 | * without getting a buffer, we have to start at the beginning again. |
1659 | */ |
1660 | while (j <= q) { |
1661 | for (i = q; i >= j; i--) |
1662 | if ((bp = bufcache_getcleanbuf(i, discard))) |
1663 | return (bp); |
1664 | j++; |
1665 | } |
1666 | return bp; |
1667 | } |
1668 | |
1669 | struct buf * |
1670 | bufcache_gethighcleanbuf(void) |
1671 | { |
1672 | if (!fliphigh) |
1673 | return NULL; |
1674 | return bufcache_getcleanbuf_range(DMA_CACHE + 1, NUM_CACHES - 1, 0); |
1675 | } |
1676 | |
1677 | |
1678 | struct buf * |
1679 | bufcache_getdmacleanbuf(void) |
1680 | { |
1681 | if (fliphigh) |
1682 | return bufcache_getcleanbuf_range(DMA_CACHE, DMA_CACHE, 0); |
1683 | return bufcache_getcleanbuf_range(DMA_CACHE, NUM_CACHES - 1, 0); |
1684 | } |
1685 | |
1686 | |
1687 | struct buf * |
1688 | bufcache_getdirtybuf(void) |
1689 | { |
1690 | return TAILQ_FIRST(&dirtyqueue); |
1691 | } |
1692 | |
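/*
 * Remove a buffer from its clean or dirty queue and adjust the page
 * accounting.  The buffer must belong to the buffer cache (B_BC);
 * called at IPL_BIO.
 */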
1693 | void |
1694 | bufcache_take(struct buf *bp) |
1695 | { |
1696 | struct bufqueue *queue; |
1697 | int64_t pages; |
1698 | |
1699 | splassert(IPL_BIO); |
1700 | KASSERT(ISSET(bp->b_flags, B_BC)); |
1701 | KASSERT(bp->cache >= DMA_CACHE); |
1702 | KASSERT((bp->cache < NUM_CACHES)); |
1703 | |
1704 | pages = atop(bp->b_bufsize); |
1705 | |
1706 | TRACEPOINT(vfs, bufcache_take, bp->b_flags, bp->cache, pages); |
1707 | |
1708 | struct bufcache *cache = &cleancache[bp->cache]; |
1709 | if (!ISSET(bp->b_flags, B_DELWRI)) { |
1710 | if (ISSET(bp->b_flags, B_COLD)) { |
1711 | queue = &cache->coldqueue; |
1712 | } else if (ISSET(bp->b_flags, B_WARM)) { |
1713 | queue = &cache->warmqueue; |
1714 | cache->warmbufpages -= pages; |
1715 | } else { |
1716 | queue = &cache->hotqueue; |
1717 | cache->hotbufpages -= pages; |
1718 | } |
1719 | bcstats.numcleanpages -= pages; |
1720 | cache->cachepages -= pages; |
1721 | } else { |
1722 | queue = &dirtyqueue; |
1723 | bcstats.numdirtypages -= pages; |
1724 | bcstats.delwribufs--; |
1725 | } |
1726 | TAILQ_REMOVE(queue, bp, b_freelist); |
1727 | } |
1728 | |
1729 | /* move buffers from a hot or warm queue to a cold queue in a cache */ |
1730 | int |
1731 | chillbufs(struct bufcache *cache, struct bufqueue *queue, int64_t *queuepages) |
1732 | { |
1733 | struct buf *bp; |
1734 | int64_t limit, pages; |
1735 | |
1736 | /* |
1737 | * We limit the hot queue to be small (1/20 of the cache's clean pages, |
1738 | * with a max of 4096 pages) and the warm queue to half the cache size. |
1739 | * |
1740 | * We impose a minimum size of 96 pages to prevent too much "wobbling". |
1741 | */ |
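/*
 * Worked example (hypothetical numbers): with 40000 clean pages in this
 * cache, the hot limit is min(40000 / 20, 4096) = 2000 pages and the
 * warm limit is 40000 / 2 = 20000 pages.  A queue holding 96 pages or
 * fewer is never chilled, whatever its computed limit.
 */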
1742 | if (queue == &cache->hotqueue) |
1743 | limit = min(cache->cachepages / 20, 4096); |
1744 | else if (queue == &cache->warmqueue) |
1745 | limit = (cache->cachepages / 2); |
1746 | else |
1747 | panic("chillbufs: invalid queue"); |
1748 | |
1749 | if (*queuepages > 96 && *queuepages > limit) { |
1750 | bp = TAILQ_FIRST(queue); |
1751 | if (!bp) |
1752 | panic("inconsistent bufpage counts"); |
1753 | pages = atop(bp->b_bufsize); |
1754 | *queuepages -= pages; |
1755 | TAILQ_REMOVE(queue, bp, b_freelist); |
1756 | /* we do not clear B_WARM */ |
1757 | SET(bp->b_flags, B_COLD); |
1758 | TAILQ_INSERT_TAIL(&cache->coldqueue, bp, b_freelist); |
1759 | return 1; |
1760 | } |
1761 | return 0; |
1762 | } |
1763 | |
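/*
 * Return a buffer to a queue: the dirty queue if it has delayed writes,
 * otherwise the hot or warm queue of its clean cache according to the
 * 2Q promotion rules, chilling that queue afterwards if it has grown
 * too large.
 */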
1764 | void |
1765 | bufcache_release(struct buf *bp) |
1766 | { |
1767 | struct bufqueue *queue; |
1768 | int64_t pages; |
1769 | struct bufcache *cache = &cleancache[bp->cache]; |
1770 | |
1771 | KASSERT(ISSET(bp->b_flags, B_BC)); |
1772 | pages = atop(bp->b_bufsize); |
1773 | |
1774 | TRACEPOINT(vfs, bufcache_rel, bp->b_flags, bp->cache, pages); |
1775 | |
1776 | if (fliphigh) { |
1777 | if (ISSET(bp->b_flags, B_DMA) && bp->cache > 0) |
1778 | panic("B_DMA buffer release from cache %d", |
1779 | bp->cache); |
1780 | else if ((!ISSET(bp->b_flags, B_DMA)) && bp->cache == 0) |
1781 | panic("Non B_DMA buffer release from cache %d", |
1782 | bp->cache); |
1783 | } |
1784 | |
1785 | if (!ISSET(bp->b_flags, B_DELWRI)) { |
1786 | int64_t *queuepages; |
1787 | if (ISSET(bp->b_flags, B_WARM | B_COLD)) { |
1788 | SET(bp->b_flags, B_WARM); |
1789 | CLR(bp->b_flags, B_COLD); |
1790 | queue = &cache->warmqueue; |
1791 | queuepages = &cache->warmbufpages; |
1792 | } else { |
1793 | queue = &cache->hotqueue; |
1794 | queuepages = &cache->hotbufpages; |
1795 | } |
1796 | *queuepages += pages; |
1797 | bcstats.numcleanpages += pages; |
1798 | cache->cachepages += pages; |
1799 | chillbufs(cache, queue, queuepages); |
1800 | } else { |
1801 | queue = &dirtyqueue; |
1802 | bcstats.numdirtypages += pages; |
1803 | bcstats.delwribufs++; |
1804 | } |
1805 | TAILQ_INSERT_TAIL(queue, bp, b_freelist); |
1806 | } |
1807 | |
1808 | #ifdef HIBERNATE |
1809 | /* |
1810 | * Nuke the buffer cache from orbit when hibernating. We do not want to save |
1811 | * any clean cache pages to swap and read them back. The original disk files |
1812 | * are just as good. |
1813 | */ |
1814 | void |
1815 | hibernate_suspend_bufcache(void) |
1816 | { |
1817 | struct buf *bp; |
1818 | int s; |
1819 | |
1820 | s = splbio(); |
1821 | /* Chuck away all the cache pages: discard bufs, do not promote */ |
1822 | while ((bp = bufcache_getcleanbuf_range(DMA_CACHE, NUM_CACHES - 1, 1))) { |
1823 | bufcache_take(bp); |
1824 | if (bp->b_vp) { |
1825 | RBT_REMOVE(buf_rb_bufs, &bp->b_vp->v_bufs_tree, bp); |
1826 | brelvp(bp); |
1827 | } |
1828 | buf_put(bp); |
1829 | } |
1830 | splx(s); |
1831 | } |
1832 | |
1833 | void |
1834 | hibernate_resume_bufcache(void) |
1835 | { |
1836 | /* XXX Nothing needed here for now */ |
1837 | } |
1838 | #endif /* HIBERNATE */ |