File: uvm/uvm_fault.c
Warning: line 1024, column 3: Value stored to 'oanon' is never read
1 | /* $OpenBSD: uvm_fault.c,v 1.124 2021/12/28 13:16:28 mpi Exp $ */ |
2 | /* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */ |
3 | |
4 | /* |
5 | * Copyright (c) 1997 Charles D. Cranor and Washington University. |
6 | * All rights reserved. |
7 | * |
8 | * Redistribution and use in source and binary forms, with or without |
9 | * modification, are permitted provided that the following conditions |
10 | * are met: |
11 | * 1. Redistributions of source code must retain the above copyright |
12 | * notice, this list of conditions and the following disclaimer. |
13 | * 2. Redistributions in binary form must reproduce the above copyright |
14 | * notice, this list of conditions and the following disclaimer in the |
15 | * documentation and/or other materials provided with the distribution. |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | * |
28 | * from: Id: uvm_fault.c,v 1.1.2.23 1998/02/06 05:29:05 chs Exp |
29 | */ |
30 | |
31 | /* |
32 | * uvm_fault.c: fault handler |
33 | */ |
34 | |
35 | #include <sys/param.h> |
36 | #include <sys/systm.h> |
37 | #include <sys/kernel.h> |
38 | #include <sys/percpu.h> |
39 | #include <sys/proc.h> |
40 | #include <sys/malloc.h> |
41 | #include <sys/mman.h> |
42 | #include <sys/tracepoint.h> |
43 | |
44 | #include <uvm/uvm.h> |
45 | |
46 | /* |
47 | * |
48 | * a word on page faults: |
49 | * |
50 | * types of page faults we handle: |
51 | * |
52 | * CASE 1: upper layer faults                   CASE 2: lower layer faults |
53 | * |
54 | *    CASE 1A         CASE 1B                  CASE 2A        CASE 2B |
55 | *    read/write1     write>1                  read/write     +-cow_write/zero |
56 | *         |             |                         |        | |
57 | *      +--|--+       +--|--+     +-----+       +  |  +     | +-----+ |
58 | * amap |  V  |       |  ---------> new |          |        | |  ^  | |
59 | *      +-----+       +-----+     +-----+       +  |  +     | +--|--+ |
60 | *                                                 |        |    | |
61 | *      +-----+       +-----+                   +--|--+     | +--|--+ |
62 | * uobj | d/c |       | d/c |                   |  V  |     +----+  | |
63 | *      +-----+       +-----+                   +-----+       +-----+ |
64 | * |
65 | * d/c = don't care |
66 | * |
67 | * case [0]: layerless fault |
68 | * no amap or uobj is present. this is an error. |
69 | * |
70 | * case [1]: upper layer fault [anon active] |
71 | * 1A: [read] or [write with anon->an_ref == 1] |
72 | * I/O takes place in upper level anon and uobj is not touched. |
73 | * 1B: [write with anon->an_ref > 1] |
74 | * new anon is alloc'd and data is copied off ["COW"] |
75 | * |
76 | * case [2]: lower layer fault [uobj] |
77 | * 2A: [read on non-NULL uobj] or [write to non-copy_on_write area] |
78 | * I/O takes place directly in object. |
79 | * 2B: [write to copy_on_write] or [read on NULL uobj] |
80 | * data is "promoted" from uobj to a new anon. |
81 | * if uobj is null, then we zero fill. |
82 | * |
83 | * we follow the standard UVM locking protocol ordering: |
84 | * |
85 | * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ) |
86 | * we hold a PG_BUSY page if we unlock for I/O |
87 | * |
88 | * |
89 | * the code is structured as follows: |
90 | * |
91 | * - init the "IN" params in the ufi structure |
92 | * ReFault: (ERESTART returned to the loop in uvm_fault) |
93 | * - do lookups [locks maps], check protection, handle needs_copy |
94 | * - check for case 0 fault (error) |
95 | * - establish "range" of fault |
96 | * - if we have an amap lock it and extract the anons |
97 | * - if sequential advice deactivate pages behind us |
98 | * - at the same time check pmap for unmapped areas and anon for pages |
99 | * that we could map in (and do map it if found) |
100 | * - check object for resident pages that we could map in |
101 | * - if (case 2) goto Case2 |
102 | * - >>> handle case 1 |
103 | * - ensure source anon is resident in RAM |
104 | * - if case 1B alloc new anon and copy from source |
105 | * - map the correct page in |
106 | * Case2: |
107 | * - >>> handle case 2 |
108 | * - ensure source page is resident (if uobj) |
109 | * - if case 2B alloc new anon and copy from source (could be zero |
110 | * fill if uobj == NULL) |
111 | * - map the correct page in |
112 | * - done! |
113 | * |
114 | * note on paging: |
115 | * if we have to do I/O we place a PG_BUSY page in the correct object, |
116 | * unlock everything, and do the I/O. when I/O is done we must reverify |
117 | * the state of the world before assuming that our data structures are |
118 | * valid. [because mappings could change while the map is unlocked] |
119 | * |
120 | * alternative 1: unbusy the page in question and restart the page fault |
121 | * from the top (ReFault). this is easy but does not take advantage |
122 | * of the information that we already have from our previous lookup, |
123 | * although it is possible that the "hints" in the vm_map will help here. |
124 | * |
125 | * alternative 2: the system already keeps track of a "version" number of |
126 | * a map. [i.e. every time you write-lock a map (e.g. to change a |
127 | * mapping) you bump the version number up by one...] so, we can save |
128 | * the version number of the map before we release the lock and start I/O. |
129 | * then when I/O is done we can relock and check the version numbers |
130 | * to see if anything changed. this might save us some over alternative 1 |
131 | * because we don't have to unbusy the page and may involve fewer compares(?). |
132 | * |
133 | * alternative 3: put in backpointers or a way to "hold" part of a map |
134 | * in place while I/O is in progress. this could be complex to |
135 | * implement (especially with structures like amap that can be referenced |
136 | * by multiple map entries, and figuring out what should wait could be |
137 | * complex as well...). |
138 | * |
139 | * we use alternative 2. given that we are multi-threaded now we may want |
140 | * to reconsider the choice. |
141 | */ |
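To make alternative 2 concrete, here is a minimal sketch of the version-check protocol (this is what uvmfault_relock() implements for us below, assuming the map version lives in map->timestamp as it does in this tree; the I/O details are elided):

        unsigned int version;

        version = map->timestamp;        /* save version before unlocking */
        vm_map_unlock_read(map);
        /* ... do the I/O with a PG_BUSY page held ... */
        vm_map_lock_read(map);
        if (map->timestamp != version)
                return VM_PAGER_REFAULT; /* map changed: ReFault */
        /* version unchanged: the earlier lookup results are still valid */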
142 | |
143 | /* |
144 | * local data structures |
145 | */ |
146 | struct uvm_advice { |
147 | int nback; |
148 | int nforw; |
149 | }; |
150 | |
151 | /* |
152 | * page range array: set up in uvmfault_init(). |
153 | */ |
154 | static struct uvm_advice uvmadvice[MADV_MASK + 1]; |
155 | |
156 | #define UVM_MAXRANGE 16 /* must be max() of nback+nforw+1 */ |
157 | |
158 | /* |
159 | * private prototypes |
160 | */ |
161 | static void uvmfault_amapcopy(struct uvm_faultinfo *); |
162 | static inline void uvmfault_anonflush(struct vm_anon **, int); |
163 | void uvmfault_unlockmaps(struct uvm_faultinfo *, boolean_t); |
164 | void uvmfault_update_stats(struct uvm_faultinfo *); |
165 | |
166 | /* |
167 | * inline functions |
168 | */ |
169 | /* |
170 | * uvmfault_anonflush: try and deactivate pages in specified anons |
171 | * |
172 | * => does not have to deactivate page if it is busy |
173 | */ |
174 | static inline void |
175 | uvmfault_anonflush(struct vm_anon **anons, int n) |
176 | { |
177 | int lcv; |
178 | struct vm_page *pg; |
179 | |
180 | for (lcv = 0; lcv < n; lcv++) { |
181 | if (anons[lcv] == NULL) |
182 | continue; |
183 | KASSERT(rw_lock_held(anons[lcv]->an_lock)); |
184 | pg = anons[lcv]->an_page; |
185 | if (pg && (pg->pg_flags & PG_BUSY) == 0) { |
186 | uvm_lock_pageq(); |
187 | if (pg->wire_count == 0) { |
188 | pmap_page_protect(pg, PROT_NONE); |
189 | uvm_pagedeactivate(pg); |
190 | } |
191 | uvm_unlock_pageq(); |
192 | } |
193 | } |
194 | } |
195 | |
196 | /* |
197 | * normal functions |
198 | */ |
199 | /* |
200 | * uvmfault_init: compute proper values for the uvmadvice[] array. |
201 | */ |
202 | void |
203 | uvmfault_init(void) |
204 | { |
205 | int npages; |
206 | |
207 | npages = atop(16384); |
208 | if (npages > 0) { |
209 | KASSERT(npages <= UVM_MAXRANGE / 2); |
210 | uvmadvice[MADV_NORMAL].nforw = npages; |
211 | uvmadvice[MADV_NORMAL].nback = npages - 1; |
212 | } |
213 | |
214 | npages = atop(32768); |
215 | if (npages > 0) { |
216 | KASSERT(npages <= UVM_MAXRANGE / 2); |
217 | uvmadvice[MADV_SEQUENTIAL].nforw = npages - 1; |
218 | uvmadvice[MADV_SEQUENTIAL].nback = npages; |
219 | } |
220 | } |
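With 4 KB pages (PAGE_SHIFT == 12) the arithmetic above works out as: atop(16384) == 4, so MADV_NORMAL reads 3 pages back and 4 forward (3 + 4 + 1 == 8 pages per fault); atop(32768) == 8, so MADV_SEQUENTIAL reads 8 back and 7 forward (8 + 7 + 1 == 16 == UVM_MAXRANGE).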
221 | |
222 | /* |
223 | * uvmfault_amapcopy: clear "needs_copy" in a map. |
224 | * |
225 | * => called with VM data structures unlocked (usually, see below) |
226 | * => we get a write lock on the maps and clear needs_copy for a VA |
227 | * => if we are out of RAM we sleep (waiting for more) |
228 | */ |
229 | static void |
230 | uvmfault_amapcopy(struct uvm_faultinfo *ufi) |
231 | { |
232 | for (;;) { |
233 | /* |
234 | * no mapping? give up. |
235 | */ |
236 | if (uvmfault_lookup(ufi, TRUE) == FALSE) |
237 | return; |
238 | |
239 | /* |
240 | * copy if needed. |
241 | */ |
242 | if (UVM_ET_ISNEEDSCOPY(ufi->entry)) |
243 | amap_copy(ufi->map, ufi->entry, M_NOWAIT, |
244 | UVM_ET_ISSTACK(ufi->entry) ? FALSE : TRUE, |
245 | ufi->orig_rvaddr, ufi->orig_rvaddr + 1); |
246 | |
247 | /* |
248 | * didn't work? must be out of RAM. unlock and sleep. |
249 | */ |
250 | if (UVM_ET_ISNEEDSCOPY(ufi->entry)) { |
251 | uvmfault_unlockmaps(ufi, TRUE); |
252 | uvm_wait("fltamapcopy"); |
253 | continue; |
254 | } |
255 | |
256 | /* |
257 | * got it! unlock and return. |
258 | */ |
259 | uvmfault_unlockmaps(ufi, TRUE); |
260 | return; |
261 | } |
262 | /*NOTREACHED*/ |
263 | } |
264 | |
265 | /* |
266 | * uvmfault_anonget: get data in an anon into a non-busy, non-released |
267 | * page in that anon. |
268 | * |
269 | * => Map, amap and thus anon should be locked by caller. |
270 | * => If we fail, we unlock everything and error is returned. |
271 | * => If we are successful, return with everything still locked. |
272 | * => We do not move the page on the queues [gets moved later]. If we |
273 | * allocate a new page [we_own], it gets put on the queues. Either way, |
274 | * the result is that the page is on the queues at return time |
275 | */ |
276 | int |
277 | uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap, |
278 | struct vm_anon *anon) |
279 | { |
280 | struct vm_page *pg; |
281 | int error; |
282 | |
283 | KASSERT(rw_lock_held(anon->an_lock)); |
284 | KASSERT(anon->an_lock == amap->am_lock); |
285 | |
286 | /* Increment the counters. */ |
287 | counters_inc(uvmexp_counters, flt_anget); |
288 | if (anon->an_page) { |
289 | curproc->p_ru.ru_minflt++; |
290 | } else { |
291 | curproc->p_ru.ru_majflt++; |
292 | } |
293 | error = 0; |
294 | |
295 | /* |
296 | * Loop until we get the anon data, or fail. |
297 | */ |
298 | for (;;) { |
299 | boolean_t we_own, locked; |
300 | /* |
301 | * Note: 'we_own' will become true if we set PG_BUSY on a page. |
302 | */ |
303 | we_own = FALSE; |
304 | pg = anon->an_page; |
305 | |
306 | /* |
307 | * Is page resident? Make sure it is not busy/released. |
308 | */ |
309 | if (pg) { |
310 | KASSERT(pg->pg_flags & PQ_ANON); |
311 | KASSERT(pg->uanon == anon); |
312 | |
313 | /* |
314 | * if the page is busy, we drop all the locks and |
315 | * try again. |
316 | */ |
317 | if ((pg->pg_flags & (PG_BUSY|PG_RELEASED)) == 0) |
318 | return (VM_PAGER_OK); |
319 | atomic_setbits_int(&pg->pg_flags, PG_WANTED); |
320 | counters_inc(uvmexp_counters, flt_pgwait); |
321 | |
322 | /* |
323 | * The last unlock must be an atomic unlock and wait |
324 | * on the owner of page. |
325 | */ |
326 | if (pg->uobject) { |
327 | /* Owner of page is UVM object. */ |
328 | uvmfault_unlockall(ufi, amap, NULL); |
329 | rwsleep_nsec(pg, pg->uobject->vmobjlock, |
330 | PVM | PNORELOCK, "anonget1", INFSLP); |
331 | } else { |
332 | /* Owner of page is anon. */ |
333 | uvmfault_unlockall(ufi, NULL, NULL); |
334 | rwsleep_nsec(pg, anon->an_lock, PVM | PNORELOCK, |
335 | "anonget2", INFSLP); |
336 | } |
337 | } else { |
338 | /* |
339 | * No page, therefore allocate one. |
340 | */ |
341 | pg = uvm_pagealloc(NULL, 0, anon, 0); |
342 | if (pg == NULL) { |
343 | /* Out of memory. Wait a little. */ |
344 | uvmfault_unlockall(ufi, amap, NULL); |
345 | counters_inc(uvmexp_counters, flt_noram); |
346 | uvm_wait("flt_noram1"); |
347 | } else { |
348 | /* PG_BUSY bit is set. */ |
349 | we_own = TRUE; |
350 | uvmfault_unlockall(ufi, amap, NULL); |
351 | |
352 | /* |
353 | * Pass a PG_BUSY+PG_FAKE+PG_CLEAN page into |
354 | * the uvm_swap_get() function with all data |
355 | * structures unlocked. Note that it is OK |
356 | * to read an_swslot here, because we hold |
357 | * PG_BUSY on the page. |
358 | */ |
359 | counters_inc(uvmexp_counters, pageins); |
360 | error = uvm_swap_get(pg, anon->an_swslot, |
361 | PGO_SYNCIO); |
362 | |
363 | /* |
364 | * We clean up after the I/O below in the |
365 | * 'we_own' case. |
366 | */ |
367 | } |
368 | } |
369 | |
370 | /* |
371 | * Re-lock the map and anon. |
372 | */ |
373 | locked = uvmfault_relock(ufi); |
374 | if (locked || we_own) { |
375 | rw_enter(anon->an_lock, RW_WRITE); |
376 | } |
377 | |
378 | /* |
379 | * If we own the page (i.e. we set PG_BUSY), then we need |
380 | * to clean up after the I/O. There are three cases to |
381 | * consider: |
382 | * |
383 | * 1) Page was released during I/O: free anon and ReFault. |
384 | * 2) I/O not OK. Free the page and cause the fault to fail. |
385 | * 3) I/O OK! Activate the page and sync with the non-we_own |
386 | * case (i.e. drop anon lock if not locked). |
387 | */ |
388 | if (we_own) { |
389 | if (pg->pg_flags & PG_WANTED) { |
390 | wakeup(pg); |
391 | } |
392 | /* un-busy! */ |
393 | atomic_clearbits_int(&pg->pg_flags, |
394 | PG_WANTED|PG_BUSY|PG_FAKE); |
395 | UVM_PAGE_OWN(pg, NULL); |
396 | |
397 | /* |
398 | * if we were RELEASED during I/O, then our anon is |
399 | * no longer part of an amap. we need to free the |
400 | * anon and try again. |
401 | */ |
402 | if (pg->pg_flags & PG_RELEASED) { |
403 | pmap_page_protect(pg, PROT_NONE); |
404 | KASSERT(anon->an_ref == 0); |
405 | /* |
406 | * Released while we had unlocked amap. |
407 | */ |
408 | if (locked) |
409 | uvmfault_unlockall(ufi, NULL, NULL); |
410 | uvm_anon_release(anon); /* frees page for us */ |
411 | counters_inc(uvmexp_counters, flt_pgrele); |
412 | return (VM_PAGER_REFAULT); /* refault! */ |
413 | } |
414 | |
415 | if (error != VM_PAGER_OK) { |
416 | KASSERT(error != VM_PAGER_PEND); |
417 | |
418 | /* remove page from anon */ |
419 | anon->an_page = NULL; |
420 | |
421 | /* |
422 | * Remove the swap slot from the anon and |
423 | * mark the anon as having no real slot. |
424 | * Do not free the swap slot, thus preventing |
425 | * it from being used again. |
426 | */ |
427 | uvm_swap_markbad(anon->an_swslot, 1); |
428 | anon->an_swslot = SWSLOT_BAD; |
429 | |
430 | /* |
431 | * Note: page was never !PG_BUSY, so it |
432 | * cannot be mapped and thus no need to |
433 | * pmap_page_protect() it. |
434 | */ |
435 | uvm_lock_pageq(); |
436 | uvm_pagefree(pg); |
437 | uvm_unlock_pageq(); |
438 | |
439 | if (locked) { |
440 | uvmfault_unlockall(ufi, NULL, NULL); |
441 | } |
442 | rw_exit(anon->an_lock); |
443 | return (VM_PAGER_ERROR); |
444 | } |
445 | |
446 | /* |
447 | * We have successfully read the page, activate it. |
448 | */ |
449 | pmap_clear_modify(pg); |
450 | uvm_lock_pageq(); |
451 | uvm_pageactivate(pg); |
452 | uvm_unlock_pageq(); |
453 | } |
454 | |
455 | /* |
456 | * We were not able to re-lock the map - restart the fault. |
457 | */ |
458 | if (!locked) { |
459 | if (we_own) { |
460 | rw_exit(anon->an_lock); |
461 | } |
462 | return (VM_PAGER_REFAULT); |
463 | } |
464 | |
465 | /* |
466 | * Verify that no one has touched the amap and moved |
467 | * the anon on us. |
468 | */ |
469 | if (ufi != NULL && amap_lookup(&ufi->entry->aref, |
470 | ufi->orig_rvaddr - ufi->entry->start) != anon) { |
471 | |
472 | uvmfault_unlockall(ufi, amap, NULL); |
473 | return (VM_PAGER_REFAULT); |
474 | } |
475 | |
476 | /* |
477 | * Retry.. |
478 | */ |
479 | counters_inc(uvmexp_counters, flt_anretry); |
480 | continue; |
481 | |
482 | } |
483 | /*NOTREACHED*/ |
484 | } |
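The VM_PAGER_* values above are the whole contract with callers; uvm_fault_upper() below translates them to errno values. Condensed, the caller-side pattern is (a restatement of the switch in uvm_fault_upper(), not new logic):

        error = uvmfault_anonget(ufi, amap, anon);
        switch (error) {
        case VM_PAGER_OK:               /* locks still held, page resident */
                break;
        case VM_PAGER_REFAULT:          /* everything was unlocked for us */
                return ERESTART;        /* the loop in uvm_fault() retries */
        default:                        /* VM_PAGER_ERROR */
                return EACCES;
        }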
485 | |
486 | /* |
487 | * Update statistics after fault resolution. |
488 | * - maxrss |
489 | */ |
490 | void |
491 | uvmfault_update_stats(struct uvm_faultinfo *ufi) |
492 | { |
493 | struct vm_map *map; |
494 | struct proc *p; |
495 | vsize_t res; |
496 | |
497 | map = ufi->orig_map; |
498 | |
499 | /* |
500 | * If this is a nested pmap (eg, a virtual machine pmap managed |
501 | * by vmm(4) on amd64/i386), don't do any updating, just return. |
502 | * |
503 | * pmap_nested() on other archs is #defined to 0, so this is a |
504 | * no-op. |
505 | */ |
506 | if (pmap_nested(map->pmap)) |
507 | return; |
508 | |
509 | /* Update the maxrss for the process. */ |
510 | if (map->flags & VM_MAP_ISVMSPACE) { |
511 | p = curproc; |
512 | KASSERT(p != NULL && &p->p_vmspace->vm_map == map); |
513 | |
514 | res = pmap_resident_count(map->pmap); |
515 | /* Convert res from pages to kilobytes. */ |
516 | res <<= (PAGE_SHIFT - 10); |
517 | |
518 | if (p->p_ru.ru_maxrss < res) |
519 | p->p_ru.ru_maxrss = res; |
520 | } |
521 | } |
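The shift works because a page is 2^PAGE_SHIFT bytes and a kilobyte is 2^10 bytes, so res <<= (PAGE_SHIFT - 10) multiplies the page count by 2^(PAGE_SHIFT - 10); with 4 KB pages that is res <<= 2, e.g. 1000 resident pages become 4000 KB of maxrss.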
522 | |
523 | /* |
524 | * F A U L T - m a i n e n t r y p o i n t |
525 | */ |
526 | |
527 | /* |
528 | * uvm_fault: page fault handler |
529 | * |
530 | * => called from MD code to resolve a page fault |
531 | * => VM data structures usually should be unlocked. however, it is |
532 | * possible to call here with the main map locked if the caller |
533 | * gets a write lock, sets it recursive, and then calls us (c.f. |
534 | * uvm_map_pageable). this should be avoided because it keeps |
535 | * the map locked off during I/O. |
536 | * => MUST NEVER BE CALLED IN INTERRUPT CONTEXT |
537 | */ |
538 | #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \ |
539 | ~PROT_WRITE : PROT_MASK) |
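MASK() bounds what may be handed to pmap_enter() for a map entry: on a copy-on-write entry the write bit is stripped, so an access_type of PROT_READ | PROT_WRITE degrades to PROT_READ, and the first real store then faults again and takes the promotion path (case 1B/2B) instead of writing through to the shared page.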
540 | struct uvm_faultctx { |
541 | /* |
542 | * the following members are set up by uvm_fault_check() and |
543 | * read-only after that. |
544 | */ |
545 | vm_prot_t enter_prot; |
546 | vm_prot_t access_type; |
547 | vaddr_t startva; |
548 | int npages; |
549 | int centeridx; |
550 | boolean_t narrow; |
551 | boolean_t wired; |
552 | paddr_t pa_flags; |
553 | }; |
554 | |
555 | int uvm_fault_check( |
556 | struct uvm_faultinfo *, struct uvm_faultctx *, |
557 | struct vm_anon ***); |
558 | |
559 | int uvm_fault_upper( |
560 | struct uvm_faultinfo *, struct uvm_faultctx *, |
561 | struct vm_anon **, vm_fault_t); |
562 | boolean_t uvm_fault_upper_lookup( |
563 | struct uvm_faultinfo *, const struct uvm_faultctx *, |
564 | struct vm_anon **, struct vm_page **); |
565 | |
566 | int uvm_fault_lower( |
567 | struct uvm_faultinfo *, struct uvm_faultctx *, |
568 | struct vm_page **, vm_fault_t); |
569 | |
570 | int |
571 | uvm_fault(vm_map_t orig_map, vaddr_t vaddr, vm_fault_t fault_type, |
572 | vm_prot_t access_type) |
573 | { |
574 | struct uvm_faultinfo ufi; |
575 | struct uvm_faultctx flt; |
576 | boolean_t shadowed; |
577 | struct vm_anon *anons_store[UVM_MAXRANGE], **anons; |
578 | struct vm_page *pages[UVM_MAXRANGE]; |
579 | int error; |
580 | |
581 | counters_inc(uvmexp_counters, faults); |
582 | TRACEPOINT(uvm, fault, vaddr, fault_type, access_type, NULL); |
583 | |
584 | /* |
585 | * init the IN parameters in the ufi |
586 | */ |
587 | ufi.orig_map = orig_map; |
588 | ufi.orig_rvaddr = trunc_page(vaddr); |
589 | ufi.orig_size = PAGE_SIZE; /* can't get any smaller than this */ |
590 | if (fault_type == VM_FAULT_WIRE) |
591 | flt.narrow = TRUE; /* don't look for neighborhood |
592 | * pages on wire */ |
593 | else |
594 | flt.narrow = FALSE; /* normal fault */ |
595 | flt.access_type = access_type; |
596 | |
597 | |
598 | error = ERESTART; |
599 | while (error == ERESTART) { /* ReFault: */ |
600 | anons = anons_store; |
601 | |
602 | error = uvm_fault_check(&ufi, &flt, &anons); |
603 | if (error != 0) |
604 | continue; |
605 | |
606 | /* True if there is an anon at the faulting address */ |
607 | shadowed = uvm_fault_upper_lookup(&ufi, &flt, anons, pages); |
608 | if (shadowed == TRUE) { |
609 | /* case 1: fault on an anon in our amap */ |
610 | error = uvm_fault_upper(&ufi, &flt, anons, fault_type); |
611 | } else { |
612 | struct uvm_object *uobj = ufi.entry->object.uvm_obj; |
613 | |
614 | /* |
615 | * if the desired page is not shadowed by the amap and |
616 | * we have a backing object, then we check to see if |
617 | * the backing object would prefer to handle the fault |
618 | * itself (rather than letting us do it with the usual |
619 | * pgo_get hook). the backing object signals this by |
620 | * providing a pgo_fault routine. |
621 | */ |
622 | if (uobj != NULL && uobj->pgops->pgo_fault != NULL) { |
623 | KERNEL_LOCK(); |
624 | rw_enter(uobj->vmobjlock, RW_WRITE); |
625 | error = uobj->pgops->pgo_fault(&ufi, |
626 | flt.startva, pages, flt.npages, |
627 | flt.centeridx, fault_type, flt.access_type, |
628 | PGO_LOCKED); |
629 | KERNEL_UNLOCK(); |
630 | |
631 | if (error == VM_PAGER_OK) |
632 | error = 0; |
633 | else if (error == VM_PAGER_REFAULT) |
634 | error = ERESTART; |
635 | else |
636 | error = EACCES; |
637 | } else { |
638 | /* case 2: fault on backing obj or zero fill */ |
639 | error = uvm_fault_lower(&ufi, &flt, pages, |
640 | fault_type); |
641 | } |
642 | } |
643 | } |
644 | |
645 | return error; |
646 | } |
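For orientation, uvm_fault() is what the machine-dependent trap handlers call; a hypothetical, heavily simplified caller (the real MD code also distinguishes kernel faults, handles copyin/copyout windows, and delivers signals) would look like:

        /* hypothetical MD page-fault path (simplified sketch) */
        void
        md_pagefault(vaddr_t va, int is_write)
        {
                vm_map_t map = &curproc->p_vmspace->vm_map;
                vm_prot_t access = is_write ? PROT_WRITE : PROT_READ;

                if (uvm_fault(map, va, 0, access) != 0) {
                        /* unresolved fault: post SIGSEGV to the process */
                }
        }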
647 | |
648 | /* |
649 | * uvm_fault_check: check prot, handle needs-copy, etc. |
650 | * |
651 | * 1. lookup entry. |
652 | * 2. check protection. |
653 | * 3. adjust fault condition (mainly for simulated fault). |
654 | * 4. handle needs-copy (lazy amap copy). |
655 | * 5. establish range of interest for neighbor fault (aka pre-fault). |
656 | * 6. look up anons (if amap exists). |
657 | * 7. flush pages (if MADV_SEQUENTIAL) |
658 | * |
659 | * => called with nothing locked. |
660 | * => if we fail (result != 0) we unlock everything. |
661 | * => initialize/adjust many members of flt. |
662 | */ |
663 | int |
664 | uvm_fault_check(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt, |
665 | struct vm_anon ***ranons) |
666 | { |
667 | struct vm_amap *amap; |
668 | struct uvm_object *uobj; |
669 | int nback, nforw; |
670 | |
671 | /* |
672 | * lookup and lock the maps |
673 | */ |
674 | if (uvmfault_lookup(ufi, FALSE) == FALSE) { |
675 | return EFAULT; |
676 | } |
677 | /* locked: maps(read) */ |
678 | |
679 | #ifdef DIAGNOSTIC |
680 | if ((ufi->map->flags & VM_MAP_PAGEABLE) == 0) |
681 | panic("uvm_fault: fault on non-pageable map (%p, 0x%lx)", |
682 | ufi->map, ufi->orig_rvaddr); |
683 | #endif |
684 | |
685 | /* |
686 | * check protection |
687 | */ |
688 | if ((ufi->entry->protection & flt->access_type) != flt->access_type) { |
689 | uvmfault_unlockmaps(ufi, FALSE); |
690 | return EACCES; |
691 | } |
692 | |
693 | /* |
694 | * "enter_prot" is the protection we want to enter the page in at. |
695 | * for certain pages (e.g. copy-on-write pages) this protection can |
696 | * be more strict than ufi->entry->protection. "wired" means either |
697 | * the entry is wired or we are fault-wiring the pg. |
698 | */ |
699 | |
700 | flt->enter_prot = ufi->entry->protection; |
701 | flt->pa_flags = UVM_ET_ISWC(ufi->entry) ? PMAP_WC : 0; |
702 | flt->wired = VM_MAPENT_ISWIRED(ufi->entry) || (flt->narrow == TRUE); |
703 | if (flt->wired) |
704 | flt->access_type = flt->enter_prot; /* full access for wired */ |
705 | |
706 | /* handle "needs_copy" case. */ |
707 | if (UVM_ET_ISNEEDSCOPY(ufi->entry)) { |
708 | if ((flt->access_type & PROT_WRITE) || |
709 | (ufi->entry->object.uvm_obj == NULL)) { |
710 | /* need to clear */ |
711 | uvmfault_unlockmaps(ufi, FALSE); |
712 | uvmfault_amapcopy(ufi); |
713 | counters_inc(uvmexp_counters, flt_amcopy); |
714 | return ERESTART; |
715 | } else { |
716 | /* |
717 | * ensure that we pmap_enter page R/O since |
718 | * needs_copy is still true |
719 | */ |
720 | flt->enter_prot &= ~PROT_WRITE; |
721 | } |
722 | } |
723 | |
724 | /* |
725 | * identify the players |
726 | */ |
727 | amap = ufi->entry->aref.ar_amap; /* upper layer */ |
728 | uobj = ufi->entry->object.uvm_obj; /* lower layer */ |
729 | |
730 | /* |
731 | * check for a case 0 fault. if nothing backing the entry then |
732 | * error now. |
733 | */ |
734 | if (amap == NULL && uobj == NULL) { |
735 | uvmfault_unlockmaps(ufi, FALSE); |
736 | return EFAULT; |
737 | } |
738 | |
739 | /* |
740 | * establish range of interest based on advice from mapper |
741 | * and then clip to fit map entry. note that we only want |
742 | * to do this the first time through the fault. if we |
743 | * ReFault we will disable this by setting "narrow" to true. |
744 | */ |
745 | if (flt->narrow == FALSE) { |
746 | |
747 | /* wide fault (!narrow) */ |
748 | nback = min(uvmadvice[ufi->entry->advice].nback, |
749 | (ufi->orig_rvaddr - ufi->entry->start) >> PAGE_SHIFT); |
750 | flt->startva = ufi->orig_rvaddr - ((vsize_t)nback << PAGE_SHIFT); |
751 | nforw = min(uvmadvice[ufi->entry->advice].nforw, |
752 | ((ufi->entry->end - ufi->orig_rvaddr) >> PAGE_SHIFT) - 1); |
753 | /* |
754 | * note: "-1" because we don't want to count the |
755 | * faulting page as forw |
756 | */ |
757 | flt->npages = nback + nforw + 1; |
758 | flt->centeridx = nback; |
759 | |
760 | flt->narrow = TRUE; /* ensure only once per-fault */ |
761 | } else { |
762 | /* narrow fault! */ |
763 | nback = nforw = 0; |
764 | flt->startva = ufi->orig_rvaddr; |
765 | flt->npages = 1; |
766 | flt->centeridx = 0; |
767 | } |
768 | |
769 | /* |
770 | * if we've got an amap then lock it and extract current anons. |
771 | */ |
772 | if (amap) { |
773 | amap_lock(amap); |
774 | amap_lookups(&ufi->entry->aref, |
775 | flt->startva - ufi->entry->start, *ranons, flt->npages); |
776 | } else { |
777 | *ranons = NULL; /* to be safe */ |
778 | } |
779 | |
780 | /* |
781 | * for MADV_SEQUENTIAL mappings we want to deactivate the back pages |
782 | * now and then forget about them (for the rest of the fault). |
783 | */ |
784 | if (ufi->entry->advice == MADV_SEQUENTIAL && nback != 0) { |
785 | /* flush back-page anons? */ |
786 | if (amap) |
787 | uvmfault_anonflush(*ranons, nback); |
788 | |
789 | /* |
790 | * flush object? |
791 | */ |
792 | if (uobj) { |
793 | voff_t uoff; |
794 | |
795 | uoff = (flt->startva - ufi->entry->start) + ufi->entry->offset; |
796 | rw_enter(uobj->vmobjlock, RW_WRITE); |
797 | (void) uobj->pgops->pgo_flush(uobj, uoff, uoff + |
798 | ((vsize_t)nback << PAGE_SHIFT), PGO_DEACTIVATE); |
799 | rw_exit(uobj->vmobjlock); |
800 | } |
801 | |
802 | /* now forget about the backpages */ |
803 | if (amap) |
804 | *ranons += nback; |
805 | flt->startva += ((vsize_t)nback << PAGE_SHIFT); |
806 | flt->npages -= nback; |
807 | flt->centeridx = 0; |
808 | } |
809 | |
810 | return 0; |
811 | } |
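As a concrete trace of the wide-fault arithmetic: with MADV_NORMAL (nback 3, nforw 4 from uvmfault_init()) and a fault well inside a large entry, we get startva = orig_rvaddr - 3 pages, npages = 8 and centeridx = 3. On the first page of the entry, the min() clips nback to 0, leaving startva = orig_rvaddr, npages = 5 and centeridx = 0.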
812 | |
813 | /* |
814 | * uvm_fault_upper_lookup: look up existing h/w mapping and amap. |
815 | * |
816 | * iterate range of interest: |
817 | * 1. check if h/w mapping exists. if yes, we don't care |
818 | * 2. check if anon exists. if not, page is lower. |
819 | * 3. if anon exists, enter h/w mapping for neighbors. |
820 | * |
821 | * => called with amap locked (if exists). |
822 | */ |
823 | boolean_t |
824 | uvm_fault_upper_lookup(struct uvm_faultinfo *ufi, |
825 | const struct uvm_faultctx *flt, struct vm_anon **anons, |
826 | struct vm_page **pages) |
827 | { |
828 | struct vm_amap *amap = ufi->entry->aref.ar_amap; |
829 | struct vm_anon *anon; |
830 | boolean_t shadowed; |
831 | vaddr_t currva; |
832 | paddr_t pa; |
833 | int lcv; |
834 | |
835 | /* locked: maps(read), amap(if there) */ |
836 | KASSERT(amap == NULL || |
837 | rw_write_held(amap->am_lock)); |
838 | |
839 | /* |
840 | * map in the backpages and frontpages we found in the amap in hopes |
841 | * of preventing future faults. we also init the pages[] array as |
842 | * we go. |
843 | */ |
844 | currva = flt->startva; |
845 | shadowed = FALSE; |
846 | for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) { |
847 | /* |
848 | * don't play with VAs that are already mapped |
849 | * (except for center) |
850 | */ |
851 | if (lcv != flt->centeridx && |
852 | pmap_extract(ufi->orig_map->pmap, currva, &pa)) { |
853 | pages[lcv] = PGO_DONTCARE; |
854 | continue; |
855 | } |
856 | |
857 | /* |
858 | * unmapped or center page. check if any anon at this level. |
859 | */ |
860 | if (amap == NULL || anons[lcv] == NULL) { |
861 | pages[lcv] = NULL; |
862 | continue; |
863 | } |
864 | |
865 | /* |
866 | * check for present page and map if possible. |
867 | */ |
868 | pages[lcv] = PGO_DONTCARE; |
869 | if (lcv == flt->centeridx) { /* save center for later! */ |
870 | shadowed = TRUE; |
871 | continue; |
872 | } |
873 | anon = anons[lcv]; |
874 | KASSERT(anon->an_lock == amap->am_lock); |
875 | if (anon->an_page && |
876 | (anon->an_page->pg_flags & (PG_RELEASED|PG_BUSY)) == 0) { |
877 | uvm_lock_pageq(); |
878 | uvm_pageactivate(anon->an_page); /* reactivate */ |
879 | uvm_unlock_pageq(); |
880 | counters_inc(uvmexp_counters, flt_namap); |
881 | |
882 | /* |
883 | * Since this isn't the page that's actually faulting, |
884 | * ignore pmap_enter() failures; it's not critical |
885 | * that we enter these right now. |
886 | */ |
887 | (void) pmap_enter(ufi->orig_map->pmap, currva, |
888 | VM_PAGE_TO_PHYS(anon->an_page) | flt->pa_flags, |
889 | (anon->an_ref > 1) ? |
890 | (flt->enter_prot & ~PROT_WRITE) : flt->enter_prot, |
891 | PMAP_CANFAIL | |
892 | (VM_MAPENT_ISWIRED(ufi->entry) ? PMAP_WIRED : 0)); |
893 | } |
894 | } |
895 | if (flt->npages > 1) |
896 | pmap_update(ufi->orig_map->pmap); |
897 | |
898 | return shadowed; |
899 | } |
900 | |
901 | /* |
902 | * uvm_fault_upper: handle upper fault. |
903 | * |
904 | * 1. acquire anon lock. |
905 | * 2. get anon. let uvmfault_anonget do the dirty work. |
906 | * 3. if COW, promote data to new anon |
907 | * 4. enter h/w mapping |
908 | */ |
909 | int |
910 | uvm_fault_upper(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt, |
911 | struct vm_anon **anons, vm_fault_t fault_type) |
912 | { |
913 | struct vm_amap *amap = ufi->entry->aref.ar_amap; |
914 | struct vm_anon *oanon, *anon = anons[flt->centeridx]; |
915 | struct vm_page *pg = NULL; |
916 | int error, ret; |
917 | |
918 | /* locked: maps(read), amap, anon */ |
919 | KASSERT(rw_write_held(amap->am_lock)); |
920 | KASSERT(anon->an_lock == amap->am_lock); |
921 | |
922 | /* |
923 | * no matter if we have case 1A or case 1B we are going to need to |
924 | * have the anon's memory resident. ensure that now. |
925 | */ |
926 | /* |
927 | * let uvmfault_anonget do the dirty work. |
928 | * if it fails (!OK) it will unlock everything for us. |
929 | * if it succeeds, locks are still valid and locked. |
930 | * also, if it is OK, then the anon's page is on the queues. |
931 | * if the page is on loan from a uvm_object, then anonget will |
932 | * lock that object for us if it does not fail. |
933 | */ |
934 | error = uvmfault_anonget(ufi, amap, anon); |
935 | switch (error) { |
936 | case VM_PAGER_OK: |
937 | break; |
938 | |
939 | case VM_PAGER_REFAULT: |
940 | return ERESTART; |
941 | |
942 | case VM_PAGER_ERROR: |
943 | /* |
944 | * An error occurred while trying to bring in the |
945 | * page -- this is the only error we return right |
946 | * now. |
947 | */ |
948 | return EACCES; /* XXX */ |
949 | default: |
950 | #ifdef DIAGNOSTIC |
951 | panic("uvm_fault: uvmfault_anonget -> %d", error); |
952 | #else |
953 | return EACCES; |
954 | #endif |
955 | } |
956 | |
957 | KASSERT(rw_write_held(amap->am_lock)); |
958 | KASSERT(anon->an_lock == amap->am_lock); |
959 | |
960 | /* |
961 | * if we are case 1B then we will need to allocate a new blank |
962 | * anon to transfer the data into. note that we have a lock |
963 | * on anon, so no one can busy or release the page until we are done. |
964 | * also note that the ref count can't drop to zero here because |
965 | * it is > 1 and we are only dropping one ref. |
966 | * |
967 | * in the (hopefully very rare) case that we are out of RAM we |
968 | * will unlock, wait for more RAM, and refault. |
969 | * |
970 | * if we are out of anon VM we wait for RAM to become available. |
971 | */ |
972 | |
973 | if ((flt->access_type & PROT_WRITE) != 0 && anon->an_ref > 1) { |
974 | counters_inc(uvmexp_counters, flt_acow); |
975 | oanon = anon; /* oanon = old */ |
976 | anon = uvm_analloc(); |
977 | if (anon) { |
978 | anon->an_lock = amap->am_lock; |
979 | pg = uvm_pagealloc(NULL, 0, anon, 0); |
980 | } |
981 | |
982 | /* check for out of RAM */ |
983 | if (anon == NULL || pg == NULL) { |
984 | uvmfault_unlockall(ufi, amap, NULL); |
985 | if (anon == NULL) |
986 | counters_inc(uvmexp_counters, flt_noanon); |
987 | else { |
988 | anon->an_lock = NULL; |
989 | anon->an_ref--; |
990 | uvm_anfree(anon); |
991 | counters_inc(uvmexp_counters, flt_noram); |
992 | } |
993 | |
994 | if (uvm_swapisfull()) |
995 | return ENOMEM; |
996 | |
997 | /* out of RAM, wait for more */ |
998 | if (anon == NULL) |
999 | uvm_anwait(); |
1000 | else |
1001 | uvm_wait("flt_noram3"); |
1002 | return ERESTART; |
1003 | } |
1004 | |
1005 | /* got all resources, replace anon with nanon */ |
1006 | uvm_pagecopy(oanon->an_page, pg); /* pg now !PG_CLEAN */ |
1007 | /* un-busy! new page */ |
1008 | atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE); |
1009 | UVM_PAGE_OWN(pg, NULL); |
1010 | ret = amap_add(&ufi->entry->aref, |
1011 | ufi->orig_rvaddr - ufi->entry->start, anon, 1); |
1012 | KASSERT(ret == 0); |
1013 | |
1014 | /* deref: can not drop to zero here by defn! */ |
1015 | oanon->an_ref--; |
1016 | |
1017 | /* |
1018 | * note: anon is _not_ locked, but we have the sole reference |
1019 | * to it from the amap. |
1020 | * thus, no one can get at it until we are done with it. |
1021 | */ |
1022 | } else { |
1023 | counters_inc(uvmexp_counters, flt_anon); |
1024 | oanon = anon; |
Value stored to 'oanon' is never read | |
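The analyzer is right: in this non-COW branch the oanon assignment is kept only for symmetry with the COW branch above. Every read of oanon (the uvm_pagecopy() source and the an_ref decrement) happens inside that branch, and everything after this point uses anon and pg, so the store is dead but harmless.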
1025 | pg = anon->an_page; |
1026 | if (anon->an_ref > 1) /* disallow writes to ref > 1 anons */ |
1027 | flt->enter_prot = flt->enter_prot & ~PROT_WRITE; |
1028 | } |
1029 | |
1030 | /* |
1031 | * now map the page in . |
1032 | */ |
1033 | if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr, |
1034 | VM_PAGE_TO_PHYS(pg) | flt->pa_flags, flt->enter_prot, |
1035 | flt->access_type | PMAP_CANFAIL | (flt->wired ? PMAP_WIRED : 0)) != 0) { |
1036 | /* |
1037 | * No need to undo what we did; we can simply think of |
1038 | * this as the pmap throwing away the mapping information. |
1039 | * |
1040 | * We do, however, have to go through the ReFault path, |
1041 | * as the map may change while we're asleep. |
1042 | */ |
1043 | uvmfault_unlockall(ufi, amap, NULL); |
1044 | if (uvm_swapisfull()) { |
1045 | /* XXX instrumentation */ |
1046 | return ENOMEM; |
1047 | } |
1048 | /* XXX instrumentation */ |
1049 | uvm_wait("flt_pmfail1"); |
1050 | return ERESTART; |
1051 | } |
1052 | |
1053 | /* |
1054 | * ... update the page queues. |
1055 | */ |
1056 | uvm_lock_pageq(); |
1057 | |
1058 | if (fault_type == VM_FAULT_WIRE) { |
1059 | uvm_pagewire(pg); |
1060 | /* |
1061 | * since the now-wired page cannot be paged out, |
1062 | * release its swap resources for others to use. |
1063 | * since an anon with no swap cannot be PG_CLEAN, |
1064 | * clear its clean flag now. |
1065 | */ |
1066 | atomic_clearbits_int(&pg->pg_flags, PG_CLEAN); |
1067 | uvm_anon_dropswap(anon); |
1068 | } else { |
1069 | /* activate it */ |
1070 | uvm_pageactivate(pg); |
1071 | } |
1072 | |
1073 | uvm_unlock_pageq(); |
1074 | |
1075 | /* |
1076 | * done case 1! finish up by unlocking everything and returning success |
1077 | */ |
1078 | uvmfault_unlockall(ufi, amap, NULL); |
1079 | pmap_update(ufi->orig_map->pmap); |
1080 | return 0; |
1081 | } |
1082 | |
1083 | /* |
1084 | * uvm_fault_lower_lookup: look up on-memory uobj pages. |
1085 | * |
1086 | * 1. get on-memory pages. |
1087 | * 2. if failed, give up (get only center page later). |
1088 | * 3. if succeeded, enter h/w mapping of neighbor pages. |
1089 | */ |
1090 | |
1091 | struct vm_page * |
1092 | uvm_fault_lower_lookup( |
1093 | struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt, |
1094 | struct vm_page **pages) |
1095 | { |
1096 | struct uvm_object *uobj = ufi->entry->object.uvm_obj; |
1097 | struct vm_page *uobjpage = NULL; |
1098 | int lcv, gotpages; |
1099 | vaddr_t currva; |
1100 | |
1101 | rw_enter(uobj->vmobjlock, RW_WRITE); |
1102 | |
1103 | counters_inc(uvmexp_counters, flt_lget); |
1104 | gotpages = flt->npages; |
1105 | (void) uobj->pgops->pgo_get(uobj, |
1106 | ufi->entry->offset + (flt->startva - ufi->entry->start), |
1107 | pages, &gotpages, flt->centeridx, |
1108 | flt->access_type & MASK(ufi->entry), ufi->entry->advice, |
1109 | PGO_LOCKED); |
1110 | |
1111 | /* |
1112 | * check for pages to map, if we got any |
1113 | */ |
1114 | if (gotpages == 0) { |
1115 | return NULL; |
1116 | } |
1117 | |
1118 | currva = flt->startva; |
1119 | for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) { |
1120 | if (pages[lcv] == NULL || |
1121 | pages[lcv] == PGO_DONTCARE) |
1122 | continue; |
1123 | |
1124 | KASSERT((pages[lcv]->pg_flags & PG_RELEASED) == 0); |
1125 | |
1126 | /* |
1127 | * if center page is resident and not |
1128 | * PG_BUSY, then pgo_get made it PG_BUSY |
1129 | * for us and gave us a handle to it. |
1130 | * remember this page as "uobjpage." |
1131 | * (for later use). |
1132 | */ |
1133 | if (lcv == flt->centeridx) { |
1134 | uobjpage = pages[lcv]; |
1135 | continue; |
1136 | } |
1137 | |
1138 | /* |
1139 | * note: calling pgo_get with locked data |
1140 | * structures returns us pages which are |
1141 | * neither busy nor released, so we don't |
1142 | * need to check for this. we can just |
1143 | * directly enter the page (after moving it |
1144 | * to the head of the active queue [useful?]). |
1145 | */ |
1146 | |
1147 | uvm_lock_pageq(); |
1148 | uvm_pageactivate(pages[lcv]); /* reactivate */ |
1149 | uvm_unlock_pageq(); |
1150 | counters_inc(uvmexp_counters, flt_nomap); |
1151 | |
1152 | /* |
1153 | * Since this page isn't the page that's |
1154 | * actually faulting, ignore pmap_enter() |
1155 | * failures; it's not critical that we |
1156 | * enter these right now. |
1157 | */ |
1158 | (void) pmap_enter(ufi->orig_map->pmap, currva, |
1159 | VM_PAGE_TO_PHYS(pages[lcv]) | flt->pa_flags, |
1160 | flt->enter_prot & MASK(ufi->entry), |
1161 | PMAP_CANFAIL | |
1162 | (flt->wired ? PMAP_WIRED : 0)); |
1163 | |
1164 | /* |
1165 | * NOTE: page can't be PG_WANTED because |
1166 | * we've held the lock the whole time |
1167 | * we've had the handle. |
1168 | */ |
1169 | atomic_clearbits_int(&pages[lcv]->pg_flags, PG_BUSY); |
1170 | UVM_PAGE_OWN(pages[lcv], NULL); |
1171 | } |
1172 | pmap_update(ufi->orig_map->pmap); |
1173 | |
1174 | return uobjpage; |
1175 | } |
1176 | |
1177 | /* |
1178 | * uvm_fault_lower: handle lower fault. |
1179 | * |
1180 | */ |
1181 | int |
1182 | uvm_fault_lower(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt, |
1183 | struct vm_page **pages, vm_fault_t fault_type) |
1184 | { |
1185 | struct vm_amap *amap = ufi->entry->aref.ar_amap; |
1186 | struct uvm_object *uobj = ufi->entry->object.uvm_obj; |
1187 | boolean_t promote, locked; |
1188 | int result; |
1189 | struct vm_page *uobjpage, *pg = NULL; |
1190 | struct vm_anon *anon = NULL; |
1191 | voff_t uoff; |
1192 | |
1193 | /* |
1194 | * now, if the desired page is not shadowed by the amap and we have |
1195 | * a backing object that does not have a special fault routine, then |
1196 | * we ask (with pgo_get) the object for resident pages that we care |
1197 | * about and attempt to map them in. we do not let pgo_get block |
1198 | * (PGO_LOCKED). |
1199 | */ |
1200 | if (uobj == NULL) { |
1201 | /* zero fill; don't care neighbor pages */ |
1202 | uobjpage = NULL; |
1203 | } else { |
1204 | uobjpage = uvm_fault_lower_lookup(ufi, flt, pages); |
1205 | } |
1206 | |
1207 | /* |
1208 | * note that at this point we are done with any front or back pages. |
1209 | * we are now going to focus on the center page (i.e. the one we've |
1210 | * faulted on). if we have faulted on the bottom (uobj) |
1211 | * layer [i.e. case 2] and the page was both present and available, |
1212 | * then we've got a pointer to it as "uobjpage" and we've already |
1213 | * made it BUSY. |
1214 | */ |
1215 | |
1216 | /* |
1217 | * locked: |
1218 | */ |
1219 | KASSERT(amap == NULL || |
1220 | rw_write_held(amap->am_lock)); |
1221 | KASSERT(uobj == NULL || |
1222 | rw_write_held(uobj->vmobjlock)); |
1223 | |
1224 | /* |
1225 | * note that uobjpage can not be PGO_DONTCARE at this point. we now |
1226 | * set uobjpage to PGO_DONTCARE if we are doing a zero fill. if we |
1227 | * have a backing object, check and see if we are going to promote |
1228 | * the data up to an anon during the fault. |
1229 | */ |
1230 | if (uobj == NULL) { |
1231 | uobjpage = PGO_DONTCARE; |
1232 | promote = TRUE; /* always need anon here */ |
1233 | } else { |
1234 | KASSERT(uobjpage != PGO_DONTCARE); |
1235 | promote = (flt->access_type & PROT_WRITE) && |
1236 | UVM_ET_ISCOPYONWRITE(ufi->entry); |
1237 | } |
1238 | |
1239 | /* |
1240 | * if uobjpage is not null then we do not need to do I/O to get the |
1241 | * uobjpage. |
1242 | * |
1243 | * if uobjpage is null, then we need to ask the pager to |
1244 | * get the data for us. once we have the data, we need to reverify |
1245 | * the state the world. we are currently not holding any resources. |
1246 | */ |
1247 | if (uobjpage) { |
1248 | /* update rusage counters */ |
1249 | curproc->p_ru.ru_minflt++; |
1250 | } else { |
1251 | int gotpages; |
1252 | |
1253 | /* update rusage counters */ |
1254 | curproc->p_ru.ru_majflt++; |
1255 | |
1256 | uvmfault_unlockall(ufi, amap, NULL); |
1257 | |
1258 | counters_inc(uvmexp_counters, flt_get); |
1259 | gotpages = 1; |
1260 | uoff = (ufi->orig_rvaddr - ufi->entry->start) + ufi->entry->offset; |
1261 | result = uobj->pgops->pgo_get(uobj, uoff, &uobjpage, &gotpages, |
1262 | 0, flt->access_type & MASK(ufi->entry), ufi->entry->advice, |
1263 | PGO_SYNCIO); |
1264 | |
1265 | /* |
1266 | * recover from I/O |
1267 | */ |
1268 | if (result != VM_PAGER_OK) { |
1269 | KASSERT(result != VM_PAGER_PEND); |
1270 | |
1271 | if (result == VM_PAGER_AGAIN) { |
1272 | tsleep_nsec(&nowake, PVM, "fltagain2", |
1273 | MSEC_TO_NSEC(5)); |
1274 | return ERESTART; |
1275 | } |
1276 | |
1277 | if (!UVM_ET_ISNOFAULT(ufi->entry)) |
1278 | return (EIO); |
1279 | |
1280 | uobjpage = PGO_DONTCARE; |
1281 | uobj = NULL; |
1282 | promote = TRUE; |
1283 | } |
1284 | |
1285 | /* re-verify the state of the world. */ |
1286 | locked = uvmfault_relock(ufi); |
1287 | if (locked && amap != NULL) |
1288 | amap_lock(amap); |
1289 | |
1290 | /* might be changed */ |
1291 | if (uobjpage != PGO_DONTCARE) { |
1292 | uobj = uobjpage->uobject; |
1293 | rw_enter(uobj->vmobjlock, RW_WRITE); |
1294 | } |
1295 | |
1296 | /* |
1297 | * Re-verify that amap slot is still free. if there is |
1298 | * a problem, we clean up. |
1299 | */ |
1300 | if (locked && amap && amap_lookup(&ufi->entry->aref, |
1301 | ufi->orig_rvaddr - ufi->entry->start)) { |
1302 | if (locked) |
1303 | uvmfault_unlockall(ufi, amap, NULL); |
1304 | locked = FALSE; |
1305 | } |
1306 | |
1307 | /* didn't get the lock? release the page and retry. */ |
1308 | if (locked == FALSE && uobjpage != PGO_DONTCARE) { |
1309 | uvm_lock_pageq(); |
1310 | /* make sure it is in queues */ |
1311 | uvm_pageactivate(uobjpage); |
1312 | uvm_unlock_pageq(); |
1313 | |
1314 | if (uobjpage->pg_flags & PG_WANTED) |
1315 | /* still holding object lock */ |
1316 | wakeup(uobjpage); |
1317 | atomic_clearbits_int(&uobjpage->pg_flags, |
1318 | PG_BUSY|PG_WANTED); |
1319 | UVM_PAGE_OWN(uobjpage, NULL); |
1320 | } |
1321 | |
1322 | if (locked == FALSE) { |
1323 | if (uobjpage != PGO_DONTCARE) |
1324 | rw_exit(uobj->vmobjlock); |
1325 | return ERESTART; |
1326 | } |
1327 | |
1328 | /* |
1329 | * we have the data in uobjpage which is PG_BUSY |
1330 | */ |
1331 | } |
1332 | |
1333 | /* |
1334 | * notes: |
1335 | * - at this point uobjpage can not be NULL |
1336 | * - at this point uobjpage could be PG_WANTED (handle later) |
1337 | */ |
1338 | if (promote == FALSE) { |
1339 | /* |
1340 | * we are not promoting. if the mapping is COW ensure that we |
1341 | * don't give more access than we should (e.g. when doing a read |
1342 | * fault on a COPYONWRITE mapping we want to map the COW page in |
1343 | * R/O even though the entry protection could be R/W). |
1344 | * |
1345 | * set "pg" to the page we want to map in (uobjpage, usually) |
1346 | */ |
1347 | counters_inc(uvmexp_counters, flt_obj); |
1348 | if (UVM_ET_ISCOPYONWRITE(ufi->entry)) |
1349 | flt->enter_prot &= ~PROT_WRITE; |
1350 | pg = uobjpage; /* map in the actual object */ |
1351 | |
1352 | /* assert(uobjpage != PGO_DONTCARE) */ |
1353 | |
1354 | /* |
1355 | * we are faulting directly on the page. |
1356 | */ |
1357 | } else { |
1358 | /* |
1359 | * if we are going to promote the data to an anon we |
1360 | * allocate a blank anon here and plug it into our amap. |
1361 | */ |
1362 | #ifdef DIAGNOSTIC |
1363 | if (amap == NULL) |
1364 | panic("uvm_fault: want to promote data, but no anon"); |
1365 | #endif |
1366 | |
1367 | anon = uvm_analloc(); |
1368 | if (anon) { |
1369 | /* |
1370 | * In `Fill in data...' below, if |
1371 | * uobjpage == PGO_DONTCARE, we want |
1372 | * a zero'd, dirty page, so have |
1373 | * uvm_pagealloc() do that for us. |
1374 | */ |
1375 | anon->an_lock = amap->am_lock; |
1376 | pg = uvm_pagealloc(NULL, 0, anon, |
1377 | (uobjpage == PGO_DONTCARE) ? UVM_PGA_ZERO : 0); |
1378 | } |
1379 | |
1380 | /* |
1381 | * out of memory resources? |
1382 | */ |
1383 | if (anon == NULL || pg == NULL) { |
1384 | /* |
1385 | * arg! must unbusy our page and fail or sleep. |
1386 | */ |
1387 | if (uobjpage != PGO_DONTCARE) { |
1388 | uvm_lock_pageq(); |
1389 | uvm_pageactivate(uobjpage); |
1390 | uvm_unlock_pageq(); |
1391 | |
1392 | if (uobjpage->pg_flags & PG_WANTED) |
1393 | wakeup(uobjpage); |
1394 | atomic_clearbits_int(&uobjpage->pg_flags, |
1395 | PG_BUSY|PG_WANTED); |
1396 | UVM_PAGE_OWN(uobjpage, NULL); |
1397 | } |
1398 | |
1399 | /* unlock and fail ... */ |
1400 | uvmfault_unlockall(ufi, amap, uobj); |
1401 | if (anon == NULL) |
1402 | counters_inc(uvmexp_counters, flt_noanon); |
1403 | else { |
1404 | anon->an_lock = NULL; |
1405 | anon->an_ref--; |
1406 | uvm_anfree(anon); |
1407 | counters_inc(uvmexp_counters, flt_noram); |
1408 | } |
1409 | |
1410 | if (uvm_swapisfull()) |
1411 | return (ENOMEM); |
1412 | |
1413 | /* out of RAM, wait for more */ |
1414 | if (anon == NULL) |
1415 | uvm_anwait(); |
1416 | else |
1417 | uvm_wait("flt_noram5"); |
1418 | return ERESTART; |
1419 | } |
1420 | |
1421 | /* |
1422 | * fill in the data |
1423 | */ |
1424 | if (uobjpage != PGO_DONTCARE) { |
1425 | counters_inc(uvmexp_counters, flt_prcopy); |
1426 | /* copy page [pg now dirty] */ |
1427 | uvm_pagecopy(uobjpage, pg); |
1428 | |
1429 | /* |
1430 | * promote to shared amap? make sure all sharing |
1431 | * procs see it |
1432 | */ |
1433 | if ((amap_flags(amap) & AMAP_SHARED) != 0) { |
1434 | pmap_page_protect(uobjpage, PROT_NONE); |
1435 | } |
1436 | |
1437 | /* dispose of uobjpage. drop handle to uobj as well. */ |
1438 | if (uobjpage->pg_flags & PG_WANTED) |
1439 | wakeup(uobjpage); |
1440 | atomic_clearbits_int(&uobjpage->pg_flags, |
1441 | PG_BUSY|PG_WANTED); |
1442 | UVM_PAGE_OWN(uobjpage, NULL); |
1443 | uvm_lock_pageq(); |
1444 | uvm_pageactivate(uobjpage); |
1445 | uvm_unlock_pageq(); |
1446 | rw_exit(uobj->vmobjlock); |
1447 | uobj = NULL; |
1448 | } else { |
1449 | counters_inc(uvmexp_counters, flt_przero); |
1450 | /* |
1451 | * Page is zero'd and marked dirty by uvm_pagealloc() |
1452 | * above. |
1453 | */ |
1454 | } |
1455 | |
1456 | if (amap_add(&ufi->entry->aref, |
1457 | ufi->orig_rvaddr - ufi->entry->start, anon, 0)) { |
1458 | uvmfault_unlockall(ufi, amap, uobj); |
1459 | uvm_anfree(anon); |
1460 | counters_inc(uvmexp_counters, flt_noamap); |
1461 | |
1462 | if (uvm_swapisfull()) |
1463 | return (ENOMEM); |
1464 | |
1465 | amap_populate(&ufi->entry->aref, |
1466 | ufi->orig_rvaddr - ufi->entry->start); |
1467 | return ERESTART; |
1468 | } |
1469 | } |
1470 | |
1471 | /* note: pg is either the uobjpage or the new page in the new anon */ |
1472 | /* |
1473 | * all resources are present. we can now map it in and free our |
1474 | * resources. |
1475 | */ |
1476 | if (amap == NULL) |
1477 | KASSERT(anon == NULL); |
1478 | else { |
1479 | KASSERT(rw_write_held(amap->am_lock)); |
1480 | KASSERT(anon == NULL || anon->an_lock == amap->am_lock); |
1481 | } |
1482 | 	if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
1483 | 	    VM_PAGE_TO_PHYS(pg) | flt->pa_flags, flt->enter_prot,
1484 | 	    flt->access_type | PMAP_CANFAIL | (flt->wired ? PMAP_WIRED : 0)) != 0) {
1485 | 		/*
1486 | 		 * No need to undo what we did; we can simply think of
1487 | 		 * this as the pmap throwing away the mapping information.
1488 | 		 *
1489 | 		 * We do, however, have to go through the ReFault path,
1490 | 		 * as the map may change while we're asleep.
1491 | 		 */
1492 | 		if (pg->pg_flags & PG_WANTED)
1493 | 			wakeup(pg);
1494 | 
1495 | 		atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED);
1496 | 		UVM_PAGE_OWN(pg, NULL);
1497 | 		uvmfault_unlockall(ufi, amap, uobj);
1498 | 		if (uvm_swapisfull()) {
1499 | 			/* XXX instrumentation */
1500 | 			return (ENOMEM);
1501 | 		}
1502 | 		/* XXX instrumentation */
1503 | 		uvm_wait("flt_pmfail2");
1504 | 		return ERESTART;
1505 | 	}
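     | 	/*
     | 	 * The error path above exists only because PMAP_CANFAIL is
     | 	 * passed: per pmap_enter(9), a pmap may otherwise sleep or
     | 	 * panic when it runs out of resources.  The general pattern,
     | 	 * in sketch form:
     | 	 *
     | 	 *	if (pmap_enter(pmap, va, pa, prot,
     | 	 *	    flags | PMAP_CANFAIL) != 0) {
     | 	 *		... unlock everything ...
     | 	 *		uvm_wait("...");	wait for free pages
     | 	 *		return ERESTART;	redo the fault
     | 	 *	}
     | 	 */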
1506 | |
1507 | 	if (fault_type == VM_FAULT_WIRE) {
1508 | 		uvm_lock_pageq();
1509 | 		uvm_pagewire(pg);
1510 | 		uvm_unlock_pageq();
1511 | 		if (pg->pg_flags & PQ_AOBJ) {
1512 | 			/*
1513 | 			 * since the now-wired page cannot be paged out,
1514 | 			 * release its swap resources for others to use.
1515 | 			 * since an aobj page with no swap cannot be clean,
1516 | 			 * mark it dirty now.
1517 | 			 *
1518 | 			 * use pg->uobject here.  if the page is from a
1519 | 			 * tmpfs vnode, the pages are backed by its UAO and
1520 | 			 * not the vnode.
1521 | 			 */
1522 | 			KASSERT(uobj != NULL);
1523 | 			KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
1524 | 			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
1525 | 			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
1526 | 		}
1527 | 	} else {
1528 | 		/* activate it */
1529 | 		uvm_lock_pageq();
1530 | 		uvm_pageactivate(pg);
1531 | 		uvm_unlock_pageq();
1532 | 	}
1533 | 
1534 | 	if (pg->pg_flags & PG_WANTED)
1535 | 		wakeup(pg);
1536 | 
1537 | 	atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED);
1538 | 	UVM_PAGE_OWN(pg, NULL);
1539 | 	uvmfault_unlockall(ufi, amap, uobj);
1540 | 	pmap_update(ufi->orig_map->pmap);
1541 | 
1542 | 	return (0);
1543 | }
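     | /*
     |  * pmap_update(9) above is what publishes the new mapping: pmap
     |  * changes are only guaranteed to reach the MMU once it has been
     |  * called, so the canonical tail of a successful fault is, in
     |  * outline:
     |  *
     |  *	pmap_enter(map->pmap, va, pa, prot, flags);
     |  *	... unbusy the page, drop locks ...
     |  *	pmap_update(map->pmap);
     |  *	return 0;
     |  */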
1544 | |
1545 | |
1546 | /* |
1547 |  * uvm_fault_wire: wire down a range of virtual addresses in a map.
1548 |  *
1549 |  * => map may be read-locked by caller, but MUST NOT be write-locked.
1550 |  * => if map is read-locked, any operations which may cause map to
1551 |  *	be write-locked in uvm_fault() must be taken care of by
1552 |  *	the caller.  See uvm_map_pageable().
1553 |  */
1554 | int |
1555 | uvm_fault_wire(vm_map_t map, vaddr_t start, vaddr_t end, vm_prot_t access_type) |
1556 | { |
1557 | 	vaddr_t va;
1558 | 	int rv;
1559 | 
1560 | 	/*
1561 | 	 * now fault it in a page at a time.  if the fault fails then we have
1562 | 	 * to undo what we have done.  note that in uvm_fault PROT_NONE
1563 | 	 * is replaced with the max protection if fault_type is VM_FAULT_WIRE.
1564 | 	 */
1565 | 	for (va = start ; va < end ; va += PAGE_SIZE) {
1566 | 		rv = uvm_fault(map, va, VM_FAULT_WIRE, access_type);
1567 | 		if (rv) {
1568 | 			if (va != start) {
1569 | 				uvm_fault_unwire(map, start, va);
1570 | 			}
1571 | 			return (rv);
1572 | 		}
1573 | 	}
1574 | 
1575 | 	return (0);
1576 | }
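     | /*
     |  * A typical caller (uvm_map_pageable() is the main one) pairs the
     |  * wire and unwire operations, e.g. (a sketch; the protection a
     |  * real caller passes comes from the map entry):
     |  *
     |  *	error = uvm_fault_wire(map, start, end, PROT_READ);
     |  *	if (error)
     |  *		... caller unwinds its own wired-count bookkeeping ...
     |  *	... use the wired range ...
     |  *	uvm_fault_unwire(map, start, end);
     |  *
     |  * note that on failure uvm_fault_wire() has already unwired the
     |  * pages it faulted in before the failing address.
     |  */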
1577 | |
1578 | /* |
1579 |  * uvm_fault_unwire(): unwire range of virtual space.
1580 |  */
1581 | void |
1582 | uvm_fault_unwire(vm_map_t map, vaddr_t start, vaddr_t end) |
1583 | { |
1584 | |
1585 | 	vm_map_lock_read(map);
1586 | 	uvm_fault_unwire_locked(map, start, end);
1587 | 	vm_map_unlock_read(map);
1588 | } |
1589 | |
1590 | /* |
1591 |  * uvm_fault_unwire_locked(): the guts of uvm_fault_unwire().
1592 |  *
1593 |  * => map must be at least read-locked.
1594 |  */
1595 | void |
1596 | uvm_fault_unwire_locked(vm_map_t map, vaddr_t start, vaddr_t end) |
1597 | { |
1598 | 	vm_map_entry_t entry, oentry = NULL, next;
1599 | 	pmap_t pmap = vm_map_pmap(map);
1600 | 	vaddr_t va;
1601 | 	paddr_t pa;
1602 | 	struct vm_page *pg;
1603 | 
1604 | 	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
1605 | 
1606 | 	/*
1607 | 	 * we assume that the area we are unwiring has actually been wired
1608 | 	 * in the first place.  this means that we should be able to extract
1609 | 	 * the PAs from the pmap.
1610 | 	 */
1611 | 
1612 | 	/*
1613 | 	 * find the beginning map entry for the region.
1614 | 	 */
1615 | 	KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
1616 | 	if (uvm_map_lookup_entry(map, start, &entry) == FALSE)
1617 | 		panic("uvm_fault_unwire_locked: address not in map");
1618 | 
1619 | 	for (va = start; va < end ; va += PAGE_SIZE) {
1620 | 		if (pmap_extract(pmap, va, &pa) == FALSE)
1621 | 			continue;
1622 | 
1623 | 		/*
1624 | 		 * find the map entry for the current address.
1625 | 		 */
1626 | 		KASSERT(va >= entry->start);
1627 | 		while (va >= entry->end) {
1628 | 			next = RBT_NEXT(uvm_map_addr, entry);
1629 | 			KASSERT(next != NULL && next->start <= entry->end);
1630 | 			entry = next;
1631 | 		}
1632 | 
1633 | 		/*
1634 | 		 * lock it.
1635 | 		 */
1636 | 		if (entry != oentry) {
1637 | 			if (oentry != NULL) {
1638 | 				uvm_map_unlock_entry(oentry);
1639 | 			}
1640 | 			uvm_map_lock_entry(entry);
1641 | 			oentry = entry;
1642 | 		}
1643 | 
1644 | 		/*
1645 | 		 * if the entry is no longer wired, tell the pmap.
1646 | 		 */
1647 | 		if (VM_MAPENT_ISWIRED(entry) == 0)
1648 | 			pmap_unwire(pmap, va);
1649 | 
1650 | 		pg = PHYS_TO_VM_PAGE(pa);
1651 | 		if (pg) {
1652 | 			uvm_lock_pageq();
1653 | 			uvm_pageunwire(pg);
1654 | 			uvm_unlock_pageq();
1655 | 		}
1656 | 	}
1657 | 
1658 | 	if (oentry != NULL) {
1659 | 		uvm_map_unlock_entry(entry);
1660 | 	}
1661 | }
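     | /*
     |  * The oentry bookkeeping above is a lock-batching idiom: wired
     |  * regions usually span many pages within one map entry, so the
     |  * entry lock is taken once per entry rather than once per page,
     |  * i.e. (sketch of the idiom in isolation):
     |  *
     |  *	if (entry != oentry) {
     |  *		if (oentry != NULL)
     |  *			uvm_map_unlock_entry(oentry);
     |  *		uvm_map_lock_entry(entry);
     |  *		oentry = entry;
     |  *	}
     |  */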
1662 | |
1663 | /* |
1664 |  * uvmfault_unlockmaps: unlock the maps
1665 |  */
1666 | void |
1667 | uvmfault_unlockmaps(struct uvm_faultinfo *ufi, boolean_t write_locked) |
1668 | { |
1669 | 	/*
1670 | 	 * ufi can be NULL when this isn't really a fault,
1671 | 	 * but merely paging in anon data.
1672 | 	 */
1673 | 	if (ufi == NULL) {
1674 | 		return;
1675 | 	}
1676 | 
1677 | 	uvmfault_update_stats(ufi);
1678 | 	if (write_locked) {
1679 | 		vm_map_unlock(ufi->map);
1680 | 	} else {
1681 | 		vm_map_unlock_read(ufi->map);
1682 | 	}
1683 | }
1684 | |
1685 | /* |
1686 |  * uvmfault_unlockall: unlock everything passed in.
1687 |  *
1688 |  * => maps must be read-locked (not write-locked).
1689 |  */
1690 | void |
1691 | uvmfault_unlockall(struct uvm_faultinfo *ufi, struct vm_amap *amap, |
1692 |     struct uvm_object *uobj)
1693 | { |
1694 | 	if (uobj)
1695 | 		rw_exit(uobj->vmobjlock);
1696 | 	if (amap != NULL)
1697 | 		amap_unlock(amap);
1698 | 	uvmfault_unlockmaps(ufi, FALSE);
1699 | } |
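     | /*
     |  * Lock order on the way into a fault is map -> amap -> object;
     |  * uvmfault_unlockall() releases in the reverse order.  A fault
     |  * path therefore pairs up roughly as (a sketch; the exact lock
     |  * calls vary by layer):
     |  *
     |  *	uvmfault_lookup(ufi, FALSE);		read-locks the maps
     |  *	amap_lock(amap);
     |  *	rw_enter(uobj->vmobjlock, RW_WRITE);
     |  *	...
     |  *	uvmfault_unlockall(ufi, amap, uobj);
     |  */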
1700 | |
1701 | /* |
1702 |  * uvmfault_lookup: lookup a virtual address in a map
1703 |  *
1704 |  * => caller must provide a uvm_faultinfo structure with the IN
1705 |  *	params properly filled in
1706 |  * => we will lookup the map entry (handling submaps) as we go
1707 |  * => if the lookup is a success we will return with the maps locked
1708 |  * => if "write_lock" is TRUE, we write_lock the map, otherwise we only
1709 |  *	get a read lock.
1710 |  * => note that submaps can only appear in the kernel and they are
1711 |  *	required to use the same virtual addresses as the map they
1712 |  *	are referenced by (thus address translation between the main
1713 |  *	map and the submap is unnecessary).
1714 |  */
1715 | |
1716 | boolean_t |
1717 | uvmfault_lookup(struct uvm_faultinfo *ufi, boolean_t write_lock) |
1718 | { |
1719 | 	vm_map_t tmpmap;
1720 | 
1721 | 	/*
1722 | 	 * init ufi values for lookup.
1723 | 	 */
1724 | 	ufi->map = ufi->orig_map;
1725 | 	ufi->size = ufi->orig_size;
1726 | 
1727 | 	/*
1728 | 	 * keep going down levels until we are done.  note that there can
1729 | 	 * only be two levels so we won't loop very long.
1730 | 	 */
1731 | 	while (1) {
1732 | 		if (ufi->orig_rvaddr < ufi->map->min_offset ||
1733 | 		    ufi->orig_rvaddr >= ufi->map->max_offset)
1734 | 			return FALSE;
1735 | 
1736 | 		/* lock map */
1737 | 		if (write_lock) {
1738 | 			vm_map_lock(ufi->map);
1739 | 		} else {
1740 | 			vm_map_lock_read(ufi->map);
1741 | 		}
1742 | 
1743 | 		/* lookup */
1744 | 		if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
1745 | 		    &ufi->entry)) {
1746 | 			uvmfault_unlockmaps(ufi, write_lock);
1747 | 			return FALSE;
1748 | 		}
1749 | 
1750 | 		/* reduce size if necessary */
1751 | 		if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
1752 | 			ufi->size = ufi->entry->end - ufi->orig_rvaddr;
1753 | 
1754 | 		/*
1755 | 		 * submap?  replace map with the submap and lookup again.
1756 | 		 * note: VAs in submaps must match VAs in main map.
1757 | 		 */
1758 | 		if (UVM_ET_ISSUBMAP(ufi->entry)) {
1759 | 			tmpmap = ufi->entry->object.sub_map;
1760 | 			uvmfault_unlockmaps(ufi, write_lock);
1761 | 			ufi->map = tmpmap;
1762 | 			continue;
1763 | 		}
1764 | 
1765 | 		/*
1766 | 		 * got it!
1767 | 		 */
1768 | 		ufi->mapv = ufi->map->timestamp;
1769 | 		return TRUE;
1770 | 
1771 | 	}	/* while loop */
1772 | 
1773 | 	/*NOTREACHED*/
1774 | }
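     | /*
     |  * Roughly how the fault path uses this (a sketch built from the
     |  * uvm_faultinfo IN fields named above):
     |  *
     |  *	ufi.orig_map = map;
     |  *	ufi.orig_rvaddr = trunc_page(vaddr);
     |  *	ufi.orig_size = PAGE_SIZE;
     |  *	if (uvmfault_lookup(&ufi, FALSE) == FALSE)
     |  *		return EFAULT;		no mapping at that address
     |  */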
1775 | |
1776 | /* |
1777 |  * uvmfault_relock: attempt to relock the same version of the map
1778 |  *
1779 |  * => fault data structures should be unlocked before calling.
1780 |  * => if a success (TRUE) maps will be locked after call.
1781 |  */
1782 | boolean_t |
1783 | uvmfault_relock(struct uvm_faultinfo *ufi) |
1784 | { |
1785 | 	/*
1786 | 	 * ufi can be NULL when this isn't really a fault,
1787 | 	 * but merely paging in anon data.
1788 | 	 */
1789 | 	if (ufi == NULL) {
1790 | 		return TRUE;
1791 | 	}
1792 | 
1793 | 	counters_inc(uvmexp_counters, flt_relck);
1794 | 
1795 | 	/*
1796 | 	 * relock map.  fail if version mismatch (in which case nothing
1797 | 	 * gets locked).
1798 | 	 */
1799 | 	vm_map_lock_read(ufi->map);
1800 | 	if (ufi->mapv != ufi->map->timestamp) {
1801 | 		vm_map_unlock_read(ufi->map);
1802 | 		return FALSE;
1803 | 	}
1804 | 
1805 | 	counters_inc(uvmexp_counters, flt_relckok);
1806 | 	return TRUE;	/* got it! */
1807 | } |
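     | /*
     |  * Together with uvmfault_unlockall() this supports the standard
     |  * drop-locks / sleep / relock pattern used while paging (sketch):
     |  *
     |  *	uvmfault_unlockall(ufi, amap, NULL);
     |  *	... sleep, e.g. waiting for pagein I/O ...
     |  *	if (uvmfault_relock(ufi) == FALSE)
     |  *		return ERESTART;	map changed; redo the fault
     |  */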