File: uvm/uvm_aobj.c
Warning: line 1511, column 6: The result of the left shift is undefined due to shifting '9223372036854775792' by '12', which is unrepresentable in the unsigned version of the return type 'voff_t'
/*	$OpenBSD: uvm_aobj.c,v 1.103 2021/12/29 20:22:06 mpi Exp $	*/
/*	$NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/stdint.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

/*
 * An anonymous UVM object (aobj) manages anonymous memory. In addition to
 * keeping the list of resident pages, it may also keep a list of allocated
 * swap blocks. Depending on the size of the object, this list is either
 * stored in an array (small objects) or in a hash table (large objects).
 */

/*
 * Note: for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages, which shall be a power of two.
 */
#define UAO_SWHASH_CLUSTER_SHIFT	4
#define UAO_SWHASH_CLUSTER_SIZE		(1 << UAO_SWHASH_CLUSTER_SHIFT)

/* Get the "tag" for this page index. */
#define UAO_SWHASH_ELT_TAG(idx)		((idx) >> UAO_SWHASH_CLUSTER_SHIFT)
#define UAO_SWHASH_ELT_PAGESLOT_IDX(idx) \
    ((idx) & (UAO_SWHASH_CLUSTER_SIZE - 1))

/* Given an ELT and a page index, find the swap slot. */
#define UAO_SWHASH_ELT_PAGESLOT(elt, idx) \
    ((elt)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(idx)])

/* Given an ELT, return its pageidx base. */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(elt) \
    ((elt)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/* The hash function. */
#define UAO_SWHASH_HASH(aobj, idx) \
    (&(aobj)->u_swhash[(((idx) >> UAO_SWHASH_CLUSTER_SHIFT) \
    & (aobj)->u_swhashmask)])

/*
 * The threshold which determines whether we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */
#define UAO_SWHASH_THRESHOLD		(UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(aobj) \
    ((aobj)->u_pages > UAO_SWHASH_THRESHOLD)

/* The number of buckets in a hash, with an upper bound. */
#define UAO_SWHASH_MAXBUCKETS		256
#define UAO_SWHASH_BUCKETS(pages) \
    (min((pages) >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))
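
/*
 * Worked example of the macros above (the numbers follow directly from
 * the definitions, with UAO_SWHASH_CLUSTER_SHIFT = 4, i.e. 16-page
 * clusters):
 *
 *	pageidx = 0x123 (page index 291)
 *	UAO_SWHASH_ELT_TAG(0x123)          = 0x12 (cluster number)
 *	UAO_SWHASH_ELT_PAGESLOT_IDX(0x123) = 0x3  (slot within the cluster)
 *	UAO_SWHASH_HASH(aobj, 0x123)       = bucket (0x12 & u_swhashmask)
 *
 * With UAO_SWHASH_THRESHOLD = 64, an aobj of 64 pages or fewer keeps its
 * swap slots in a plain array; a 65-page aobj switches to the hash table.
 */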

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */
struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};
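
/*
 * The "count" field tracks how many of the slots[] entries are nonzero:
 * uao_set_swslot() below increments it when a slot goes from zero to
 * nonzero, decrements it on the way back, and frees the element once it
 * drops to zero.
 */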

/*
 * uao_swhash: the swap hash table structure
 */
LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */
struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *    (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */
struct uvm_aobj {
	struct uvm_object u_obj; /* has: pgops, memt, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	/*
	 * Either an array or hashtable (array of bucket heads) of
	 * offset -> swapslot mappings for the aobj.
	 */
#define u_swslots	u_swap.slot_array
#define u_swhash	u_swap.slot_hash
	union swslots {
		int			*slot_array;
		struct uao_swhash	*slot_hash;
	} u_swap;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};
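
/*
 * Because u_obj is the first member, the pager functions below convert
 * between the two views with a plain cast, e.g.:
 *
 *	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
 *
 * and hand &aobj->u_obj back out wherever a generic uvm_object is expected.
 */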

struct pool uvm_aobj_pool;

static struct uao_swhash_elt	*uao_find_swhash_elt(struct uvm_aobj *, int,
				     boolean_t);
static int			 uao_find_swslot(struct uvm_object *, int);
static boolean_t		 uao_flush(struct uvm_object *, voff_t,
				     voff_t, int);
static void			 uao_free(struct uvm_aobj *);
static int			 uao_get(struct uvm_object *, voff_t,
				     vm_page_t *, int *, int, vm_prot_t,
				     int, int);
static boolean_t		 uao_pagein(struct uvm_aobj *, int, int);
static boolean_t		 uao_pagein_page(struct uvm_aobj *, int);

void	uao_dropswap_range(struct uvm_object *, voff_t, voff_t);
void	uao_shrink_flush(struct uvm_object *, int, int);
int	uao_shrink_hash(struct uvm_object *, int);
int	uao_shrink_array(struct uvm_object *, int);
int	uao_shrink_convert(struct uvm_object *, int);

int	uao_grow_hash(struct uvm_object *, int);
int	uao_grow_array(struct uvm_object *, int);
int	uao_grow_convert(struct uvm_object *, int);

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */
const struct uvm_pagerops aobj_pager = {
	.pgo_reference = uao_reference,
	.pgo_detach = uao_detach,
	.pgo_flush = uao_flush,
	.pgo_get = uao_get,
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 *
 * Lock ordering: generally the locking order is object lock, then list lock.
 * In the case of swap off we have to iterate over the list, and thus the
 * ordering is reversed. In that case we must use trylocking to prevent
 * deadlock.
 */
static LIST_HEAD(aobjlist, uvm_aobj) uao_list = LIST_HEAD_INITIALIZER(uao_list);
static struct mutex uao_list_lock = MUTEX_INITIALIZER(IPL_MPFLOOR);

/*
 * functions
 */
/*
 * hash table/array related functions
 */
/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 */
static struct uao_swhash_elt *
uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, boolean_t create)
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	 /* tag to search for */

	/*
	 * now search the bucket for the requested tag
	 */
	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag)
			return elt;
	}

	if (!create)
		return NULL;

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */
	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT | PR_ZERO);
	/*
	 * XXX We cannot sleep here as the hash table might disappear
	 * from under our feet. And we run the risk of deadlocking
	 * the pagedaemon. In fact this code will only be called by
	 * the pagedaemon and allocation will only fail if we
	 * exhausted the pagedaemon reserve. In that case we're
	 * doomed anyway, so panic.
	 */
	if (elt == NULL)
		panic("%s: can't allocate entry", __func__);
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;

	return elt;
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 */
inline static int
uao_find_swslot(struct uvm_object *uobj, int pageidx)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));

	/*
	 * if noswap flag is set, then we never return a slot
	 */
	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return 0;

	/*
	 * if hashing, look in hash table.
	 */
	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		else
			return 0;
	}

	/*
	 * otherwise, look in the array
	 */
	return aobj->u_swslots[pageidx];
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */
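/*
 * For example, uao_dropswap() below frees a page's swap resources by
 * calling uao_set_swslot(uobj, pageidx, 0) and then handing the returned
 * old slot to uvm_swap_free().
 */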
int
uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int oldslot;

	KASSERT(rw_write_held(uobj->vmobjlock) || uobj->uo_refs == 0);
	KASSERT(UVM_OBJ_IS_AOBJ(uobj));

	/*
	 * if noswap flag is set, then we can't set a slot
	 */
	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		if (slot == 0)
			return 0;		/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */
	if (UAO_USES_SWHASH(aobj)) {
		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
		if (elt == NULL) {
			KASSERT(slot == 0);
			return 0;
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return oldslot;
}
/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */
static void
uao_free(struct uvm_aobj *aobj)
{
	struct uvm_object *uobj = &aobj->u_obj;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));
	uao_dropswap_range(uobj, 0, 0);
	rw_exit(uobj->vmobjlock);

	if (UAO_USES_SWHASH(aobj)) {
		/*
		 * free the hash table itself.
		 */
		hashfree(aobj->u_swhash, UAO_SWHASH_BUCKETS(aobj->u_pages),
		    M_UVMAOBJ);
	} else {
		free(aobj->u_swslots, M_UVMAOBJ, aobj->u_pages * sizeof(int));
	}

	/*
	 * finally free the aobj itself
	 */
	uvm_obj_destroy(uobj);
	pool_put(&uvm_aobj_pool, aobj);
}

/*
 * pager functions
 */

#ifdef TMPFS
/*
 * Shrink an aobj to a given number of pages. The procedure is always the same:
 * assess the necessity of data structure conversion (hash to array), secure
 * resources, flush pages and drop swap slots.
 */

void
uao_shrink_flush(struct uvm_object *uobj, int startpg, int endpg)
{
	KASSERT(startpg < endpg);
	KASSERT(uobj->uo_refs == 1);
	uao_flush(uobj, (voff_t)startpg << PAGE_SHIFT,
	    (voff_t)endpg << PAGE_SHIFT, PGO_FREE);
	uao_dropswap_range(uobj, startpg, endpg);
}

int
uao_shrink_hash(struct uvm_object *uobj, int pages)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash *new_swhash;
	struct uao_swhash_elt *elt;
	unsigned long new_hashmask;
	int i;

	KASSERT(UAO_USES_SWHASH(aobj));

	/*
	 * If the size of the hash table doesn't change, all we need to do is
	 * to adjust the page count.
	 */
	if (UAO_SWHASH_BUCKETS(aobj->u_pages) == UAO_SWHASH_BUCKETS(pages)) {
		uao_shrink_flush(uobj, pages, aobj->u_pages);
		aobj->u_pages = pages;
		return 0;
	}

	new_swhash = hashinit(UAO_SWHASH_BUCKETS(pages), M_UVMAOBJ,
	    M_WAITOK | M_CANFAIL, &new_hashmask);
	if (new_swhash == NULL)
		return ENOMEM;

	uao_shrink_flush(uobj, pages, aobj->u_pages);

	/*
	 * Even though the hash table size is changing, the hash of the buckets
	 * we are interested in copying should not change.
	 */
	for (i = 0; i < UAO_SWHASH_BUCKETS(aobj->u_pages); i++) {
		while (LIST_EMPTY(&aobj->u_swhash[i]) == 0) {
			elt = LIST_FIRST(&aobj->u_swhash[i]);
			LIST_REMOVE(elt, list);
			LIST_INSERT_HEAD(&new_swhash[i], elt, list);
		}
	}

	hashfree(aobj->u_swhash, UAO_SWHASH_BUCKETS(aobj->u_pages), M_UVMAOBJ);

	aobj->u_swhash = new_swhash;
	aobj->u_pages = pages;
	aobj->u_swhashmask = new_hashmask;

	return 0;
}

int
uao_shrink_convert(struct uvm_object *uobj, int pages)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int i, *new_swslots;

	new_swslots = mallocarray(pages, sizeof(int), M_UVMAOBJ,
	    M_WAITOK | M_CANFAIL | M_ZERO);
	if (new_swslots == NULL)
		return ENOMEM;

	uao_shrink_flush(uobj, pages, aobj->u_pages);

	/* Convert swap slots from hash to array. */
	for (i = 0; i < pages; i++) {
		elt = uao_find_swhash_elt(aobj, i, FALSE);
		if (elt != NULL) {
			new_swslots[i] = UAO_SWHASH_ELT_PAGESLOT(elt, i);
			if (new_swslots[i] != 0)
				elt->count--;
			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	}

	hashfree(aobj->u_swhash, UAO_SWHASH_BUCKETS(aobj->u_pages), M_UVMAOBJ);

	aobj->u_swslots = new_swslots;
	aobj->u_pages = pages;

	return 0;
}

int
uao_shrink_array(struct uvm_object *uobj, int pages)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int i, *new_swslots;

	new_swslots = mallocarray(pages, sizeof(int), M_UVMAOBJ,
	    M_WAITOK | M_CANFAIL | M_ZERO);
	if (new_swslots == NULL)
		return ENOMEM;

	uao_shrink_flush(uobj, pages, aobj->u_pages);

	for (i = 0; i < pages; i++)
		new_swslots[i] = aobj->u_swslots[i];

	free(aobj->u_swslots, M_UVMAOBJ, aobj->u_pages * sizeof(int));

	aobj->u_swslots = new_swslots;
	aobj->u_pages = pages;

	return 0;
}

int
uao_shrink(struct uvm_object *uobj, int pages)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	KASSERT(pages < aobj->u_pages);

	/*
	 * Distinguish between three possible cases:
	 * 1. aobj uses hash and must be converted to array.
	 * 2. aobj uses array and array size needs to be adjusted.
	 * 3. aobj uses hash and hash size needs to be adjusted.
	 */
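	/*
	 * With UAO_SWHASH_THRESHOLD = 64 pages this means, for example:
	 * shrinking 100 -> 80 pages resizes the hash (case 3), 100 -> 32
	 * converts hash to array (case 1), and 48 -> 16 just shrinks the
	 * array (case 2).
	 */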
	if (pages > UAO_SWHASH_THRESHOLD)
		return uao_shrink_hash(uobj, pages);	/* case 3 */
	else if (aobj->u_pages > UAO_SWHASH_THRESHOLD)
		return uao_shrink_convert(uobj, pages);	/* case 1 */
	else
		return uao_shrink_array(uobj, pages);	/* case 2 */
}

/*
 * Grow an aobj to a given number of pages. Right now we only adjust the swap
 * slots. We could additionally handle page allocation directly, so that they
 * don't happen through uvm_fault(). That would allow us to use another
 * mechanism for the swap slots other than malloc(). It is thus mandatory that
 * the caller of these functions does not allow faults to happen in case of
 * growth error.
 */
int
uao_grow_array(struct uvm_object *uobj, int pages)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int i, *new_swslots;

	KASSERT(aobj->u_pages <= UAO_SWHASH_THRESHOLD);

	new_swslots = mallocarray(pages, sizeof(int), M_UVMAOBJ,
	    M_WAITOK | M_CANFAIL | M_ZERO);
	if (new_swslots == NULL)
		return ENOMEM;

	for (i = 0; i < aobj->u_pages; i++)
		new_swslots[i] = aobj->u_swslots[i];

	free(aobj->u_swslots, M_UVMAOBJ, aobj->u_pages * sizeof(int));

	aobj->u_swslots = new_swslots;
	aobj->u_pages = pages;

	return 0;
}

int
uao_grow_hash(struct uvm_object *uobj, int pages)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash *new_swhash;
	struct uao_swhash_elt *elt;
	unsigned long new_hashmask;
	int i;

	KASSERT(pages > UAO_SWHASH_THRESHOLD);

	/*
	 * If the size of the hash table doesn't change, all we need to do is
	 * to adjust the page count.
	 */
	if (UAO_SWHASH_BUCKETS(aobj->u_pages) == UAO_SWHASH_BUCKETS(pages)) {
		aobj->u_pages = pages;
		return 0;
	}

	KASSERT(UAO_SWHASH_BUCKETS(aobj->u_pages) < UAO_SWHASH_BUCKETS(pages));

	new_swhash = hashinit(UAO_SWHASH_BUCKETS(pages), M_UVMAOBJ,
	    M_WAITOK | M_CANFAIL, &new_hashmask);
	if (new_swhash == NULL)
		return ENOMEM;

	for (i = 0; i < UAO_SWHASH_BUCKETS(aobj->u_pages); i++) {
		while (LIST_EMPTY(&aobj->u_swhash[i]) == 0) {
			elt = LIST_FIRST(&aobj->u_swhash[i]);
			LIST_REMOVE(elt, list);
			LIST_INSERT_HEAD(&new_swhash[i], elt, list);
		}
	}

	hashfree(aobj->u_swhash, UAO_SWHASH_BUCKETS(aobj->u_pages), M_UVMAOBJ);

	aobj->u_swhash = new_swhash;
	aobj->u_pages = pages;
	aobj->u_swhashmask = new_hashmask;

	return 0;
}

int
uao_grow_convert(struct uvm_object *uobj, int pages)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash *new_swhash;
	struct uao_swhash_elt *elt;
	unsigned long new_hashmask;
	int i, *old_swslots;

	new_swhash = hashinit(UAO_SWHASH_BUCKETS(pages), M_UVMAOBJ,
	    M_WAITOK | M_CANFAIL, &new_hashmask);
	if (new_swhash == NULL)
		return ENOMEM;

	/* Set these now, so we can use uao_find_swhash_elt(). */
	old_swslots = aobj->u_swslots;
	aobj->u_swhash = new_swhash;
	aobj->u_swhashmask = new_hashmask;

	for (i = 0; i < aobj->u_pages; i++) {
		if (old_swslots[i] != 0) {
			elt = uao_find_swhash_elt(aobj, i, TRUE);
			elt->count++;
			UAO_SWHASH_ELT_PAGESLOT(elt, i) = old_swslots[i];
		}
	}

	free(old_swslots, M_UVMAOBJ, aobj->u_pages * sizeof(int));
	aobj->u_pages = pages;

	return 0;
}

int
uao_grow(struct uvm_object *uobj, int pages)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	KASSERT(pages > aobj->u_pages);

	/*
	 * Distinguish between three possible cases:
	 * 1. aobj uses hash and hash size needs to be adjusted.
	 * 2. aobj uses array and array size needs to be adjusted.
	 * 3. aobj uses array and must be converted to hash.
	 */
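	/*
	 * Again with UAO_SWHASH_THRESHOLD = 64 pages: growing 16 -> 48
	 * pages just grows the array (case 2), 80 -> 100 resizes the hash
	 * (case 1), and 48 -> 100 converts array to hash (case 3).
	 */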
	if (pages <= UAO_SWHASH_THRESHOLD)
		return uao_grow_array(uobj, pages);	/* case 2 */
	else if (aobj->u_pages > UAO_SWHASH_THRESHOLD)
		return uao_grow_hash(uobj, pages);	/* case 1 */
	else
		return uao_grow_convert(uobj, pages);
}
#endif /* TMPFS */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are zero or UAO_FLAG_CANFAIL.
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */
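/*
 * A sketch of typical use (callers live outside this file): a subsystem
 * that needs pageable anonymous memory, e.g. tmpfs, allocates its backing
 * object with
 *
 *	uobj = uao_create(size, 0);
 *
 * and drops it later via uao_detach(uobj). The kernel object is created
 * once at boot with UAO_FLAG_KERNOBJ, and a later call with
 * UAO_FLAG_KERNSWAP enables swapping for it once swap is available.
 */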
struct uvm_object *
uao_create(vsize_t size, int flags)
{
	static struct uvm_aobj kernel_object_store;
	static struct rwlock bootstrap_kernel_object_lock;
	static int kobj_alloced = 0;
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;
	int refs;

	/*
	 * Allocate a new aobj, unless kernel object is requested.
	 */
	if (flags & UAO_FLAG_KERNOBJ) {
		KASSERT(!kobj_alloced);
		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;
		refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
		aobj = &kernel_object_store;
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;
		refs = 1;
	}

	/*
	 * allocate hash/array if necessary
	 */
	if (flags == 0 || (flags & (UAO_FLAG_KERNSWAP | UAO_FLAG_CANFAIL))) {
		int mflags;

		if (flags)
			mflags = M_NOWAIT;
		else
			mflags = M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(pages),
			    M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL) {
				if (flags & UAO_FLAG_CANFAIL) {
					pool_put(&uvm_aobj_pool, aobj);
					return NULL;
				}
				panic("uao_create: hashinit swhash failed");
			}
		} else {
			aobj->u_swslots = mallocarray(pages, sizeof(int),
			    M_UVMAOBJ, mflags|M_ZERO);
			if (aobj->u_swslots == NULL) {
				if (flags & UAO_FLAG_CANFAIL) {
					pool_put(&uvm_aobj_pool, aobj);
					return NULL;
				}
				panic("uao_create: malloc swslots failed");
			}
		}

		if (flags & UAO_FLAG_KERNSWAP) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return &aobj->u_obj;
			/* done! */
		}
	}

	/*
	 * Initialise UVM object.
	 */
	uvm_obj_init(&aobj->u_obj, &aobj_pager, refs);
	if (flags & UAO_FLAG_KERNOBJ) {
		/* Use a temporary static lock for kernel_object. */
		rw_init(&bootstrap_kernel_object_lock, "kobjlk");
		uvm_obj_setlock(&aobj->u_obj, &bootstrap_kernel_object_lock);
	}

	/*
	 * now that aobj is ready, add it to the global list
	 */
	mtx_enter(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	mtx_leave(&uao_list_lock);

	return &aobj->u_obj;
}

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */
void
uao_init(void)
{
	/*
	 * NOTE: Pages for this pool must not come from a pageable
	 * kernel map!
	 */
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt), 0,
	    IPL_NONE, PR_WAITOK, "uaoeltpl", NULL);
	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0,
	    IPL_NONE, PR_WAITOK, "aobjpl", NULL);
}

/*
 * uao_reference: hold a reference to an anonymous UVM object.
 */
void
uao_reference(struct uvm_object *uobj)
{
	/* Kernel object is persistent. */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	atomic_inc_int(&uobj->uo_refs);
}

/*
 * uao_detach: drop a reference to an anonymous UVM object.
 */
void
uao_detach(struct uvm_object *uobj)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;

	/*
	 * Detaching from kernel_object is a NOP.
	 */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	/*
	 * Drop the reference.  If it was the last one, destroy the object.
	 */
	if (atomic_dec_int_nv(&uobj->uo_refs) > 0) {
		return;
	}

	/*
	 * Remove the aobj from the global list.
	 */
	mtx_enter(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	mtx_leave(&uao_list_lock);

	/*
	 * Free all the pages left in the aobj.  For each page, when the
	 * page is no longer busy (and thus after any disk I/O that it is
	 * involved in is complete), release any swap resources and free
	 * the page itself.
	 */
	rw_enter(uobj->vmobjlock, RW_WRITE);
	while ((pg = RBT_ROOT(uvm_objtree, &uobj->memt)) != NULL) {
		pmap_page_protect(pg, PROT_NONE);
		if (pg->pg_flags & PG_BUSY) {
			atomic_setbits_int(&pg->pg_flags, PG_WANTED);
			rwsleep_nsec(pg, uobj->vmobjlock, PVM, "uao_det",
			    INFSLP);
			continue;
		}
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}

	/*
	 * Finally, free the anonymous UVM object itself.
	 */
	uao_free(aobj);
}

/*
 * uao_flush: flush pages out of a uvm object
 *
 * => if PGO_CLEANIT is not set, then we will not block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *    for flushing.
 * => NOTE: we are allowed to lock the page queues, so the caller
 *    must not be holding the lock on them [e.g. pagedaemon had
 *    better not call us with the queues locked]
 * => we return TRUE unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 */
boolean_t
uao_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) uobj;
	struct vm_page *pg;
	voff_t curoff;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));

	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = (voff_t)aobj->u_pages << PAGE_SHIFT;
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
		if (stop > ((voff_t)aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = (voff_t)aobj->u_pages << PAGE_SHIFT;
		}
	}

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */
	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		return TRUE;
	}

	curoff = start;
	for (;;) {
		if (curoff < stop) {
			pg = uvm_pagelookup(uobj, curoff);
			curoff += PAGE_SIZE;
			if (pg == NULL)
				continue;
		} else {
			break;
		}

		/* Make sure page is unbusy, else wait for it. */
		if (pg->pg_flags & PG_BUSY) {
			atomic_setbits_int(&pg->pg_flags, PG_WANTED);
			rwsleep_nsec(pg, uobj->vmobjlock, PVM, "uaoflsh",
			    INFSLP);
			curoff -= PAGE_SIZE;
			continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */
		case PGO_CLEANIT|PGO_FREE:
			/* FALLTHROUGH */
		case PGO_CLEANIT|PGO_DEACTIVATE:
			/* FALLTHROUGH */
		case PGO_DEACTIVATE:
 deactivate_it:
			if (pg->wire_count != 0)
				continue;

			uvm_lock_pageq();
			pmap_page_protect(pg, PROT_NONE);
			uvm_pagedeactivate(pg);
			uvm_unlock_pageq();

			continue;
		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */
			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/* XXX skip the page if it's wired */
			if (pg->wire_count != 0)
				continue;

			/*
			 * free the swap slot and the page.
			 */
			pmap_page_protect(pg, PROT_NONE);

			/*
			 * freeing swapslot here is not strictly necessary.
			 * however, leaving it here doesn't save much
			 * because we need to update swap accounting anyway.
			 */
			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();

			continue;
		default:
			panic("uao_flush: weird flags");
		}
	}

	return TRUE;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return VM_PAGER_UNLOCK.
 *
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
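/*
 * A sketch of the calling convention (the fault path lives outside this
 * file, so this is illustrative): the fault code first calls pgo_get with
 * PGO_LOCKED to pick up resident pages without sleeping; if that returns
 * VM_PAGER_UNLOCK, it releases its data structures and calls pgo_get again
 * without PGO_LOCKED, allowing the pager to sleep and perform I/O.
 */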
static int
uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	voff_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv, pageidx;
	boolean_t done;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));

	/*
	 * get number of pages
	 */
	maxpages = *npagesp;

	if (flags & PGO_LOCKED) {
		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */
			if (ptmp == NULL && uao_find_swslot(uobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					atomic_clearbits_int(&ptmp->pg_flags,
					    PG_BUSY|PG_FAKE);
					atomic_setbits_int(&ptmp->pg_flags,
					    PQ_AOBJ);
					UVM_PAGE_OWN(ptmp, NULL);
				}
			}

			/*
			 * to be useful must get a non-busy page
			 */
			if (ptmp == NULL ||
			    (ptmp->pg_flags & PG_BUSY) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: plug it in our result array
			 */
			atomic_setbits_int(&ptmp->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(ptmp, "uao_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */
		*npagesp = gotpages;
		if (done)
			/* bingo! */
			return VM_PAGER_OK;
		else
			/* EEK!   Need to unlock and I/O */
			return VM_PAGER_UNLOCK;
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * data structures are unlocked.
	 */
	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {
		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					rw_exit(uobj->vmobjlock);
					uvm_wait("uao_getpage");
					rw_enter(uobj->vmobjlock, RW_WRITE);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */
				atomic_setbits_int(&ptmp->pg_flags, PQ_AOBJ);

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->pg_flags & PG_BUSY) != 0) {
				atomic_setbits_int(&ptmp->pg_flags, PG_WANTED);
				rwsleep_nsec(ptmp, uobj->vmobjlock, PVM,
				    "uao_get", INFSLP);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page is resident and
			 * unbusy.   we busy it now (so we own it).
			 */
			/* we own it, caller must un-busy */
			atomic_setbits_int(&ptmp->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.   nothing more to do except go to the next page.
		 */
		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */
		swslot = uao_find_swslot(uobj, pageidx);

		/* just zero the page if there's nothing in swap. */
		if (swslot == 0) {
			/* page hasn't existed before, just zero it. */
			uvm_pagezero(ptmp);
		} else {
			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */

			rw_exit(uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			rw_enter(uobj->vmobjlock, RW_WRITE);

			/*
			 * I/O done.  check for errors.
			 */
			if (rv != VM_PAGER_OK) {
				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */
				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
				    SWSLOT_BAD);
				uvm_swap_markbad(swslot, 1);

				if (ptmp->pg_flags & PG_WANTED)
					wakeup(ptmp);
				atomic_clearbits_int(&ptmp->pg_flags,
				    PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();
				rw_exit(uobj->vmobjlock);

				return rv;
			}
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the callers job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */
		atomic_clearbits_int(&ptmp->pg_flags, PG_FAKE);
		pmap_clear_modify(ptmp);		/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	rw_exit(uobj->vmobjlock);
	return VM_PAGER_OK;
}

/*
 * uao_dropswap: release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */
int
uao_dropswap(struct uvm_object *uobj, int pageidx)
{
	int slot;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
	return slot;
}

/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => aobj must be locked and is returned locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
boolean_t
uao_swap_off(int startslot, int endslot)
{
	struct uvm_aobj *aobj;

	/*
	 * Walk the list of all anonymous UVM objects.  Grab the first.
	 */
	mtx_enter(&uao_list_lock);
	if ((aobj = LIST_FIRST(&uao_list)) == NULL) {
		mtx_leave(&uao_list_lock);
		return FALSE;
	}
	uao_reference(&aobj->u_obj);
1280 | ||||
1281 | do { | |||
1282 | struct uvm_aobj *nextaobj; | |||
1283 | boolean_t rv; | |||
1284 | ||||
1285 | /* | |||
1286 | * Prefetch the next object and immediately hold a reference | |||
1287 | * on it, so neither the current nor the next entry could | |||
1288 | * disappear while we are iterating. | |||
1289 | */ | |||
1290 | if ((nextaobj = LIST_NEXT(aobj, u_list)((aobj)->u_list.le_next)) != NULL((void *)0)) { | |||
1291 | uao_reference(&nextaobj->u_obj); | |||
1292 | } | |||
1293 | mtx_leave(&uao_list_lock); | |||
1294 | ||||
1295 | /* | |||
1296 | * Page in all pages in the swap slot range. | |||
1297 | */ | |||
1298 | rw_enter(aobj->u_obj.vmobjlock, RW_WRITE0x0001UL); | |||
1299 | rv = uao_pagein(aobj, startslot, endslot); | |||
1300 | rw_exit(aobj->u_obj.vmobjlock); | |||
1301 | ||||
1302 | /* Drop the reference of the current object. */ | |||
1303 | uao_detach(&aobj->u_obj); | |||
1304 | if (rv) { | |||
1305 | if (nextaobj) { | |||
1306 | uao_detach(&nextaobj->u_obj); | |||
1307 | } | |||
1308 | return rv; | |||
1309 | } | |||
1310 | ||||
1311 | aobj = nextaobj; | |||
1312 | mtx_enter(&uao_list_lock); | |||
1313 | } while (aobj); | |||
1314 | ||||
1315 | /* | |||
1316 | * done with traversal, unlock the list | |||
1317 | */ | |||
1318 | mtx_leave(&uao_list_lock); | |||
1319 | return FALSE0; | |||
1320 | } | |||
1321 | ||||
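/*
 * Editor's note: the loop above is an instance of a general pattern
 * for walking a list whose entries may be removed concurrently: pin
 * the successor with a reference before dropping the list lock.  A
 * minimal sketch with illustrative names ("take_reference" and
 * friends are hypothetical, not UVM functions):
 */
#if 0
	mtx_enter(&list_lock);
	if ((obj = LIST_FIRST(&list)) != NULL)
		take_reference(obj);
	while (obj != NULL) {
		if ((next = LIST_NEXT(obj, entry)) != NULL)
			take_reference(next);	/* pin the successor */
		mtx_leave(&list_lock);		/* both entries are pinned */

		work(obj);
		drop_reference(obj);		/* may free "obj" */

		obj = next;
		mtx_enter(&list_lock);
	}
	mtx_leave(&list_lock);
#endif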
1322 | /*
1323 |  * page in any pages from aobj in the given range.
1324 |  *
1325 |  * => returns TRUE if pagein was aborted due to lack of memory.
1326 |  */
1327 | static boolean_t
1328 | uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
1329 | {
1330 | 	boolean_t rv;
1331 |
1332 | 	if (UAO_USES_SWHASH(aobj)) {
1333 | 		struct uao_swhash_elt *elt;
1334 | 		int bucket;
1335 |
1336 | restart:
1337 | 		for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
1338 | 			for (elt = LIST_FIRST(&aobj->u_swhash[bucket]);
1339 | 			     elt != NULL;
1340 | 			     elt = LIST_NEXT(elt, list)) {
1341 | 				int i;
1342 |
1343 | 				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
1344 | 					int slot = elt->slots[i];
1345 |
1346 | 					/*
1347 | 					 * if the slot isn't in range, skip it.
1348 | 					 */
1349 | 					if (slot < startslot ||
1350 | 					    slot >= endslot) {
1351 | 						continue;
1352 | 					}
1353 |
1354 | 					/*
1355 | 					 * process the page, then start over
1356 | 					 * on this object, since processing
1357 | 					 * may sleep and the swhash elt may
1358 | 					 * have been freed meanwhile.
1359 | 					 */
1360 | 					rv = uao_pagein_page(aobj,
1361 | 					    UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
1362 | 					if (rv) {
1363 | 						return rv;
1364 | 					}
1365 | 					goto restart;
1366 | 				}
1367 | 			}
1368 | 		}
1369 | 	} else {
1370 | 		int i;
1371 |
1372 | 		for (i = 0; i < aobj->u_pages; i++) {
1373 | 			int slot = aobj->u_swslots[i];
1374 |
1375 | 			/*
1376 | 			 * if the slot isn't in range, skip it
1377 | 			 */
1378 | 			if (slot < startslot || slot >= endslot) {
1379 | 				continue;
1380 | 			}
1381 |
1382 | 			/*
1383 | 			 * process the page.
1384 | 			 */
1385 | 			rv = uao_pagein_page(aobj, i);
1386 | 			if (rv) {
1387 | 				return rv;
1388 | 			}
1389 | 		}
1390 | 	}
1391 |
1392 | 	return FALSE;
1393 | }
1394 |
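/*
 * Editor's note on the "goto restart" above: uao_pagein_page() can
 * sleep and drops the object lock, so the swhash element being walked
 * may be freed in the meantime.  The restart still makes forward
 * progress because each processed page has its slot reset to 0, which
 * lies below any valid startslot and is therefore skipped on the next
 * pass.
 */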
1395 | /*
1396 |  * uao_pagein_page: page in a single page from an anonymous UVM object.
1397 |  *
1398 |  * => Returns TRUE if pagein was aborted due to lack of memory.
1399 |  */
1400 | static boolean_t
1401 | uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
1402 | {
1403 | 	struct uvm_object *uobj = &aobj->u_obj;
1404 | 	struct vm_page *pg;
1405 | 	int rv, slot, npages;
1406 |
1407 | 	pg = NULL;
1408 | 	npages = 1;
1409 |
1410 | 	KASSERT(rw_write_held(uobj->vmobjlock));
1411 | 	rv = uao_get(&aobj->u_obj, (voff_t)pageidx << PAGE_SHIFT,
1412 | 	    &pg, &npages, 0, PROT_READ | PROT_WRITE, 0, 0);
1413 |
1414 | 	/*
1415 | 	 * relock and finish up.
1416 | 	 */
1417 | 	rw_enter(uobj->vmobjlock, RW_WRITE);
1418 | 	switch (rv) {
1419 | 	case VM_PAGER_OK:
1420 | 		break;
1421 |
1422 | 	case VM_PAGER_ERROR:
1423 | 	case VM_PAGER_REFAULT:
1424 | 		/*
1425 | 		 * nothing more to do on errors.
1426 | 		 * VM_PAGER_REFAULT can only mean that the anon was freed,
1427 | 		 * so again there's nothing to do.
1428 | 		 */
1429 | 		return FALSE;
1430 | 	}
1431 |
1432 | 	/*
1433 | 	 * ok, we've got the page now.
1434 | 	 * mark it as dirty, clear its swslot and un-busy it.
1435 | 	 */
1436 | 	slot = uao_set_swslot(&aobj->u_obj, pageidx, 0);
1437 | 	uvm_swap_free(slot, 1);
1438 | 	atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_CLEAN|PG_FAKE);
1439 | 	UVM_PAGE_OWN(pg, NULL);
1440 |
1441 | 	/*
1442 | 	 * deactivate the page (to put it on a page queue).
1443 | 	 */
1444 | 	pmap_clear_reference(pg);
1445 | 	uvm_lock_pageq();
1446 | 	uvm_pagedeactivate(pg);
1447 | 	uvm_unlock_pageq();
1448 |
1449 | 	return FALSE;
1450 | }
1451 |
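/*
 * Editor's note: the (voff_t)pageidx << PAGE_SHIFT expression in
 * uao_pagein_page() converts a page index into a byte offset within
 * the object.  For example, with 4 KiB pages (PAGE_SHIFT == 12),
 * page index 3 becomes offset 0x3000.
 */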
1452 | /*
1453 |  * uao_dropswap_range: drop swapslots in the range.
1454 |  *
1455 |  * => aobj must be locked and is returned locked.
1456 |  * => start is inclusive.  end is exclusive.
1457 |  */
1458 | void
1459 | uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
1460 | {
1461 | 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
1462 | 	int swpgonlydelta = 0;
1463 |
1464 | 	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
1465 | 	KASSERT(rw_write_held(uobj->vmobjlock));
1466 |
1467 | 	if (end == 0) {
1468 | 		end = INT64_MAX;
1469 | 	}
1470 |
1471 | 	if (UAO_USES_SWHASH(aobj)) {
1472 | 		int i, hashbuckets = aobj->u_swhashmask + 1;
1473 | 		voff_t taghi;
1474 | 		voff_t taglo;
1475 |
1476 | 		taglo = UAO_SWHASH_ELT_TAG(start);
1477 | 		taghi = UAO_SWHASH_ELT_TAG(end);
1478 |
1479 | 		for (i = 0; i < hashbuckets; i++) {
1480 | 			struct uao_swhash_elt *elt, *next;
1481 |
1482 | 			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
1483 | 			     elt != NULL;
1484 | 			     elt = next) {
1485 | 				int startidx, endidx;
1486 | 				int j;
1487 |
1488 | 				next = LIST_NEXT(elt, list);
1489 |
1490 | 				if (elt->tag < taglo || taghi < elt->tag) {
1491 | 					continue;
1492 | 				}
1493 |
1494 | 				if (elt->tag == taglo) {
1495 | 					startidx =
1496 | 					    UAO_SWHASH_ELT_PAGESLOT_IDX(start);
1497 | 				} else {
1498 | 					startidx = 0;
1499 | 				}
1500 |
1501 | 				if (elt->tag == taghi) {
1502 | 					endidx =
1503 | 					    UAO_SWHASH_ELT_PAGESLOT_IDX(end);
1504 | 				} else {
1505 | 					endidx = UAO_SWHASH_CLUSTER_SIZE;
1506 | 				}
1507 |
1508 | 				for (j = startidx; j < endidx; j++) {
1509 | 					int slot = elt->slots[j];
1510 |
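/*
 * Editor's note on the analyzer warning at line 1511: when a caller
 * passes end == 0, end is clamped to INT64_MAX above, so taghi and
 * hence UAO_SWHASH_ELT_PAGEIDX_BASE(elt) may, as far as the analyzer
 * can tell, reach 0x7ffffffffffffff0 (9223372036854775792).  Shifting
 * that value left by PAGE_SHIFT (12) overflows the signed 64-bit
 * voff_t, which is undefined behavior.  In practice elt->tag is
 * bounded by the object size; doing the shift in an unsigned type
 * before converting back would make that explicit.
 */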
1511 | 					KASSERT(uvm_pagelookup(&aobj->u_obj,
1512 | 					    (voff_t)(UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
1513 | 					    + j) << PAGE_SHIFT) == NULL);
1514 |
1515 | 					if (slot > 0) {
1516 | 						uvm_swap_free(slot, 1);
1517 | 						swpgonlydelta++;
1518 | 						KASSERT(elt->count > 0);
1519 | 						elt->slots[j] = 0;
1520 | 						elt->count--;
1521 | 					}
1522 | 				}
1523 |
1524 | 				if (elt->count == 0) {
1525 | 					LIST_REMOVE(elt, list);
1526 | 					pool_put(&uao_swhash_elt_pool, elt);
1527 | 				}
1528 | 			}
1529 | 		}
1530 | 	} else {
1531 | 		int i;
1532 |
1533 | 		if (aobj->u_pages < end) {
1534 | 			end = aobj->u_pages;
1535 | 		}
1536 | 		for (i = start; i < end; i++) {
1537 | 			int slot = aobj->u_swslots[i];
1538 |
1539 | 			if (slot > 0) {
1540 | 				uvm_swap_free(slot, 1);
1541 | 				swpgonlydelta++;
1542 | 			}
1543 | 		}
1544 | 	}
1545 |
1546 | 	/*
1547 | 	 * adjust the counter of pages only in swap for all
1548 | 	 * the swap slots we've freed.
1549 | 	 */
1550 | 	if (swpgonlydelta > 0) {
1551 | 		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
1552 | 		atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
1553 | 	}
1554 | }
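/*
 * Editor's worked example of the tag arithmetic above, with
 * hypothetical numbers: given UAO_SWHASH_CLUSTER_SHIFT == 4 and the
 * page range [start, end) == [0x13, 0x25), taglo == 1 and taghi == 2.
 * The element tagged 1 is trimmed from startidx == 3 to the end of
 * its 16-slot cluster, any element with a tag strictly between taglo
 * and taghi over its whole cluster, and the element tagged 2 from
 * slot 0 up to endidx == 5.
 */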