File: dev/pci/drm/drm_mm.c
Warning: line 214, column 3: Value stored to 'leftmost' is never read
1 | /************************************************************************** |
2 | * |
3 | * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA. |
4 | * Copyright 2016 Intel Corporation |
5 | * All Rights Reserved. |
6 | * |
7 | * Permission is hereby granted, free of charge, to any person obtaining a |
8 | * copy of this software and associated documentation files (the |
9 | * "Software"), to deal in the Software without restriction, including |
10 | * without limitation the rights to use, copy, modify, merge, publish, |
11 | * distribute, sub license, and/or sell copies of the Software, and to |
12 | * permit persons to whom the Software is furnished to do so, subject to |
13 | * the following conditions: |
14 | * |
15 | * The above copyright notice and this permission notice (including the |
16 | * next paragraph) shall be included in all copies or substantial portions |
17 | * of the Software. |
18 | * |
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
22 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
23 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
24 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
25 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
26 | * |
27 | * |
28 | **************************************************************************/ |
29 | |
30 | /* |
31 | * Generic simple memory manager implementation. Intended to be used as a base |
32 | * class implementation for more advanced memory managers. |
33 | * |
34 | * Note that the algorithm used is quite simple and there might be substantial |
35 | * performance gains if a smarter free list is implemented. Currently it is |
36 | * just an unordered stack of free regions. This could easily be improved by |
37 | * using an RB-tree instead, at least if we expect heavy fragmentation. |
38 | * |
39 | * Aligned allocations can also see improvement. |
40 | * |
41 | * Authors: |
42 | * Thomas Hellström <thomas-at-tungstengraphics-dot-com> |
43 | */ |
44 | |
45 | #include <linux/export.h> |
46 | #include <linux/interval_tree_generic.h> |
47 | #include <linux/seq_file.h> |
48 | #include <linux/slab.h> |
49 | #include <linux/stacktrace.h> |
50 | |
51 | #include <drm/drm_mm.h> |
52 | |
53 | /** |
54 | * DOC: Overview |
55 | * |
56 | * drm_mm provides a simple range allocator. Drivers are free to use the |
57 | * resource allocator from the Linux core if it suits them; the upside of drm_mm |
58 | * is that it's in the DRM core, which means that it's easier to extend for |
59 | * some of the crazier special-purpose needs of GPUs. |
60 | * |
61 | * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node. |
62 | * Drivers are free to embed either of them into their own suitable |
63 | * datastructures. drm_mm itself will not do any memory allocations of its own, |
64 | * so if drivers choose not to embed nodes they need to still allocate them |
65 | * themselves. |
66 | * |
67 | * The range allocator also supports reservation of preallocated blocks. This is |
68 | * useful for taking over initial mode setting configurations from the firmware, |
69 | * where an object needs to be created which exactly matches the firmware's |
70 | * scanout target. As long as the range is still free it can be inserted anytime |
71 | * after the allocator is initialized, which helps with avoiding looped |
72 | * dependencies in the driver load sequence. |
73 | * |
74 | * drm_mm maintains a stack of most recently freed holes, which of all |
75 | * simplistic datastructures seems to be a fairly decent approach to clustering |
76 | * allocations and avoiding too much fragmentation. This means free space |
77 | * searches are O(num_holes). Given all the fancy features drm_mm supports, |
78 | * something better would be fairly complex, and since gfx thrashing is a fairly |
79 | * steep cliff it is not a real concern. Removing a node again is O(1). |
80 | * |
81 | * drm_mm supports a few features: Alignment and range restrictions can be |
82 | * supplied. Furthermore every &drm_mm_node has a color value (which is just an |
83 | * opaque unsigned long) which in conjunction with a driver callback can be used |
84 | * to implement sophisticated placement restrictions. The i915 DRM driver uses |
85 | * this to implement guard pages between incompatible caching domains in the |
86 | * graphics TT. |
87 | * |
88 | * Two behaviors are supported for searching and allocating: bottom-up and |
89 | * top-down. The default is bottom-up. Top-down allocation can be used if the |
90 | * memory area has different restrictions, or just to reduce fragmentation. |
91 | * |
92 | * Finally iteration helpers to walk all nodes and all holes are provided as are |
93 | * some basic allocator dumpers for debugging. |
94 | * |
95 | * Note that this range allocator is not thread-safe, drivers need to protect |
96 | * modifications with their own locking. The idea behind this is that for a full |
97 | * memory manager additional data needs to be protected anyway, hence internal |
98 | * locking would be fully redundant. |
99 | */ |
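/*
 * A minimal usage sketch of the flow described above. The 1 GiB range, the
 * SZ_* constants and the 4 KiB object size are illustrative assumptions,
 * not taken from this file:
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	drm_mm_init(&mm, 0, SZ_1G);
 *
 *	err = drm_mm_insert_node(&mm, &node, SZ_4K);
 *	if (err)
 *		return err;	// -ENOSPC: no hole big enough
 *
 *	// ... use [node.start, node.start + node.size) ...
 *
 *	drm_mm_remove_node(&node);
 *	drm_mm_takedown(&mm);
 */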
100 | |
101 | #ifdef CONFIG_DRM_DEBUG_MM |
102 | #include <linux/stackdepot.h> |
103 | |
104 | #define STACKDEPTH 32 |
105 | #define BUFSZ 4096 |
106 | |
107 | static noinline void save_stack(struct drm_mm_node *node) |
108 | { |
109 | unsigned long entries[STACKDEPTH]; |
110 | unsigned int n; |
111 | |
112 | n = stack_trace_save(entries, ARRAY_SIZE(entries), 1); |
113 | |
114 | /* May be called under spinlock, so avoid sleeping */ |
115 | node->stack = stack_depot_save(entries, n, GFP_NOWAIT); |
116 | } |
117 | |
118 | static void show_leaks(struct drm_mm *mm) |
119 | { |
120 | struct drm_mm_node *node; |
121 | unsigned long *entries; |
122 | unsigned int nr_entries; |
123 | char *buf; |
124 | |
125 | buf = kmalloc(BUFSZ, GFP_KERNEL); |
126 | if (!buf) |
127 | return; |
128 | |
129 | list_for_each_entry(node, drm_mm_nodes(mm), node_list) { |
130 | if (!node->stack) { |
131 | DRM_ERROR("node [%08llx + %08llx]: unknown owner\n", |
132 | node->start, node->size); |
133 | continue; |
134 | } |
135 | |
136 | nr_entries = stack_depot_fetch(node->stack, &entries); |
137 | stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0); |
138 | DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s", |
139 | node->start, node->size, buf); |
140 | } |
141 | |
142 | kfree(buf); |
143 | } |
144 | |
145 | #undef STACKDEPTH |
146 | #undef BUFSZ |
147 | #else |
148 | static void save_stack(struct drm_mm_node *node) { } |
149 | static void show_leaks(struct drm_mm *mm) { } |
150 | #endif |
151 | |
152 | #define START(node) ((node)->start) |
153 | #define LAST(node) ((node)->start + (node)->size - 1) |
154 | |
155 | #ifdef __linux__ |
156 | INTERVAL_TREE_DEFINE(struct drm_mm_node, rb, |
157 | u64, __subtree_last, |
158 | START, LAST, static inline, drm_mm_interval_tree) |
159 | #else |
160 | static struct drm_mm_node * |
161 | drm_mm_interval_tree_iter_first(const struct rb_root_cached *root, |
162 | uint64_t start, uint64_t last) |
163 | { |
164 | struct drm_mm_node *node; |
165 | struct rb_node *rb; |
166 | |
167 | for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) { |
168 | node = rb_entry(rb, typeof(*node), rb); |
169 | if (LAST(node) >= start && START(node) <= last) |
170 | return node; |
171 | } |
172 | return NULL; |
173 | } |
174 | |
175 | static void |
176 | drm_mm_interval_tree_remove(struct drm_mm_node *node, |
177 | struct rb_root_cached *root) |
178 | { |
179 | rb_erase_cached(&node->rb, root); |
180 | } |
181 | #endif |
182 | |
183 | struct drm_mm_node * |
184 | __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last) |
185 | { |
186 | return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree, |
187 | start, last) ?: (struct drm_mm_node *)&mm->head_node; |
188 | } |
189 | EXPORT_SYMBOL(__drm_mm_interval_first); |
190 | |
191 | static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node, |
192 | struct drm_mm_node *node) |
193 | { |
194 | struct drm_mm *mm = hole_node->mm; |
195 | struct rb_node **link, *rb; |
196 | struct drm_mm_node *parent; |
197 | bool leftmost; |
198 | |
199 | node->__subtree_last = LAST(node); |
200 | |
201 | if (drm_mm_node_allocated(hole_node)) { |
202 | rb = &hole_node->rb; |
203 | while (rb) { |
204 | parent = rb_entry(rb, struct drm_mm_node, rb); |
205 | if (parent->__subtree_last >= node->__subtree_last) |
206 | break; |
207 | |
208 | parent->__subtree_last = node->__subtree_last; |
209 | rb = rb_parent(rb); |
210 | } |
211 | |
212 | rb = &hole_node->rb; |
213 | link = &hole_node->rb.rb_right; |
214 | leftmost = false; |
Value stored to 'leftmost' is never read | |
215 | } else { |
216 | rb = NULL; |
217 | link = &mm->interval_tree.rb_root.rb_node; |
218 | leftmost = true; |
219 | } |
220 | |
221 | while (*link) { |
222 | rb = *link; |
223 | parent = rb_entry(rb, struct drm_mm_node, rb); |
224 | if (parent->__subtree_last < node->__subtree_last) |
225 | parent->__subtree_last = node->__subtree_last; |
226 | if (node->start < parent->start) { |
227 | link = &parent->rb.rb_left; |
228 | } else { |
229 | link = &parent->rb.rb_right; |
230 | leftmost = false; |
231 | } |
232 | } |
233 | |
234 | rb_link_node(&node->rb, rb, link); |
235 | #ifdef notyet |
236 | rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost, |
237 | &drm_mm_interval_tree_augment); |
238 | #else |
239 | rb_insert_color_cached(&node->rb, &mm->interval_tree, leftmost); |
240 | #endif |
241 | } |
242 | |
243 | #define DRM_RB_INSERT(root, member, expr) do { \ |
244 | struct rb_node **link = &root.rb_node, *rb = NULL; \ |
245 | u64 x = expr(node); \ |
246 | while (*link) { \ |
247 | rb = *link; \ |
248 | if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \ |
249 | link = &rb->rb_left; \ |
250 | else \ |
251 | link = &rb->rb_right; \ |
252 | } \ |
253 | rb_link_node(&node->member, rb, link); \ |
254 | rb_insert_color(&node->member, &root); \ |
255 | } while (0) |
256 | |
257 | #define HOLE_SIZE(NODE) ((NODE)->hole_size) |
258 | #define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE)) |
259 | |
260 | static u64 rb_to_hole_size(struct rb_node *rb) |
261 | { |
262 | return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size; |
263 | } |
264 | |
265 | static void insert_hole_size(struct rb_root_cached *root, |
266 | struct drm_mm_node *node) |
267 | { |
268 | struct rb_node **link = &root->rb_root.rb_node, *rb = NULL; |
269 | u64 x = node->hole_size; |
270 | bool first = true; |
271 | |
272 | while (*link) { |
273 | rb = *link; |
274 | if (x > rb_to_hole_size(rb)) { |
275 | link = &rb->rb_left; |
276 | } else { |
277 | link = &rb->rb_right; |
278 | first = false; |
279 | } |
280 | } |
281 | |
282 | rb_link_node(&node->rb_hole_size, rb, link); |
283 | rb_insert_color_cached(&node->rb_hole_size, root, first); |
284 | } |
285 | |
286 | static void add_hole(struct drm_mm_node *node) |
287 | { |
288 | struct drm_mm *mm = node->mm; |
289 | |
290 | node->hole_size = |
291 | __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node); |
292 | DRM_MM_BUG_ON(!drm_mm_hole_follows(node)); |
293 | |
294 | insert_hole_size(&mm->holes_size, node); |
295 | DRM_RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR); |
296 | |
297 | list_add(&node->hole_stack, &mm->hole_stack); |
298 | } |
299 | |
300 | static void rm_hole(struct drm_mm_node *node) |
301 | { |
302 | DRM_MM_BUG_ON(!drm_mm_hole_follows(node)); |
303 | |
304 | list_del(&node->hole_stack); |
305 | rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size); |
306 | rb_erase(&node->rb_hole_addr, &node->mm->holes_addr); |
307 | node->hole_size = 0; |
308 | |
309 | DRM_MM_BUG_ON(drm_mm_hole_follows(node)); |
310 | } |
311 | |
312 | static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb) |
313 | { |
314 | return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size); |
315 | } |
316 | |
317 | static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb) |
318 | { |
319 | return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr); |
320 | } |
321 | |
322 | static inline u64 rb_hole_size(struct rb_node *rb) |
323 | { |
324 | return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size; |
325 | } |
326 | |
327 | static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size) |
328 | { |
329 | struct rb_node *rb = mm->holes_size.rb_root.rb_node; |
330 | struct drm_mm_node *best = NULL; |
331 | |
332 | do { |
333 | struct drm_mm_node *node = |
334 | rb_entry(rb, struct drm_mm_node, rb_hole_size); |
335 | |
336 | if (size <= node->hole_size) { |
337 | best = node; |
338 | rb = rb->rb_right; |
339 | } else { |
340 | rb = rb->rb_left; |
341 | } |
342 | } while (rb); |
343 | |
344 | return best; |
345 | } |
346 | |
347 | static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr) |
348 | { |
349 | struct rb_node *rb = mm->holes_addr.rb_node; |
350 | struct drm_mm_node *node = NULL; |
351 | |
352 | while (rb) { |
353 | u64 hole_start; |
354 | |
355 | node = rb_hole_addr_to_node(rb); |
356 | hole_start = __drm_mm_hole_node_start(node); |
357 | |
358 | if (addr < hole_start) |
359 | rb = node->rb_hole_addr.rb_left; |
360 | else if (addr > hole_start + node->hole_size) |
361 | rb = node->rb_hole_addr.rb_right; |
362 | else |
363 | break; |
364 | } |
365 | |
366 | return node; |
367 | } |
368 | |
369 | static struct drm_mm_node * |
370 | first_hole(struct drm_mm *mm, |
371 | u64 start, u64 end, u64 size, |
372 | enum drm_mm_insert_mode mode) |
373 | { |
374 | switch (mode) { |
375 | default: |
376 | case DRM_MM_INSERT_BEST: |
377 | return best_hole(mm, size); |
378 | |
379 | case DRM_MM_INSERT_LOW: |
380 | return find_hole(mm, start); |
381 | |
382 | case DRM_MM_INSERT_HIGH: |
383 | return find_hole(mm, end); |
384 | |
385 | case DRM_MM_INSERT_EVICT: |
386 | return list_first_entry_or_null(&mm->hole_stack, |
387 | struct drm_mm_node, |
388 | hole_stack); |
389 | } |
390 | } |
391 | |
392 | static struct drm_mm_node * |
393 | next_hole(struct drm_mm *mm, |
394 | struct drm_mm_node *node, |
395 | enum drm_mm_insert_mode mode) |
396 | { |
397 | switch (mode) { |
398 | default: |
399 | case DRM_MM_INSERT_BEST: |
400 | return rb_hole_size_to_node(rb_prev(&node->rb_hole_size)); |
401 | |
402 | case DRM_MM_INSERT_LOW: |
403 | return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr)); |
404 | |
405 | case DRM_MM_INSERT_HIGH: |
406 | return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr)); |
407 | |
408 | case DRM_MM_INSERT_EVICT: |
409 | node = list_next_entry(node, hole_stack); |
410 | return &node->hole_stack == &mm->hole_stack ? NULL : node; |
411 | } |
412 | } |
413 | |
414 | /** |
415 | * drm_mm_reserve_node - insert a pre-initialized node |
416 | * @mm: drm_mm allocator to insert @node into |
417 | * @node: drm_mm_node to insert |
418 | * |
419 | * This function inserts an already set-up &drm_mm_node into the allocator, |
420 | * meaning that start, size and color must be set by the caller. All other |
421 | * fields must be cleared to 0. This is useful to initialize the allocator with |
422 | * preallocated objects which must be set-up before the range allocator can be |
423 | * set-up, e.g. when taking over a firmware framebuffer. |
424 | * |
425 | * Returns: |
426 | * 0 on success, -ENOSPC if there's no hole where @node is. |
427 | */ |
428 | int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) |
429 | { |
430 | struct drm_mm_node *hole; |
431 | u64 hole_start, hole_end; |
432 | u64 adj_start, adj_end; |
433 | u64 end; |
434 | |
435 | end = node->start + node->size; |
436 | if (unlikely(end <= node->start)) |
437 | return -ENOSPC; |
438 | |
439 | /* Find the relevant hole to add our node to */ |
440 | hole = find_hole(mm, node->start); |
441 | if (!hole) |
442 | return -ENOSPC; |
443 | |
444 | adj_start = hole_start = __drm_mm_hole_node_start(hole); |
445 | adj_end = hole_end = hole_start + hole->hole_size; |
446 | |
447 | if (mm->color_adjust) |
448 | mm->color_adjust(hole, node->color, &adj_start, &adj_end); |
449 | |
450 | if (adj_start > node->start || adj_end < end) |
451 | return -ENOSPC; |
452 | |
453 | node->mm = mm; |
454 | |
455 | __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags); |
456 | list_add(&node->node_list, &hole->node_list); |
457 | drm_mm_interval_tree_add_node(hole, node); |
458 | node->hole_size = 0; |
459 | |
460 | rm_hole(hole); |
461 | if (node->start > hole_start) |
462 | add_hole(hole); |
463 | if (end < hole_end) |
464 | add_hole(node); |
465 | |
466 | save_stack(node); |
467 | return 0; |
468 | } |
469 | EXPORT_SYMBOL(drm_mm_reserve_node); |
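/*
 * Sketch of reserving a firmware-configured scanout range with the function
 * above (the base address and size are made-up values, not from this file):
 *
 *	struct drm_mm_node fb_node = {};	// all unused fields must stay 0
 *	int err;
 *
 *	fb_node.start = 0x100000;		// placement chosen by firmware
 *	fb_node.size = 8 * 1024 * 1024;
 *
 *	err = drm_mm_reserve_node(&mm, &fb_node);
 *	if (err)	// -ENOSPC: something already overlaps that range
 *		return err;
 */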
470 | |
471 | static u64 rb_to_hole_size_or_zero(struct rb_node *rb) |
472 | { |
473 | return rb ? rb_to_hole_size(rb) : 0; |
474 | } |
475 | |
476 | /** |
477 | * drm_mm_insert_node_in_range - ranged search for space and insert @node |
478 | * @mm: drm_mm to allocate from |
479 | * @node: preallocate node to insert |
480 | * @size: size of the allocation |
481 | * @alignment: alignment of the allocation |
482 | * @color: opaque tag value to use for this node |
483 | * @range_start: start of the allowed range for this node |
484 | * @range_end: end of the allowed range for this node |
485 | * @mode: fine-tune the allocation search and placement |
486 | * |
487 | * The preallocated @node must be cleared to 0. |
488 | * |
489 | * Returns: |
490 | * 0 on success, -ENOSPC if there's no suitable hole. |
491 | */ |
492 | int drm_mm_insert_node_in_range(struct drm_mm * const mm, |
493 | struct drm_mm_node * const node, |
494 | u64 size, u64 alignment, |
495 | unsigned long color, |
496 | u64 range_start, u64 range_end, |
497 | enum drm_mm_insert_mode mode) |
498 | { |
499 | struct drm_mm_node *hole; |
500 | u64 remainder_mask; |
501 | bool once; |
502 | |
503 | DRM_MM_BUG_ON(range_start > range_end); |
504 | |
505 | if (unlikely(size == 0 || range_end - range_start < size)) |
506 | return -ENOSPC; |
507 | |
508 | if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size) |
509 | return -ENOSPC; |
510 | |
511 | if (alignment <= 1) |
512 | alignment = 0; |
513 | |
514 | once = mode & DRM_MM_INSERT_ONCE; |
515 | mode &= ~DRM_MM_INSERT_ONCE; |
516 | |
517 | remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0; |
518 | for (hole = first_hole(mm, range_start, range_end, size, mode); |
519 | hole; |
520 | hole = once ? NULL : next_hole(mm, hole, mode)) { |
521 | u64 hole_start = __drm_mm_hole_node_start(hole); |
522 | u64 hole_end = hole_start + hole->hole_size; |
523 | u64 adj_start, adj_end; |
524 | u64 col_start, col_end; |
525 | |
526 | if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end) |
527 | break; |
528 | |
529 | if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start) |
530 | break; |
531 | |
532 | col_start = hole_start; |
533 | col_end = hole_end; |
534 | if (mm->color_adjust) |
535 | mm->color_adjust(hole, color, &col_start, &col_end); |
536 | |
537 | adj_start = max(col_start, range_start); |
538 | adj_end = min(col_end, range_end); |
539 | |
540 | if (adj_end <= adj_start || adj_end - adj_start < size) |
541 | continue; |
542 | |
543 | if (mode == DRM_MM_INSERT_HIGH) |
544 | adj_start = adj_end - size; |
545 | |
546 | if (alignment) { |
547 | u64 rem; |
548 | |
549 | if (likely(remainder_mask)) |
550 | rem = adj_start & remainder_mask; |
551 | else |
552 | div64_u64_rem(adj_start, alignment, &rem); |
553 | if (rem) { |
554 | adj_start -= rem; |
555 | if (mode != DRM_MM_INSERT_HIGH) |
556 | adj_start += alignment; |
557 | |
558 | if (adj_start < max(col_start, range_start) || |
559 | min(col_end, range_end) - adj_start < size) |
560 | continue; |
561 | |
562 | if (adj_end <= adj_start || |
563 | adj_end - adj_start < size) |
564 | continue; |
565 | } |
566 | } |
567 | |
568 | node->mm = mm; |
569 | node->size = size; |
570 | node->start = adj_start; |
571 | node->color = color; |
572 | node->hole_size = 0; |
573 | |
574 | __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags); |
575 | list_add(&node->node_list, &hole->node_list); |
576 | drm_mm_interval_tree_add_node(hole, node); |
577 | |
578 | rm_hole(hole); |
579 | if (adj_start > hole_start) |
580 | add_hole(hole); |
581 | if (adj_start + size < hole_end) |
582 | add_hole(node); |
583 | |
584 | save_stack(node); |
585 | return 0; |
586 | } |
587 | |
588 | return -ENOSPC; |
589 | } |
590 | EXPORT_SYMBOL(drm_mm_insert_node_in_range); |
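/*
 * Sketch of a ranged, aligned, top-down search with the function above (the
 * 64 KiB alignment and the 256 MiB window are illustrative assumptions):
 *
 *	err = drm_mm_insert_node_in_range(&mm, &node, size,
 *					  SZ_64K, 0,		// alignment, color
 *					  0, SZ_256M,		// allowed window
 *					  DRM_MM_INSERT_HIGH);
 *
 * DRM_MM_INSERT_HIGH scans holes from the top of the window downwards,
 * leaving the low addresses free for allocations with tighter limits.
 */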
591 | |
592 | static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node) |
593 | { |
594 | return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags); |
595 | } |
596 | |
597 | /** |
598 | * drm_mm_remove_node - Remove a memory node from the allocator. |
599 | * @node: drm_mm_node to remove |
600 | * |
601 | * This just removes a node from its drm_mm allocator. The node does not need to |
602 | * be cleared again before it can be re-inserted into this or any other drm_mm |
603 | * allocator. It is a bug to call this function on an unallocated node. |
604 | */ |
605 | void drm_mm_remove_node(struct drm_mm_node *node) |
606 | { |
607 | struct drm_mm *mm = node->mm; |
608 | struct drm_mm_node *prev_node; |
609 | |
610 | DRM_MM_BUG_ON(!drm_mm_node_allocated(node)); |
611 | DRM_MM_BUG_ON(drm_mm_node_scanned_block(node)); |
612 | |
613 | prev_node = list_prev_entry(node, node_list); |
614 | |
615 | if (drm_mm_hole_follows(node)) |
616 | rm_hole(node); |
617 | |
618 | drm_mm_interval_tree_remove(node, &mm->interval_tree); |
619 | list_del(&node->node_list); |
620 | |
621 | if (drm_mm_hole_follows(prev_node)) |
622 | rm_hole(prev_node); |
623 | add_hole(prev_node); |
624 | |
625 | clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags); |
626 | } |
627 | EXPORT_SYMBOL(drm_mm_remove_node); |
628 | |
629 | /** |
630 | * drm_mm_replace_node - move an allocation from @old to @new |
631 | * @old: drm_mm_node to remove from the allocator |
632 | * @new: drm_mm_node which should inherit @old's allocation |
633 | * |
634 | * This is useful for when drivers embed the drm_mm_node structure and hence |
635 | * can't move allocations by reassigning pointers. It's a combination of remove |
636 | * and insert with the guarantee that the allocation start will match. |
637 | */ |
638 | void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) |
639 | { |
640 | struct drm_mm *mm = old->mm; |
641 | |
642 | DRM_MM_BUG_ON(!drm_mm_node_allocated(old)); |
643 | |
644 | *new = *old; |
645 | |
646 | __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags); |
647 | list_replace(&old->node_list, &new->node_list); |
648 | rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree); |
649 | |
650 | if (drm_mm_hole_follows(old)) { |
651 | list_replace(&old->hole_stack, &new->hole_stack); |
652 | rb_replace_node_cached(&old->rb_hole_size, |
653 | &new->rb_hole_size, |
654 | &mm->holes_size); |
655 | rb_replace_node(&old->rb_hole_addr, |
656 | &new->rb_hole_addr, |
657 | &mm->holes_addr); |
658 | } |
659 | |
660 | clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags); |
661 | } |
662 | EXPORT_SYMBOL(drm_mm_replace_node); |
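/*
 * Sketch of the embedded-node case this function exists for ("my_buf" and
 * the copy step are hypothetical driver details):
 *
 *	struct my_buf {
 *		struct drm_mm_node node;
 *		// ... driver payload ...
 *	};
 *
 *	struct my_buf *new = kzalloc(sizeof(*new), GFP_KERNEL);
 *
 *	if (new)
 *		drm_mm_replace_node(&old->node, &new->node);
 *	// new->node now owns old's exact range; old->node is unallocated
 */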
663 | |
664 | /** |
665 | * DOC: lru scan roster |
666 | * |
667 | * Very often GPUs need to have contiguous allocations for a given object. When |
668 | * evicting objects to make space for a new one it is therefore not the most |
669 | * efficient strategy to simply select all objects from the tail of an LRU |
670 | * until there's a suitable hole: especially for big objects or nodes that |
671 | * otherwise have special allocation constraints there's a good chance we evict |
672 | * lots of (smaller) objects unnecessarily. |
673 | * |
674 | * The DRM range allocator supports this use-case through the scanning |
675 | * interfaces. First a scan operation needs to be initialized with |
676 | * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds |
677 | * objects to the roster, probably by walking an LRU list, but this can be |
678 | * freely implemented. Eviction candidates are added using |
679 | * drm_mm_scan_add_block() until a suitable hole is found or there are no |
680 | * further evictable objects. Eviction roster metadata is tracked in &struct |
681 | * drm_mm_scan. |
682 | * |
683 | * The driver must walk through all objects again in exactly the reverse |
684 | * order to restore the allocator state. Note that while the allocator is used |
685 | * in the scan mode no other operation is allowed. |
686 | * |
687 | * Finally the driver evicts all objects selected (drm_mm_scan_remove_block() |
688 | * reported true) in the scan, and any overlapping nodes after color adjustment |
689 | * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and |
690 | * since freeing a node is also O(1) the overall complexity is |
691 | * O(scanned_objects). So, like the free stack which needs to be walked before a |
692 | * scan operation even begins, this is linear in the number of objects. It |
693 | * doesn't seem to hurt too badly. |
694 | */ |
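/*
 * Shape of the eviction loop described above ("my_obj", its links and the
 * unbind step are hypothetical driver details; list_add() prepends, so
 * walking evict_list forwards visits blocks in reverse insertion order, as
 * drm_mm_scan_remove_block() requires):
 *
 *	struct drm_mm_scan scan;
 *	struct my_obj *obj, *next;
 *	LIST_HEAD(evict_list);
 *
 *	drm_mm_scan_init(&scan, &mm, size, 0, 0, DRM_MM_INSERT_BEST);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			goto found;
 *	}
 *	// no hole: still walk evict_list to restore the nodes, then bail
 * found:
 *	list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
 *		if (drm_mm_scan_remove_block(&scan, &obj->node))
 *			unbind_and_free(obj);	// selected for eviction
 *		else
 *			list_del(&obj->evict_link);
 *	}
 */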
695 | |
696 | /** |
697 | * drm_mm_scan_init_with_range - initialize range-restricted lru scanning |
698 | * @scan: scan state |
699 | * @mm: drm_mm to scan |
700 | * @size: size of the allocation |
701 | * @alignment: alignment of the allocation |
702 | * @color: opaque tag value to use for the allocation |
703 | * @start: start of the allowed range for the allocation |
704 | * @end: end of the allowed range for the allocation |
705 | * @mode: fine-tune the allocation search and placement |
706 | * |
707 | * This simply sets up the scanning routines with the parameters for the desired |
708 | * hole. |
709 | * |
710 | * Warning: |
711 | * As long as the scan list is non-empty, no other operations than |
712 | * adding/removing nodes to/from the scan list are allowed. |
713 | */ |
714 | void drm_mm_scan_init_with_range(struct drm_mm_scan *scan, |
715 | struct drm_mm *mm, |
716 | u64 size, |
717 | u64 alignment, |
718 | unsigned long color, |
719 | u64 start, |
720 | u64 end, |
721 | enum drm_mm_insert_mode mode) |
722 | { |
723 | DRM_MM_BUG_ON(start >= end); |
724 | DRM_MM_BUG_ON(!size || size > end - start); |
725 | DRM_MM_BUG_ON(mm->scan_active); |
726 | |
727 | scan->mm = mm; |
728 | |
729 | if (alignment <= 1) |
730 | alignment = 0; |
731 | |
732 | scan->color = color; |
733 | scan->alignment = alignment; |
734 | scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0; |
735 | scan->size = size; |
736 | scan->mode = mode; |
737 | |
738 | DRM_MM_BUG_ON(end <= start); |
739 | scan->range_start = start; |
740 | scan->range_end = end; |
741 | |
742 | scan->hit_start = U64_MAX; |
743 | scan->hit_end = 0; |
744 | } |
745 | EXPORT_SYMBOL(drm_mm_scan_init_with_range); |
746 | |
747 | /** |
748 | * drm_mm_scan_add_block - add a node to the scan list |
749 | * @scan: the active drm_mm scanner |
750 | * @node: drm_mm_node to add |
751 | * |
752 | * Add a node to the scan list that might be freed to make space for the desired |
753 | * hole. |
754 | * |
755 | * Returns: |
756 | * True if a hole has been found, false otherwise. |
757 | */ |
758 | bool drm_mm_scan_add_block(struct drm_mm_scan *scan, |
759 | struct drm_mm_node *node) |
760 | { |
761 | struct drm_mm *mm = scan->mm; |
762 | struct drm_mm_node *hole; |
763 | u64 hole_start, hole_end; |
764 | u64 col_start, col_end; |
765 | u64 adj_start, adj_end; |
766 | |
767 | DRM_MM_BUG_ON(node->mm != mm); |
768 | DRM_MM_BUG_ON(!drm_mm_node_allocated(node)); |
769 | DRM_MM_BUG_ON(drm_mm_node_scanned_block(node)); |
770 | __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags); |
771 | mm->scan_active++; |
772 | |
773 | /* Remove this block from the node_list so that we enlarge the hole |
774 | * (distance between the end of our previous node and the start of |
775 | * our next), without poisoning the link so that we can restore it |
776 | * later in drm_mm_scan_remove_block(). |
777 | */ |
778 | hole = list_prev_entry(node, node_list); |
779 | DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node); |
780 | __list_del_entry(&node->node_list); |
781 | |
782 | hole_start = __drm_mm_hole_node_start(hole); |
783 | hole_end = __drm_mm_hole_node_end(hole); |
784 | |
785 | col_start = hole_start; |
786 | col_end = hole_end; |
787 | if (mm->color_adjust) |
788 | mm->color_adjust(hole, scan->color, &col_start, &col_end); |
789 | |
790 | adj_start = max(col_start, scan->range_start); |
791 | adj_end = min(col_end, scan->range_end); |
792 | if (adj_end <= adj_start || adj_end - adj_start < scan->size) |
793 | return false; |
794 | |
795 | if (scan->mode == DRM_MM_INSERT_HIGH) |
796 | adj_start = adj_end - scan->size; |
797 | |
798 | if (scan->alignment) { |
799 | u64 rem; |
800 | |
801 | if (likely(scan->remainder_mask)) |
802 | rem = adj_start & scan->remainder_mask; |
803 | else |
804 | div64_u64_rem(adj_start, scan->alignment, &rem); |
805 | if (rem) { |
806 | adj_start -= rem; |
807 | if (scan->mode != DRM_MM_INSERT_HIGH) |
808 | adj_start += scan->alignment; |
809 | if (adj_start < max(col_start, scan->range_start) || |
810 | min(col_end, scan->range_end) - adj_start < scan->size) |
811 | return false; |
812 | |
813 | if (adj_end <= adj_start || |
814 | adj_end - adj_start < scan->size) |
815 | return false; |
816 | } |
817 | } |
818 | |
819 | scan->hit_start = adj_start; |
820 | scan->hit_end = adj_start + scan->size; |
821 | |
822 | DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end); |
823 | DRM_MM_BUG_ON(scan->hit_start < hole_start); |
824 | DRM_MM_BUG_ON(scan->hit_end > hole_end); |
825 | |
826 | return true; |
827 | } |
828 | EXPORT_SYMBOL(drm_mm_scan_add_block); |
829 | |
830 | /** |
831 | * drm_mm_scan_remove_block - remove a node from the scan list |
832 | * @scan: the active drm_mm scanner |
833 | * @node: drm_mm_node to remove |
834 | * |
835 | * Nodes **must** be removed in exactly the reverse order from the scan list as |
836 | * they have been added (e.g. using list_add() as they are added and then |
837 | * list_for_each() over that eviction list to remove), otherwise the internal |
838 | * state of the memory manager will be corrupted. |
839 | * |
840 | * When the scan list is empty, the selected memory nodes can be freed. An |
841 | * immediately following drm_mm_insert_node_in_range() with |
842 | * DRM_MM_INSERT_EVICT will then return the just freed block (because it's |
843 | * at the top of the hole_stack list). |
844 | * |
845 | * Returns: |
846 | * True if this block should be evicted, false otherwise. Will always |
847 | * return false when no hole has been found. |
848 | */ |
849 | bool drm_mm_scan_remove_block(struct drm_mm_scan *scan, |
850 | struct drm_mm_node *node) |
851 | { |
852 | struct drm_mm_node *prev_node; |
853 | |
854 | DRM_MM_BUG_ON(node->mm != scan->mm); |
855 | DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node)); |
856 | __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags); |
857 | |
858 | DRM_MM_BUG_ON(!node->mm->scan_active); |
859 | node->mm->scan_active--; |
860 | |
861 | /* During drm_mm_scan_add_block() we decoupled this node leaving |
862 | * its pointers intact. Now that the caller is walking back along |
863 | * the eviction list we can restore this block into its rightful |
864 | * place on the full node_list. To confirm that the caller is walking |
865 | * backwards correctly we check that prev_node->next == node->next, |
866 | * i.e. both believe the same node should be on the other side of the |
867 | * hole. |
868 | */ |
869 | prev_node = list_prev_entry(node, node_list); |
870 | DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) != |
871 | list_next_entry(node, node_list)); |
872 | list_add(&node->node_list, &prev_node->node_list); |
873 | |
874 | return (node->start + node->size > scan->hit_start && |
875 | node->start < scan->hit_end); |
876 | } |
877 | EXPORT_SYMBOL(drm_mm_scan_remove_block); |
878 | |
879 | /** |
880 | * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole |
881 | * @scan: drm_mm scan with target hole |
882 | * |
883 | * After completing an eviction scan and removing the selected nodes, we may |
884 | * need to remove a few more nodes from either side of the target hole if |
885 | * mm.color_adjust is being used. |
886 | * |
887 | * Returns: |
888 | * A node to evict, or NULL if there are no overlapping nodes. |
889 | */ |
890 | struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan) |
891 | { |
892 | struct drm_mm *mm = scan->mm; |
893 | struct drm_mm_node *hole; |
894 | u64 hole_start, hole_end; |
895 | |
896 | DRM_MM_BUG_ON(list_empty(&mm->hole_stack)); |
897 | |
898 | if (!mm->color_adjust) |
899 | return NULL; |
900 | |
901 | /* |
902 | * The hole found during scanning should ideally be the first element |
903 | * in the hole_stack list, but due to side-effects in the driver it |
904 | * may not be. |
905 | */ |
906 | list_for_each_entry(hole, &mm->hole_stack, hole_stack) { |
907 | hole_start = __drm_mm_hole_node_start(hole); |
908 | hole_end = hole_start + hole->hole_size; |
909 | |
910 | if (hole_start <= scan->hit_start && |
911 | hole_end >= scan->hit_end) |
912 | break; |
913 | } |
914 | |
915 | /* We should only be called after we found the hole previously */ |
916 | DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack); |
917 | if (unlikely(&hole->hole_stack == &mm->hole_stack)) |
918 | return NULL; |
919 | |
920 | DRM_MM_BUG_ON(hole_start > scan->hit_start); |
921 | DRM_MM_BUG_ON(hole_end < scan->hit_end); |
922 | |
923 | mm->color_adjust(hole, scan->color, &hole_start, &hole_end); |
924 | if (hole_start > scan->hit_start) |
925 | return hole; |
926 | if (hole_end < scan->hit_end) |
927 | return list_next_entry(hole, node_list); |
928 | |
929 | return NULL; |
930 | } |
931 | EXPORT_SYMBOL(drm_mm_scan_color_evict); |
932 | |
933 | /** |
934 | * drm_mm_init - initialize a drm-mm allocator |
935 | * @mm: the drm_mm structure to initialize |
936 | * @start: start of the range managed by @mm |
937 | * @size: size of the range managed by @mm |
938 | * |
939 | * Note that @mm must be cleared to 0 before calling this function. |
940 | */ |
941 | void drm_mm_init(struct drm_mm *mm, u64 start, u64 size) |
942 | { |
943 | DRM_MM_BUG_ON(start + size <= start); |
944 | |
945 | mm->color_adjust = NULL; |
946 | |
947 | INIT_LIST_HEAD(&mm->hole_stack); |
948 | mm->interval_tree = RB_ROOT_CACHED; |
949 | mm->holes_size = RB_ROOT_CACHED; |
950 | mm->holes_addr = RB_ROOT; |
951 | |
952 | /* Clever trick to avoid a special case in the free hole tracking. */ |
953 | INIT_LIST_HEAD(&mm->head_node.node_list); |
954 | mm->head_node.flags = 0; |
955 | mm->head_node.mm = mm; |
956 | mm->head_node.start = start + size; |
957 | mm->head_node.size = -size; |
958 | add_hole(&mm->head_node); |
959 | |
960 | mm->scan_active = 0; |
961 | } |
962 | EXPORT_SYMBOL(drm_mm_init); |
963 | |
964 | /** |
965 | * drm_mm_takedown - clean up a drm_mm allocator |
966 | * @mm: drm_mm allocator to clean up |
967 | * |
968 | * Note that it is a bug to call this function on an allocator which is not |
969 | * clean. |
970 | */ |
971 | void drm_mm_takedown(struct drm_mm *mm) |
972 | { |
973 | if (WARN(!drm_mm_clean(mm), |
974 | "Memory manager not clean during takedown.\n")) |
975 | show_leaks(mm); |
976 | } |
977 | EXPORT_SYMBOL(drm_mm_takedown); |
978 | |
979 | static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry) |
980 | { |
981 | u64 start, size; |
982 | |
983 | size = entry->hole_size; |
984 | if (size) { |
985 | start = drm_mm_hole_node_start(entry); |
986 | drm_printf(p, "%#018llx-%#018llx: %llu: free\n", |
987 | start, start + size, size); |
988 | } |
989 | |
990 | return size; |
991 | } |
992 | /** |
993 | * drm_mm_print - print allocator state |
994 | * @mm: drm_mm allocator to print |
995 | * @p: DRM printer to use |
996 | */ |
997 | void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p) |
998 | { |
999 | const struct drm_mm_node *entry; |
1000 | u64 total_used = 0, total_free = 0, total = 0; |
1001 | |
1002 | total_free += drm_mm_dump_hole(p, &mm->head_node); |
1003 | |
1004 | drm_mm_for_each_node(entry, mm) { |
1005 | drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start, |
1006 | entry->start + entry->size, entry->size); |
1007 | total_used += entry->size; |
1008 | total_free += drm_mm_dump_hole(p, entry); |
1009 | } |
1010 | total = total_free + total_used; |
1011 | |
1012 | drm_printf(p, "total: %llu, used %llu free %llu\n", total, |
1013 | total_used, total_free); |
1014 | } |
1015 | EXPORT_SYMBOL(drm_mm_print); |
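/*
 * One way to hook this up is a debugfs/seq_file show callback (a sketch;
 * stashing the drm_mm pointer in m->private is an assumption):
 *
 *	static int mm_dump(struct seq_file *m, void *data)
 *	{
 *		struct drm_printer p = drm_seq_file_printer(m);
 *
 *		drm_mm_print(m->private, &p);
 *		return 0;
 *	}
 */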