File:    dev/pci/drm/i915/gt/intel_gt_buffer_pool.c
Warning: line 241, column 31: Value stored to 'pool' during its initialization is never read
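
Note: in this build GEM_BUG_ON() expands to ((void)0) and ARRAY_SIZE() names 'pool' only inside sizeof(), which is never evaluated at run time, so the initializer of 'pool' in intel_gt_fini_buffer_pool() (line 241) is a dead store. A minimal sketch of one possible fix follows the listing.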
  1 | // SPDX-License-Identifier: MIT
  2 | /*
  3 |  * Copyright © 2014-2018 Intel Corporation
  4 |  */
  5 |
  6 | #include "gem/i915_gem_internal.h"
  7 | #include "gem/i915_gem_object.h"
  8 |
  9 | #include "i915_drv.h"
 10 | #include "intel_engine_pm.h"
 11 | #include "intel_gt_buffer_pool.h"
 12 |
 13 | static struct list_head *
 14 | bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
 15 | {
 16 |         int n;
 17 |
 18 |         /*
 19 |          * Compute a power-of-two bucket, but throw everything greater than
 20 |          * 16KiB into the same bucket: i.e. the buckets hold objects of
 21 |          * (1 page, 2 pages, 4 pages, 8+ pages).
 22 |          */
 23 |         n = fls(sz >> PAGE_SHIFT) - 1;
 24 |         if (n >= ARRAY_SIZE(pool->cache_list))
 25 |                 n = ARRAY_SIZE(pool->cache_list) - 1;
 26 |
 27 |         return &pool->cache_list[n];
 28 | }
 29 |
 30 | static void node_free(struct intel_gt_buffer_pool_node *node)
 31 | {
 32 |         i915_gem_object_put(node->obj);
 33 |         i915_active_fini(&node->active);
 34 |         kfree_rcu(node, rcu);
 35 | }
 36 |
 37 | static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
 38 | {
 39 |         struct intel_gt_buffer_pool_node *node, *stale = NULL;
 40 |         bool active = false;
 41 |         int n;
 42 |
 43 |         /* Free buffers that have not been used in the past second */
 44 |         for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
 45 |                 struct list_head *list = &pool->cache_list[n];
 46 |
 47 |                 if (list_empty(list))
 48 |                         continue;
 49 |
 50 |                 if (spin_trylock_irq(&pool->lock)) {
 51 |                         struct list_head *pos;
 52 |
 53 |                         /* Most recent at head; oldest at tail */
 54 |                         list_for_each_prev(pos, list) {
 55 |                                 unsigned long age;
 56 |
 57 |                                 node = list_entry(pos, typeof(*node), link);
 58 |
 59 |                                 age = READ_ONCE(node->age);
 60 |                                 if (!age || jiffies - age < keep)
 61 |                                         break;
 62 |
 63 |                                 /* Check we are the first to claim this node */
 64 |                                 if (!xchg(&node->age, 0))
 65 |                                         break;
 66 |
 67 |                                 node->free = stale;
 68 |                                 stale = node;
 69 |                         }
 70 |                         if (!list_is_last(pos, list))
 71 |                                 __list_del_many(pos, list);
 72 |
 73 |                         spin_unlock_irq(&pool->lock);
 74 |                 }
 75 |
 76 |                 active |= !list_empty(list);
 77 |         }
 78 |
 79 |         while ((node = stale)) {
 80 |                 stale = stale->free;
 81 |                 node_free(node);
 82 |         }
 83 |
 84 |         return active;
 85 | }
 86 |
 87 | static void pool_free_work(struct work_struct *wrk)
 88 | {
 89 |         struct intel_gt_buffer_pool *pool =
 90 |                 container_of(wrk, typeof(*pool), work.work);
 91 |
 92 |         if (pool_free_older_than(pool, HZ))
 93 |                 schedule_delayed_work(&pool->work,
 94 |                                       round_jiffies_up_relative(HZ));
 95 | }
 96 |
 97 | static void pool_retire(struct i915_active *ref)
 98 | {
 99 |         struct intel_gt_buffer_pool_node *node =
100 |                 container_of(ref, typeof(*node), active);
101 |         struct intel_gt_buffer_pool *pool = node->pool;
102 |         struct list_head *list = bucket_for_size(pool, node->obj->base.size);
103 |         unsigned long flags;
104 |
105 |         if (node->pinned) {
106 |                 i915_gem_object_unpin_pages(node->obj);
107 |
108 |                 /* Return this object to the shrinker pool */
109 |                 i915_gem_object_make_purgeable(node->obj);
110 |                 node->pinned = false;
111 |         }
112 |
113 |         GEM_BUG_ON(node->age);
114 |         spin_lock_irqsave(&pool->lock, flags);
115 |         list_add_rcu(&node->link, list);
116 |         WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
117 |         spin_unlock_irqrestore(&pool->lock, flags);
118 |
119 |         schedule_delayed_work(&pool->work,
120 |                               round_jiffies_up_relative(HZ));
121 | }
122 |
123 | void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node)
124 | {
125 |         assert_object_held(node->obj);
126 |
127 |         if (node->pinned)
128 |                 return;
129 |
130 |         __i915_gem_object_pin_pages(node->obj);
131 |         /* Hide this pinned object from the shrinker until retired */
132 |         i915_gem_object_make_unshrinkable(node->obj);
133 |         node->pinned = true;
134 | }
135 |
136 | static struct intel_gt_buffer_pool_node *
137 | node_create(struct intel_gt_buffer_pool *pool, size_t sz,
138 |             enum i915_map_type type)
139 | {
140 |         struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
141 |         struct intel_gt_buffer_pool_node *node;
142 |         struct drm_i915_gem_object *obj;
143 |
144 |         node = kmalloc(sizeof(*node),
145 |                        GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
146 |         if (!node)
147 |                 return ERR_PTR(-ENOMEM);
148 |
149 |         node->age = 0;
150 |         node->pool = pool;
151 |         node->pinned = false;
152 |         i915_active_init(&node->active, NULL, pool_retire, 0);
153 |
154 |         obj = i915_gem_object_create_internal(gt->i915, sz);
155 |         if (IS_ERR(obj)) {
156 |                 i915_active_fini(&node->active);
157 |                 kfree(node);
158 |                 return ERR_CAST(obj);
159 |         }
160 |
161 |         i915_gem_object_set_readonly(obj);
162 |
163 |         node->type = type;
164 |         node->obj = obj;
165 |         return node;
166 | }
167 |
168 | struct intel_gt_buffer_pool_node *
169 | intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
170 |                          enum i915_map_type type)
171 | {
172 |         struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
173 |         struct intel_gt_buffer_pool_node *node;
174 |         struct list_head *list;
175 |         int ret;
176 |
177 |         size = PAGE_ALIGN(size);
178 |         list = bucket_for_size(pool, size);
179 |
180 |         rcu_read_lock();
181 |         list_for_each_entry_rcu(node, list, link) {
182 |                 unsigned long age;
183 |
184 |                 if (node->obj->base.size < size)
185 |                         continue;
186 |
187 |                 if (node->type != type)
188 |                         continue;
189 |
190 |                 age = READ_ONCE(node->age);
191 |                 if (!age)
192 |                         continue;
193 |
194 |                 if (cmpxchg(&node->age, age, 0) == age) {
195 |                         spin_lock_irq(&pool->lock);
196 |                         list_del_rcu(&node->link);
197 |                         spin_unlock_irq(&pool->lock);
198 |                         break;
199 |                 }
200 |         }
201 |         rcu_read_unlock();
202 |
203 |         if (&node->link == list) {
204 |                 node = node_create(pool, size, type);
205 |                 if (IS_ERR(node))
206 |                         return node;
207 |         }
208 |
209 |         ret = i915_active_acquire(&node->active);
210 |         if (ret) {
211 |                 node_free(node);
212 |                 return ERR_PTR(ret);
213 |         }
214 |
215 |         return node;
216 | }
217 |
218 | void intel_gt_init_buffer_pool(struct intel_gt *gt)
219 | {
220 |         struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
221 |         int n;
222 |
223 |         mtx_init(&pool->lock, IPL_TTY);
224 |         for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
225 |                 INIT_LIST_HEAD(&pool->cache_list[n]);
226 |         INIT_DELAYED_WORK(&pool->work, pool_free_work);
227 | }
228 |
229 | void intel_gt_flush_buffer_pool(struct intel_gt *gt)
230 | {
231 |         struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
232 |
233 |         do {
234 |                 while (pool_free_older_than(pool, 0))
235 |                         ;
236 |         } while (cancel_delayed_work_sync(&pool->work));
237 | }
238 |
239 | void intel_gt_fini_buffer_pool(struct intel_gt *gt)
240 | {
241 |         struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
    |                                      ^ Value stored to 'pool' during its initialization is never read
242 |         int n;
243 |
244 |         for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
245 |                 GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
246 | }
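
A minimal sketch of one way to address the report, assuming (as the macro expansions in this build show) that GEM_BUG_ON() compiles to ((void)0), so 'pool' is only named inside sizeof() and its value is never read at run time. Dropping the local in favour of gt->buffer_pool keeps the debug check intact on builds where GEM_BUG_ON() is real; this is an illustrative rework, not a committed fix:

        /* Sketch: same check, but no local left unread by non-debug builds. */
        void intel_gt_fini_buffer_pool(struct intel_gt *gt)
        {
                int n;

                for (n = 0; n < ARRAY_SIZE(gt->buffer_pool.cache_list); n++)
                        GEM_BUG_ON(!list_empty(&gt->buffer_pool.cache_list[n]));
        }

An alternative with the same effect would be to keep the local but compile the whole loop only when the debug checks are enabled (CONFIG_DRM_I915_DEBUG_GEM in upstream i915), at the cost of an #ifdef.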