Bug Summary

File: dev/pci/drm/i915/gem/i915_gem_context.c
Warning: line 2356, column 17
Potential leak of memory pointed to by 'ext_data.pc'
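
The warning refers to the context-create ioctl path around line 2356, which is outside this excerpt. The proto-context allocated by proto_context_create() (the kzalloc at line 294 below) is stored in a local ext_data.pc; on at least one failure path that pointer goes out of scope without ever being passed to proto_context_close(), so the allocation leaks. The fragment below is only a sketch of that pattern, assuming (as the variable name in the warning suggests) a small local struct carrying the proto-context pointer; handle_create_ext() and the surrounding locals are placeholders, not the driver's actual code.

    /* Illustrative sketch of the flagged pattern -- not the driver's code. */
    struct create_ext ext_data = {};
    int ret;

    ext_data.pc = proto_context_create(i915, args->flags);
    if (IS_ERR(ext_data.pc))
            return PTR_ERR(ext_data.pc);

    ret = handle_create_ext(&ext_data);     /* hypothetical step that can fail */
    if (ret) {
            /* Omitting this call on the error path is the reported leak. */
            proto_context_close(i915, ext_data.pc);
            return ret;
    }

The annotated listing below shows how the analyzer tracks the allocation from proto_context_create() into that caller.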

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name i915_gem_context.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/drm/i915/gem/i915_gem_context.c

/usr/src/sys/dev/pci/drm/i915/gem/i915_gem_context.c

1/*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2011-2012 Intel Corporation
5 */
6
7/*
8 * This file implements HW context support. On gen5+ a HW context consists of an
9 * opaque GPU object which is referenced at times of context saves and restores.
10 * With RC6 enabled, the context is also referenced as the GPU enters and exits
11 * RC6 (the GPU has its own internal power context, except on gen5). Though
12 * something like a context does exist for the media ring, the code only
13 * supports contexts for the render ring.
14 *
15 * In software, there is a distinction between contexts created by the user,
16 * and the default HW context. The default HW context is used by GPU clients
17 * that do not request setup of their own hardware context. The default
18 * context's state is never restored to help prevent programming errors. This
19 * would happen if a client ran and piggy-backed off another client's GPU state.
20 * The default context only exists to give the GPU some offset to load as the
21 * current to invoke a save of the context we actually care about. In fact, the
22 * code could likely be constructed, albeit in a more complicated fashion, to
23 * never use the default context, though that limits the driver's ability to
24 * swap out, and/or destroy other contexts.
25 *
26 * All other contexts are created as a request by the GPU client. These contexts
27 * store GPU state, and thus allow GPU clients to not re-emit state (and
28 * potentially query certain state) at any time. The kernel driver makes
29 * certain that the appropriate commands are inserted.
30 *
31 * The context life cycle is semi-complicated in that context BOs may live
32 * longer than the context itself because of the way the hardware and object
33 * tracking work. Below is a very crude representation of the state machine
34 * describing the context life.
35 *                                          refcount  pincount  active
36 * S0: initial state                            0         0        0
37 * S1: context created                          1         0        0
38 * S2: context is currently running             2         1        X
39 * S3: GPU referenced, but not current          2         0        1
40 * S4: context is current, but destroyed        1         1        0
41 * S5: like S3, but destroyed                   1         0        1
42 *
43 * The most common (but not all) transitions:
44 * S0->S1: client creates a context
45 * S1->S2: client submits execbuf with context
46 * S2->S3: another client submits execbuf with context
47 * S3->S1: context object was retired
48 * S3->S2: client submits another execbuf
49 * S2->S4: context destroy called with current context
50 * S3->S5->S0: destroy path
51 * S4->S5->S0: destroy path on current context
52 *
53 * There are two confusing terms used above:
54 * The "current context" means the context which is currently running on the
55 * GPU. The GPU has loaded its state already and has stored away the gtt
56 * offset of the BO. The GPU is not actively referencing the data at this
57 * offset, but it will on the next context switch. The only way to avoid this
58 * is to do a GPU reset.
59 *
60 * An "active context' is one which was previously the "current context" and is
61 * on the active list waiting for the next context switch to occur. Until this
62 * happens, the object must remain at the same gtt offset. It is therefore
63 * possible to destroy a context, but it is still active.
64 *
65 */
66
67#include <linux/highmem.h>
68#include <linux/log2.h>
69#include <linux/nospec.h>
70
71#include <drm/drm_cache.h>
72#include <drm/drm_syncobj.h>
73
74#include "gt/gen6_ppgtt.h"
75#include "gt/intel_context.h"
76#include "gt/intel_context_param.h"
77#include "gt/intel_engine_heartbeat.h"
78#include "gt/intel_engine_user.h"
79#include "gt/intel_gpu_commands.h"
80#include "gt/intel_ring.h"
81
82#include "pxp/intel_pxp.h"
83
84#include "i915_file_private.h"
85#include "i915_gem_context.h"
86#include "i915_trace.h"
87#include "i915_user_extensions.h"
88
89#define ALL_L3_SLICES(dev)(1 << ((IS_PLATFORM(dev, INTEL_HASWELL) && (&
(dev)->__info)->gt == 3) ? 2 : ((&(dev)->__info)
->has_l3_dpf))) - 1
(1 << NUM_L3_SLICES(dev)((IS_PLATFORM(dev, INTEL_HASWELL) && (&(dev)->
__info)->gt == 3) ? 2 : ((&(dev)->__info)->has_l3_dpf
))
) - 1
90
91static struct pool slab_luts;
92
93struct i915_lut_handle *i915_lut_handle_alloc(void)
94{
95#ifdef __linux__
96 return kmem_cache_alloc(slab_luts, GFP_KERNEL(0x0001 | 0x0004));
97#else
98 return pool_get(&slab_luts, PR_WAITOK0x0001);
99#endif
100}
101
102void i915_lut_handle_free(struct i915_lut_handle *lut)
103{
104#ifdef __linux__
105 return kmem_cache_free(slab_luts, lut);
106#else
107 pool_put(&slab_luts, lut);
108#endif
109}
110
111static void lut_close(struct i915_gem_context *ctx)
112{
113 struct radix_tree_iter iter;
114 void __rcu **slot;
115
116 mutex_lock(&ctx->lut_mutex)rw_enter_write(&ctx->lut_mutex);
117 rcu_read_lock();
118 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0)for ((&iter)->index = (0); radix_tree_iter_find(&ctx
->handles_vma, &iter, &(slot)); (&iter)->index
++)
{
119 struct i915_vma *vma = rcu_dereference_raw(*slot)(*slot);
120 struct drm_i915_gem_object *obj = vma->obj;
121 struct i915_lut_handle *lut;
122
123 if (!kref_get_unless_zero(&obj->base.refcount))
124 continue;
125
126 spin_lock(&obj->lut_lock)mtx_enter(&obj->lut_lock);
127 list_for_each_entry(lut, &obj->lut_list, obj_link)for (lut = ({ const __typeof( ((__typeof(*lut) *)0)->obj_link
) *__mptr = ((&obj->lut_list)->next); (__typeof(*lut
) *)( (char *)__mptr - __builtin_offsetof(__typeof(*lut), obj_link
) );}); &lut->obj_link != (&obj->lut_list); lut
= ({ const __typeof( ((__typeof(*lut) *)0)->obj_link ) *__mptr
= (lut->obj_link.next); (__typeof(*lut) *)( (char *)__mptr
- __builtin_offsetof(__typeof(*lut), obj_link) );}))
{
128 if (lut->ctx != ctx)
129 continue;
130
131 if (lut->handle != iter.index)
132 continue;
133
134 list_del(&lut->obj_link);
135 break;
136 }
137 spin_unlock(&obj->lut_lock)mtx_leave(&obj->lut_lock);
138
139 if (&lut->obj_link != &obj->lut_list) {
140 i915_lut_handle_free(lut);
141 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
142 i915_vma_close(vma);
143 i915_gem_object_put(obj);
144 }
145
146 i915_gem_object_put(obj);
147 }
148 rcu_read_unlock();
149 mutex_unlock(&ctx->lut_mutex)rw_exit_write(&ctx->lut_mutex);
150}
151
152static struct intel_context *
153lookup_user_engine(struct i915_gem_context *ctx,
154 unsigned long flags,
155 const struct i915_engine_class_instance *ci)
156#define LOOKUP_USER_INDEX(1UL << (0)) BIT(0)(1UL << (0))
157{
158 int idx;
159
160 if (!!(flags & LOOKUP_USER_INDEX(1UL << (0))) != i915_gem_context_user_engines(ctx))
161 return ERR_PTR(-EINVAL22);
162
163 if (!i915_gem_context_user_engines(ctx)) {
164 struct intel_engine_cs *engine;
165
166 engine = intel_engine_lookup_user(ctx->i915,
167 ci->engine_class,
168 ci->engine_instance);
169 if (!engine)
170 return ERR_PTR(-EINVAL22);
171
172 idx = engine->legacy_idx;
173 } else {
174 idx = ci->engine_instance;
175 }
176
177 return i915_gem_context_get_engine(ctx, idx);
178}
179
180static int validate_priority(struct drm_i915_privateinteldrm_softc *i915,
181 const struct drm_i915_gem_context_param *args)
182{
183 s64 priority = args->value;
184
185 if (args->size)
186 return -EINVAL22;
187
188 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY(1ul << 1)))
189 return -ENODEV19;
190
191 if (priority > I915_CONTEXT_MAX_USER_PRIORITY1023 ||
192 priority < I915_CONTEXT_MIN_USER_PRIORITY-1023)
193 return -EINVAL22;
194
195 if (priority > I915_CONTEXT_DEFAULT_PRIORITY0 &&
196 !capable(CAP_SYS_NICE0x2))
197 return -EPERM1;
198
199 return 0;
200}
201
202static void proto_context_close(struct drm_i915_privateinteldrm_softc *i915,
203 struct i915_gem_proto_context *pc)
204{
205 int i;
206
207 if (pc->pxp_wakeref)
208 intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
209 if (pc->vm)
210 i915_vm_put(pc->vm);
211 if (pc->user_engines) {
212 for (i = 0; i < pc->num_user_engines; i++)
213 kfree(pc->user_engines[i].siblings);
214 kfree(pc->user_engines);
215 }
216 kfree(pc);
217}
218
219static int proto_context_set_persistence(struct drm_i915_privateinteldrm_softc *i915,
220 struct i915_gem_proto_context *pc,
221 bool_Bool persist)
222{
223 if (persist) {
224 /*
225 * Only contexts that are short-lived [that will expire or be
226 * reset] are allowed to survive past termination. We require
227 * hangcheck to ensure that the persistent requests are healthy.
228 */
229 if (!i915->params.enable_hangcheck)
230 return -EINVAL22;
231
232 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE)(1UL << (4));
233 } else {
234 /* To cancel a context we use "preempt-to-idle" */
235 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION(1ul << 2)))
236 return -ENODEV19;
237
238 /*
239 * If the cancel fails, we then need to reset, cleanly!
240 *
241 * If the per-engine reset fails, all hope is lost! We resort
242 * to a full GPU reset in that unlikely case, but realistically
243 * if the engine could not reset, the full reset does not fare
244 * much better. The damage has been done.
245 *
246 * However, if we cannot reset an engine by itself, we cannot
247 * cleanup a hanging persistent context without causing
248 * collateral damage, and we should not pretend we can by
249 * exposing the interface.
250 */
251 if (!intel_has_reset_engine(to_gt(i915)))
252 return -ENODEV19;
253
254 pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE)(1UL << (4));
255 }
256
257 return 0;
258}
259
260static int proto_context_set_protected(struct drm_i915_privateinteldrm_softc *i915,
261 struct i915_gem_proto_context *pc,
262 bool_Bool protected)
263{
264 int ret = 0;
265
266 if (!protected) {
267 pc->uses_protected_content = false0;
268 } else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) {
269 ret = -ENODEV19;
270 } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)(1UL << (3))) ||
271 !(pc->user_flags & BIT(UCONTEXT_BANNABLE)(1UL << (2)))) {
272 ret = -EPERM1;
273 } else {
274 pc->uses_protected_content = true1;
275
276 /*
277 * protected context usage requires the PXP session to be up,
278 * which in turn requires the device to be active.
279 */
280 pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
281
282 if (!intel_pxp_is_active(&to_gt(i915)->pxp))
283 ret = intel_pxp_start(&to_gt(i915)->pxp);
284 }
285
286 return ret;
287}
288
289static struct i915_gem_proto_context *
290proto_context_create(struct drm_i915_privateinteldrm_softc *i915, unsigned int flags)
291{
292 struct i915_gem_proto_context *pc, *err;
293
294 pc = kzalloc(sizeof(*pc), GFP_KERNEL(0x0001 | 0x0004));
9
Calling 'kzalloc'
11
Returned allocated memory
295 if (!pc)
12
Assuming 'pc' is non-null
13
Taking false branch
296 return ERR_PTR(-ENOMEM12);
297
298 pc->num_user_engines = -1;
299 pc->user_engines = NULL((void *)0);
300 pc->user_flags = BIT(UCONTEXT_BANNABLE)(1UL << (2)) |
301 BIT(UCONTEXT_RECOVERABLE)(1UL << (3));
302 if (i915->params.enable_hangcheck)
14
Assuming field 'enable_hangcheck' is false
15
Taking false branch
303 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE)(1UL << (4));
304 pc->sched.priority = I915_PRIORITY_NORMAL;
305
306 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE(1u << 1)) {
16
Assuming the condition is false
17
Taking false branch
307 if (!HAS_EXECLISTS(i915)((&(i915)->__info)->has_logical_ring_contexts)) {
308 err = ERR_PTR(-EINVAL22);
309 goto proto_close;
310 }
311 pc->single_timeline = true1;
312 }
313
314 return pc;
315
316proto_close:
317 proto_context_close(i915, pc);
318 return err;
319}
320
321static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
322 struct i915_gem_proto_context *pc,
323 u32 *id)
324{
325 int ret;
326 void *old;
327
328 lockdep_assert_held(&fpriv->proto_context_lock)do { (void)(&fpriv->proto_context_lock); } while(0);
329
330 ret = xa_alloc(&fpriv->context_xa, id, NULL((void *)0), xa_limit_32b0, GFP_KERNEL(0x0001 | 0x0004));
331 if (ret)
332 return ret;
333
334 old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL(0x0001 | 0x0004));
335 if (xa_is_err(old)) {
336 xa_erase(&fpriv->context_xa, *id);
337 return xa_err(old);
338 }
339 WARN_ON(old)({ int __ret = !!(old); if (__ret) printf("WARNING %s failed at %s:%d\n"
, "old", "/usr/src/sys/dev/pci/drm/i915/gem/i915_gem_context.c"
, 339); __builtin_expect(!!(__ret), 0); })
;
340
341 return 0;
342}
343
344static int proto_context_register(struct drm_i915_file_private *fpriv,
345 struct i915_gem_proto_context *pc,
346 u32 *id)
347{
348 int ret;
349
350 mutex_lock(&fpriv->proto_context_lock)rw_enter_write(&fpriv->proto_context_lock);
351 ret = proto_context_register_locked(fpriv, pc, id);
352 mutex_unlock(&fpriv->proto_context_lock)rw_exit_write(&fpriv->proto_context_lock);
353
354 return ret;
355}
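
proto_context_register() reserves an id in fpriv->context_xa with a NULL placeholder, stores the proto-context in fpriv->proto_context_xa, and rolls the reservation back if the store fails. Reading the code above, ownership of pc changes hands at this call: on success the reference lives in the per-file xarray (and is released elsewhere in the driver), while on failure the caller still owns pc and must release it itself. The fragment below is a hedged illustration of that caller-side split, with illustrative local names; this is exactly the kind of error handling whose absence produces the leak reported at line 2356.

    /* Illustrative fragment -- the ownership split around registration. */
    ret = proto_context_register(fpriv, ext_data.pc, &id);
    if (ret) {
            proto_context_close(i915, ext_data.pc); /* caller still owns it */
            return ret;
    }
    /* From here on fpriv->proto_context_xa holds the proto-context;
     * the caller must not free it again. */
    return 0;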
356
357static struct i915_address_space *
358i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
359{
360 struct i915_address_space *vm;
361
362 xa_lock(&file_priv->vm_xa)do { mtx_enter(&(&file_priv->vm_xa)->xa_lock); }
while (0)
;
363 vm = xa_load(&file_priv->vm_xa, id);
364 if (vm)
365 kref_get(&vm->ref);
366 xa_unlock(&file_priv->vm_xa)do { mtx_leave(&(&file_priv->vm_xa)->xa_lock); }
while (0)
;
367
368 return vm;
369}
370
371static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
372 struct i915_gem_proto_context *pc,
373 const struct drm_i915_gem_context_param *args)
374{
375 struct drm_i915_privateinteldrm_softc *i915 = fpriv->dev_priv;
376 struct i915_address_space *vm;
377
378 if (args->size)
379 return -EINVAL22;
380
381 if (!HAS_FULL_PPGTT(i915)(((&(i915)->__runtime)->ppgtt_type) >= INTEL_PPGTT_FULL
)
)
382 return -ENODEV19;
383
384 if (upper_32_bits(args->value)((u32)(((args->value) >> 16) >> 16)))
385 return -ENOENT2;
386
387 vm = i915_gem_vm_lookup(fpriv, args->value);
388 if (!vm)
389 return -ENOENT2;
390
391 if (pc->vm)
392 i915_vm_put(pc->vm);
393 pc->vm = vm;
394
395 return 0;
396}
397
398struct set_proto_ctx_engines {
399 struct drm_i915_privateinteldrm_softc *i915;
400 unsigned num_engines;
401 struct i915_gem_proto_engine *engines;
402};
403
404static int
405set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
406 void *data)
407{
408 struct i915_context_engines_load_balance __user *ext =
409 container_of_user(base, typeof(*ext), base)({ void *__mptr = (void *)(base); do { } while (0); ((typeof(
*ext) *)(__mptr - __builtin_offsetof(typeof(*ext), base))); }
)
;
410 const struct set_proto_ctx_engines *set = data;
411 struct drm_i915_privateinteldrm_softc *i915 = set->i915;
412 struct intel_engine_cs **siblings;
413 u16 num_siblings, idx;
414 unsigned int n;
415 int err;
416
417 if (!HAS_EXECLISTS(i915)((&(i915)->__info)->has_logical_ring_contexts))
418 return -ENODEV19;
419
420 if (get_user(idx, &ext->engine_index)-copyin(&ext->engine_index, &(idx), sizeof(idx)))
421 return -EFAULT14;
422
423 if (idx >= set->num_engines) {
424 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid placement value, %d >= %d\n"
, idx, set->num_engines)
425 idx, set->num_engines)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid placement value, %d >= %d\n"
, idx, set->num_engines)
;
426 return -EINVAL22;
427 }
428
429 idx = array_index_nospec(idx, set->num_engines)(idx);
430 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
431 drm_dbg(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid placement[%d], already occupied\n"
, idx)
432 "Invalid placement[%d], already occupied\n", idx)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid placement[%d], already occupied\n"
, idx)
;
433 return -EEXIST17;
434 }
435
436 if (get_user(num_siblings, &ext->num_siblings)-copyin(&ext->num_siblings, &(num_siblings), sizeof
(num_siblings))
)
437 return -EFAULT14;
438
439 err = check_user_mbz(&ext->flags)({ typeof(*(&ext->flags)) mbz__; -copyin((&ext->
flags), &(mbz__), sizeof(mbz__)) ? -14 : mbz__ ? -22 : 0;
})
;
440 if (err)
441 return err;
442
443 err = check_user_mbz(&ext->mbz64)({ typeof(*(&ext->mbz64)) mbz__; -copyin((&ext->
mbz64), &(mbz__), sizeof(mbz__)) ? -14 : mbz__ ? -22 : 0;
})
;
444 if (err)
445 return err;
446
447 if (num_siblings == 0)
448 return 0;
449
450 siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL(0x0001 | 0x0004));
451 if (!siblings)
452 return -ENOMEM12;
453
454 for (n = 0; n < num_siblings; n++) {
455 struct i915_engine_class_instance ci;
456
457 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
458 err = -EFAULT14;
459 goto err_siblings;
460 }
461
462 siblings[n] = intel_engine_lookup_user(i915,
463 ci.engine_class,
464 ci.engine_instance);
465 if (!siblings[n]) {
466 drm_dbg(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid sibling[%d]: { class:%d, inst:%d }\n"
, n, ci.engine_class, ci.engine_instance)
467 "Invalid sibling[%d]: { class:%d, inst:%d }\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid sibling[%d]: { class:%d, inst:%d }\n"
, n, ci.engine_class, ci.engine_instance)
468 n, ci.engine_class, ci.engine_instance)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid sibling[%d]: { class:%d, inst:%d }\n"
, n, ci.engine_class, ci.engine_instance)
;
469 err = -EINVAL22;
470 goto err_siblings;
471 }
472 }
473
474 if (num_siblings == 1) {
475 set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
476 set->engines[idx].engine = siblings[0];
477 kfree(siblings);
478 } else {
479 set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
480 set->engines[idx].num_siblings = num_siblings;
481 set->engines[idx].siblings = siblings;
482 }
483
484 return 0;
485
486err_siblings:
487 kfree(siblings);
488
489 return err;
490}
491
492static int
493set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
494{
495 struct i915_context_engines_bond __user *ext =
496 container_of_user(base, typeof(*ext), base)({ void *__mptr = (void *)(base); do { } while (0); ((typeof(
*ext) *)(__mptr - __builtin_offsetof(typeof(*ext), base))); }
)
;
497 const struct set_proto_ctx_engines *set = data;
498 struct drm_i915_privateinteldrm_softc *i915 = set->i915;
499 struct i915_engine_class_instance ci;
500 struct intel_engine_cs *master;
501 u16 idx, num_bonds;
502 int err, n;
503
504 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 12 && !IS_TIGERLAKE(i915)IS_PLATFORM(i915, INTEL_TIGERLAKE) &&
505 !IS_ROCKETLAKE(i915)IS_PLATFORM(i915, INTEL_ROCKETLAKE) && !IS_ALDERLAKE_S(i915)IS_PLATFORM(i915, INTEL_ALDERLAKE_S)) {
506 drm_dbg(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Bonding not supported on this platform\n"
)
507 "Bonding not supported on this platform\n")__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Bonding not supported on this platform\n"
)
;
508 return -ENODEV19;
509 }
510
511 if (get_user(idx, &ext->virtual_index)-copyin(&ext->virtual_index, &(idx), sizeof(idx)))
512 return -EFAULT14;
513
514 if (idx >= set->num_engines) {
515 drm_dbg(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid index for virtual engine: %d >= %d\n"
, idx, set->num_engines)
516 "Invalid index for virtual engine: %d >= %d\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid index for virtual engine: %d >= %d\n"
, idx, set->num_engines)
517 idx, set->num_engines)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid index for virtual engine: %d >= %d\n"
, idx, set->num_engines)
;
518 return -EINVAL22;
519 }
520
521 idx = array_index_nospec(idx, set->num_engines)(idx);
522 if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
523 drm_dbg(&i915->drm, "Invalid engine at %d\n", idx)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid engine at %d\n"
, idx)
;
524 return -EINVAL22;
525 }
526
527 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
528 drm_dbg(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Bonding with virtual engines not allowed\n"
)
529 "Bonding with virtual engines not allowed\n")__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Bonding with virtual engines not allowed\n"
)
;
530 return -EINVAL22;
531 }
532
533 err = check_user_mbz(&ext->flags)({ typeof(*(&ext->flags)) mbz__; -copyin((&ext->
flags), &(mbz__), sizeof(mbz__)) ? -14 : mbz__ ? -22 : 0;
})
;
534 if (err)
535 return err;
536
537 for (n = 0; n < ARRAY_SIZE(ext->mbz64)(sizeof((ext->mbz64)) / sizeof((ext->mbz64)[0])); n++) {
538 err = check_user_mbz(&ext->mbz64[n])({ typeof(*(&ext->mbz64[n])) mbz__; -copyin((&ext->
mbz64[n]), &(mbz__), sizeof(mbz__)) ? -14 : mbz__ ? -22 :
0; })
;
539 if (err)
540 return err;
541 }
542
543 if (copy_from_user(&ci, &ext->master, sizeof(ci)))
544 return -EFAULT14;
545
546 master = intel_engine_lookup_user(i915,
547 ci.engine_class,
548 ci.engine_instance);
549 if (!master) {
550 drm_dbg(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unrecognised master engine: { class:%u, instance:%u }\n"
, ci.engine_class, ci.engine_instance)
551 "Unrecognised master engine: { class:%u, instance:%u }\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unrecognised master engine: { class:%u, instance:%u }\n"
, ci.engine_class, ci.engine_instance)
552 ci.engine_class, ci.engine_instance)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unrecognised master engine: { class:%u, instance:%u }\n"
, ci.engine_class, ci.engine_instance)
;
553 return -EINVAL22;
554 }
555
556 if (intel_engine_uses_guc(master)) {
557 DRM_DEBUG("bonding extension not supported with GuC submission")___drm_dbg(((void *)0), DRM_UT_CORE, "bonding extension not supported with GuC submission"
)
;
558 return -ENODEV19;
559 }
560
561 if (get_user(num_bonds, &ext->num_bonds)-copyin(&ext->num_bonds, &(num_bonds), sizeof(num_bonds
))
)
562 return -EFAULT14;
563
564 for (n = 0; n < num_bonds; n++) {
565 struct intel_engine_cs *bond;
566
567 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
568 return -EFAULT14;
569
570 bond = intel_engine_lookup_user(i915,
571 ci.engine_class,
572 ci.engine_instance);
573 if (!bond) {
574 drm_dbg(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n"
, n, ci.engine_class, ci.engine_instance)
575 "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n"
, n, ci.engine_class, ci.engine_instance)
576 n, ci.engine_class, ci.engine_instance)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n"
, n, ci.engine_class, ci.engine_instance)
;
577 return -EINVAL22;
578 }
579 }
580
581 return 0;
582}
583
584static int
585set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
586 void *data)
587{
588 struct i915_context_engines_parallel_submit __user *ext =
589 container_of_user(base, typeof(*ext), base)({ void *__mptr = (void *)(base); do { } while (0); ((typeof(
*ext) *)(__mptr - __builtin_offsetof(typeof(*ext), base))); }
)
;
590 const struct set_proto_ctx_engines *set = data;
591 struct drm_i915_privateinteldrm_softc *i915 = set->i915;
592 struct i915_engine_class_instance prev_engine;
593 u64 flags;
594 int err = 0, n, i, j;
595 u16 slot, width, num_siblings;
596 struct intel_engine_cs **siblings = NULL((void *)0);
597 intel_engine_mask_t prev_mask;
598
599 if (get_user(slot, &ext->engine_index)-copyin(&ext->engine_index, &(slot), sizeof(slot)))
600 return -EFAULT14;
601
602 if (get_user(width, &ext->width)-copyin(&ext->width, &(width), sizeof(width)))
603 return -EFAULT14;
604
605 if (get_user(num_siblings, &ext->num_siblings)-copyin(&ext->num_siblings, &(num_siblings), sizeof
(num_siblings))
)
606 return -EFAULT14;
607
608 if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) &&
609 num_siblings != 1) {
610 drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Only 1 sibling (%d) supported in non-GuC mode\n"
, num_siblings)
611 num_siblings)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Only 1 sibling (%d) supported in non-GuC mode\n"
, num_siblings)
;
612 return -EINVAL22;
613 }
614
615 if (slot >= set->num_engines) {
616 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid placement value, %d >= %d\n"
, slot, set->num_engines)
617 slot, set->num_engines)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid placement value, %d >= %d\n"
, slot, set->num_engines)
;
618 return -EINVAL22;
619 }
620
621 if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
622 drm_dbg(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid placement[%d], already occupied\n"
, slot)
623 "Invalid placement[%d], already occupied\n", slot)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid placement[%d], already occupied\n"
, slot)
;
624 return -EINVAL22;
625 }
626
627 if (get_user(flags, &ext->flags)-copyin(&ext->flags, &(flags), sizeof(flags)))
628 return -EFAULT14;
629
630 if (flags) {
631 drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unknown flags 0x%02llx"
, flags)
;
632 return -EINVAL22;
633 }
634
635 for (n = 0; n < ARRAY_SIZE(ext->mbz64)(sizeof((ext->mbz64)) / sizeof((ext->mbz64)[0])); n++) {
636 err = check_user_mbz(&ext->mbz64[n])({ typeof(*(&ext->mbz64[n])) mbz__; -copyin((&ext->
mbz64[n]), &(mbz__), sizeof(mbz__)) ? -14 : mbz__ ? -22 :
0; })
;
637 if (err)
638 return err;
639 }
640
641 if (width < 2) {
642 drm_dbg(&i915->drm, "Width (%d) < 2\n", width)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Width (%d) < 2\n"
, width)
;
643 return -EINVAL22;
644 }
645
646 if (num_siblings < 1) {
647 drm_dbg(&i915->drm, "Number siblings (%d) < 1\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Number siblings (%d) < 1\n"
, num_siblings)
648 num_siblings)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Number siblings (%d) < 1\n"
, num_siblings)
;
649 return -EINVAL22;
650 }
651
652 siblings = kmalloc_array(num_siblings * width,
653 sizeof(*siblings),
654 GFP_KERNEL(0x0001 | 0x0004));
655 if (!siblings)
656 return -ENOMEM12;
657
658 /* Create contexts / engines */
659 for (i = 0; i < width; ++i) {
660 intel_engine_mask_t current_mask = 0;
661
662 for (j = 0; j < num_siblings; ++j) {
663 struct i915_engine_class_instance ci;
664
665 n = i * num_siblings + j;
666 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
667 err = -EFAULT14;
668 goto out_err;
669 }
670
671 siblings[n] =
672 intel_engine_lookup_user(i915, ci.engine_class,
673 ci.engine_instance);
674 if (!siblings[n]) {
675 drm_dbg(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid sibling[%d]: { class:%d, inst:%d }\n"
, n, ci.engine_class, ci.engine_instance)
676 "Invalid sibling[%d]: { class:%d, inst:%d }\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid sibling[%d]: { class:%d, inst:%d }\n"
, n, ci.engine_class, ci.engine_instance)
677 n, ci.engine_class, ci.engine_instance)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid sibling[%d]: { class:%d, inst:%d }\n"
, n, ci.engine_class, ci.engine_instance)
;
678 err = -EINVAL22;
679 goto out_err;
680 }
681
682 /*
683 * We don't support breadcrumb handshake on these
684 * classes
685 */
686 if (siblings[n]->class == RENDER_CLASS0 ||
687 siblings[n]->class == COMPUTE_CLASS5) {
688 err = -EINVAL22;
689 goto out_err;
690 }
691
692 if (n) {
693 if (prev_engine.engine_class !=
694 ci.engine_class) {
695 drm_dbg(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Mismatched class %d, %d\n"
, prev_engine.engine_class, ci.engine_class)
696 "Mismatched class %d, %d\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Mismatched class %d, %d\n"
, prev_engine.engine_class, ci.engine_class)
697 prev_engine.engine_class,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Mismatched class %d, %d\n"
, prev_engine.engine_class, ci.engine_class)
698 ci.engine_class)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Mismatched class %d, %d\n"
, prev_engine.engine_class, ci.engine_class)
;
699 err = -EINVAL22;
700 goto out_err;
701 }
702 }
703
704 prev_engine = ci;
705 current_mask |= siblings[n]->logical_mask;
706 }
707
708 if (i > 0) {
709 if (current_mask != prev_mask << 1) {
710 drm_dbg(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Non contiguous logical mask 0x%x, 0x%x\n"
, prev_mask, current_mask)
711 "Non contiguous logical mask 0x%x, 0x%x\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Non contiguous logical mask 0x%x, 0x%x\n"
, prev_mask, current_mask)
712 prev_mask, current_mask)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Non contiguous logical mask 0x%x, 0x%x\n"
, prev_mask, current_mask)
;
713 err = -EINVAL22;
714 goto out_err;
715 }
716 }
717 prev_mask = current_mask;
718 }
719
720 set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
721 set->engines[slot].num_siblings = num_siblings;
722 set->engines[slot].width = width;
723 set->engines[slot].siblings = siblings;
724
725 return 0;
726
727out_err:
728 kfree(siblings);
729
730 return err;
731}
732
733static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
734 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE0] = set_proto_ctx_engines_balance,
735 [I915_CONTEXT_ENGINES_EXT_BOND1] = set_proto_ctx_engines_bond,
736 [I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT2] =
737 set_proto_ctx_engines_parallel_submit,
738};
739
740static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
741 struct i915_gem_proto_context *pc,
742 const struct drm_i915_gem_context_param *args)
743{
744 struct drm_i915_privateinteldrm_softc *i915 = fpriv->dev_priv;
745 struct set_proto_ctx_engines set = { .i915 = i915 };
746 struct i915_context_param_engines __user *user =
747 u64_to_user_ptr(args->value)((void *)(uintptr_t)(args->value));
748 unsigned int n;
749 u64 extensions;
750 int err;
751
752 if (pc->num_user_engines >= 0) {
753 drm_dbg(&i915->drm, "Cannot set engines twice")__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Cannot set engines twice"
)
;
754 return -EINVAL22;
755 }
756
757 if (args->size < sizeof(*user) ||
758 !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))(((args->size - sizeof(*user)) & ((sizeof(*user->engines
)) - 1)) == 0)
) {
759 drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid size for engine array: %d\n"
, args->size)
760 args->size)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid size for engine array: %d\n"
, args->size)
;
761 return -EINVAL22;
762 }
763
764 set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
765 /* RING_MASK has no shift so we can use it directly here */
766 if (set.num_engines > I915_EXEC_RING_MASK(0x3f) + 1)
767 return -EINVAL22;
768
769 set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL(0x0001 | 0x0004));
770 if (!set.engines)
771 return -ENOMEM12;
772
773 for (n = 0; n < set.num_engines; n++) {
774 struct i915_engine_class_instance ci;
775 struct intel_engine_cs *engine;
776
777 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
778 kfree(set.engines);
779 return -EFAULT14;
780 }
781
782 memset(&set.engines[n], 0, sizeof(set.engines[n]))__builtin_memset((&set.engines[n]), (0), (sizeof(set.engines
[n])))
;
783
784 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
785 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE-1)
786 continue;
787
788 engine = intel_engine_lookup_user(i915,
789 ci.engine_class,
790 ci.engine_instance);
791 if (!engine) {
792 drm_dbg(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid engine[%d]: { class:%d, instance:%d }\n"
, n, ci.engine_class, ci.engine_instance)
793 "Invalid engine[%d]: { class:%d, instance:%d }\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid engine[%d]: { class:%d, instance:%d }\n"
, n, ci.engine_class, ci.engine_instance)
794 n, ci.engine_class, ci.engine_instance)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid engine[%d]: { class:%d, instance:%d }\n"
, n, ci.engine_class, ci.engine_instance)
;
795 kfree(set.engines);
796 return -ENOENT2;
797 }
798
799 set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
800 set.engines[n].engine = engine;
801 }
802
803 err = -EFAULT14;
804 if (!get_user(extensions, &user->extensions)-copyin(&user->extensions, &(extensions), sizeof(extensions
))
)
805 err = i915_user_extensions(u64_to_user_ptr(extensions)((void *)(uintptr_t)(extensions)),
806 set_proto_ctx_engines_extensions,
807 ARRAY_SIZE(set_proto_ctx_engines_extensions)(sizeof((set_proto_ctx_engines_extensions)) / sizeof((set_proto_ctx_engines_extensions
)[0]))
,
808 &set);
809 if (err) {
810 kfree(set.engines);
811 return err;
812 }
813
814 pc->num_user_engines = set.num_engines;
815 pc->user_engines = set.engines;
816
817 return 0;
818}
819
820static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
821 struct i915_gem_proto_context *pc,
822 struct drm_i915_gem_context_param *args)
823{
824 struct drm_i915_privateinteldrm_softc *i915 = fpriv->dev_priv;
825 struct drm_i915_gem_context_param_sseu user_sseu;
826 struct intel_sseu *sseu;
827 int ret;
828
829 if (args->size < sizeof(user_sseu))
830 return -EINVAL22;
831
832 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) != 11)
833 return -ENODEV19;
834
835 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value)((void *)(uintptr_t)(args->value)),
836 sizeof(user_sseu)))
837 return -EFAULT14;
838
839 if (user_sseu.rsvd)
840 return -EINVAL22;
841
842 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX(1u << 0)))
843 return -EINVAL22;
844
845 if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX(1u << 0)) != (pc->num_user_engines >= 0))
846 return -EINVAL22;
847
848 if (pc->num_user_engines >= 0) {
849 int idx = user_sseu.engine.engine_instance;
850 struct i915_gem_proto_engine *pe;
851
852 if (idx >= pc->num_user_engines)
853 return -EINVAL22;
854
855 idx = array_index_nospec(idx, pc->num_user_engines)(idx);
856 pe = &pc->user_engines[idx];
857
858 /* Only render engine supports RPCS configuration. */
859 if (pe->engine->class != RENDER_CLASS0)
860 return -EINVAL22;
861
862 sseu = &pe->sseu;
863 } else {
864 /* Only render engine supports RPCS configuration. */
865 if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
866 return -EINVAL22;
867
868 /* There is only one render engine */
869 if (user_sseu.engine.engine_instance != 0)
870 return -EINVAL22;
871
872 sseu = &pc->legacy_rcs_sseu;
873 }
874
875 ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
876 if (ret)
877 return ret;
878
879 args->size = sizeof(user_sseu);
880
881 return 0;
882}
883
884static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
885 struct i915_gem_proto_context *pc,
886 struct drm_i915_gem_context_param *args)
887{
888 int ret = 0;
889
890 switch (args->param) {
891 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE0x4:
892 if (args->size)
893 ret = -EINVAL22;
894 else if (args->value)
895 pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE)(1UL << (1));
896 else
897 pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE)(1UL << (1));
898 break;
899
900 case I915_CONTEXT_PARAM_BANNABLE0x5:
901 if (args->size)
902 ret = -EINVAL22;
903 else if (!capable(CAP_SYS_ADMIN0x1) && !args->value)
904 ret = -EPERM1;
905 else if (args->value)
906 pc->user_flags |= BIT(UCONTEXT_BANNABLE)(1UL << (2));
907 else if (pc->uses_protected_content)
908 ret = -EPERM1;
909 else
910 pc->user_flags &= ~BIT(UCONTEXT_BANNABLE)(1UL << (2));
911 break;
912
913 case I915_CONTEXT_PARAM_RECOVERABLE0x8:
914 if (args->size)
915 ret = -EINVAL22;
916 else if (!args->value)
917 pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE)(1UL << (3));
918 else if (pc->uses_protected_content)
919 ret = -EPERM1;
920 else
921 pc->user_flags |= BIT(UCONTEXT_RECOVERABLE)(1UL << (3));
922 break;
923
924 case I915_CONTEXT_PARAM_PRIORITY0x6:
925 ret = validate_priority(fpriv->dev_priv, args);
926 if (!ret)
927 pc->sched.priority = args->value;
928 break;
929
930 case I915_CONTEXT_PARAM_SSEU0x7:
931 ret = set_proto_ctx_sseu(fpriv, pc, args);
932 break;
933
934 case I915_CONTEXT_PARAM_VM0x9:
935 ret = set_proto_ctx_vm(fpriv, pc, args);
936 break;
937
938 case I915_CONTEXT_PARAM_ENGINES0xa:
939 ret = set_proto_ctx_engines(fpriv, pc, args);
940 break;
941
942 case I915_CONTEXT_PARAM_PERSISTENCE0xb:
943 if (args->size)
944 ret = -EINVAL22;
945 else
946 ret = proto_context_set_persistence(fpriv->dev_priv, pc,
947 args->value);
948 break;
949
950 case I915_CONTEXT_PARAM_PROTECTED_CONTENT0xd:
951 ret = proto_context_set_protected(fpriv->dev_priv, pc,
952 args->value);
953 break;
954
955 case I915_CONTEXT_PARAM_NO_ZEROMAP0x2:
956 case I915_CONTEXT_PARAM_BAN_PERIOD0x1:
957 case I915_CONTEXT_PARAM_RINGSIZE0xc:
958 default:
959 ret = -EINVAL22;
960 break;
961 }
962
963 return ret;
964}
965
966static int intel_context_set_gem(struct intel_context *ce,
967 struct i915_gem_context *ctx,
968 struct intel_sseu sseu)
969{
970 int ret = 0;
971
972 GEM_BUG_ON(rcu_access_pointer(ce->gem_context))((void)0);
973 RCU_INIT_POINTER(ce->gem_context, ctx)do { (ce->gem_context) = (ctx); } while(0);
974
975 GEM_BUG_ON(intel_context_is_pinned(ce))((void)0);
976 ce->ring_size = SZ_16K(16 << 10);
977
978 i915_vm_put(ce->vm);
979 ce->vm = i915_gem_context_get_eb_vm(ctx);
980
981 if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
982 intel_engine_has_timeslices(ce->engine) &&
983 intel_engine_has_semaphores(ce->engine))
984 __set_bit(CONTEXT_USE_SEMAPHORES5, &ce->flags);
985
986 if (CONFIG_DRM_I915_REQUEST_TIMEOUT20000 &&
987 ctx->i915->params.request_timeout_ms) {
988 unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;
989
990 intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
991 }
992
993 /* A valid SSEU has no zero fields */
994 if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS)({ int __ret = !!(ce->engine->class != 0); if (__ret) printf
("WARNING %s failed at %s:%d\n", "ce->engine->class != 0"
, "/usr/src/sys/dev/pci/drm/i915/gem/i915_gem_context.c", 994
); __builtin_expect(!!(__ret), 0); })
)
995 ret = intel_context_reconfigure_sseu(ce, sseu);
996
997 return ret;
998}
999
1000static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
1001{
1002 while (count--) {
1003 struct intel_context *ce = e->engines[count], *child;
1004
1005 if (!ce || !test_bit(CONTEXT_PERMA_PIN11, &ce->flags))
1006 continue;
1007
1008 for_each_child(ce, child)for (child = ({ const __typeof( ((__typeof(*child) *)0)->parallel
.child_link ) *__mptr = ((&(ce)->parallel.child_list)->
next); (__typeof(*child) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*child), parallel.child_link) );}); &child->
parallel.child_link != (&(ce)->parallel.child_list); child
= ({ const __typeof( ((__typeof(*child) *)0)->parallel.child_link
) *__mptr = (child->parallel.child_link.next); (__typeof(
*child) *)( (char *)__mptr - __builtin_offsetof(__typeof(*child
), parallel.child_link) );}))
1009 intel_context_unpin(child);
1010 intel_context_unpin(ce);
1011 }
1012}
1013
1014static void unpin_engines(struct i915_gem_engines *e)
1015{
1016 __unpin_engines(e, e->num_engines);
1017}
1018
1019static void __free_engines(struct i915_gem_engines *e, unsigned int count)
1020{
1021 while (count--) {
1022 if (!e->engines[count])
1023 continue;
1024
1025 intel_context_put(e->engines[count]);
1026 }
1027 kfree(e);
1028}
1029
1030static void free_engines(struct i915_gem_engines *e)
1031{
1032 __free_engines(e, e->num_engines);
1033}
1034
1035static void free_engines_rcu(struct rcu_head *rcu)
1036{
1037 struct i915_gem_engines *engines =
1038 container_of(rcu, struct i915_gem_engines, rcu)({ const __typeof( ((struct i915_gem_engines *)0)->rcu ) *
__mptr = (rcu); (struct i915_gem_engines *)( (char *)__mptr -
__builtin_offsetof(struct i915_gem_engines, rcu) );})
;
1039
1040 i915_sw_fence_fini(&engines->fence);
1041 free_engines(engines);
1042}
1043
1044static void accumulate_runtime(struct i915_drm_client *client,
1045 struct i915_gem_engines *engines)
1046{
1047 struct i915_gem_engines_iter it;
1048 struct intel_context *ce;
1049
1050 if (!client)
1051 return;
1052
1053 /* Transfer accumulated runtime to the parent GEM context. */
1054 for_each_gem_engine(ce, engines, it)for (i915_gem_engines_iter_init(&(it), (engines)); ((ce) =
i915_gem_engines_iter_next(&(it)));)
{
1055 unsigned int class = ce->engine->uabi_class;
1056
1057 GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime))((void)0);
1058 atomic64_add(intel_context_get_total_runtime_ns(ce),__sync_fetch_and_add_8(&client->past_runtime[class], intel_context_get_total_runtime_ns
(ce))
1059 &client->past_runtime[class])__sync_fetch_and_add_8(&client->past_runtime[class], intel_context_get_total_runtime_ns
(ce))
;
1060 }
1061}
1062
1063static int
1064engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
1065{
1066 struct i915_gem_engines *engines =
1067 container_of(fence, typeof(*engines), fence)({ const __typeof( ((typeof(*engines) *)0)->fence ) *__mptr
= (fence); (typeof(*engines) *)( (char *)__mptr - __builtin_offsetof
(typeof(*engines), fence) );})
;
1068 struct i915_gem_context *ctx = engines->ctx;
1069
1070 switch (state) {
1071 case FENCE_COMPLETE:
1072 if (!list_empty(&engines->link)) {
1073 unsigned long flags;
1074
1075 spin_lock_irqsave(&ctx->stale.lock, flags)do { flags = 0; mtx_enter(&ctx->stale.lock); } while (
0)
;
1076 list_del(&engines->link);
1077 spin_unlock_irqrestore(&ctx->stale.lock, flags)do { (void)(flags); mtx_leave(&ctx->stale.lock); } while
(0)
;
1078 }
1079 accumulate_runtime(ctx->client, engines);
1080 i915_gem_context_put(ctx);
1081
1082 break;
1083
1084 case FENCE_FREE:
1085 init_rcu_head(&engines->rcu);
1086 call_rcu(&engines->rcu, free_engines_rcu);
1087 break;
1088 }
1089
1090 return NOTIFY_DONE0;
1091}
1092
1093static struct i915_gem_engines *alloc_engines(unsigned int count)
1094{
1095 struct i915_gem_engines *e;
1096
1097 e = kzalloc(struct_size(e, engines, count)(sizeof(*(e)) + ((count) * (sizeof(*(e)->engines)))), GFP_KERNEL(0x0001 | 0x0004));
1098 if (!e)
1099 return NULL((void *)0);
1100
1101 i915_sw_fence_init(&e->fence, engines_notify)do { extern char _ctassert[(!((engines_notify) == ((void *)0)
)) ? 1 : -1 ] __attribute__((__unused__)); __i915_sw_fence_init
((&e->fence), (engines_notify), ((void *)0), ((void *)
0)); } while (0)
;
1102 return e;
1103}
1104
1105static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
1106 struct intel_sseu rcs_sseu)
1107{
1108 const struct intel_gt *gt = to_gt(ctx->i915);
1109 struct intel_engine_cs *engine;
1110 struct i915_gem_engines *e, *err;
1111 enum intel_engine_id id;
1112
1113 e = alloc_engines(I915_NUM_ENGINES);
1114 if (!e)
1115 return ERR_PTR(-ENOMEM12);
1116
1117 for_each_engine(engine, gt, id)for ((id) = 0; (id) < I915_NUM_ENGINES; (id)++) if (!((engine
) = (gt)->engine[(id)])) {} else
{
1118 struct intel_context *ce;
1119 struct intel_sseu sseu = {};
1120 int ret;
1121
1122 if (engine->legacy_idx == INVALID_ENGINE((enum intel_engine_id)-1))
1123 continue;
1124
1125 GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES)((void)0);
1126 GEM_BUG_ON(e->engines[engine->legacy_idx])((void)0);
1127
1128 ce = intel_context_create(engine);
1129 if (IS_ERR(ce)) {
1130 err = ERR_CAST(ce);
1131 goto free_engines;
1132 }
1133
1134 e->engines[engine->legacy_idx] = ce;
1135 e->num_engines = max(e->num_engines, engine->legacy_idx + 1)(((e->num_engines)>(engine->legacy_idx + 1))?(e->
num_engines):(engine->legacy_idx + 1))
;
1136
1137 if (engine->class == RENDER_CLASS0)
1138 sseu = rcs_sseu;
1139
1140 ret = intel_context_set_gem(ce, ctx, sseu);
1141 if (ret) {
1142 err = ERR_PTR(ret);
1143 goto free_engines;
1144 }
1145
1146 }
1147
1148 return e;
1149
1150free_engines:
1151 free_engines(e);
1152 return err;
1153}
1154
1155static int perma_pin_contexts(struct intel_context *ce)
1156{
1157 struct intel_context *child;
1158 int i = 0, j = 0, ret;
1159
1160 GEM_BUG_ON(!intel_context_is_parent(ce))((void)0);
1161
1162 ret = intel_context_pin(ce);
1163 if (unlikely(ret)__builtin_expect(!!(ret), 0))
1164 return ret;
1165
1166 for_each_child(ce, child)for (child = ({ const __typeof( ((__typeof(*child) *)0)->parallel
.child_link ) *__mptr = ((&(ce)->parallel.child_list)->
next); (__typeof(*child) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*child), parallel.child_link) );}); &child->
parallel.child_link != (&(ce)->parallel.child_list); child
= ({ const __typeof( ((__typeof(*child) *)0)->parallel.child_link
) *__mptr = (child->parallel.child_link.next); (__typeof(
*child) *)( (char *)__mptr - __builtin_offsetof(__typeof(*child
), parallel.child_link) );}))
{
1167 ret = intel_context_pin(child);
1168 if (unlikely(ret)__builtin_expect(!!(ret), 0))
1169 goto unwind;
1170 ++i;
1171 }
1172
1173 set_bit(CONTEXT_PERMA_PIN11, &ce->flags);
1174
1175 return 0;
1176
1177unwind:
1178 intel_context_unpin(ce);
1179 for_each_child(ce, child)for (child = ({ const __typeof( ((__typeof(*child) *)0)->parallel
.child_link ) *__mptr = ((&(ce)->parallel.child_list)->
next); (__typeof(*child) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*child), parallel.child_link) );}); &child->
parallel.child_link != (&(ce)->parallel.child_list); child
= ({ const __typeof( ((__typeof(*child) *)0)->parallel.child_link
) *__mptr = (child->parallel.child_link.next); (__typeof(
*child) *)( (char *)__mptr - __builtin_offsetof(__typeof(*child
), parallel.child_link) );}))
{
1180 if (j++ < i)
1181 intel_context_unpin(child);
1182 else
1183 break;
1184 }
1185
1186 return ret;
1187}
1188
1189static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
1190 unsigned int num_engines,
1191 struct i915_gem_proto_engine *pe)
1192{
1193 struct i915_gem_engines *e, *err;
1194 unsigned int n;
1195
1196 e = alloc_engines(num_engines);
1197 if (!e)
1198 return ERR_PTR(-ENOMEM12);
1199 e->num_engines = num_engines;
1200
1201 for (n = 0; n < num_engines; n++) {
1202 struct intel_context *ce, *child;
1203 int ret;
1204
1205 switch (pe[n].type) {
1206 case I915_GEM_ENGINE_TYPE_PHYSICAL:
1207 ce = intel_context_create(pe[n].engine);
1208 break;
1209
1210 case I915_GEM_ENGINE_TYPE_BALANCED:
1211 ce = intel_engine_create_virtual(pe[n].siblings,
1212 pe[n].num_siblings, 0);
1213 break;
1214
1215 case I915_GEM_ENGINE_TYPE_PARALLEL:
1216 ce = intel_engine_create_parallel(pe[n].siblings,
1217 pe[n].num_siblings,
1218 pe[n].width);
1219 break;
1220
1221 case I915_GEM_ENGINE_TYPE_INVALID:
1222 default:
1223 GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID)({ __builtin_expect(!!(!!(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID
)), 0); })
;
1224 continue;
1225 }
1226
1227 if (IS_ERR(ce)) {
1228 err = ERR_CAST(ce);
1229 goto free_engines;
1230 }
1231
1232 e->engines[n] = ce;
1233
1234 ret = intel_context_set_gem(ce, ctx, pe->sseu);
1235 if (ret) {
1236 err = ERR_PTR(ret);
1237 goto free_engines;
1238 }
1239 for_each_child(ce, child)for (child = ({ const __typeof( ((__typeof(*child) *)0)->parallel
.child_link ) *__mptr = ((&(ce)->parallel.child_list)->
next); (__typeof(*child) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*child), parallel.child_link) );}); &child->
parallel.child_link != (&(ce)->parallel.child_list); child
= ({ const __typeof( ((__typeof(*child) *)0)->parallel.child_link
) *__mptr = (child->parallel.child_link.next); (__typeof(
*child) *)( (char *)__mptr - __builtin_offsetof(__typeof(*child
), parallel.child_link) );}))
{
1240 ret = intel_context_set_gem(child, ctx, pe->sseu);
1241 if (ret) {
1242 err = ERR_PTR(ret);
1243 goto free_engines;
1244 }
1245 }
1246
1247 /*
1248 * XXX: Must be done after calling intel_context_set_gem as that
1249 * function changes the ring size. The ring is allocated when
1250 * the context is pinned. If the ring size is changed after
1251 * allocation we have a mismatch of the ring size and will cause
1252 * the context to hang. Presumably with a bit of reordering we
1253 * could move the perma-pin step to the backend function
1254 * intel_engine_create_parallel.
1255 */
1256 if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
1257 ret = perma_pin_contexts(ce);
1258 if (ret) {
1259 err = ERR_PTR(ret);
1260 goto free_engines;
1261 }
1262 }
1263 }
1264
1265 return e;
1266
1267free_engines:
1268 free_engines(e);
1269 return err;
1270}
1271
1272static void i915_gem_context_release_work(struct work_struct *work)
1273{
1274 struct i915_gem_context *ctx = container_of(work, typeof(*ctx),({ const __typeof( ((typeof(*ctx) *)0)->release_work ) *__mptr
= (work); (typeof(*ctx) *)( (char *)__mptr - __builtin_offsetof
(typeof(*ctx), release_work) );})
1275 release_work)({ const __typeof( ((typeof(*ctx) *)0)->release_work ) *__mptr
= (work); (typeof(*ctx) *)( (char *)__mptr - __builtin_offsetof
(typeof(*ctx), release_work) );})
;
1276 struct i915_address_space *vm;
1277
1278 trace_i915_context_free(ctx);
1279 GEM_BUG_ON(!i915_gem_context_is_closed(ctx))((void)0);
1280
1281 spin_lock(&ctx->i915->gem.contexts.lock)mtx_enter(&ctx->i915->gem.contexts.lock);
1282 list_del(&ctx->link);
1283 spin_unlock(&ctx->i915->gem.contexts.lock)mtx_leave(&ctx->i915->gem.contexts.lock);
1284
1285 if (ctx->syncobj)
1286 drm_syncobj_put(ctx->syncobj);
1287
1288 vm = ctx->vm;
1289 if (vm)
1290 i915_vm_put(vm);
1291
1292 if (ctx->pxp_wakeref)
1293 intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);
1294
1295 if (ctx->client)
1296 i915_drm_client_put(ctx->client);
1297
1298 mutex_destroy(&ctx->engines_mutex);
1299 mutex_destroy(&ctx->lut_mutex);
1300
1301 put_pid(ctx->pid);
1302 mutex_destroy(&ctx->mutex);
1303
1304 kfree_rcu(ctx, rcu)do { free((void *)ctx, 145, 0); } while(0);
1305}
1306
1307void i915_gem_context_release(struct kref *ref)
1308{
1309 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref)({ const __typeof( ((typeof(*ctx) *)0)->ref ) *__mptr = (ref
); (typeof(*ctx) *)( (char *)__mptr - __builtin_offsetof(typeof
(*ctx), ref) );})
;
1310
1311 queue_work(ctx->i915->wq, &ctx->release_work);
1312}
1313
1314static inline struct i915_gem_engines *
1315__context_engines_static(const struct i915_gem_context *ctx)
1316{
1317 return rcu_dereference_protected(ctx->engines, true)(ctx->engines);
1318}
1319
1320static void __reset_context(struct i915_gem_context *ctx,
1321 struct intel_engine_cs *engine)
1322{
1323 intel_gt_handle_error(engine->gt, engine->mask, 0,
1324 "context closure in %s", ctx->name);
1325}
1326
1327static bool_Bool __cancel_engine(struct intel_engine_cs *engine)
1328{
1329 /*
1330 * Send a "high priority pulse" down the engine to cause the
1331 * current request to be momentarily preempted. (If it fails to
1332 * be preempted, it will be reset). As we have marked our context
1333 * as banned, any incomplete request, including any running, will
1334 * be skipped following the preemption.
1335 *
1336 * If there is no hangchecking (one of the reasons why we try to
1337 * cancel the context) and no forced preemption, there may be no
1338 * means by which we reset the GPU and evict the persistent hog.
1339 * Ergo if we are unable to inject a preemptive pulse that can
1340 * kill the banned context, we fall back to doing a local reset
1341 * instead.
1342 */
1343 return intel_engine_pulse(engine) == 0;
1344}
1345
1346static struct intel_engine_cs *active_engine(struct intel_context *ce)
1347{
1348 struct intel_engine_cs *engine = NULL((void *)0);
1349 struct i915_request *rq;
1350
1351 if (intel_context_has_inflight(ce))
1352 return intel_context_inflight(ce);
1353
1354 if (!ce->timeline)
1355 return NULL((void *)0);
1356
1357 /*
1358 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
1359 * to the request to prevent it being transferred to a new timeline
1360 * (and onto a new timeline->requests list).
1361 */
1362 rcu_read_lock();
1363 list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
1364 bool_Bool found;
1365
1366 /* timeline is already completed up to this point? */
1367 if (!i915_request_get_rcu(rq))
1368 break;
1369
1370 /* Check with the backend if the request is inflight */
1371 found = true1;
1372 if (likely(rcu_access_pointer(rq->timeline) == ce->timeline)__builtin_expect(!!((rq->timeline) == ce->timeline), 1))
1373 found = i915_request_active_engine(rq, &engine);
1374
1375 i915_request_put(rq);
1376 if (found)
1377 break;
1378 }
1379 rcu_read_unlock();
1380
1381 return engine;
1382}
1383
1384static void
1385kill_engines(struct i915_gem_engines *engines, bool_Bool exit, bool_Bool persistent)
1386{
1387 struct i915_gem_engines_iter it;
1388 struct intel_context *ce;
1389
1390 /*
1391 * Map the user's engine back to the actual engines; one virtual
1392 * engine will be mapped to multiple engines, and using ctx->engine[]
1393 * the same engine may have multiple instances in the user's map.
1394 * However, we only care about pending requests, so only include
1395 * engines on which there are incomplete requests.
1396 */
1397 for_each_gem_engine(ce, engines, it) {
1398 struct intel_engine_cs *engine;
1399
1400 if ((exit || !persistent) && intel_context_revoke(ce))
1401 continue; /* Already marked. */
1402
1403 /*
1404 * Check the current active state of this context; if we
1405 * are currently executing on the GPU we need to evict
1406 * ourselves. On the other hand, if we haven't yet been
1407 * submitted to the GPU or if everything is complete,
1408 * we have nothing to do.
1409 */
1410 engine = active_engine(ce);
1411
1412 /* First attempt to gracefully cancel the context */
1413 if (engine && !__cancel_engine(engine) && (exit || !persistent))
1414 /*
1415 * If we are unable to send a preemptive pulse to bump
1416 * the context from the GPU, we have to resort to a full
1417 * reset. We hope the collateral damage is worth it.
1418 */
1419 __reset_context(engines->ctx, engine);
1420 }
1421}
1422
1423static void kill_context(struct i915_gem_context *ctx)
1424{
1425 struct i915_gem_engines *pos, *next;
1426 static int warn = 1;
1427
1428 spin_lock_irq(&ctx->stale.lock)mtx_enter(&ctx->stale.lock);
1429 GEM_BUG_ON(!i915_gem_context_is_closed(ctx))((void)0);
1430 list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
1431 if (!i915_sw_fence_await(&pos->fence)) {
1432 list_del_init(&pos->link);
1433 continue;
1434 }
1435
1436 /*
1437 * XXX don't incorrectly reset chip on
1438 * gm45/vlv/ivb/hsw/bdw cause unknown
1439 */
1440 if (IS_GRAPHICS_VER(ctx->i915, 4, 8)) {
1441 if (warn) {
1442 DRM_DEBUG("%s XXX skipping reset pos %p\n", __func__, pos)___drm_dbg(((void *)0), DRM_UT_CORE, "%s XXX skipping reset pos %p\n"
, __func__, pos)
;
1443 warn = 0;
1444 }
1445 continue;
1446 }
1447
1448 spin_unlock_irq(&ctx->stale.lock)mtx_leave(&ctx->stale.lock);
1449
1450 kill_engines(pos, !ctx->i915->params.enable_hangcheck,
1451 i915_gem_context_is_persistent(ctx));
1452
1453 spin_lock_irq(&ctx->stale.lock)mtx_enter(&ctx->stale.lock);
1454 GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence))((void)0);
1455 list_safe_reset_next(pos, next, link);
1456 list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
1457
1458 i915_sw_fence_complete(&pos->fence);
1459 }
1460 spin_unlock_irq(&ctx->stale.lock)mtx_leave(&ctx->stale.lock);
1461}
1462
1463static void engines_idle_release(struct i915_gem_context *ctx,
1464 struct i915_gem_engines *engines)
1465{
1466 struct i915_gem_engines_iter it;
1467 struct intel_context *ce;
1468
1469 INIT_LIST_HEAD(&engines->link);
1470
1471 engines->ctx = i915_gem_context_get(ctx);
1472
1473 for_each_gem_engine(ce, engines, it) {
1474 int err;
1475
1476 /* serialises with execbuf */
1477 set_bit(CONTEXT_CLOSED_BIT4, &ce->flags);
1478 if (!intel_context_pin_if_active(ce))
1479 continue;
1480
1481 /* Wait until context is finally scheduled out and retired */
1482 err = i915_sw_fence_await_active(&engines->fence,
1483 &ce->active,
1484 I915_ACTIVE_AWAIT_BARRIER(1UL << (2)));
1485 intel_context_unpin(ce);
1486 if (err)
1487 goto kill;
1488 }
1489
1490 spin_lock_irq(&ctx->stale.lock)mtx_enter(&ctx->stale.lock);
1491 if (!i915_gem_context_is_closed(ctx))
1492 list_add_tail(&engines->link, &ctx->stale.engines);
1493 spin_unlock_irq(&ctx->stale.lock)mtx_leave(&ctx->stale.lock);
1494
1495kill:
1496 if (list_empty(&engines->link)) /* raced, already closed */
1497 kill_engines(engines, true1,
1498 i915_gem_context_is_persistent(ctx));
1499
1500 i915_sw_fence_commit(&engines->fence);
1501}
1502
1503static void set_closed_name(struct i915_gem_context *ctx)
1504{
1505 char *s;
1506
1507 /* Replace '[]' with '<>' to indicate closed in debug prints */
1508
1509 s = strrchr(ctx->name, '[');
1510 if (!s)
1511 return;
1512
1513 *s = '<';
1514
1515 s = strchr(s + 1, ']');
1516 if (s)
1517 *s = '>';
1518}
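set_closed_name() only rewrites the existing name in place, turning the trailing "[pid]" into "<pid>" so closed contexts stand out in debug output. A standalone sketch with a made-up context name:

#include <stdio.h>
#include <string.h>

static void set_closed_name(char *name)
{
        char *s = strrchr(name, '[');

        if (!s)
                return;
        *s = '<';

        s = strchr(s + 1, ']');
        if (s)
                *s = '>';
}

int main(void)
{
        char name[] = "Xorg[512]";      /* hypothetical context name */

        set_closed_name(name);
        printf("%s\n", name);           /* prints "Xorg<512>" */
        return 0;
}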
1519
1520static void context_close(struct i915_gem_context *ctx)
1521{
1522 struct i915_drm_client *client;
1523
1524 /* Flush any concurrent set_engines() */
1525 mutex_lock(&ctx->engines_mutex)rw_enter_write(&ctx->engines_mutex);
1526 unpin_engines(__context_engines_static(ctx));
1527 engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
1528 i915_gem_context_set_closed(ctx);
1529 mutex_unlock(&ctx->engines_mutex)rw_exit_write(&ctx->engines_mutex);
1530
1531 mutex_lock(&ctx->mutex)rw_enter_write(&ctx->mutex);
1532
1533 set_closed_name(ctx);
1534
1535 /*
1536 * The LUT uses the VMA as a backpointer to unref the object,
1537 * so we need to clear the LUT before we close all the VMA (inside
1538 * the ppgtt).
1539 */
1540 lut_close(ctx);
1541
1542 ctx->file_priv = ERR_PTR(-EBADF9);
1543
1544 client = ctx->client;
1545 if (client) {
1546 spin_lock(&client->ctx_lock)mtx_enter(&client->ctx_lock);
1547 list_del_rcu(&ctx->client_link)list_del(&ctx->client_link);
1548 spin_unlock(&client->ctx_lock)mtx_leave(&client->ctx_lock);
1549 }
1550
1551 mutex_unlock(&ctx->mutex)rw_exit_write(&ctx->mutex);
1552
1553 /*
1554 * If the user has disabled hangchecking, we cannot be sure that
1555 * the batches will ever complete after the context is closed,
1556 * keeping the context and all resources pinned forever. So in this
1557 * case we opt to forcibly kill off all remaining requests on
1558 * context close.
1559 */
1560 kill_context(ctx);
1561
1562 i915_gem_context_put(ctx);
1563}
1564
1565static int __context_set_persistence(struct i915_gem_context *ctx, bool_Bool state)
1566{
1567 if (i915_gem_context_is_persistent(ctx) == state)
1568 return 0;
1569
1570 if (state) {
1571 /*
1572 * Only contexts that are short-lived [that will expire or be
1573 * reset] are allowed to survive past termination. We require
1574 * hangcheck to ensure that the persistent requests are healthy.
1575 */
1576 if (!ctx->i915->params.enable_hangcheck)
1577 return -EINVAL22;
1578
1579 i915_gem_context_set_persistence(ctx);
1580 } else {
1581 /* To cancel a context we use "preempt-to-idle" */
1582 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION(1ul << 2)))
1583 return -ENODEV19;
1584
1585 /*
1586 * If the cancel fails, we then need to reset, cleanly!
1587 *
1588 * If the per-engine reset fails, all hope is lost! We resort
1589 * to a full GPU reset in that unlikely case, but realistically
1590 * if the engine could not reset, the full reset does not fare
1591 * much better. The damage has been done.
1592 *
1593 * However, if we cannot reset an engine by itself, we cannot
1594 * clean up a hanging persistent context without causing
1595 * collateral damage, and we should not pretend we can by
1596 * exposing the interface.
1597 */
1598 if (!intel_has_reset_engine(to_gt(ctx->i915)))
1599 return -ENODEV19;
1600
1601 i915_gem_context_clear_persistence(ctx);
1602 }
1603
1604 return 0;
1605}
1606
1607static struct i915_gem_context *
1608i915_gem_create_context(struct drm_i915_privateinteldrm_softc *i915,
1609 const struct i915_gem_proto_context *pc)
1610{
1611 struct i915_gem_context *ctx;
1612 struct i915_address_space *vm = NULL((void *)0);
1613 struct i915_gem_engines *e;
1614 int err;
1615 int i;
1616
1617 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL(0x0001 | 0x0004));
1618 if (!ctx)
1619 return ERR_PTR(-ENOMEM12);
1620
1621 kref_init(&ctx->ref);
1622 ctx->i915 = i915;
1623 ctx->sched = pc->sched;
1624 rw_init(&ctx->mutex, "gemctx")_rw_init_flags(&ctx->mutex, "gemctx", 0, ((void *)0));
1625 INIT_LIST_HEAD(&ctx->link);
1626 INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
1627
1628 mtx_init(&ctx->stale.lock, IPL_TTY);
1629 INIT_LIST_HEAD(&ctx->stale.engines);
1630
1631 if (pc->vm) {
1632 vm = i915_vm_get(pc->vm);
1633 } else if (HAS_FULL_PPGTT(i915)) {
1634 struct i915_ppgtt *ppgtt;
1635
1636 ppgtt = i915_ppgtt_create(to_gt(i915), 0);
1637 if (IS_ERR(ppgtt)) {
1638 drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "PPGTT setup failed (%ld)\n"
, PTR_ERR(ppgtt))
1639 PTR_ERR(ppgtt))__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "PPGTT setup failed (%ld)\n"
, PTR_ERR(ppgtt))
;
1640 err = PTR_ERR(ppgtt);
1641 goto err_ctx;
1642 }
1643 vm = &ppgtt->vm;
1644 }
1645 if (vm)
1646 ctx->vm = vm;
1647
1648 rw_init(&ctx->engines_mutex, "gemeng")_rw_init_flags(&ctx->engines_mutex, "gemeng", 0, ((void
*)0))
;
1649 if (pc->num_user_engines >= 0) {
1650 i915_gem_context_set_user_engines(ctx);
1651 e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
1652 } else {
1653 i915_gem_context_clear_user_engines(ctx);
1654 e = default_engines(ctx, pc->legacy_rcs_sseu);
1655 }
1656 if (IS_ERR(e)) {
1657 err = PTR_ERR(e);
1658 goto err_vm;
1659 }
1660 RCU_INIT_POINTER(ctx->engines, e)do { (ctx->engines) = (e); } while(0);
1661
1662 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
1663 rw_init(&ctx->lut_mutex, "lutrw");
1664
1665 /* NB: Mark all slices as needing a remap so that when the context first
1666 * loads it will restore whatever remap state already exists. If there
1667 * is no remap info, it will be a NOP. */
1668 ctx->remap_slice = ALL_L3_SLICES(i915) - 1;
1669
1670 ctx->user_flags = pc->user_flags;
1671
1672 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
1673 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES(120 * hz);
1674
1675 if (pc->single_timeline) {
1676 err = drm_syncobj_create(&ctx->syncobj,
1677 DRM_SYNCOBJ_CREATE_SIGNALED(1 << 0),
1678 NULL((void *)0));
1679 if (err)
1680 goto err_engines;
1681 }
1682
1683 if (pc->uses_protected_content) {
1684 ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1685 ctx->uses_protected_content = true1;
1686 }
1687
1688 trace_i915_context_create(ctx);
1689
1690 return ctx;
1691
1692err_engines:
1693 free_engines(e);
1694err_vm:
1695 if (ctx->vm)
1696 i915_vm_put(ctx->vm);
1697err_ctx:
1698 kfree(ctx);
1699 return ERR_PTR(err);
1700}
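The error path of i915_gem_create_context() unwinds partially built state in reverse order through the err_engines/err_vm/err_ctx labels. A generic sketch of that goto-unwind idiom, with made-up resource names:

#include <errno.h>
#include <stdlib.h>

struct thing { void *a, *b; };

static int build_thing(struct thing **out)
{
        struct thing *t;
        int err;

        t = calloc(1, sizeof(*t));
        if (!t)
                return -ENOMEM;

        t->a = malloc(16);
        if (!t->a) {
                err = -ENOMEM;
                goto err_thing;
        }

        t->b = malloc(16);
        if (!t->b) {
                err = -ENOMEM;
                goto err_a;             /* unwind in reverse order of setup */
        }

        *out = t;
        return 0;

err_a:
        free(t->a);
err_thing:
        free(t);
        return err;
}

int main(void)
{
        struct thing *t;

        if (build_thing(&t))
                return 1;
        free(t->b);
        free(t->a);
        free(t);
        return 0;
}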
1701
1702static void init_contexts(struct i915_gem_contexts *gc)
1703{
1704 mtx_init(&gc->lock, IPL_NONE);
1705 INIT_LIST_HEAD(&gc->list);
1706}
1707
1708void i915_gem_init__contexts(struct drm_i915_privateinteldrm_softc *i915)
1709{
1710 init_contexts(&i915->gem.contexts);
1711}
1712
1713/*
1714 * Note that this implicitly consumes the ctx reference, by placing
1715 * the ctx in the context_xa.
1716 */
1717static void gem_context_register(struct i915_gem_context *ctx,
1718 struct drm_i915_file_private *fpriv,
1719 u32 id)
1720{
1721 struct drm_i915_privateinteldrm_softc *i915 = ctx->i915;
1722 void *old;
1723
1724 ctx->file_priv = fpriv;
1725
1726#ifdef __linux__
1727 ctx->pid = get_task_pid(current, PIDTYPE_PID);
1728 ctx->client = i915_drm_client_get(fpriv->client);
1729
1730 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
1731 current->comm, pid_nr(ctx->pid));
1732#else
1733 ctx->pid = curproc->p_p->ps_pid;
1734 ctx->client = i915_drm_client_get(fpriv->client);
1735
1736 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
1737 curproc->p_p->ps_comm, ctx->pid);
1738#endif
1739
1740 spin_lock(&ctx->client->ctx_lock)mtx_enter(&ctx->client->ctx_lock);
1741 list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
1742 spin_unlock(&ctx->client->ctx_lock)mtx_leave(&ctx->client->ctx_lock);
1743
1744 spin_lock(&i915->gem.contexts.lock)mtx_enter(&i915->gem.contexts.lock);
1745 list_add_tail(&ctx->link, &i915->gem.contexts.list);
1746 spin_unlock(&i915->gem.contexts.lock)mtx_leave(&i915->gem.contexts.lock);
1747
1748 /* And finally expose ourselves to userspace via the idr */
1749 old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL(0x0001 | 0x0004));
1750 WARN_ON(old);
1751}
1752
1753int i915_gem_context_open(struct drm_i915_privateinteldrm_softc *i915,
1754 struct drm_file *file)
1755{
1756 struct drm_i915_file_private *file_priv = file->driver_priv;
1757 struct i915_gem_proto_context *pc;
1758 struct i915_gem_context *ctx;
1759 int err;
1760
1761 rw_init(&file_priv->proto_context_lock, "pctxlk")_rw_init_flags(&file_priv->proto_context_lock, "pctxlk"
, 0, ((void *)0))
;
1762 xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC1);
1763
1764 /* 0 reserved for the default context */
1765 xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC12);
1766
1767 /* 0 reserved for invalid/unassigned ppgtt */
1768 xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC12);
1769
1770 pc = proto_context_create(i915, 0);
1771 if (IS_ERR(pc)) {
1772 err = PTR_ERR(pc);
1773 goto err;
1774 }
1775
1776 ctx = i915_gem_create_context(i915, pc);
1777 proto_context_close(i915, pc);
1778 if (IS_ERR(ctx)) {
1779 err = PTR_ERR(ctx);
1780 goto err;
1781 }
1782
1783 gem_context_register(ctx, file_priv, 0);
1784
1785 return 0;
1786
1787err:
1788 xa_destroy(&file_priv->vm_xa);
1789 xa_destroy(&file_priv->context_xa);
1790 xa_destroy(&file_priv->proto_context_xa);
1791 mutex_destroy(&file_priv->proto_context_lock);
1792 return err;
1793}
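This file leans heavily on the ERR_PTR()/IS_ERR()/PTR_ERR() convention seen above: a small negative errno is encoded in the returned pointer instead of returning NULL plus a separate error code. A simplified user-space rendition of those helpers (not the kernel definitions) for illustration:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define PTR_ERR(ptr)    ((long)(ptr))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *create_object(int fail)
{
        if (fail)
                return ERR_PTR(-ENOMEM);        /* errno encoded in the pointer */
        return malloc(16);
}

int main(void)
{
        void *obj = create_object(1);

        if (IS_ERR(obj)) {
                printf("creation failed: %ld\n", PTR_ERR(obj));
                return 1;
        }
        free(obj);
        return 0;
}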
1794
1795void i915_gem_context_close(struct drm_file *file)
1796{
1797 struct drm_i915_file_private *file_priv = file->driver_priv;
1798 struct i915_gem_proto_context *pc;
1799 struct i915_address_space *vm;
1800 struct i915_gem_context *ctx;
1801 unsigned long idx;
1802
1803 xa_for_each(&file_priv->proto_context_xa, idx, pc)
1804 proto_context_close(file_priv->dev_priv, pc);
1805 xa_destroy(&file_priv->proto_context_xa);
1806 mutex_destroy(&file_priv->proto_context_lock);
1807
1808 xa_for_each(&file_priv->context_xa, idx, ctx)
1809 context_close(ctx);
1810 xa_destroy(&file_priv->context_xa);
1811
1812 xa_for_each(&file_priv->vm_xa, idx, vm)
1813 i915_vm_put(vm);
1814 xa_destroy(&file_priv->vm_xa);
1815}
1816
1817int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
1818 struct drm_file *file)
1819{
1820 struct drm_i915_privateinteldrm_softc *i915 = to_i915(dev);
1821 struct drm_i915_gem_vm_control *args = data;
1822 struct drm_i915_file_private *file_priv = file->driver_priv;
1823 struct i915_ppgtt *ppgtt;
1824 u32 id;
1825 int err;
1826
1827 if (!HAS_FULL_PPGTT(i915))
1828 return -ENODEV19;
1829
1830 if (args->flags)
1831 return -EINVAL22;
1832
1833 ppgtt = i915_ppgtt_create(to_gt(i915), 0);
1834 if (IS_ERR(ppgtt))
1835 return PTR_ERR(ppgtt);
1836
1837 if (args->extensions) {
1838 err = i915_user_extensions(u64_to_user_ptr(args->extensions)((void *)(uintptr_t)(args->extensions)),
1839 NULL((void *)0), 0,
1840 ppgtt);
1841 if (err)
1842 goto err_put;
1843 }
1844
1845 err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
1846 xa_limit_32b0, GFP_KERNEL(0x0001 | 0x0004));
1847 if (err)
1848 goto err_put;
1849
1850 GEM_BUG_ON(id == 0)((void)0); /* reserved for invalid/unassigned ppgtt */
1851 args->vm_id = id;
1852 return 0;
1853
1854err_put:
1855 i915_vm_put(&ppgtt->vm);
1856 return err;
1857}
1858
1859int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
1860 struct drm_file *file)
1861{
1862 struct drm_i915_file_private *file_priv = file->driver_priv;
1863 struct drm_i915_gem_vm_control *args = data;
1864 struct i915_address_space *vm;
1865
1866 if (args->flags)
1867 return -EINVAL22;
1868
1869 if (args->extensions)
1870 return -EINVAL22;
1871
1872 vm = xa_erase(&file_priv->vm_xa, args->vm_id);
1873 if (!vm)
1874 return -ENOENT2;
1875
1876 i915_vm_put(vm);
1877 return 0;
1878}
1879
1880static int get_ppgtt(struct drm_i915_file_private *file_priv,
1881 struct i915_gem_context *ctx,
1882 struct drm_i915_gem_context_param *args)
1883{
1884 struct i915_address_space *vm;
1885 int err;
1886 u32 id;
1887
1888 if (!i915_gem_context_has_full_ppgtt(ctx))
1889 return -ENODEV19;
1890
1891 vm = ctx->vm;
1892 GEM_BUG_ON(!vm)((void)0);
1893
1894 /*
1895 * Get a reference for the allocated handle. Once the handle is
1896 * visible in the vm_xa table, userspace could try to close it
1897 * from under our feet, so we need to hold the extra reference
1898 * first.
1899 */
1900 i915_vm_get(vm);
1901
1902 err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b0, GFP_KERNEL(0x0001 | 0x0004));
1903 if (err) {
1904 i915_vm_put(vm);
1905 return err;
1906 }
1907
1908 GEM_BUG_ON(id == 0)((void)0); /* reserved for invalid/unassigned ppgtt */
1909 args->value = id;
1910 args->size = 0;
1911
1912 return err;
1913}
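The comment in get_ppgtt() spells out the ordering: take the extra reference first, and only then publish the handle where userspace can close it. A small user-space sketch of that reference-before-publish rule, with invented names:

#include <stdatomic.h>
#include <stdio.h>

#define TABLE_SIZE 8

struct vm { atomic_int ref; };

static struct vm *table[TABLE_SIZE];    /* stands in for the vm_xa handle table */

static void vm_get(struct vm *vm) { atomic_fetch_add(&vm->ref, 1); }
static void vm_put(struct vm *vm) { atomic_fetch_sub(&vm->ref, 1); }

static int publish(struct vm *vm, unsigned int id)
{
        vm_get(vm);                     /* reference owned by the table... */
        if (id >= TABLE_SIZE) {
                vm_put(vm);             /* ...dropped again if insertion fails */
                return -1;
        }
        table[id] = vm;                 /* only now can anyone else find it */
        return 0;
}

int main(void)
{
        struct vm vm;

        atomic_init(&vm.ref, 1);        /* caller's own reference */
        if (publish(&vm, 1))
                return 1;
        printf("refcount now %d\n", atomic_load(&vm.ref));      /* prints 2 */
        return 0;
}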
1914
1915int
1916i915_gem_user_to_context_sseu(struct intel_gt *gt,
1917 const struct drm_i915_gem_context_param_sseu *user,
1918 struct intel_sseu *context)
1919{
1920 const struct sseu_dev_info *device = &gt->info.sseu;
1921 struct drm_i915_privateinteldrm_softc *i915 = gt->i915;
1922 unsigned int dev_subslice_mask = intel_sseu_get_hsw_subslices(device, 0);
1923
1924 /* No zeros in any field. */
1925 if (!user->slice_mask || !user->subslice_mask ||
1926 !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1927 return -EINVAL22;
1928
1929 /* Max > min. */
1930 if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1931 return -EINVAL22;
1932
1933 /*
1934 * Some future proofing on the types since the uAPI is wider than the
1935 * current internal implementation.
1936 */
1937 if (overflows_type(user->slice_mask, context->slice_mask) ||
1938 overflows_type(user->subslice_mask, context->subslice_mask) ||
1939 overflows_type(user->min_eus_per_subslice,
1940 context->min_eus_per_subslice) ||
1941 overflows_type(user->max_eus_per_subslice,
1942 context->max_eus_per_subslice))
1943 return -EINVAL22;
1944
1945 /* Check validity against hardware. */
1946 if (user->slice_mask & ~device->slice_mask)
1947 return -EINVAL22;
1948
1949 if (user->subslice_mask & ~dev_subslice_mask)
1950 return -EINVAL22;
1951
1952 if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1953 return -EINVAL22;
1954
1955 context->slice_mask = user->slice_mask;
1956 context->subslice_mask = user->subslice_mask;
1957 context->min_eus_per_subslice = user->min_eus_per_subslice;
1958 context->max_eus_per_subslice = user->max_eus_per_subslice;
1959
1960 /* Part specific restrictions. */
1961 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 11) {
1962 unsigned int hw_s = hweight8(device->slice_mask);
1963 unsigned int hw_ss_per_s = hweight8(dev_subslice_mask);
1964 unsigned int req_s = hweight8(context->slice_mask);
1965 unsigned int req_ss = hweight8(context->subslice_mask);
1966
1967 /*
1968 * Only full subslice enablement is possible if more than one
1969 * slice is turned on.
1970 */
1971 if (req_s > 1 && req_ss != hw_ss_per_s)
1972 return -EINVAL22;
1973
1974 /*
1975 * If more than four (SScount bitfield limit) subslices are
1976 * requested then the number has to be even.
1977 */
1978 if (req_ss > 4 && (req_ss & 1))
1979 return -EINVAL22;
1980
1981 /*
1982 * If only one slice is enabled and subslice count is below the
1983 * device full enablement, it must be at most half of all the
1984 * available subslices.
1985 */
1986 if (req_s == 1 && req_ss < hw_ss_per_s &&
1987 req_ss > (hw_ss_per_s / 2))
1988 return -EINVAL22;
1989
1990 /* ABI restriction - VME use case only. */
1991
1992 /* All slices or one slice only. */
1993 if (req_s != 1 && req_s != hw_s)
1994 return -EINVAL22;
1995
1996 /*
1997 * Half subslices or full enablement only when one slice is
1998 * enabled.
1999 */
2000 if (req_s == 1 &&
2001 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
2002 return -EINVAL22;
2003
2004 /* No EU configuration changes. */
2005 if ((user->min_eus_per_subslice !=
2006 device->max_eus_per_subslice) ||
2007 (user->max_eus_per_subslice !=
2008 device->max_eus_per_subslice))
2009 return -EINVAL22;
2010 }
2011
2012 return 0;
2013}
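The overflows_type() checks near the top of the function guard against the wider uAPI fields being silently truncated when copied into the narrower internal context fields. A standalone sketch of that width check, using a hypothetical 9-bit value and an 8-bit destination:

#include <stdint.h>
#include <stdio.h>

#define overflows_type(val, field) \
        (sizeof(val) > sizeof(field) && \
         (uint64_t)(val) >> (8 * sizeof(field)))

int main(void)
{
        uint64_t user_mask = 0x1ff;     /* hypothetical uAPI value, 9 bits */
        uint8_t ctx_mask = 0;           /* narrower internal field */

        if (overflows_type(user_mask, ctx_mask)) {
                puts("-EINVAL: value would be truncated");
                return 1;
        }
        ctx_mask = (uint8_t)user_mask;
        printf("stored mask 0x%x\n", ctx_mask);
        return 0;
}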
2014
2015static int set_sseu(struct i915_gem_context *ctx,
2016 struct drm_i915_gem_context_param *args)
2017{
2018 struct drm_i915_privateinteldrm_softc *i915 = ctx->i915;
2019 struct drm_i915_gem_context_param_sseu user_sseu;
2020 struct intel_context *ce;
2021 struct intel_sseu sseu;
2022 unsigned long lookup;
2023 int ret;
2024
2025 if (args->size < sizeof(user_sseu))
2026 return -EINVAL22;
2027
2028 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) != 11)
2029 return -ENODEV19;
2030
2031 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value)((void *)(uintptr_t)(args->value)),
2032 sizeof(user_sseu)))
2033 return -EFAULT14;
2034
2035 if (user_sseu.rsvd)
2036 return -EINVAL22;
2037
2038 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX(1u << 0)))
2039 return -EINVAL22;
2040
2041 lookup = 0;
2042 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX(1u << 0))
2043 lookup |= LOOKUP_USER_INDEX(1UL << (0));
2044
2045 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2046 if (IS_ERR(ce))
2047 return PTR_ERR(ce);
2048
2049 /* Only render engine supports RPCS configuration. */
2050 if (ce->engine->class != RENDER_CLASS0) {
2051 ret = -ENODEV19;
2052 goto out_ce;
2053 }
2054
2055 ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
2056 if (ret)
2057 goto out_ce;
2058
2059 ret = intel_context_reconfigure_sseu(ce, sseu);
2060 if (ret)
2061 goto out_ce;
2062
2063 args->size = sizeof(user_sseu);
2064
2065out_ce:
2066 intel_context_put(ce);
2067 return ret;
2068}
2069
2070static int
2071set_persistence(struct i915_gem_context *ctx,
2072 const struct drm_i915_gem_context_param *args)
2073{
2074 if (args->size)
2075 return -EINVAL22;
2076
2077 return __context_set_persistence(ctx, args->value);
2078}
2079
2080static int set_priority(struct i915_gem_context *ctx,
2081 const struct drm_i915_gem_context_param *args)
2082{
2083 struct i915_gem_engines_iter it;
2084 struct intel_context *ce;
2085 int err;
2086
2087 err = validate_priority(ctx->i915, args);
2088 if (err)
2089 return err;
2090
2091 ctx->sched.priority = args->value;
2092
2093 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2094 if (!intel_engine_has_timeslices(ce->engine))
2095 continue;
2096
2097 if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
2098 intel_engine_has_semaphores(ce->engine))
2099 intel_context_set_use_semaphores(ce);
2100 else
2101 intel_context_clear_use_semaphores(ce);
2102 }
2103 i915_gem_context_unlock_engines(ctx);
2104
2105 return 0;
2106}
2107
2108static int get_protected(struct i915_gem_context *ctx,
2109 struct drm_i915_gem_context_param *args)
2110{
2111 args->size = 0;
2112 args->value = i915_gem_context_uses_protected_content(ctx);
2113
2114 return 0;
2115}
2116
2117static int ctx_setparam(struct drm_i915_file_private *fpriv,
2118 struct i915_gem_context *ctx,
2119 struct drm_i915_gem_context_param *args)
2120{
2121 int ret = 0;
2122
2123 switch (args->param) {
2124 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE0x4:
2125 if (args->size)
2126 ret = -EINVAL22;
2127 else if (args->value)
2128 i915_gem_context_set_no_error_capture(ctx);
2129 else
2130 i915_gem_context_clear_no_error_capture(ctx);
2131 break;
2132
2133 case I915_CONTEXT_PARAM_BANNABLE0x5:
2134 if (args->size)
2135 ret = -EINVAL22;
2136 else if (!capable(CAP_SYS_ADMIN0x1) && !args->value)
2137 ret = -EPERM1;
2138 else if (args->value)
2139 i915_gem_context_set_bannable(ctx);
2140 else if (i915_gem_context_uses_protected_content(ctx))
2141 ret = -EPERM1; /* can't clear this for protected contexts */
2142 else
2143 i915_gem_context_clear_bannable(ctx);
2144 break;
2145
2146 case I915_CONTEXT_PARAM_RECOVERABLE0x8:
2147 if (args->size)
2148 ret = -EINVAL22;
2149 else if (!args->value)
2150 i915_gem_context_clear_recoverable(ctx);
2151 else if (i915_gem_context_uses_protected_content(ctx))
2152 ret = -EPERM1; /* can't set this for protected contexts */
2153 else
2154 i915_gem_context_set_recoverable(ctx);
2155 break;
2156
2157 case I915_CONTEXT_PARAM_PRIORITY0x6:
2158 ret = set_priority(ctx, args);
2159 break;
2160
2161 case I915_CONTEXT_PARAM_SSEU0x7:
2162 ret = set_sseu(ctx, args);
2163 break;
2164
2165 case I915_CONTEXT_PARAM_PERSISTENCE0xb:
2166 ret = set_persistence(ctx, args);
2167 break;
2168
2169 case I915_CONTEXT_PARAM_PROTECTED_CONTENT0xd:
2170 case I915_CONTEXT_PARAM_NO_ZEROMAP0x2:
2171 case I915_CONTEXT_PARAM_BAN_PERIOD0x1:
2172 case I915_CONTEXT_PARAM_RINGSIZE0xc:
2173 case I915_CONTEXT_PARAM_VM0x9:
2174 case I915_CONTEXT_PARAM_ENGINES0xa:
2175 default:
2176 ret = -EINVAL22;
2177 break;
2178 }
2179
2180 return ret;
2181}
2182
2183struct create_ext {
2184 struct i915_gem_proto_context *pc;
2185 struct drm_i915_file_private *fpriv;
2186};
2187
2188static int create_setparam(struct i915_user_extension __user *ext, void *data)
2189{
2190 struct drm_i915_gem_context_create_ext_setparam local;
2191 const struct create_ext *arg = data;
2192
2193 if (copy_from_user(&local, ext, sizeof(local)))
2194 return -EFAULT14;
2195
2196 if (local.param.ctx_id)
2197 return -EINVAL22;
2198
2199 return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
2200}
2201
2202static int invalid_ext(struct i915_user_extension __user *ext, void *data)
2203{
2204 return -EINVAL22;
2205}
2206
2207static const i915_user_extension_fn create_extensions[] = {
2208 [I915_CONTEXT_CREATE_EXT_SETPARAM0] = create_setparam,
2209 [I915_CONTEXT_CREATE_EXT_CLONE1] = invalid_ext,
2210};
2211
2212static bool_Bool client_is_banned(struct drm_i915_file_private *file_priv)
2213{
2214 return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2215}
2216
2217static inline struct i915_gem_context *
2218__context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2219{
2220 struct i915_gem_context *ctx;
2221
2222 rcu_read_lock();
2223 ctx = xa_load(&file_priv->context_xa, id);
2224 if (ctx && !kref_get_unless_zero(&ctx->ref))
2225 ctx = NULL((void *)0);
2226 rcu_read_unlock();
2227
2228 return ctx;
2229}
2230
2231static struct i915_gem_context *
2232finalize_create_context_locked(struct drm_i915_file_private *file_priv,
2233 struct i915_gem_proto_context *pc, u32 id)
2234{
2235 struct i915_gem_context *ctx;
2236 void *old;
2237
2238 lockdep_assert_held(&file_priv->proto_context_lock);
2239
2240 ctx = i915_gem_create_context(file_priv->dev_priv, pc);
2241 if (IS_ERR(ctx))
2242 return ctx;
2243
2244 /*
2245 * One for the xarray and one for the caller. We need to grab
2246 * the reference *prior* to making the ctx visible to userspace
2247 * in gem_context_register(), as at any point after that
2248 * userspace can try to race us with another thread destroying
2249 * the context under our feet.
2250 */
2251 i915_gem_context_get(ctx);
2252
2253 gem_context_register(ctx, file_priv, id);
2254
2255 old = xa_erase(&file_priv->proto_context_xa, id);
2256 GEM_BUG_ON(old != pc)((void)0);
2257 proto_context_close(file_priv->dev_priv, pc);
2258
2259 return ctx;
2260}
2261
2262struct i915_gem_context *
2263i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2264{
2265 struct i915_gem_proto_context *pc;
2266 struct i915_gem_context *ctx;
2267
2268 ctx = __context_lookup(file_priv, id);
2269 if (ctx)
2270 return ctx;
2271
2272 mutex_lock(&file_priv->proto_context_lock)rw_enter_write(&file_priv->proto_context_lock);
2273 /* Try one more time under the lock */
2274 ctx = __context_lookup(file_priv, id);
2275 if (!ctx) {
2276 pc = xa_load(&file_priv->proto_context_xa, id);
2277 if (!pc)
2278 ctx = ERR_PTR(-ENOENT2);
2279 else
2280 ctx = finalize_create_context_locked(file_priv, pc, id);
2281 }
2282 mutex_unlock(&file_priv->proto_context_lock)rw_exit_write(&file_priv->proto_context_lock);
2283
2284 return ctx;
2285}
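i915_gem_context_lookup() above uses a check/lock/re-check pattern: a lockless fast-path lookup, then a retry under proto_context_lock so that a proto-context is finalized at most once even when two callers race. A simplified single-threaded sketch of that idiom (the real code relies on RCU and kref for the lockless path; names here are illustrative):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *finalized;         /* stands in for the entry in context_xa */
static int have_proto = 1;      /* stands in for the entry in proto_context_xa */

static void *finalize_locked(void)
{
        finalized = malloc(1);  /* build the real object... */
        have_proto = 0;         /* ...and consume the proto-object */
        return finalized;
}

static void *lookup(void)
{
        void *obj = finalized;  /* fast path: no lock taken */

        if (obj)
                return obj;

        pthread_mutex_lock(&lock);
        obj = finalized;        /* re-check under the lock */
        if (!obj && have_proto)
                obj = finalize_locked();
        pthread_mutex_unlock(&lock);
        return obj;
}

int main(void)
{
        void *obj = lookup();

        free(obj);
        return obj ? 0 : 1;
}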
2286
2287int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2288 struct drm_file *file)
2289{
2290 struct drm_i915_privateinteldrm_softc *i915 = to_i915(dev);
2291 struct drm_i915_gem_context_create_ext *args = data;
2292 struct create_ext ext_data;
2293 int ret;
2294 u32 id;
2295
2296 if (!DRIVER_CAPS(i915)(&(i915)->caps)->has_logical_contexts)
1
Assuming field 'has_logical_contexts' is true
2
Taking false branch
2297 return -ENODEV19;
2298
2299 if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN(-((1u << 1) << 1)))
3
Assuming the condition is false
4
Taking false branch
2300 return -EINVAL22;
2301
2302 ret = intel_gt_terminally_wedged(to_gt(i915));
2303 if (ret)
5
Assuming 'ret' is 0
6
Taking false branch
2304 return ret;
2305
2306 ext_data.fpriv = file->driver_priv;
2307 if (client_is_banned(ext_data.fpriv)) {
7
Taking false branch
2308#ifdef __linux__
2309 drm_dbg(&i915->drm,
2310 "client %s[%d] banned from creating ctx\n",
2311 current->comm, task_pid_nr(current));
2312#else
2313 drm_dbg(&i915->drm,
2314 "client %s[%d] banned from creating ctx\n",
2315 curproc->p_p->ps_comm, curproc->p_p->ps_pid);
2316#endif
2317 return -EIO5;
2318 }
2319
2320 ext_data.pc = proto_context_create(i915, args->flags);
8
Calling 'proto_context_create'
18
Returned allocated memory
2321 if (IS_ERR(ext_data.pc))
19
Taking false branch
2322 return PTR_ERR(ext_data.pc);
2323
2324 if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS(1u << 0)) {
20
Assuming the condition is false
21
Taking false branch
2325 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2326 create_extensions,
2327 ARRAY_SIZE(create_extensions),
2328 &ext_data);
2329 if (ret)
2330 goto err_pc;
2331 }
2332
2333 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) > 12) {
22
Assuming field 'ver' is <= 12
23
Taking false branch
2334 struct i915_gem_context *ctx;
2335
2336 /* Get ourselves a context ID */
2337 ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL((void *)0),
2338 xa_limit_32b0, GFP_KERNEL(0x0001 | 0x0004));
2339 if (ret)
2340 goto err_pc;
2341
2342 ctx = i915_gem_create_context(i915, ext_data.pc);
2343 if (IS_ERR(ctx)) {
2344 ret = PTR_ERR(ctx);
2345 goto err_pc;
2346 }
2347
2348 proto_context_close(i915, ext_data.pc);
2349 gem_context_register(ctx, ext_data.fpriv, id);
2350 } else {
2351 ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
2352 if (ret < 0)
24
Assuming 'ret' is >= 0
25
Taking false branch
2353 goto err_pc;
2354 }
2355
2356 args->ctx_id = id;
26
Potential leak of memory pointed to by 'ext_data.pc'
2357 drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "HW context %d created\n"
, args->ctx_id)
;
2358
2359 return 0;
2360
2361err_pc:
2362 proto_context_close(i915, ext_data.pc);
2363 return ret;
2364}
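A note on the diagnostic above: on the reported path (graphics version <= 12), ext_data.pc is handed to proto_context_register(), and the later xa_erase()/GEM_BUG_ON(old != pc) pair in finalize_create_context_locked() indicates that registration stores the proto-context in the per-file proto_context_xa, where it is eventually closed. Ownership therefore appears to transfer into the table rather than leak, and the checker simply does not follow that escape. A minimal user-space sketch of the same pattern, with invented names, that a purely local leak checker could flag the same way:

#include <stdlib.h>

#define TABLE_SIZE 16

static void *table[TABLE_SIZE];         /* stands in for proto_context_xa */

static int register_obj(void *obj, unsigned int *id)
{
        *id = 0;
        table[*id] = obj;               /* ownership moves into the table */
        return 0;
}

static void destroy_obj(unsigned int id)
{
        free(table[id]);                /* freed later, on a different path */
        table[id] = NULL;
}

static int create(unsigned int *id)
{
        void *obj = malloc(32);

        if (!obj)
                return -1;
        if (register_obj(obj, id)) {    /* only an error path frees obj here */
                free(obj);
                return -1;
        }
        return 0;                       /* success: obj lives on in table[] */
}

int main(void)
{
        unsigned int id;

        if (create(&id))
                return 1;
        destroy_obj(id);
        return 0;
}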
2365
2366int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2367 struct drm_file *file)
2368{
2369 struct drm_i915_gem_context_destroy *args = data;
2370 struct drm_i915_file_private *file_priv = file->driver_priv;
2371 struct i915_gem_proto_context *pc;
2372 struct i915_gem_context *ctx;
2373
2374 if (args->pad != 0)
2375 return -EINVAL22;
2376
2377 if (!args->ctx_id)
2378 return -ENOENT2;
2379
2380 /* We need to hold the proto-context lock here to prevent races
2381 * with finalize_create_context_locked().
2382 */
2383 mutex_lock(&file_priv->proto_context_lock)rw_enter_write(&file_priv->proto_context_lock);
2384 ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
2385 pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
2386 mutex_unlock(&file_priv->proto_context_lock)rw_exit_write(&file_priv->proto_context_lock);
2387
2388 if (!ctx && !pc)
2389 return -ENOENT2;
2390 GEM_WARN_ON(ctx && pc)({ __builtin_expect(!!(!!(ctx && pc)), 0); });
2391
2392 if (pc)
2393 proto_context_close(file_priv->dev_priv, pc);
2394
2395 if (ctx)
2396 context_close(ctx);
2397
2398 return 0;
2399}
2400
2401static int get_sseu(struct i915_gem_context *ctx,
2402 struct drm_i915_gem_context_param *args)
2403{
2404 struct drm_i915_gem_context_param_sseu user_sseu;
2405 struct intel_context *ce;
2406 unsigned long lookup;
2407 int err;
2408
2409 if (args->size == 0)
2410 goto out;
2411 else if (args->size < sizeof(user_sseu))
2412 return -EINVAL22;
2413
2414 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value)((void *)(uintptr_t)(args->value)),
2415 sizeof(user_sseu)))
2416 return -EFAULT14;
2417
2418 if (user_sseu.rsvd)
2419 return -EINVAL22;
2420
2421 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX(1u << 0)))
2422 return -EINVAL22;
2423
2424 lookup = 0;
2425 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX(1u << 0))
2426 lookup |= LOOKUP_USER_INDEX(1UL << (0));
2427
2428 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2429 if (IS_ERR(ce))
2430 return PTR_ERR(ce);
2431
2432 err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2433 if (err) {
2434 intel_context_put(ce);
2435 return err;
2436 }
2437
2438 user_sseu.slice_mask = ce->sseu.slice_mask;
2439 user_sseu.subslice_mask = ce->sseu.subslice_mask;
2440 user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2441 user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2442
2443 intel_context_unlock_pinned(ce);
2444 intel_context_put(ce);
2445
2446 if (copy_to_user(u64_to_user_ptr(args->value)((void *)(uintptr_t)(args->value)), &user_sseu,
2447 sizeof(user_sseu)))
2448 return -EFAULT14;
2449
2450out:
2451 args->size = sizeof(user_sseu);
2452
2453 return 0;
2454}
2455
2456int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2457 struct drm_file *file)
2458{
2459 struct drm_i915_file_private *file_priv = file->driver_priv;
2460 struct drm_i915_gem_context_param *args = data;
2461 struct i915_gem_context *ctx;
2462 struct i915_address_space *vm;
2463 int ret = 0;
2464
2465 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2466 if (IS_ERR(ctx))
2467 return PTR_ERR(ctx);
2468
2469 switch (args->param) {
2470 case I915_CONTEXT_PARAM_GTT_SIZE0x3:
2471 args->size = 0;
2472 vm = i915_gem_context_get_eb_vm(ctx);
2473 args->value = vm->total;
2474 i915_vm_put(vm);
2475
2476 break;
2477
2478 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE0x4:
2479 args->size = 0;
2480 args->value = i915_gem_context_no_error_capture(ctx);
2481 break;
2482
2483 case I915_CONTEXT_PARAM_BANNABLE0x5:
2484 args->size = 0;
2485 args->value = i915_gem_context_is_bannable(ctx);
2486 break;
2487
2488 case I915_CONTEXT_PARAM_RECOVERABLE0x8:
2489 args->size = 0;
2490 args->value = i915_gem_context_is_recoverable(ctx);
2491 break;
2492
2493 case I915_CONTEXT_PARAM_PRIORITY0x6:
2494 args->size = 0;
2495 args->value = ctx->sched.priority;
2496 break;
2497
2498 case I915_CONTEXT_PARAM_SSEU0x7:
2499 ret = get_sseu(ctx, args);
2500 break;
2501
2502 case I915_CONTEXT_PARAM_VM0x9:
2503 ret = get_ppgtt(file_priv, ctx, args);
2504 break;
2505
2506 case I915_CONTEXT_PARAM_PERSISTENCE0xb:
2507 args->size = 0;
2508 args->value = i915_gem_context_is_persistent(ctx);
2509 break;
2510
2511 case I915_CONTEXT_PARAM_PROTECTED_CONTENT0xd:
2512 ret = get_protected(ctx, args);
2513 break;
2514
2515 case I915_CONTEXT_PARAM_NO_ZEROMAP0x2:
2516 case I915_CONTEXT_PARAM_BAN_PERIOD0x1:
2517 case I915_CONTEXT_PARAM_ENGINES0xa:
2518 case I915_CONTEXT_PARAM_RINGSIZE0xc:
2519 default:
2520 ret = -EINVAL22;
2521 break;
2522 }
2523
2524 i915_gem_context_put(ctx);
2525 return ret;
2526}
2527
2528int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2529 struct drm_file *file)
2530{
2531 struct drm_i915_file_private *file_priv = file->driver_priv;
2532 struct drm_i915_gem_context_param *args = data;
2533 struct i915_gem_proto_context *pc;
2534 struct i915_gem_context *ctx;
2535 int ret = 0;
2536
2537 mutex_lock(&file_priv->proto_context_lock)rw_enter_write(&file_priv->proto_context_lock);
2538 ctx = __context_lookup(file_priv, args->ctx_id);
2539 if (!ctx) {
2540 pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
2541 if (pc) {
2542 /* Contexts should be finalized inside
2543 * GEM_CONTEXT_CREATE starting with graphics
2544 * version 13.
2545 */
2546 WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12);
2547 ret = set_proto_ctx_param(file_priv, pc, args);
2548 } else {
2549 ret = -ENOENT2;
2550 }
2551 }
2552 mutex_unlock(&file_priv->proto_context_lock)rw_exit_write(&file_priv->proto_context_lock);
2553
2554 if (ctx) {
2555 ret = ctx_setparam(file_priv, ctx, args);
2556 i915_gem_context_put(ctx);
2557 }
2558
2559 return ret;
2560}
2561
2562int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2563 void *data, struct drm_file *file)
2564{
2565 struct drm_i915_privateinteldrm_softc *i915 = to_i915(dev);
2566 struct drm_i915_reset_stats *args = data;
2567 struct i915_gem_context *ctx;
2568
2569 if (args->flags || args->pad)
2570 return -EINVAL22;
2571
2572 ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
2573 if (IS_ERR(ctx))
2574 return PTR_ERR(ctx);
2575
2576 /*
2577 * We opt for unserialised reads here. This may result in tearing
2578 * in the extremely unlikely event of a GPU hang on this context
2579 * as we are querying them. If we need that extra layer of protection,
2580 * we should wrap the hangstats with a seqlock.
2581 */
2582
2583 if (capable(CAP_SYS_ADMIN0x1))
2584 args->reset_count = i915_reset_count(&i915->gpu_error);
2585 else
2586 args->reset_count = 0;
2587
2588 args->batch_active = atomic_read(&ctx->guilty_count);
2589 args->batch_pending = atomic_read(&ctx->active_count);
2590
2591 i915_gem_context_put(ctx);
2592 return 0;
2593}
2594
2595/* GEM context-engines iterator: for_each_gem_engine() */
2596struct intel_context *
2597i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2598{
2599 const struct i915_gem_engines *e = it->engines;
2600 struct intel_context *ctx;
2601
2602 if (unlikely(!e)__builtin_expect(!!(!e), 0))
2603 return NULL((void *)0);
2604
2605 do {
2606 if (it->idx >= e->num_engines)
2607 return NULL((void *)0);
2608
2609 ctx = e->engines[it->idx++];
2610 } while (!ctx);
2611
2612 return ctx;
2613}
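i915_gem_engines_iter_next() skips NULL slots so callers see only populated engines. A small stand-alone sketch of an iterator with the same skip-the-holes shape, using made-up data:

#include <stdio.h>

struct iter { const char **slots; unsigned int n, idx; };

static const char *iter_next(struct iter *it)
{
        const char *e;

        do {
                if (it->idx >= it->n)
                        return NULL;
                e = it->slots[it->idx++];
        } while (!e);                   /* skip holes in the table */

        return e;
}

int main(void)
{
        const char *slots[] = { "rcs0", NULL, "vcs0" };
        struct iter it = { slots, 3, 0 };
        const char *e;

        while ((e = iter_next(&it)))
                puts(e);
        return 0;
}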
2614
2615#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)0
2616#include "selftests/mock_context.c"
2617#include "selftests/i915_gem_context.c"
2618#endif
2619
2620void i915_gem_context_module_exit(void)
2621{
2622#ifdef __linux__
2623 kmem_cache_destroy(slab_luts);
2624#else
2625 pool_destroy(&slab_luts);
2626#endif
2627}
2628
2629int __init i915_gem_context_module_init(void)
2630{
2631#ifdef __linux__
2632 slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2633 if (!slab_luts)
2634 return -ENOMEM12;
2635#else
2636 pool_init(&slab_luts , sizeof(struct i915_lut_handle),
2637 0, IPL_NONE0x0, 0, "drmlut", NULL((void *)0));
2638#endif
2639
2640 return 0;
2641}

/usr/src/sys/dev/pci/drm/include/linux/slab.h

1/* Public domain. */
2
3#ifndef _LINUX_SLAB_H
4#define _LINUX_SLAB_H
5
6#include <sys/types.h>
7#include <sys/malloc.h>
8
9#include <linux/types.h>
10#include <linux/workqueue.h>
11#include <linux/gfp.h>
12
13#include <linux/processor.h> /* for CACHELINESIZE */
14
15#define ARCH_KMALLOC_MINALIGN64 CACHELINESIZE64
16
17#define ZERO_SIZE_PTR((void *)0) NULL((void *)0)
18
19static inline void *
20kmalloc(size_t size, int flags)
21{
22 return malloc(size, M_DRM145, flags);
23}
24
25static inline void *
26kmalloc_array(size_t n, size_t size, int flags)
27{
28 if (n != 0 && SIZE_MAX0xffffffffffffffffUL / n < size)
29 return NULL((void *)0);
30 return malloc(n * size, M_DRM145, flags);
31}
32
33static inline void *
34kcalloc(size_t n, size_t size, int flags)
35{
36 if (n != 0 && SIZE_MAX0xffffffffffffffffUL / n < size)
37 return NULL((void *)0);
38 return malloc(n * size, M_DRM145, flags | M_ZERO0x0008);
39}
40
41static inline void *
42kzalloc(size_t size, int flags)
43{
44 return malloc(size, M_DRM145, flags | M_ZERO0x0008);
10
Memory is allocated
45}
46
47static inline void
48kfree(const void *objp)
49{
50 free((void *)objp, M_DRM145, 0);
51}
52
53#endif
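kmalloc_array() and kcalloc() above refuse requests where n * size would wrap around, using the SIZE_MAX / n < size guard. A tiny worked example of that check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        size_t n = SIZE_MAX / 2 + 1;    /* deliberately huge element count */
        size_t size = 2;

        if (n != 0 && SIZE_MAX / n < size)
                puts("n * size would wrap around, refuse to allocate");
        return 0;
}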