File: | dev/pci/drm/i915/i915_perf.c |
Warning: | line 705, column 8 Although the value stored to 'taken' is used in the enclosing expression, the value is never actually read from 'taken' |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | /* |
2 | * Copyright © 2015-2016 Intel Corporation |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
21 | * IN THE SOFTWARE. |
22 | * |
23 | * Authors: |
24 | * Robert Bragg <robert@sixbynine.org> |
25 | */ |
26 | |
27 | |
28 | /** |
29 | * DOC: i915 Perf Overview |
30 | * |
31 | * Gen graphics supports a large number of performance counters that can help |
32 | * driver and application developers understand and optimize their use of the |
33 | * GPU. |
34 | * |
35 | * This i915 perf interface enables userspace to configure and open a file |
36 | * descriptor representing a stream of GPU metrics which can then be read() as |
37 | * a stream of sample records. |
38 | * |
39 | * The interface is particularly suited to exposing buffered metrics that are |
40 | * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU. |
41 | * |
42 | * Streams representing a single context are accessible to applications with a |
43 | * corresponding drm file descriptor, such that OpenGL can use the interface |
44 | * without special privileges. Access to system-wide metrics requires root |
45 | * privileges by default, unless changed via the dev.i915.perf_event_paranoid |
46 | * sysctl option. |
47 | * |
48 | */ |
49 | |
50 | /** |
51 | * DOC: i915 Perf History and Comparison with Core Perf |
52 | * |
53 | * The interface was initially inspired by the core Perf infrastructure but |
54 | * some notable differences are: |
55 | * |
56 | * i915 perf file descriptors represent a "stream" instead of an "event"; where |
57 | * a perf event primarily corresponds to a single 64bit value, while a stream |
58 | * might sample sets of tightly-coupled counters, depending on the |
59 | * configuration. For example the Gen OA unit isn't designed to support |
60 | * orthogonal configurations of individual counters; it's configured for a set |
61 | * of related counters. Samples for an i915 perf stream capturing OA metrics |
62 | * will include a set of counter values packed in a compact HW specific format. |
63 | * The OA unit supports a number of different packing formats which can be |
64 | * selected by the user opening the stream. Perf has support for grouping |
65 | * events, but each event in the group is configured, validated and |
66 | * authenticated individually with separate system calls. |
67 | * |
68 | * i915 perf stream configurations are provided as an array of u64 (key,value) |
69 | * pairs, instead of a fixed struct with multiple miscellaneous config members, |
70 | * interleaved with event-type specific members. |
71 | * |
72 | * i915 perf doesn't support exposing metrics via an mmap'd circular buffer. |
73 | * The supported metrics are being written to memory by the GPU unsynchronized |
74 | * with the CPU, using HW specific packing formats for counter sets. Sometimes |
75 | * the constraints on HW configuration require reports to be filtered before it |
76 | * would be acceptable to expose them to unprivileged applications - to hide |
77 | * the metrics of other processes/contexts. For these use cases a read() based |
78 | * interface is a good fit, and provides an opportunity to filter data as it |
79 | * gets copied from the GPU mapped buffers to userspace buffers. |
80 | * |
81 | * |
82 | * Issues hit with first prototype based on Core Perf |
83 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
84 | * |
85 | * The first prototype of this driver was based on the core perf |
86 | * infrastructure, and while we did make that mostly work, with some changes to |
87 | * perf, we found we were breaking or working around too many assumptions baked |
88 | * into perf's currently cpu centric design. |
89 | * |
90 | * In the end we didn't see a clear benefit to making perf's implementation and |
91 | * interface more complex by changing design assumptions while we knew we still |
92 | * wouldn't be able to use any existing perf based userspace tools. |
93 | * |
94 | * Also considering the Gen specific nature of the Observability hardware and |
95 | * how userspace will sometimes need to combine i915 perf OA metrics with |
96 | * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're |
97 | * expecting the interface to be used by a platform specific userspace such as |
98 | * OpenGL or tools. This is to say; we aren't inherently missing out on having |
99 | * a standard vendor/architecture agnostic interface by not using perf. |
100 | * |
101 | * |
102 | * For posterity, in case we might re-visit trying to adapt core perf to be |
103 | * better suited to exposing i915 metrics these were the main pain points we |
104 | * hit: |
105 | * |
106 | * - The perf based OA PMU driver broke some significant design assumptions: |
107 | * |
108 | * Existing perf pmus are used for profiling work on a cpu and we were |
109 | * introducing the idea of _IS_DEVICE pmus with different security |
110 | * implications, the need to fake cpu-related data (such as user/kernel |
111 | * registers) to fit with perf's current design, and adding _DEVICE records |
112 | * as a way to forward device-specific status records. |
113 | * |
114 | * The OA unit writes reports of counters into a circular buffer, without |
115 | * involvement from the CPU, making our PMU driver the first of a kind. |
116 | * |
117 | * Given the way we were periodically forward data from the GPU-mapped, OA |
118 | * buffer to perf's buffer, those bursts of sample writes looked to perf like |
119 | * we were sampling too fast and so we had to subvert its throttling checks. |
120 | * |
121 | * Perf supports groups of counters and allows those to be read via |
122 | * transactions internally but transactions currently seem designed to be |
123 | * explicitly initiated from the cpu (say in response to a userspace read()) |
124 | * and while we could pull a report out of the OA buffer we can't |
125 | * trigger a report from the cpu on demand. |
126 | * |
127 | * Related to being report based; the OA counters are configured in HW as a |
128 | * set while perf generally expects counter configurations to be orthogonal. |
129 | * Although counters can be associated with a group leader as they are |
130 | * opened, there's no clear precedent for being able to provide group-wide |
131 | * configuration attributes (for example we want to let userspace choose the |
132 | * OA unit report format used to capture all counters in a set, or specify a |
133 | * GPU context to filter metrics on). We avoided using perf's grouping |
134 | * feature and forwarded OA reports to userspace via perf's 'raw' sample |
135 | * field. This suited our userspace well considering how coupled the counters |
136 | * are when dealing with normalizing. It would be inconvenient to split |
137 | * counters up into separate events, only to require userspace to recombine |
138 | * them. For Mesa it's also convenient to be forwarded raw, periodic reports |
139 | * for combining with the side-band raw reports it captures using |
140 | * MI_REPORT_PERF_COUNT commands. |
141 | * |
142 | * - As a side note on perf's grouping feature; there was also some concern |
143 | * that using PERF_FORMAT_GROUP as a way to pack together counter values |
144 | * would quite drastically inflate our sample sizes, which would likely |
145 | * lower the effective sampling resolutions we could use when the available |
146 | * memory bandwidth is limited. |
147 | * |
148 | * With the OA unit's report formats, counters are packed together as 32 |
149 | * or 40bit values, with the largest report size being 256 bytes. |
150 | * |
151 | * PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a |
152 | * documented ordering to the values, implying PERF_FORMAT_ID must also be |
153 | * used to add a 64bit ID before each value; giving 16 bytes per counter. |
154 | * |
155 | * Related to counter orthogonality; we can't time share the OA unit, while |
156 | * event scheduling is a central design idea within perf for allowing |
157 | * userspace to open + enable more events than can be configured in HW at any |
158 | * one time. The OA unit is not designed to allow re-configuration while in |
159 | * use. We can't reconfigure the OA unit without losing internal OA unit |
160 | * state which we can't access explicitly to save and restore. Reconfiguring |
161 | * the OA unit is also relatively slow, involving ~100 register writes. From |
162 | * userspace Mesa also depends on a stable OA configuration when emitting |
163 | * MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be |
164 | * disabled while there are outstanding MI_RPC commands lest we hang the |
165 | * command streamer. |
166 | * |
167 | * The contents of sample records aren't extensible by device drivers (i.e. |
168 | * the sample_type bits). As an example; Sourab Gupta had been looking to |
169 | * attach GPU timestamps to our OA samples. We were shoehorning OA reports |
170 | * into sample records by using the 'raw' field, but it's tricky to pack more |
171 | * than one thing into this field because events/core.c currently only lets a |
172 | * pmu give a single raw data pointer plus len which will be copied into the |
173 | * ring buffer. To include more than the OA report we'd have to copy the |
174 | * report into an intermediate larger buffer. I'd been considering allowing a |
175 | * vector of data+len values to be specified for copying the raw data, but |
176 | * it felt like a kludge to being using the raw field for this purpose. |
177 | * |
178 | * - It felt like our perf based PMU was making some technical compromises |
179 | * just for the sake of using perf: |
180 | * |
181 | * perf_event_open() requires events to either relate to a pid or a specific |
182 | * cpu core, while our device pmu related to neither. Events opened with a |
183 | * pid will be automatically enabled/disabled according to the scheduling of |
184 | * that process - so not appropriate for us. When an event is related to a |
185 | * cpu id, perf ensures pmu methods will be invoked via an inter process |
186 | * interrupt on that core. To avoid invasive changes our userspace opened OA |
187 | * perf events for a specific cpu. This was workable but it meant the |
188 | * majority of the OA driver ran in atomic context, including all OA report |
189 | * forwarding, which wasn't really necessary in our case and seems to make |
190 | * our locking requirements somewhat complex as we handled the interaction |
191 | * with the rest of the i915 driver. |
192 | */ |
193 | |
194 | #include <linux/anon_inodes.h> |
195 | #include <linux/sizes.h> |
196 | #include <linux/uuid.h> |
197 | |
198 | #include "gem/i915_gem_context.h" |
199 | #include "gem/i915_gem_internal.h" |
200 | #include "gt/intel_engine_pm.h" |
201 | #include "gt/intel_engine_regs.h" |
202 | #include "gt/intel_engine_user.h" |
203 | #include "gt/intel_execlists_submission.h" |
204 | #include "gt/intel_gpu_commands.h" |
205 | #include "gt/intel_gt.h" |
206 | #include "gt/intel_gt_clock_utils.h" |
207 | #include "gt/intel_gt_regs.h" |
208 | #include "gt/intel_lrc.h" |
209 | #include "gt/intel_lrc_reg.h" |
210 | #include "gt/intel_ring.h" |
211 | |
212 | #include "i915_drv.h" |
213 | #include "i915_file_private.h" |
214 | #include "i915_perf.h" |
215 | #include "i915_perf_oa_regs.h" |
216 | |
217 | /* HW requires this to be a power of two, between 128k and 16M, though driver |
218 | * is currently generally designed assuming the largest 16M size is used such |
219 | * that the overflow cases are unlikely in normal operation. |
220 | */ |
221 | #define OA_BUFFER_SIZE(16 << 20) SZ_16M(16 << 20) |
222 | |
223 | #define OA_TAKEN(tail, head)((tail - head) & ((16 << 20) - 1)) ((tail - head) & (OA_BUFFER_SIZE(16 << 20) - 1)) |
224 | |
225 | /** |
226 | * DOC: OA Tail Pointer Race |
227 | * |
228 | * There's a HW race condition between OA unit tail pointer register updates and |
229 | * writes to memory whereby the tail pointer can sometimes get ahead of what's |
230 | * been written out to the OA buffer so far (in terms of what's visible to the |
231 | * CPU). |
232 | * |
233 | * Although this can be observed explicitly while copying reports to userspace |
234 | * by checking for a zeroed report-id field in tail reports, we want to account |
235 | * for this earlier, as part of the oa_buffer_check_unlocked to avoid lots of |
236 | * redundant read() attempts. |
237 | * |
238 | * We workaround this issue in oa_buffer_check_unlocked() by reading the reports |
239 | * in the OA buffer, starting from the tail reported by the HW until we find a |
240 | * report with its first 2 dwords not 0 meaning its previous report is |
241 | * completely in memory and ready to be read. Those dwords are also set to 0 |
242 | * once read and the whole buffer is cleared upon OA buffer initialization. The |
243 | * first dword is the reason for this report while the second is the timestamp, |
244 | * making the chances of having those 2 fields at 0 fairly unlikely. A more |
245 | * detailed explanation is available in oa_buffer_check_unlocked(). |
246 | * |
247 | * Most of the implementation details for this workaround are in |
248 | * oa_buffer_check_unlocked() and _append_oa_reports() |
249 | * |
250 | * Note for posterity: previously the driver used to define an effective tail |
251 | * pointer that lagged the real pointer by a 'tail margin' measured in bytes |
252 | * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency. |
253 | * This was flawed considering that the OA unit may also automatically generate |
254 | * non-periodic reports (such as on context switch) or the OA unit may be |
255 | * enabled without any periodic sampling. |
256 | */ |
257 | #define OA_TAIL_MARGIN_NSEC100000ULL 100000ULL |
258 | #define INVALID_TAIL_PTR0xffffffff 0xffffffff |
259 | |
260 | /* The default frequency for checking whether the OA unit has written new |
261 | * reports to the circular OA buffer... |
262 | */ |
263 | #define DEFAULT_POLL_FREQUENCY_HZ200 200 |
264 | #define DEFAULT_POLL_PERIOD_NS(1000000000L / 200) (NSEC_PER_SEC1000000000L / DEFAULT_POLL_FREQUENCY_HZ200) |
265 | |
266 | /* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */ |
267 | static u32 i915_perf_stream_paranoid = true1; |
268 | |
269 | /* The maximum exponent the hardware accepts is 63 (essentially it selects one |
270 | * of the 64bit timestamp bits to trigger reports from) but there's currently |
271 | * no known use case for sampling as infrequently as once per 47 thousand years. |
272 | * |
273 | * Since the timestamps included in OA reports are only 32bits it seems |
274 | * reasonable to limit the OA exponent where it's still possible to account for |
275 | * overflow in OA report timestamps. |
276 | */ |
277 | #define OA_EXPONENT_MAX31 31 |
278 | |
279 | #define INVALID_CTX_ID0xffffffff 0xffffffff |
280 | |
281 | /* On Gen8+ automatically triggered OA reports include a 'reason' field... */ |
282 | #define OAREPORT_REASON_MASK0x3f 0x3f |
283 | #define OAREPORT_REASON_MASK_EXTENDED0x7f 0x7f |
284 | #define OAREPORT_REASON_SHIFT19 19 |
285 | #define OAREPORT_REASON_TIMER(1<<0) (1<<0) |
286 | #define OAREPORT_REASON_CTX_SWITCH(1<<3) (1<<3) |
287 | #define OAREPORT_REASON_CLK_RATIO(1<<5) (1<<5) |
288 | |
289 | |
290 | /* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate |
291 | * |
292 | * The highest sampling frequency we can theoretically program the OA unit |
293 | * with is always half the timestamp frequency: E.g. 6.25Mhz for Haswell. |
294 | * |
295 | * Initialized just before we register the sysctl parameter. |
296 | */ |
297 | static int oa_sample_rate_hard_limit; |
298 | |
299 | /* Theoretically we can program the OA unit to sample every 160ns but don't |
300 | * allow that by default unless root... |
301 | * |
302 | * The default threshold of 100000Hz is based on perf's similar |
303 | * kernel.perf_event_max_sample_rate sysctl parameter. |
304 | */ |
305 | static u32 i915_oa_max_sample_rate = 100000; |
306 | |
307 | /* XXX: beware if future OA HW adds new report formats that the current |
308 | * code assumes all reports have a power-of-two size and ~(size - 1) can |
309 | * be used as a mask to align the OA tail pointer. |
310 | */ |
311 | static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = { |
312 | [I915_OA_FORMAT_A13] = { 0, 64 }, |
313 | [I915_OA_FORMAT_A29] = { 1, 128 }, |
314 | [I915_OA_FORMAT_A13_B8_C8] = { 2, 128 }, |
315 | /* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */ |
316 | [I915_OA_FORMAT_B4_C8] = { 4, 64 }, |
317 | [I915_OA_FORMAT_A45_B8_C8] = { 5, 256 }, |
318 | [I915_OA_FORMAT_B4_C8_A16] = { 6, 128 }, |
319 | [I915_OA_FORMAT_C4_B8] = { 7, 64 }, |
320 | [I915_OA_FORMAT_A12] = { 0, 64 }, |
321 | [I915_OA_FORMAT_A12_B8_C8] = { 2, 128 }, |
322 | [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 }, |
323 | }; |
324 | |
325 | #define SAMPLE_OA_REPORT(1<<0) (1<<0) |
326 | |
327 | /** |
328 | * struct perf_open_properties - for validated properties given to open a stream |
329 | * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags |
330 | * @single_context: Whether a single or all gpu contexts should be monitored |
331 | * @hold_preemption: Whether the preemption is disabled for the filtered |
332 | * context |
333 | * @ctx_handle: A gem ctx handle for use with @single_context |
334 | * @metrics_set: An ID for an OA unit metric set advertised via sysfs |
335 | * @oa_format: An OA unit HW report format |
336 | * @oa_periodic: Whether to enable periodic OA unit sampling |
337 | * @oa_period_exponent: The OA unit sampling period is derived from this |
338 | * @engine: The engine (typically rcs0) being monitored by the OA unit |
339 | * @has_sseu: Whether @sseu was specified by userspace |
340 | * @sseu: internal SSEU configuration computed either from the userspace |
341 | * specified configuration in the opening parameters or a default value |
342 | * (see get_default_sseu_config()) |
343 | * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA |
344 | * data availability |
345 | * |
346 | * As read_properties_unlocked() enumerates and validates the properties given |
347 | * to open a stream of metrics the configuration is built up in the structure |
348 | * which starts out zero initialized. |
349 | */ |
350 | struct perf_open_properties { |
351 | u32 sample_flags; |
352 | |
353 | u64 single_context:1; |
354 | u64 hold_preemption:1; |
355 | u64 ctx_handle; |
356 | |
357 | /* OA sampling state */ |
358 | int metrics_set; |
359 | int oa_format; |
360 | bool_Bool oa_periodic; |
361 | int oa_period_exponent; |
362 | |
363 | struct intel_engine_cs *engine; |
364 | |
365 | bool_Bool has_sseu; |
366 | struct intel_sseu sseu; |
367 | |
368 | u64 poll_oa_period; |
369 | }; |
370 | |
371 | struct i915_oa_config_bo { |
372 | struct llist_node node; |
373 | |
374 | struct i915_oa_config *oa_config; |
375 | struct i915_vma *vma; |
376 | }; |
377 | |
378 | static struct ctl_table_header *sysctl_header; |
379 | |
380 | #ifdef notyet |
381 | static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer); |
382 | #endif |
383 | |
384 | void i915_oa_config_release(struct kref *ref) |
385 | { |
386 | struct i915_oa_config *oa_config = |
387 | container_of(ref, typeof(*oa_config), ref)({ const __typeof( ((typeof(*oa_config) *)0)->ref ) *__mptr = (ref); (typeof(*oa_config) *)( (char *)__mptr - __builtin_offsetof (typeof(*oa_config), ref) );}); |
388 | |
389 | kfree(oa_config->flex_regs); |
390 | kfree(oa_config->b_counter_regs); |
391 | kfree(oa_config->mux_regs); |
392 | |
393 | kfree_rcu(oa_config, rcu)do { free((void *)oa_config, 145, 0); } while(0); |
394 | } |
395 | |
396 | struct i915_oa_config * |
397 | i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set) |
398 | { |
399 | struct i915_oa_config *oa_config; |
400 | |
401 | rcu_read_lock(); |
402 | oa_config = idr_find(&perf->metrics_idr, metrics_set); |
403 | if (oa_config) |
404 | oa_config = i915_oa_config_get(oa_config); |
405 | rcu_read_unlock(); |
406 | |
407 | return oa_config; |
408 | } |
409 | |
410 | #ifdef notyet |
411 | |
412 | static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo) |
413 | { |
414 | i915_oa_config_put(oa_bo->oa_config); |
415 | i915_vma_put(oa_bo->vma); |
416 | kfree(oa_bo); |
417 | } |
418 | |
419 | #endif |
420 | |
421 | static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream) |
422 | { |
423 | struct intel_uncore *uncore = stream->uncore; |
424 | |
425 | return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR((const i915_reg_t){ .reg = (0xdb04) })) & |
426 | GEN12_OAG_OATAILPTR_MASK0xffffffc0; |
427 | } |
428 | |
429 | static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream) |
430 | { |
431 | struct intel_uncore *uncore = stream->uncore; |
432 | |
433 | return intel_uncore_read(uncore, GEN8_OATAILPTR((const i915_reg_t){ .reg = (0x2B10) })) & GEN8_OATAILPTR_MASK0xffffffc0; |
434 | } |
435 | |
436 | static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream) |
437 | { |
438 | struct intel_uncore *uncore = stream->uncore; |
439 | u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1((const i915_reg_t){ .reg = (0x2364) })); |
440 | |
441 | return oastatus1 & GEN7_OASTATUS1_TAIL_MASK0xffffffc0; |
442 | } |
443 | |
444 | #ifdef notyet |
445 | |
446 | /** |
447 | * oa_buffer_check_unlocked - check for data and update tail ptr state |
448 | * @stream: i915 stream instance |
449 | * |
450 | * This is either called via fops (for blocking reads in user ctx) or the poll |
451 | * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check |
452 | * if there is data available for userspace to read. |
453 | * |
454 | * This function is central to providing a workaround for the OA unit tail |
455 | * pointer having a race with respect to what data is visible to the CPU. |
456 | * It is responsible for reading tail pointers from the hardware and giving |
457 | * the pointers time to 'age' before they are made available for reading. |
458 | * (See description of OA_TAIL_MARGIN_NSEC above for further details.) |
459 | * |
460 | * Besides returning true when there is data available to read() this function |
461 | * also updates the tail, aging_tail and aging_timestamp in the oa_buffer |
462 | * object. |
463 | * |
464 | * Note: It's safe to read OA config state here unlocked, assuming that this is |
465 | * only called while the stream is enabled, while the global OA configuration |
466 | * can't be modified. |
467 | * |
468 | * Returns: %true if the OA buffer contains data, else %false |
469 | */ |
470 | static bool_Bool oa_buffer_check_unlocked(struct i915_perf_stream *stream) |
471 | { |
472 | u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); |
473 | int report_size = stream->oa_buffer.format_size; |
474 | unsigned long flags; |
475 | bool_Bool pollin; |
476 | u32 hw_tail; |
477 | u64 now; |
478 | |
479 | /* We have to consider the (unlikely) possibility that read() errors |
480 | * could result in an OA buffer reset which might reset the head and |
481 | * tail state. |
482 | */ |
483 | spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags)do { flags = 0; mtx_enter(&stream->oa_buffer.ptr_lock) ; } while (0); |
484 | |
485 | hw_tail = stream->perf->ops.oa_hw_tail_read(stream); |
486 | |
487 | /* The tail pointer increases in 64 byte increments, |
488 | * not in report_size steps... |
489 | */ |
490 | hw_tail &= ~(report_size - 1); |
491 | |
492 | now = ktime_get_mono_fast_ns(); |
493 | |
494 | if (hw_tail == stream->oa_buffer.aging_tail && |
495 | (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC100000ULL) { |
496 | /* If the HW tail hasn't move since the last check and the HW |
497 | * tail has been aging for long enough, declare it the new |
498 | * tail. |
499 | */ |
500 | stream->oa_buffer.tail = stream->oa_buffer.aging_tail; |
501 | } else { |
502 | u32 head, tail, aged_tail; |
503 | |
504 | /* NB: The head we observe here might effectively be a little |
505 | * out of date. If a read() is in progress, the head could be |
506 | * anywhere between this head and stream->oa_buffer.tail. |
507 | */ |
508 | head = stream->oa_buffer.head - gtt_offset; |
509 | aged_tail = stream->oa_buffer.tail - gtt_offset; |
510 | |
511 | hw_tail -= gtt_offset; |
512 | tail = hw_tail; |
513 | |
514 | /* Walk the stream backward until we find a report with dword 0 |
515 | * & 1 not at 0. Since the circular buffer pointers progress by |
516 | * increments of 64 bytes and that reports can be up to 256 |
517 | * bytes long, we can't tell whether a report has fully landed |
518 | * in memory before the first 2 dwords of the following report |
519 | * have effectively landed. |
520 | * |
521 | * This is assuming that the writes of the OA unit land in |
522 | * memory in the order they were written to. |
523 | * If not : (╯°□°)╯︵ ┻━┻ |
524 | */ |
525 | while (OA_TAKEN(tail, aged_tail)((tail - aged_tail) & ((16 << 20) - 1)) >= report_size) { |
526 | u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail); |
527 | |
528 | if (report32[0] != 0 || report32[1] != 0) |
529 | break; |
530 | |
531 | tail = (tail - report_size) & (OA_BUFFER_SIZE(16 << 20) - 1); |
532 | } |
533 | |
534 | if (OA_TAKEN(hw_tail, tail)((hw_tail - tail) & ((16 << 20) - 1)) > report_size && |
535 | __ratelimit(&stream->perf->tail_pointer_race)) |
536 | DRM_NOTE("unlanded report(s) head=0x%x "printk("\0015" "[" "drm" "] " "unlanded report(s) head=0x%x " "tail=0x%x hw_tail=0x%x\n", head, tail, hw_tail) |
537 | "tail=0x%x hw_tail=0x%x\n",printk("\0015" "[" "drm" "] " "unlanded report(s) head=0x%x " "tail=0x%x hw_tail=0x%x\n", head, tail, hw_tail) |
538 | head, tail, hw_tail)printk("\0015" "[" "drm" "] " "unlanded report(s) head=0x%x " "tail=0x%x hw_tail=0x%x\n", head, tail, hw_tail); |
539 | |
540 | stream->oa_buffer.tail = gtt_offset + tail; |
541 | stream->oa_buffer.aging_tail = gtt_offset + hw_tail; |
542 | stream->oa_buffer.aging_timestamp = now; |
543 | } |
544 | |
545 | pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,((stream->oa_buffer.tail - gtt_offset - stream->oa_buffer .head - gtt_offset) & ((16 << 20) - 1)) |
546 | stream->oa_buffer.head - gtt_offset)((stream->oa_buffer.tail - gtt_offset - stream->oa_buffer .head - gtt_offset) & ((16 << 20) - 1)) >= report_size; |
547 | |
548 | spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags)do { (void)(flags); mtx_leave(&stream->oa_buffer.ptr_lock ); } while (0); |
549 | |
550 | return pollin; |
551 | } |
552 | |
553 | #endif |
554 | |
555 | /** |
556 | * append_oa_status - Appends a status record to a userspace read() buffer. |
557 | * @stream: An i915-perf stream opened for OA metrics |
558 | * @buf: destination buffer given by userspace |
559 | * @count: the number of bytes userspace wants to read |
560 | * @offset: (inout): the current position for writing into @buf |
561 | * @type: The kind of status to report to userspace |
562 | * |
563 | * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`) |
564 | * into the userspace read() buffer. |
565 | * |
566 | * The @buf @offset will only be updated on success. |
567 | * |
568 | * Returns: 0 on success, negative error code on failure. |
569 | */ |
570 | static int append_oa_status(struct i915_perf_stream *stream, |
571 | char __user *buf, |
572 | size_t count, |
573 | size_t *offset, |
574 | enum drm_i915_perf_record_type type) |
575 | { |
576 | struct drm_i915_perf_record_header header = { type, 0, sizeof(header) }; |
577 | |
578 | if ((count - *offset) < header.size) |
579 | return -ENOSPC28; |
580 | |
581 | if (copy_to_user(buf + *offset, &header, sizeof(header))) |
582 | return -EFAULT14; |
583 | |
584 | (*offset) += header.size; |
585 | |
586 | return 0; |
587 | } |
588 | |
589 | /** |
590 | * append_oa_sample - Copies single OA report into userspace read() buffer. |
591 | * @stream: An i915-perf stream opened for OA metrics |
592 | * @buf: destination buffer given by userspace |
593 | * @count: the number of bytes userspace wants to read |
594 | * @offset: (inout): the current position for writing into @buf |
595 | * @report: A single OA report to (optionally) include as part of the sample |
596 | * |
597 | * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*` |
598 | * properties when opening a stream, tracked as `stream->sample_flags`. This |
599 | * function copies the requested components of a single sample to the given |
600 | * read() @buf. |
601 | * |
602 | * The @buf @offset will only be updated on success. |
603 | * |
604 | * Returns: 0 on success, negative error code on failure. |
605 | */ |
606 | static int append_oa_sample(struct i915_perf_stream *stream, |
607 | char __user *buf, |
608 | size_t count, |
609 | size_t *offset, |
610 | const u8 *report) |
611 | { |
612 | int report_size = stream->oa_buffer.format_size; |
613 | struct drm_i915_perf_record_header header; |
614 | |
615 | header.type = DRM_I915_PERF_RECORD_SAMPLE; |
616 | header.pad = 0; |
617 | header.size = stream->sample_size; |
618 | |
619 | if ((count - *offset) < header.size) |
620 | return -ENOSPC28; |
621 | |
622 | buf += *offset; |
623 | if (copy_to_user(buf, &header, sizeof(header))) |
624 | return -EFAULT14; |
625 | buf += sizeof(header); |
626 | |
627 | if (copy_to_user(buf, report, report_size)) |
628 | return -EFAULT14; |
629 | |
630 | (*offset) += header.size; |
631 | |
632 | return 0; |
633 | } |
634 | |
635 | /** |
636 | * gen8_append_oa_reports - Copies all buffered OA reports into |
637 | * userspace read() buffer. |
638 | * @stream: An i915-perf stream opened for OA metrics |
639 | * @buf: destination buffer given by userspace |
640 | * @count: the number of bytes userspace wants to read |
641 | * @offset: (inout): the current position for writing into @buf |
642 | * |
643 | * Notably any error condition resulting in a short read (-%ENOSPC or |
644 | * -%EFAULT) will be returned even though one or more records may |
645 | * have been successfully copied. In this case it's up to the caller |
646 | * to decide if the error should be squashed before returning to |
647 | * userspace. |
648 | * |
649 | * Note: reports are consumed from the head, and appended to the |
650 | * tail, so the tail chases the head?... If you think that's mad |
651 | * and back-to-front you're not alone, but this follows the |
652 | * Gen PRM naming convention. |
653 | * |
654 | * Returns: 0 on success, negative error code on failure. |
655 | */ |
656 | static int gen8_append_oa_reports(struct i915_perf_stream *stream, |
657 | char __user *buf, |
658 | size_t count, |
659 | size_t *offset) |
660 | { |
661 | struct intel_uncore *uncore = stream->uncore; |
662 | int report_size = stream->oa_buffer.format_size; |
663 | u8 *oa_buf_base = stream->oa_buffer.vaddr; |
664 | u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); |
665 | u32 mask = (OA_BUFFER_SIZE(16 << 20) - 1); |
666 | size_t start_offset = *offset; |
667 | unsigned long flags; |
668 | u32 head, tail; |
669 | u32 taken; |
670 | int ret = 0; |
671 | |
672 | if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled)({ int __ret = !!((!stream->enabled)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&uncore->i915->drm))-> dev), "", "drm_WARN_ON(" "!stream->enabled" ")"); __builtin_expect (!!(__ret), 0); })) |
673 | return -EIO5; |
674 | |
675 | spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags)do { flags = 0; mtx_enter(&stream->oa_buffer.ptr_lock) ; } while (0); |
676 | |
677 | head = stream->oa_buffer.head; |
678 | tail = stream->oa_buffer.tail; |
679 | |
680 | spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags)do { (void)(flags); mtx_leave(&stream->oa_buffer.ptr_lock ); } while (0); |
681 | |
682 | /* |
683 | * NB: oa_buffer.head/tail include the gtt_offset which we don't want |
684 | * while indexing relative to oa_buf_base. |
685 | */ |
686 | head -= gtt_offset; |
687 | tail -= gtt_offset; |
688 | |
689 | /* |
690 | * An out of bounds or misaligned head or tail pointer implies a driver |
691 | * bug since we validate + align the tail pointers we read from the |
692 | * hardware and we are in full control of the head pointer which should |
693 | * only be incremented by multiples of the report size (notably also |
694 | * all a power of two). |
695 | */ |
696 | if (drm_WARN_ONCE(&uncore->i915->drm,({ static int __warned; int __ret = !!(head > (16 << 20) || head % report_size || tail > (16 << 20) || tail % report_size); if (__ret && !__warned) { printf("%s %s: " "Inconsistent OA buffer pointers: head = %u, tail = %u\n", dev_driver_string ((&uncore->i915->drm)->dev), "", head, tail); __warned = 1; } __builtin_expect(!!(__ret), 0); }) |
697 | head > OA_BUFFER_SIZE || head % report_size ||({ static int __warned; int __ret = !!(head > (16 << 20) || head % report_size || tail > (16 << 20) || tail % report_size); if (__ret && !__warned) { printf("%s %s: " "Inconsistent OA buffer pointers: head = %u, tail = %u\n", dev_driver_string ((&uncore->i915->drm)->dev), "", head, tail); __warned = 1; } __builtin_expect(!!(__ret), 0); }) |
698 | tail > OA_BUFFER_SIZE || tail % report_size,({ static int __warned; int __ret = !!(head > (16 << 20) || head % report_size || tail > (16 << 20) || tail % report_size); if (__ret && !__warned) { printf("%s %s: " "Inconsistent OA buffer pointers: head = %u, tail = %u\n", dev_driver_string ((&uncore->i915->drm)->dev), "", head, tail); __warned = 1; } __builtin_expect(!!(__ret), 0); }) |
699 | "Inconsistent OA buffer pointers: head = %u, tail = %u\n",({ static int __warned; int __ret = !!(head > (16 << 20) || head % report_size || tail > (16 << 20) || tail % report_size); if (__ret && !__warned) { printf("%s %s: " "Inconsistent OA buffer pointers: head = %u, tail = %u\n", dev_driver_string ((&uncore->i915->drm)->dev), "", head, tail); __warned = 1; } __builtin_expect(!!(__ret), 0); }) |
700 | head, tail)({ static int __warned; int __ret = !!(head > (16 << 20) || head % report_size || tail > (16 << 20) || tail % report_size); if (__ret && !__warned) { printf("%s %s: " "Inconsistent OA buffer pointers: head = %u, tail = %u\n", dev_driver_string ((&uncore->i915->drm)->dev), "", head, tail); __warned = 1; } __builtin_expect(!!(__ret), 0); })) |
701 | return -EIO5; |
702 | |
703 | |
704 | for (/* none */; |
705 | (taken = OA_TAKEN(tail, head)((tail - head) & ((16 << 20) - 1))); |
Although the value stored to 'taken' is used in the enclosing expression, the value is never actually read from 'taken' | |
706 | head = (head + report_size) & mask) { |
707 | u8 *report = oa_buf_base + head; |
708 | u32 *report32 = (void *)report; |
709 | u32 ctx_id; |
710 | u32 reason; |
711 | |
712 | /* |
713 | * All the report sizes factor neatly into the buffer |
714 | * size so we never expect to see a report split |
715 | * between the beginning and end of the buffer. |
716 | * |
717 | * Given the initial alignment check a misalignment |
718 | * here would imply a driver bug that would result |
719 | * in an overrun. |
720 | */ |
721 | if (drm_WARN_ON(&uncore->i915->drm,({ int __ret = !!((((16 << 20) - head) < report_size )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& uncore->i915->drm))->dev), "", "drm_WARN_ON(" "((16 << 20) - head) < report_size" ")"); __builtin_expect(!!(__ret), 0); }) |
722 | (OA_BUFFER_SIZE - head) < report_size)({ int __ret = !!((((16 << 20) - head) < report_size )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& uncore->i915->drm))->dev), "", "drm_WARN_ON(" "((16 << 20) - head) < report_size" ")"); __builtin_expect(!!(__ret), 0); })) { |
723 | drm_err(&uncore->i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Spurious OA head ptr: non-integral report offset\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__) |
724 | "Spurious OA head ptr: non-integral report offset\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Spurious OA head ptr: non-integral report offset\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
725 | break; |
726 | } |
727 | |
728 | /* |
729 | * The reason field includes flags identifying what |
730 | * triggered this specific report (mostly timer |
731 | * triggered or e.g. due to a context switch). |
732 | * |
733 | * This field is never expected to be zero so we can |
734 | * check that the report isn't invalid before copying |
735 | * it to userspace... |
736 | */ |
737 | reason = ((report32[0] >> OAREPORT_REASON_SHIFT19) & |
738 | (GRAPHICS_VER(stream->perf->i915)((&(stream->perf->i915)->__runtime)->graphics .ip.ver) == 12 ? |
739 | OAREPORT_REASON_MASK_EXTENDED0x7f : |
740 | OAREPORT_REASON_MASK0x3f)); |
741 | |
742 | ctx_id = report32[2] & stream->specific_ctx_id_mask; |
743 | |
744 | /* |
745 | * Squash whatever is in the CTX_ID field if it's marked as |
746 | * invalid to be sure we avoid false-positive, single-context |
747 | * filtering below... |
748 | * |
749 | * Note: that we don't clear the valid_ctx_bit so userspace can |
750 | * understand that the ID has been squashed by the kernel. |
751 | */ |
752 | if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) && |
753 | GRAPHICS_VER(stream->perf->i915)((&(stream->perf->i915)->__runtime)->graphics .ip.ver) <= 11) |
754 | ctx_id = report32[2] = INVALID_CTX_ID0xffffffff; |
755 | |
756 | /* |
757 | * NB: For Gen 8 the OA unit no longer supports clock gating |
758 | * off for a specific context and the kernel can't securely |
759 | * stop the counters from updating as system-wide / global |
760 | * values. |
761 | * |
762 | * Automatic reports now include a context ID so reports can be |
763 | * filtered on the cpu but it's not worth trying to |
764 | * automatically subtract/hide counter progress for other |
765 | * contexts while filtering since we can't stop userspace |
766 | * issuing MI_REPORT_PERF_COUNT commands which would still |
767 | * provide a side-band view of the real values. |
768 | * |
769 | * To allow userspace (such as Mesa/GL_INTEL_performance_query) |
770 | * to normalize counters for a single filtered context then it |
771 | * needs be forwarded bookend context-switch reports so that it |
772 | * can track switches in between MI_REPORT_PERF_COUNT commands |
773 | * and can itself subtract/ignore the progress of counters |
774 | * associated with other contexts. Note that the hardware |
775 | * automatically triggers reports when switching to a new |
776 | * context which are tagged with the ID of the newly active |
777 | * context. To avoid the complexity (and likely fragility) of |
778 | * reading ahead while parsing reports to try and minimize |
779 | * forwarding redundant context switch reports (i.e. between |
780 | * other, unrelated contexts) we simply elect to forward them |
781 | * all. |
782 | * |
783 | * We don't rely solely on the reason field to identify context |
784 | * switches since it's not-uncommon for periodic samples to |
785 | * identify a switch before any 'context switch' report. |
786 | */ |
787 | if (!stream->perf->exclusive_stream->ctx || |
788 | stream->specific_ctx_id == ctx_id || |
789 | stream->oa_buffer.last_ctx_id == stream->specific_ctx_id || |
790 | reason & OAREPORT_REASON_CTX_SWITCH(1<<3)) { |
791 | |
792 | /* |
793 | * While filtering for a single context we avoid |
794 | * leaking the IDs of other contexts. |
795 | */ |
796 | if (stream->perf->exclusive_stream->ctx && |
797 | stream->specific_ctx_id != ctx_id) { |
798 | report32[2] = INVALID_CTX_ID0xffffffff; |
799 | } |
800 | |
801 | ret = append_oa_sample(stream, buf, count, offset, |
802 | report); |
803 | if (ret) |
804 | break; |
805 | |
806 | stream->oa_buffer.last_ctx_id = ctx_id; |
807 | } |
808 | |
809 | /* |
810 | * Clear out the first 2 dword as a mean to detect unlanded |
811 | * reports. |
812 | */ |
813 | report32[0] = 0; |
814 | report32[1] = 0; |
815 | } |
816 | |
817 | if (start_offset != *offset) { |
818 | i915_reg_t oaheadptr; |
819 | |
820 | oaheadptr = GRAPHICS_VER(stream->perf->i915)((&(stream->perf->i915)->__runtime)->graphics .ip.ver) == 12 ? |
821 | GEN12_OAG_OAHEADPTR((const i915_reg_t){ .reg = (0xdb00) }) : GEN8_OAHEADPTR((const i915_reg_t){ .reg = (0x2B0C) }); |
822 | |
823 | spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags)do { flags = 0; mtx_enter(&stream->oa_buffer.ptr_lock) ; } while (0); |
824 | |
825 | /* |
826 | * We removed the gtt_offset for the copy loop above, indexing |
827 | * relative to oa_buf_base so put back here... |
828 | */ |
829 | head += gtt_offset; |
830 | intel_uncore_write(uncore, oaheadptr, |
831 | head & GEN12_OAG_OAHEADPTR_MASK0xffffffc0); |
832 | stream->oa_buffer.head = head; |
833 | |
834 | spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags)do { (void)(flags); mtx_leave(&stream->oa_buffer.ptr_lock ); } while (0); |
835 | } |
836 | |
837 | return ret; |
838 | } |
839 | |
840 | /** |
841 | * gen8_oa_read - copy status records then buffered OA reports |
842 | * @stream: An i915-perf stream opened for OA metrics |
843 | * @buf: destination buffer given by userspace |
844 | * @count: the number of bytes userspace wants to read |
845 | * @offset: (inout): the current position for writing into @buf |
846 | * |
847 | * Checks OA unit status registers and if necessary appends corresponding |
848 | * status records for userspace (such as for a buffer full condition) and then |
849 | * initiate appending any buffered OA reports. |
850 | * |
851 | * Updates @offset according to the number of bytes successfully copied into |
852 | * the userspace buffer. |
853 | * |
854 | * NB: some data may be successfully copied to the userspace buffer |
855 | * even if an error is returned, and this is reflected in the |
856 | * updated @offset. |
857 | * |
858 | * Returns: zero on success or a negative error code |
859 | */ |
860 | static int gen8_oa_read(struct i915_perf_stream *stream, |
861 | char __user *buf, |
862 | size_t count, |
863 | size_t *offset) |
864 | { |
865 | struct intel_uncore *uncore = stream->uncore; |
866 | u32 oastatus; |
867 | i915_reg_t oastatus_reg; |
868 | int ret; |
869 | |
870 | if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr)({ int __ret = !!((!stream->oa_buffer.vaddr)); if (__ret) printf ("%s %s: " "%s", dev_driver_string(((&uncore->i915-> drm))->dev), "", "drm_WARN_ON(" "!stream->oa_buffer.vaddr" ")"); __builtin_expect(!!(__ret), 0); })) |
871 | return -EIO5; |
872 | |
873 | oastatus_reg = GRAPHICS_VER(stream->perf->i915)((&(stream->perf->i915)->__runtime)->graphics .ip.ver) == 12 ? |
874 | GEN12_OAG_OASTATUS((const i915_reg_t){ .reg = (0xdafc) }) : GEN8_OASTATUS((const i915_reg_t){ .reg = (0x2b08) }); |
875 | |
876 | oastatus = intel_uncore_read(uncore, oastatus_reg); |
877 | |
878 | /* |
879 | * We treat OABUFFER_OVERFLOW as a significant error: |
880 | * |
881 | * Although theoretically we could handle this more gracefully |
882 | * sometimes, some Gens don't correctly suppress certain |
883 | * automatically triggered reports in this condition and so we |
884 | * have to assume that old reports are now being trampled |
885 | * over. |
886 | * |
887 | * Considering how we don't currently give userspace control |
888 | * over the OA buffer size and always configure a large 16MB |
889 | * buffer, then a buffer overflow does anyway likely indicate |
890 | * that something has gone quite badly wrong. |
891 | */ |
892 | if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW(1 << 1)) { |
893 | ret = append_oa_status(stream, buf, count, offset, |
894 | DRM_I915_PERF_RECORD_OA_BUFFER_LOST); |
895 | if (ret) |
896 | return ret; |
897 | |
898 | drm_dbg(&stream->perf->i915->drm,__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA buffer overflow (exponent = %d): force restart\n" , stream->period_exponent) |
899 | "OA buffer overflow (exponent = %d): force restart\n",__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA buffer overflow (exponent = %d): force restart\n" , stream->period_exponent) |
900 | stream->period_exponent)__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA buffer overflow (exponent = %d): force restart\n" , stream->period_exponent); |
901 | |
902 | stream->perf->ops.oa_disable(stream); |
903 | stream->perf->ops.oa_enable(stream); |
904 | |
905 | /* |
906 | * Note: .oa_enable() is expected to re-init the oabuffer and |
907 | * reset GEN8_OASTATUS for us |
908 | */ |
909 | oastatus = intel_uncore_read(uncore, oastatus_reg); |
910 | } |
911 | |
912 | if (oastatus & GEN8_OASTATUS_REPORT_LOST(1 << 0)) { |
913 | ret = append_oa_status(stream, buf, count, offset, |
914 | DRM_I915_PERF_RECORD_OA_REPORT_LOST); |
915 | if (ret) |
916 | return ret; |
917 | |
918 | intel_uncore_rmw(uncore, oastatus_reg, |
919 | GEN8_OASTATUS_COUNTER_OVERFLOW(1 << 2) | |
920 | GEN8_OASTATUS_REPORT_LOST(1 << 0), |
921 | IS_GRAPHICS_VER(uncore->i915, 8, 11)(((&(uncore->i915)->__runtime)->graphics.ip.ver) >= (8) && ((&(uncore->i915)->__runtime) ->graphics.ip.ver) <= (11)) ? |
922 | (GEN8_OASTATUS_HEAD_POINTER_WRAP(1 << 16) | |
923 | GEN8_OASTATUS_TAIL_POINTER_WRAP(1 << 17)) : 0); |
924 | } |
925 | |
926 | return gen8_append_oa_reports(stream, buf, count, offset); |
927 | } |
928 | |
929 | /** |
930 | * gen7_append_oa_reports - Copies all buffered OA reports into |
931 | * userspace read() buffer. |
932 | * @stream: An i915-perf stream opened for OA metrics |
933 | * @buf: destination buffer given by userspace |
934 | * @count: the number of bytes userspace wants to read |
935 | * @offset: (inout): the current position for writing into @buf |
936 | * |
937 | * Notably any error condition resulting in a short read (-%ENOSPC or |
938 | * -%EFAULT) will be returned even though one or more records may |
939 | * have been successfully copied. In this case it's up to the caller |
940 | * to decide if the error should be squashed before returning to |
941 | * userspace. |
942 | * |
943 | * Note: reports are consumed from the head, and appended to the |
944 | * tail, so the tail chases the head?... If you think that's mad |
945 | * and back-to-front you're not alone, but this follows the |
946 | * Gen PRM naming convention. |
947 | * |
948 | * Returns: 0 on success, negative error code on failure. |
949 | */ |
950 | static int gen7_append_oa_reports(struct i915_perf_stream *stream, |
951 | char __user *buf, |
952 | size_t count, |
953 | size_t *offset) |
954 | { |
955 | struct intel_uncore *uncore = stream->uncore; |
956 | int report_size = stream->oa_buffer.format_size; |
957 | u8 *oa_buf_base = stream->oa_buffer.vaddr; |
958 | u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); |
959 | u32 mask = (OA_BUFFER_SIZE(16 << 20) - 1); |
960 | size_t start_offset = *offset; |
961 | unsigned long flags; |
962 | u32 head, tail; |
963 | u32 taken; |
964 | int ret = 0; |
965 | |
966 | if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled)({ int __ret = !!((!stream->enabled)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&uncore->i915->drm))-> dev), "", "drm_WARN_ON(" "!stream->enabled" ")"); __builtin_expect (!!(__ret), 0); })) |
967 | return -EIO5; |
968 | |
969 | spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags)do { flags = 0; mtx_enter(&stream->oa_buffer.ptr_lock) ; } while (0); |
970 | |
971 | head = stream->oa_buffer.head; |
972 | tail = stream->oa_buffer.tail; |
973 | |
974 | spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags)do { (void)(flags); mtx_leave(&stream->oa_buffer.ptr_lock ); } while (0); |
975 | |
976 | /* NB: oa_buffer.head/tail include the gtt_offset which we don't want |
977 | * while indexing relative to oa_buf_base. |
978 | */ |
979 | head -= gtt_offset; |
980 | tail -= gtt_offset; |
981 | |
982 | /* An out of bounds or misaligned head or tail pointer implies a driver |
983 | * bug since we validate + align the tail pointers we read from the |
984 | * hardware and we are in full control of the head pointer which should |
985 | * only be incremented by multiples of the report size (notably also |
986 | * all a power of two). |
987 | */ |
988 | if (drm_WARN_ONCE(&uncore->i915->drm,({ static int __warned; int __ret = !!(head > (16 << 20) || head % report_size || tail > (16 << 20) || tail % report_size); if (__ret && !__warned) { printf("%s %s: " "Inconsistent OA buffer pointers: head = %u, tail = %u\n", dev_driver_string ((&uncore->i915->drm)->dev), "", head, tail); __warned = 1; } __builtin_expect(!!(__ret), 0); }) |
989 | head > OA_BUFFER_SIZE || head % report_size ||({ static int __warned; int __ret = !!(head > (16 << 20) || head % report_size || tail > (16 << 20) || tail % report_size); if (__ret && !__warned) { printf("%s %s: " "Inconsistent OA buffer pointers: head = %u, tail = %u\n", dev_driver_string ((&uncore->i915->drm)->dev), "", head, tail); __warned = 1; } __builtin_expect(!!(__ret), 0); }) |
990 | tail > OA_BUFFER_SIZE || tail % report_size,({ static int __warned; int __ret = !!(head > (16 << 20) || head % report_size || tail > (16 << 20) || tail % report_size); if (__ret && !__warned) { printf("%s %s: " "Inconsistent OA buffer pointers: head = %u, tail = %u\n", dev_driver_string ((&uncore->i915->drm)->dev), "", head, tail); __warned = 1; } __builtin_expect(!!(__ret), 0); }) |
991 | "Inconsistent OA buffer pointers: head = %u, tail = %u\n",({ static int __warned; int __ret = !!(head > (16 << 20) || head % report_size || tail > (16 << 20) || tail % report_size); if (__ret && !__warned) { printf("%s %s: " "Inconsistent OA buffer pointers: head = %u, tail = %u\n", dev_driver_string ((&uncore->i915->drm)->dev), "", head, tail); __warned = 1; } __builtin_expect(!!(__ret), 0); }) |
992 | head, tail)({ static int __warned; int __ret = !!(head > (16 << 20) || head % report_size || tail > (16 << 20) || tail % report_size); if (__ret && !__warned) { printf("%s %s: " "Inconsistent OA buffer pointers: head = %u, tail = %u\n", dev_driver_string ((&uncore->i915->drm)->dev), "", head, tail); __warned = 1; } __builtin_expect(!!(__ret), 0); })) |
993 | return -EIO5; |
994 | |
995 | |
996 | for (/* none */; |
997 | (taken = OA_TAKEN(tail, head)((tail - head) & ((16 << 20) - 1))); |
998 | head = (head + report_size) & mask) { |
999 | u8 *report = oa_buf_base + head; |
1000 | u32 *report32 = (void *)report; |
1001 | |
1002 | /* All the report sizes factor neatly into the buffer |
1003 | * size so we never expect to see a report split |
1004 | * between the beginning and end of the buffer. |
1005 | * |
1006 | * Given the initial alignment check a misalignment |
1007 | * here would imply a driver bug that would result |
1008 | * in an overrun. |
1009 | */ |
1010 | if (drm_WARN_ON(&uncore->i915->drm,({ int __ret = !!((((16 << 20) - head) < report_size )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& uncore->i915->drm))->dev), "", "drm_WARN_ON(" "((16 << 20) - head) < report_size" ")"); __builtin_expect(!!(__ret), 0); }) |
1011 | (OA_BUFFER_SIZE - head) < report_size)({ int __ret = !!((((16 << 20) - head) < report_size )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& uncore->i915->drm))->dev), "", "drm_WARN_ON(" "((16 << 20) - head) < report_size" ")"); __builtin_expect(!!(__ret), 0); })) { |
1012 | drm_err(&uncore->i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Spurious OA head ptr: non-integral report offset\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__) |
1013 | "Spurious OA head ptr: non-integral report offset\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Spurious OA head ptr: non-integral report offset\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
1014 | break; |
1015 | } |
1016 | |
1017 | /* The report-ID field for periodic samples includes |
1018 | * some undocumented flags related to what triggered |
1019 | * the report and is never expected to be zero so we |
1020 | * can check that the report isn't invalid before |
1021 | * copying it to userspace... |
1022 | */ |
1023 | if (report32[0] == 0) { |
1024 | if (__ratelimit(&stream->perf->spurious_report_rs)) |
1025 | DRM_NOTE("Skipping spurious, invalid OA report\n")printk("\0015" "[" "drm" "] " "Skipping spurious, invalid OA report\n" ); |
1026 | continue; |
1027 | } |
1028 | |
1029 | ret = append_oa_sample(stream, buf, count, offset, report); |
1030 | if (ret) |
1031 | break; |
1032 | |
1033 | /* Clear out the first 2 dwords as a mean to detect unlanded |
1034 | * reports. |
1035 | */ |
1036 | report32[0] = 0; |
1037 | report32[1] = 0; |
1038 | } |
1039 | |
1040 | if (start_offset != *offset) { |
1041 | spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags)do { flags = 0; mtx_enter(&stream->oa_buffer.ptr_lock) ; } while (0); |
1042 | |
1043 | /* We removed the gtt_offset for the copy loop above, indexing |
1044 | * relative to oa_buf_base so put back here... |
1045 | */ |
1046 | head += gtt_offset; |
1047 | |
1048 | intel_uncore_write(uncore, GEN7_OASTATUS2((const i915_reg_t){ .reg = (0x2368) }), |
1049 | (head & GEN7_OASTATUS2_HEAD_MASK0xffffffc0) | |
1050 | GEN7_OASTATUS2_MEM_SELECT_GGTT(1 << 0)); |
1051 | stream->oa_buffer.head = head; |
1052 | |
1053 | spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags)do { (void)(flags); mtx_leave(&stream->oa_buffer.ptr_lock ); } while (0); |
1054 | } |
1055 | |
1056 | return ret; |
1057 | } |
1058 | |
1059 | /** |
1060 | * gen7_oa_read - copy status records then buffered OA reports |
1061 | * @stream: An i915-perf stream opened for OA metrics |
1062 | * @buf: destination buffer given by userspace |
1063 | * @count: the number of bytes userspace wants to read |
1064 | * @offset: (inout): the current position for writing into @buf |
1065 | * |
1066 | * Checks Gen 7 specific OA unit status registers and if necessary appends |
1067 | * corresponding status records for userspace (such as for a buffer full |
1068 | * condition) and then initiate appending any buffered OA reports. |
1069 | * |
1070 | * Updates @offset according to the number of bytes successfully copied into |
1071 | * the userspace buffer. |
1072 | * |
1073 | * Returns: zero on success or a negative error code |
1074 | */ |
1075 | static int gen7_oa_read(struct i915_perf_stream *stream, |
1076 | char __user *buf, |
1077 | size_t count, |
1078 | size_t *offset) |
1079 | { |
1080 | struct intel_uncore *uncore = stream->uncore; |
1081 | u32 oastatus1; |
1082 | int ret; |
1083 | |
1084 | if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr)({ int __ret = !!((!stream->oa_buffer.vaddr)); if (__ret) printf ("%s %s: " "%s", dev_driver_string(((&uncore->i915-> drm))->dev), "", "drm_WARN_ON(" "!stream->oa_buffer.vaddr" ")"); __builtin_expect(!!(__ret), 0); })) |
1085 | return -EIO5; |
1086 | |
1087 | oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1((const i915_reg_t){ .reg = (0x2364) })); |
1088 | |
1089 | /* XXX: On Haswell we don't have a safe way to clear oastatus1 |
1090 | * bits while the OA unit is enabled (while the tail pointer |
1091 | * may be updated asynchronously) so we ignore status bits |
1092 | * that have already been reported to userspace. |
1093 | */ |
1094 | oastatus1 &= ~stream->perf->gen7_latched_oastatus1; |
1095 | |
1096 | /* We treat OABUFFER_OVERFLOW as a significant error: |
1097 | * |
1098 | * - The status can be interpreted to mean that the buffer is |
1099 | * currently full (with a higher precedence than OA_TAKEN() |
1100 | * which will start to report a near-empty buffer after an |
1101 | * overflow) but it's awkward that we can't clear the status |
1102 | * on Haswell, so without a reset we won't be able to catch |
1103 | * the state again. |
1104 | * |
1105 | * - Since it also implies the HW has started overwriting old |
1106 | * reports it may also affect our sanity checks for invalid |
1107 | * reports when copying to userspace that assume new reports |
1108 | * are being written to cleared memory. |
1109 | * |
1110 | * - In the future we may want to introduce a flight recorder |
1111 | * mode where the driver will automatically maintain a safe |
1112 | * guard band between head/tail, avoiding this overflow |
1113 | * condition, but we avoid the added driver complexity for |
1114 | * now. |
1115 | */ |
1116 | if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)__builtin_expect(!!(oastatus1 & (1 << 1)), 0)) { |
1117 | ret = append_oa_status(stream, buf, count, offset, |
1118 | DRM_I915_PERF_RECORD_OA_BUFFER_LOST); |
1119 | if (ret) |
1120 | return ret; |
1121 | |
1122 | drm_dbg(&stream->perf->i915->drm,__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA buffer overflow (exponent = %d): force restart\n" , stream->period_exponent) |
1123 | "OA buffer overflow (exponent = %d): force restart\n",__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA buffer overflow (exponent = %d): force restart\n" , stream->period_exponent) |
1124 | stream->period_exponent)__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA buffer overflow (exponent = %d): force restart\n" , stream->period_exponent); |
1125 | |
1126 | stream->perf->ops.oa_disable(stream); |
1127 | stream->perf->ops.oa_enable(stream); |
1128 | |
1129 | oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1((const i915_reg_t){ .reg = (0x2364) })); |
1130 | } |
1131 | |
1132 | if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)__builtin_expect(!!(oastatus1 & (1 << 0)), 0)) { |
1133 | ret = append_oa_status(stream, buf, count, offset, |
1134 | DRM_I915_PERF_RECORD_OA_REPORT_LOST); |
1135 | if (ret) |
1136 | return ret; |
1137 | stream->perf->gen7_latched_oastatus1 |= |
1138 | GEN7_OASTATUS1_REPORT_LOST(1 << 0); |
1139 | } |
1140 | |
1141 | return gen7_append_oa_reports(stream, buf, count, offset); |
1142 | } |
1143 | |
1144 | #ifdef notyet |
1145 | |
1146 | /** |
1147 | * i915_oa_wait_unlocked - handles blocking IO until OA data available |
1148 | * @stream: An i915-perf stream opened for OA metrics |
1149 | * |
1150 | * Called when userspace tries to read() from a blocking stream FD opened |
1151 | * for OA metrics. It waits until the hrtimer callback finds a non-empty |
1152 | * OA buffer and wakes us. |
1153 | * |
1154 | * Note: it's acceptable to have this return with some false positives |
1155 | * since any subsequent read handling will return -EAGAIN if there isn't |
1156 | * really data ready for userspace yet. |
1157 | * |
1158 | * Returns: zero on success or a negative error code |
1159 | */ |
1160 | static int i915_oa_wait_unlocked(struct i915_perf_stream *stream) |
1161 | { |
1162 | /* We would wait indefinitely if periodic sampling is not enabled */ |
1163 | if (!stream->periodic) |
1164 | return -EIO5; |
1165 | |
1166 | return wait_event_interruptible(stream->poll_wq,({ int __ret = 0; if (!(oa_buffer_check_unlocked(stream))) __ret = ({ long __ret = 0; struct wait_queue_entry __wq_entry; init_wait_entry (&__wq_entry, 0); do { int __error, __wait; unsigned long deadline; ((!cold) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/drm/i915/i915_perf.c" , 1167, "!cold")); prepare_to_wait(&stream->poll_wq, & __wq_entry, 0x100); deadline = jiffies + __ret; __wait = !(oa_buffer_check_unlocked (stream)); __error = sleep_finish(__ret, __wait); if ((0) > 0) __ret = deadline - jiffies; if (__error == -1 || __error == 4) { __ret = -4; break; } if ((0) > 0 && (__ret <= 0 || __error == 35)) { __ret = ((oa_buffer_check_unlocked(stream ))) ? 1 : 0; break; } } while (__ret > 0 && !(oa_buffer_check_unlocked (stream))); finish_wait(&stream->poll_wq, &__wq_entry ); __ret; }); __ret; }) |
1167 | oa_buffer_check_unlocked(stream))({ int __ret = 0; if (!(oa_buffer_check_unlocked(stream))) __ret = ({ long __ret = 0; struct wait_queue_entry __wq_entry; init_wait_entry (&__wq_entry, 0); do { int __error, __wait; unsigned long deadline; ((!cold) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/drm/i915/i915_perf.c" , 1167, "!cold")); prepare_to_wait(&stream->poll_wq, & __wq_entry, 0x100); deadline = jiffies + __ret; __wait = !(oa_buffer_check_unlocked (stream)); __error = sleep_finish(__ret, __wait); if ((0) > 0) __ret = deadline - jiffies; if (__error == -1 || __error == 4) { __ret = -4; break; } if ((0) > 0 && (__ret <= 0 || __error == 35)) { __ret = ((oa_buffer_check_unlocked(stream ))) ? 1 : 0; break; } } while (__ret > 0 && !(oa_buffer_check_unlocked (stream))); finish_wait(&stream->poll_wq, &__wq_entry ); __ret; }); __ret; }); |
1168 | } |
1169 | |
1170 | /** |
1171 | * i915_oa_poll_wait - call poll_wait() for an OA stream poll() |
1172 | * @stream: An i915-perf stream opened for OA metrics |
1173 | * @file: An i915 perf stream file |
1174 | * @wait: poll() state table |
1175 | * |
1176 | * For handling userspace polling on an i915 perf stream opened for OA metrics, |
1177 | * this starts a poll_wait with the wait queue that our hrtimer callback wakes |
1178 | * when it sees data ready to read in the circular OA buffer. |
1179 | */ |
1180 | static void i915_oa_poll_wait(struct i915_perf_stream *stream, |
1181 | struct file *file, |
1182 | poll_table *wait) |
1183 | { |
1184 | poll_wait(file, &stream->poll_wq, wait); |
1185 | } |
1186 | |
1187 | /** |
1188 | * i915_oa_read - just calls through to &i915_oa_ops->read |
1189 | * @stream: An i915-perf stream opened for OA metrics |
1190 | * @buf: destination buffer given by userspace |
1191 | * @count: the number of bytes userspace wants to read |
1192 | * @offset: (inout): the current position for writing into @buf |
1193 | * |
1194 | * Updates @offset according to the number of bytes successfully copied into |
1195 | * the userspace buffer. |
1196 | * |
1197 | * Returns: zero on success or a negative error code |
1198 | */ |
1199 | static int i915_oa_read(struct i915_perf_stream *stream, |
1200 | char __user *buf, |
1201 | size_t count, |
1202 | size_t *offset) |
1203 | { |
1204 | return stream->perf->ops.read(stream, buf, count, offset); |
1205 | } |
1206 | |
1207 | static struct intel_context *oa_pin_context(struct i915_perf_stream *stream) |
1208 | { |
1209 | struct i915_gem_engines_iter it; |
1210 | struct i915_gem_context *ctx = stream->ctx; |
1211 | struct intel_context *ce; |
1212 | struct i915_gem_ww_ctx ww; |
1213 | int err = -ENODEV19; |
1214 | |
1215 | for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)for (i915_gem_engines_iter_init(&(it), (i915_gem_context_lock_engines (ctx))); ((ce) = i915_gem_engines_iter_next(&(it)));) { |
1216 | if (ce->engine != stream->engine) /* first match! */ |
1217 | continue; |
1218 | |
1219 | err = 0; |
1220 | break; |
1221 | } |
1222 | i915_gem_context_unlock_engines(ctx); |
1223 | |
1224 | if (err) |
1225 | return ERR_PTR(err); |
1226 | |
1227 | i915_gem_ww_ctx_init(&ww, true1); |
1228 | retry: |
1229 | /* |
1230 | * As the ID is the gtt offset of the context's vma we |
1231 | * pin the vma to ensure the ID remains fixed. |
1232 | */ |
1233 | err = intel_context_pin_ww(ce, &ww); |
1234 | if (err == -EDEADLK11) { |
1235 | err = i915_gem_ww_ctx_backoff(&ww); |
1236 | if (!err) |
1237 | goto retry; |
1238 | } |
1239 | i915_gem_ww_ctx_fini(&ww); |
1240 | |
1241 | if (err) |
1242 | return ERR_PTR(err); |
1243 | |
1244 | stream->pinned_ctx = ce; |
1245 | return stream->pinned_ctx; |
1246 | } |
1247 | |
1248 | /** |
1249 | * oa_get_render_ctx_id - determine and hold ctx hw id |
1250 | * @stream: An i915-perf stream opened for OA metrics |
1251 | * |
1252 | * Determine the render context hw id, and ensure it remains fixed for the |
1253 | * lifetime of the stream. This ensures that we don't have to worry about |
1254 | * updating the context ID in OACONTROL on the fly. |
1255 | * |
1256 | * Returns: zero on success or a negative error code |
1257 | */ |
1258 | static int oa_get_render_ctx_id(struct i915_perf_stream *stream) |
1259 | { |
1260 | struct intel_context *ce; |
1261 | |
1262 | ce = oa_pin_context(stream); |
1263 | if (IS_ERR(ce)) |
1264 | return PTR_ERR(ce); |
1265 | |
1266 | switch (GRAPHICS_VER(ce->engine->i915)((&(ce->engine->i915)->__runtime)->graphics.ip .ver)) { |
1267 | case 7: { |
1268 | /* |
1269 | * On Haswell we don't do any post processing of the reports |
1270 | * and don't need to use the mask. |
1271 | */ |
1272 | stream->specific_ctx_id = i915_ggtt_offset(ce->state); |
1273 | stream->specific_ctx_id_mask = 0; |
1274 | break; |
1275 | } |
1276 | |
1277 | case 8: |
1278 | case 9: |
1279 | if (intel_engine_uses_guc(ce->engine)) { |
1280 | /* |
1281 | * When using GuC, the context descriptor we write in |
1282 | * i915 is read by GuC and rewritten before it's |
1283 | * actually written into the hardware. The LRCA is |
1284 | * what is put into the context id field of the |
1285 | * context descriptor by GuC. Because it's aligned to |
1286 | * a page, the lower 12bits are always at 0 and |
1287 | * dropped by GuC. They won't be part of the context |
1288 | * ID in the OA reports, so squash those lower bits. |
1289 | */ |
1290 | stream->specific_ctx_id = ce->lrc.lrca >> 12; |
1291 | |
1292 | /* |
1293 | * GuC uses the top bit to signal proxy submission, so |
1294 | * ignore that bit. |
1295 | */ |
1296 | stream->specific_ctx_id_mask = |
1297 | (1U << (GEN8_CTX_ID_WIDTH21 - 1)) - 1; |
1298 | } else { |
1299 | stream->specific_ctx_id_mask = |
1300 | (1U << GEN8_CTX_ID_WIDTH21) - 1; |
1301 | stream->specific_ctx_id = stream->specific_ctx_id_mask; |
1302 | } |
1303 | break; |
1304 | |
1305 | case 11: |
1306 | case 12: |
1307 | if (GRAPHICS_VER_FULL(ce->engine->i915)(((&(ce->engine->i915)->__runtime)->graphics. ip.ver) << 8 | ((&(ce->engine->i915)->__runtime )->graphics.ip.rel)) >= IP_VER(12, 50)((12) << 8 | (50))) { |
1308 | stream->specific_ctx_id_mask = |
1309 | ((1U << XEHP_SW_CTX_ID_WIDTH16) - 1) << |
1310 | (XEHP_SW_CTX_ID_SHIFT39 - 32); |
1311 | stream->specific_ctx_id = |
1312 | (XEHP_MAX_CONTEXT_HW_ID0xFFFF - 1) << |
1313 | (XEHP_SW_CTX_ID_SHIFT39 - 32); |
1314 | } else { |
1315 | stream->specific_ctx_id_mask = |
1316 | ((1U << GEN11_SW_CTX_ID_WIDTH11) - 1) << (GEN11_SW_CTX_ID_SHIFT37 - 32); |
1317 | /* |
1318 | * Pick an unused context id |
1319 | * 0 - BITS_PER_LONG are used by other contexts |
1320 | * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context |
1321 | */ |
1322 | stream->specific_ctx_id = |
1323 | (GEN12_MAX_CONTEXT_HW_ID((1 << 11) - 1) - 1) << (GEN11_SW_CTX_ID_SHIFT37 - 32); |
1324 | } |
1325 | break; |
1326 | |
1327 | default: |
1328 | MISSING_CASE(GRAPHICS_VER(ce->engine->i915))({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n" , "((&(ce->engine->i915)->__runtime)->graphics.ip.ver)" , (long)(((&(ce->engine->i915)->__runtime)->graphics .ip.ver))); __builtin_expect(!!(__ret), 0); }); |
1329 | } |
1330 | |
1331 | ce->tag = stream->specific_ctx_id; |
1332 | |
1333 | drm_dbg(&stream->perf->i915->drm,__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "filtering on ctx_id=0x%x ctx_id_mask=0x%x\n" , stream->specific_ctx_id, stream->specific_ctx_id_mask ) |
1334 | "filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "filtering on ctx_id=0x%x ctx_id_mask=0x%x\n" , stream->specific_ctx_id, stream->specific_ctx_id_mask ) |
1335 | stream->specific_ctx_id,__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "filtering on ctx_id=0x%x ctx_id_mask=0x%x\n" , stream->specific_ctx_id, stream->specific_ctx_id_mask ) |
1336 | stream->specific_ctx_id_mask)__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "filtering on ctx_id=0x%x ctx_id_mask=0x%x\n" , stream->specific_ctx_id, stream->specific_ctx_id_mask ); |
1337 | |
1338 | return 0; |
1339 | } |
1340 | |
1341 | /** |
1342 | * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold |
1343 | * @stream: An i915-perf stream opened for OA metrics |
1344 | * |
1345 | * In case anything needed doing to ensure the context HW ID would remain valid |
1346 | * for the lifetime of the stream, then that can be undone here. |
1347 | */ |
1348 | static void oa_put_render_ctx_id(struct i915_perf_stream *stream) |
1349 | { |
1350 | struct intel_context *ce; |
1351 | |
1352 | ce = fetch_and_zero(&stream->pinned_ctx)({ typeof(*&stream->pinned_ctx) __T = *(&stream-> pinned_ctx); *(&stream->pinned_ctx) = (typeof(*&stream ->pinned_ctx))0; __T; }); |
1353 | if (ce) { |
1354 | ce->tag = 0; /* recomputed on next submission after parking */ |
1355 | intel_context_unpin(ce); |
1356 | } |
1357 | |
1358 | stream->specific_ctx_id = INVALID_CTX_ID0xffffffff; |
1359 | stream->specific_ctx_id_mask = 0; |
1360 | } |
1361 | |
1362 | static void |
1363 | free_oa_buffer(struct i915_perf_stream *stream) |
1364 | { |
1365 | i915_vma_unpin_and_release(&stream->oa_buffer.vma, |
1366 | I915_VMA_RELEASE_MAP(1UL << (0))); |
1367 | |
1368 | stream->oa_buffer.vaddr = NULL((void *)0); |
1369 | } |
1370 | |
1371 | static void |
1372 | free_oa_configs(struct i915_perf_stream *stream) |
1373 | { |
1374 | struct i915_oa_config_bo *oa_bo, *tmp; |
1375 | |
1376 | i915_oa_config_put(stream->oa_config); |
1377 | llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)for (oa_bo = ({ const __typeof( ((__typeof(*oa_bo) *)0)->node ) *__mptr = ((stream->oa_config_bos.first)); (__typeof(*oa_bo ) *)( (char *)__mptr - __builtin_offsetof(__typeof(*oa_bo), node ) );}); ((char *)(oa_bo) + __builtin_offsetof(typeof(*(oa_bo) ), node)) != ((void *)0) && (tmp = ({ const __typeof( ((__typeof(*oa_bo) *)0)->node ) *__mptr = (oa_bo->node .next); (__typeof(*oa_bo) *)( (char *)__mptr - __builtin_offsetof (__typeof(*oa_bo), node) );}), oa_bo); oa_bo = tmp) |
1378 | free_oa_config_bo(oa_bo); |
1379 | } |
1380 | |
1381 | static void |
1382 | free_noa_wait(struct i915_perf_stream *stream) |
1383 | { |
1384 | i915_vma_unpin_and_release(&stream->noa_wait, 0); |
1385 | } |
1386 | |
1387 | static void i915_oa_stream_destroy(struct i915_perf_stream *stream) |
1388 | { |
1389 | struct i915_perf *perf = stream->perf; |
1390 | |
1391 | if (WARN_ON(stream != perf->exclusive_stream)({ int __ret = !!(stream != perf->exclusive_stream); if (__ret ) printf("WARNING %s failed at %s:%d\n", "stream != perf->exclusive_stream" , "/usr/src/sys/dev/pci/drm/i915/i915_perf.c", 1391); __builtin_expect (!!(__ret), 0); })) |
1392 | return; |
1393 | |
1394 | /* |
1395 | * Unset exclusive_stream first, it will be checked while disabling |
1396 | * the metric set on gen8+. |
1397 | * |
1398 | * See i915_oa_init_reg_state() and lrc_configure_all_contexts() |
1399 | */ |
1400 | WRITE_ONCE(perf->exclusive_stream, NULL)({ typeof(perf->exclusive_stream) __tmp = (((void *)0)); * (volatile typeof(perf->exclusive_stream) *)&(perf-> exclusive_stream) = __tmp; __tmp; }); |
1401 | perf->ops.disable_metric_set(stream); |
1402 | |
1403 | free_oa_buffer(stream); |
1404 | |
1405 | intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL); |
1406 | intel_engine_pm_put(stream->engine); |
1407 | |
1408 | if (stream->ctx) |
1409 | oa_put_render_ctx_id(stream); |
1410 | |
1411 | free_oa_configs(stream); |
1412 | free_noa_wait(stream); |
1413 | |
1414 | if (perf->spurious_report_rs.missed) { |
1415 | DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",printk("\0015" "[" "drm" "] " "%d spurious OA report notices suppressed due to ratelimiting\n" , perf->spurious_report_rs.missed) |
1416 | perf->spurious_report_rs.missed)printk("\0015" "[" "drm" "] " "%d spurious OA report notices suppressed due to ratelimiting\n" , perf->spurious_report_rs.missed); |
1417 | } |
1418 | } |
1419 | |
1420 | #endif |
1421 | |
1422 | static void gen7_init_oa_buffer(struct i915_perf_stream *stream) |
1423 | { |
1424 | struct intel_uncore *uncore = stream->uncore; |
1425 | u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); |
1426 | unsigned long flags; |
1427 | |
1428 | spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags)do { flags = 0; mtx_enter(&stream->oa_buffer.ptr_lock) ; } while (0); |
1429 | |
1430 | /* Pre-DevBDW: OABUFFER must be set with counters off, |
1431 | * before OASTATUS1, but after OASTATUS2 |
1432 | */ |
1433 | intel_uncore_write(uncore, GEN7_OASTATUS2((const i915_reg_t){ .reg = (0x2368) }), /* head */ |
1434 | gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT(1 << 0)); |
1435 | stream->oa_buffer.head = gtt_offset; |
1436 | |
1437 | intel_uncore_write(uncore, GEN7_OABUFFER((const i915_reg_t){ .reg = (0x23B0) }), gtt_offset); |
1438 | |
1439 | intel_uncore_write(uncore, GEN7_OASTATUS1((const i915_reg_t){ .reg = (0x2364) }), /* tail */ |
1440 | gtt_offset | OABUFFER_SIZE_16M(7 << 3)); |
1441 | |
1442 | /* Mark that we need updated tail pointers to read from... */ |
1443 | stream->oa_buffer.aging_tail = INVALID_TAIL_PTR0xffffffff; |
1444 | stream->oa_buffer.tail = gtt_offset; |
1445 | |
1446 | spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags)do { (void)(flags); mtx_leave(&stream->oa_buffer.ptr_lock ); } while (0); |
1447 | |
1448 | /* On Haswell we have to track which OASTATUS1 flags we've |
1449 | * already seen since they can't be cleared while periodic |
1450 | * sampling is enabled. |
1451 | */ |
1452 | stream->perf->gen7_latched_oastatus1 = 0; |
1453 | |
1454 | /* NB: although the OA buffer will initially be allocated |
1455 | * zeroed via shmfs (and so this memset is redundant when |
1456 | * first allocating), we may re-init the OA buffer, either |
1457 | * when re-enabling a stream or in error/reset paths. |
1458 | * |
1459 | * The reason we clear the buffer for each re-init is for the |
1460 | * sanity check in gen7_append_oa_reports() that looks at the |
1461 | * report-id field to make sure it's non-zero which relies on |
1462 | * the assumption that new reports are being written to zeroed |
1463 | * memory... |
1464 | */ |
1465 | memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE)__builtin_memset((stream->oa_buffer.vaddr), (0), ((16 << 20))); |
1466 | } |
1467 | |
1468 | static void gen8_init_oa_buffer(struct i915_perf_stream *stream) |
1469 | { |
1470 | struct intel_uncore *uncore = stream->uncore; |
1471 | u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); |
1472 | unsigned long flags; |
1473 | |
1474 | spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags)do { flags = 0; mtx_enter(&stream->oa_buffer.ptr_lock) ; } while (0); |
1475 | |
1476 | intel_uncore_write(uncore, GEN8_OASTATUS((const i915_reg_t){ .reg = (0x2b08) }), 0); |
1477 | intel_uncore_write(uncore, GEN8_OAHEADPTR((const i915_reg_t){ .reg = (0x2B0C) }), gtt_offset); |
1478 | stream->oa_buffer.head = gtt_offset; |
1479 | |
1480 | intel_uncore_write(uncore, GEN8_OABUFFER_UDW((const i915_reg_t){ .reg = (0x23b4) }), 0); |
1481 | |
1482 | /* |
1483 | * PRM says: |
1484 | * |
1485 | * "This MMIO must be set before the OATAILPTR |
1486 | * register and after the OAHEADPTR register. This is |
1487 | * to enable proper functionality of the overflow |
1488 | * bit." |
1489 | */ |
1490 | intel_uncore_write(uncore, GEN8_OABUFFER((const i915_reg_t){ .reg = (0x2b14) }), gtt_offset | |
1491 | OABUFFER_SIZE_16M(7 << 3) | GEN8_OABUFFER_MEM_SELECT_GGTT(1 << 0)); |
1492 | intel_uncore_write(uncore, GEN8_OATAILPTR((const i915_reg_t){ .reg = (0x2B10) }), gtt_offset & GEN8_OATAILPTR_MASK0xffffffc0); |
1493 | |
1494 | /* Mark that we need updated tail pointers to read from... */ |
1495 | stream->oa_buffer.aging_tail = INVALID_TAIL_PTR0xffffffff; |
1496 | stream->oa_buffer.tail = gtt_offset; |
1497 | |
1498 | /* |
1499 | * Reset state used to recognise context switches, affecting which |
1500 | * reports we will forward to userspace while filtering for a single |
1501 | * context. |
1502 | */ |
1503 | stream->oa_buffer.last_ctx_id = INVALID_CTX_ID0xffffffff; |
1504 | |
1505 | spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags)do { (void)(flags); mtx_leave(&stream->oa_buffer.ptr_lock ); } while (0); |
1506 | |
1507 | /* |
1508 | * NB: although the OA buffer will initially be allocated |
1509 | * zeroed via shmfs (and so this memset is redundant when |
1510 | * first allocating), we may re-init the OA buffer, either |
1511 | * when re-enabling a stream or in error/reset paths. |
1512 | * |
1513 | * The reason we clear the buffer for each re-init is for the |
1514 | * sanity check in gen8_append_oa_reports() that looks at the |
1515 | * reason field to make sure it's non-zero which relies on |
1516 | * the assumption that new reports are being written to zeroed |
1517 | * memory... |
1518 | */ |
1519 | memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE)__builtin_memset((stream->oa_buffer.vaddr), (0), ((16 << 20))); |
1520 | } |
1521 | |
1522 | static void gen12_init_oa_buffer(struct i915_perf_stream *stream) |
1523 | { |
1524 | struct intel_uncore *uncore = stream->uncore; |
1525 | u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); |
1526 | unsigned long flags; |
1527 | |
1528 | spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags)do { flags = 0; mtx_enter(&stream->oa_buffer.ptr_lock) ; } while (0); |
1529 | |
1530 | intel_uncore_write(uncore, GEN12_OAG_OASTATUS((const i915_reg_t){ .reg = (0xdafc) }), 0); |
1531 | intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR((const i915_reg_t){ .reg = (0xdb00) }), |
1532 | gtt_offset & GEN12_OAG_OAHEADPTR_MASK0xffffffc0); |
1533 | stream->oa_buffer.head = gtt_offset; |
1534 | |
1535 | /* |
1536 | * PRM says: |
1537 | * |
1538 | * "This MMIO must be set before the OATAILPTR |
1539 | * register and after the OAHEADPTR register. This is |
1540 | * to enable proper functionality of the overflow |
1541 | * bit." |
1542 | */ |
1543 | intel_uncore_write(uncore, GEN12_OAG_OABUFFER((const i915_reg_t){ .reg = (0xdb08) }), gtt_offset | |
1544 | OABUFFER_SIZE_16M(7 << 3) | GEN8_OABUFFER_MEM_SELECT_GGTT(1 << 0)); |
1545 | intel_uncore_write(uncore, GEN12_OAG_OATAILPTR((const i915_reg_t){ .reg = (0xdb04) }), |
1546 | gtt_offset & GEN12_OAG_OATAILPTR_MASK0xffffffc0); |
1547 | |
1548 | /* Mark that we need updated tail pointers to read from... */ |
1549 | stream->oa_buffer.aging_tail = INVALID_TAIL_PTR0xffffffff; |
1550 | stream->oa_buffer.tail = gtt_offset; |
1551 | |
1552 | /* |
1553 | * Reset state used to recognise context switches, affecting which |
1554 | * reports we will forward to userspace while filtering for a single |
1555 | * context. |
1556 | */ |
1557 | stream->oa_buffer.last_ctx_id = INVALID_CTX_ID0xffffffff; |
1558 | |
1559 | spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags)do { (void)(flags); mtx_leave(&stream->oa_buffer.ptr_lock ); } while (0); |
1560 | |
1561 | /* |
1562 | * NB: although the OA buffer will initially be allocated |
1563 | * zeroed via shmfs (and so this memset is redundant when |
1564 | * first allocating), we may re-init the OA buffer, either |
1565 | * when re-enabling a stream or in error/reset paths. |
1566 | * |
1567 | * The reason we clear the buffer for each re-init is for the |
1568 | * sanity check in gen8_append_oa_reports() that looks at the |
1569 | * reason field to make sure it's non-zero which relies on |
1570 | * the assumption that new reports are being written to zeroed |
1571 | * memory... |
1572 | */ |
1573 | memset(stream->oa_buffer.vaddr, 0,__builtin_memset((stream->oa_buffer.vaddr), (0), (stream-> oa_buffer.vma->size)) |
1574 | stream->oa_buffer.vma->size)__builtin_memset((stream->oa_buffer.vaddr), (0), (stream-> oa_buffer.vma->size)); |
1575 | } |
1576 | |
1577 | #ifdef notyet |
1578 | |
1579 | static int alloc_oa_buffer(struct i915_perf_stream *stream) |
1580 | { |
1581 | struct drm_i915_privateinteldrm_softc *i915 = stream->perf->i915; |
1582 | struct drm_i915_gem_object *bo; |
1583 | struct i915_vma *vma; |
1584 | int ret; |
1585 | |
1586 | if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma)({ int __ret = !!((stream->oa_buffer.vma)); if (__ret) printf ("%s %s: " "%s", dev_driver_string(((&i915->drm))-> dev), "", "drm_WARN_ON(" "stream->oa_buffer.vma" ")"); __builtin_expect (!!(__ret), 0); })) |
1587 | return -ENODEV19; |
1588 | |
1589 | BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE)0; |
1590 | BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M)extern char _ctassert[(!((16 << 20) < (128 << 10 ) || (16 << 20) > (16 << 20))) ? 1 : -1 ] __attribute__ ((__unused__)); |
1591 | |
1592 | bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE(16 << 20)); |
1593 | if (IS_ERR(bo)) { |
1594 | drm_err(&i915->drm, "Failed to allocate OA buffer\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to allocate OA buffer\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
1595 | return PTR_ERR(bo); |
1596 | } |
1597 | |
1598 | i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC); |
1599 | |
1600 | /* PreHSW required 512K alignment, HSW requires 16M */ |
1601 | vma = i915_gem_object_ggtt_pin(bo, NULL((void *)0), 0, SZ_16M(16 << 20), 0); |
1602 | if (IS_ERR(vma)) { |
1603 | ret = PTR_ERR(vma); |
1604 | goto err_unref; |
1605 | } |
1606 | stream->oa_buffer.vma = vma; |
1607 | |
1608 | stream->oa_buffer.vaddr = |
1609 | i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB); |
1610 | if (IS_ERR(stream->oa_buffer.vaddr)) { |
1611 | ret = PTR_ERR(stream->oa_buffer.vaddr); |
1612 | goto err_unpin; |
1613 | } |
1614 | |
1615 | return 0; |
1616 | |
1617 | err_unpin: |
1618 | __i915_vma_unpin(vma); |
1619 | |
1620 | err_unref: |
1621 | i915_gem_object_put(bo); |
1622 | |
1623 | stream->oa_buffer.vaddr = NULL((void *)0); |
1624 | stream->oa_buffer.vma = NULL((void *)0); |
1625 | |
1626 | return ret; |
1627 | } |
1628 | |
1629 | static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs, |
1630 | bool_Bool save, i915_reg_t reg, u32 offset, |
1631 | u32 dword_count) |
1632 | { |
1633 | u32 cmd; |
1634 | u32 d; |
1635 | |
1636 | cmd = save ? MI_STORE_REGISTER_MEM(((0x0) << 29) | (0x24) << 23 | (1)) : MI_LOAD_REGISTER_MEM(((0x0) << 29) | (0x29) << 23 | (1)); |
1637 | cmd |= MI_SRM_LRM_GLOBAL_GTT(1<<22); |
1638 | if (GRAPHICS_VER(stream->perf->i915)((&(stream->perf->i915)->__runtime)->graphics .ip.ver) >= 8) |
1639 | cmd++; |
1640 | |
1641 | for (d = 0; d < dword_count; d++) { |
1642 | *cs++ = cmd; |
1643 | *cs++ = i915_mmio_reg_offset(reg) + 4 * d; |
1644 | *cs++ = intel_gt_scratch_offset(stream->engine->gt, |
1645 | offset) + 4 * d; |
1646 | *cs++ = 0; |
1647 | } |
1648 | |
1649 | return cs; |
1650 | } |
1651 | |
1652 | static int alloc_noa_wait(struct i915_perf_stream *stream) |
1653 | { |
1654 | struct drm_i915_privateinteldrm_softc *i915 = stream->perf->i915; |
1655 | struct drm_i915_gem_object *bo; |
1656 | struct i915_vma *vma; |
1657 | const u64 delay_ticks = 0xffffffffffffffff - |
1658 | intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915), |
1659 | atomic64_read(&stream->perf->noa_programming_delay)({ typeof(*(&stream->perf->noa_programming_delay)) __tmp = *(volatile typeof(*(&stream->perf->noa_programming_delay )) *)&(*(&stream->perf->noa_programming_delay)) ; membar_datadep_consumer(); __tmp; })); |
1660 | const u32 base = stream->engine->mmio_base; |
1661 | #define CS_GPR(x) GEN8_RING_CS_GPR(base, x)((const i915_reg_t){ .reg = ((base) + 0x600 + (x) * 8) }) |
1662 | u32 *batch, *ts0, *cs, *jump; |
1663 | struct i915_gem_ww_ctx ww; |
1664 | int ret, i; |
1665 | enum { |
1666 | START_TS, |
1667 | NOW_TS, |
1668 | DELTA_TS, |
1669 | JUMP_PREDICATE, |
1670 | DELTA_TARGET, |
1671 | N_CS_GPR |
1672 | }; |
1673 | |
1674 | bo = i915_gem_object_create_internal(i915, 4096); |
1675 | if (IS_ERR(bo)) { |
1676 | drm_err(&i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to allocate NOA wait batchbuffer\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__) |
1677 | "Failed to allocate NOA wait batchbuffer\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to allocate NOA wait batchbuffer\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
1678 | return PTR_ERR(bo); |
1679 | } |
1680 | |
1681 | i915_gem_ww_ctx_init(&ww, true1); |
1682 | retry: |
1683 | ret = i915_gem_object_lock(bo, &ww); |
1684 | if (ret) |
1685 | goto out_ww; |
1686 | |
1687 | /* |
1688 | * We pin in GGTT because we jump into this buffer now because |
1689 | * multiple OA config BOs will have a jump to this address and it |
1690 | * needs to be fixed during the lifetime of the i915/perf stream. |
1691 | */ |
1692 | vma = i915_gem_object_ggtt_pin_ww(bo, &ww, NULL((void *)0), 0, 0, PIN_HIGH(1ULL << (5))); |
1693 | if (IS_ERR(vma)) { |
1694 | ret = PTR_ERR(vma); |
1695 | goto out_ww; |
1696 | } |
1697 | |
1698 | batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB); |
1699 | if (IS_ERR(batch)) { |
1700 | ret = PTR_ERR(batch); |
1701 | goto err_unpin; |
1702 | } |
1703 | |
1704 | /* Save registers. */ |
1705 | for (i = 0; i < N_CS_GPR; i++) |
1706 | cs = save_restore_register( |
1707 | stream, cs, true1 /* save */, CS_GPR(i), |
1708 | INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2); |
1709 | cs = save_restore_register( |
1710 | stream, cs, true1 /* save */, MI_PREDICATE_RESULT_1(RENDER_RING_BASE)((const i915_reg_t){ .reg = ((0x02000) + 0x41c) }), |
1711 | INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1); |
1712 | |
1713 | /* First timestamp snapshot location. */ |
1714 | ts0 = cs; |
1715 | |
1716 | /* |
1717 | * Initial snapshot of the timestamp register to implement the wait. |
1718 | * We work with 32b values, so clear out the top 32b bits of the |
1719 | * register because the ALU works 64bits. |
1720 | */ |
1721 | *cs++ = MI_LOAD_REGISTER_IMM(1)(((0x0) << 29) | (0x22) << 23 | (2*(1)-1)); |
1722 | *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4; |
1723 | *cs++ = 0; |
1724 | *cs++ = MI_LOAD_REGISTER_REG(((0x0) << 29) | (0x2A) << 23 | (1)) | (3 - 2); |
1725 | *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base)((const i915_reg_t){ .reg = ((base) + 0x358) })); |
1726 | *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)); |
1727 | |
1728 | /* |
1729 | * This is the location we're going to jump back into until the |
1730 | * required amount of time has passed. |
1731 | */ |
1732 | jump = cs; |
1733 | |
1734 | /* |
1735 | * Take another snapshot of the timestamp register. Take care to clear |
1736 | * up the top 32bits of CS_GPR(1) as we're using it for other |
1737 | * operations below. |
1738 | */ |
1739 | *cs++ = MI_LOAD_REGISTER_IMM(1)(((0x0) << 29) | (0x22) << 23 | (2*(1)-1)); |
1740 | *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4; |
1741 | *cs++ = 0; |
1742 | *cs++ = MI_LOAD_REGISTER_REG(((0x0) << 29) | (0x2A) << 23 | (1)) | (3 - 2); |
1743 | *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base)((const i915_reg_t){ .reg = ((base) + 0x358) })); |
1744 | *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)); |
1745 | |
1746 | /* |
1747 | * Do a diff between the 2 timestamps and store the result back into |
1748 | * CS_GPR(1). |
1749 | */ |
1750 | *cs++ = MI_MATH(5)(((0x0) << 29) | (0x1a) << 23 | ((5) - 1)); |
1751 | *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS))((0x080) << 20 | (0x20) << 10 | ((NOW_TS))); |
1752 | *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS))((0x080) << 20 | (0x21) << 10 | ((START_TS))); |
1753 | *cs++ = MI_MATH_SUB((0x101) << 20 | (0x0) << 10 | (0x0)); |
1754 | *cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU)((0x180) << 20 | ((DELTA_TS)) << 10 | (0x31)); |
1755 | *cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF)((0x180) << 20 | ((JUMP_PREDICATE)) << 10 | (0x33 )); |
1756 | |
1757 | /* |
1758 | * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the |
1759 | * timestamp have rolled over the 32bits) into the predicate register |
1760 | * to be used for the predicated jump. |
1761 | */ |
1762 | *cs++ = MI_LOAD_REGISTER_REG(((0x0) << 29) | (0x2A) << 23 | (1)) | (3 - 2); |
1763 | *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE)); |
1764 | *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1(RENDER_RING_BASE)((const i915_reg_t){ .reg = ((0x02000) + 0x41c) })); |
1765 | |
1766 | /* Restart from the beginning if we had timestamps roll over. */ |
1767 | *cs++ = (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) < 8 ? |
1768 | MI_BATCH_BUFFER_START(((0x0) << 29) | (0x31) << 23 | (0)) : |
1769 | MI_BATCH_BUFFER_START_GEN8(((0x0) << 29) | (0x31) << 23 | (1))) | |
1770 | MI_BATCH_PREDICATE((u32)((1UL << (15)) + 0)); |
1771 | *cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4; |
1772 | *cs++ = 0; |
1773 | |
1774 | /* |
1775 | * Now add the diff between to previous timestamps and add it to : |
1776 | * (((1 * << 64) - 1) - delay_ns) |
1777 | * |
1778 | * When the Carry Flag contains 1 this means the elapsed time is |
1779 | * longer than the expected delay, and we can exit the wait loop. |
1780 | */ |
1781 | *cs++ = MI_LOAD_REGISTER_IMM(2)(((0x0) << 29) | (0x22) << 23 | (2*(2)-1)); |
1782 | *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)); |
1783 | *cs++ = lower_32_bits(delay_ticks)((u32)(delay_ticks)); |
1784 | *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4; |
1785 | *cs++ = upper_32_bits(delay_ticks)((u32)(((delay_ticks) >> 16) >> 16)); |
1786 | |
1787 | *cs++ = MI_MATH(4)(((0x0) << 29) | (0x1a) << 23 | ((4) - 1)); |
1788 | *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS))((0x080) << 20 | (0x20) << 10 | ((DELTA_TS))); |
1789 | *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET))((0x080) << 20 | (0x21) << 10 | ((DELTA_TARGET))); |
1790 | *cs++ = MI_MATH_ADD((0x100) << 20 | (0x0) << 10 | (0x0)); |
1791 | *cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF)((0x580) << 20 | ((JUMP_PREDICATE)) << 10 | (0x33 )); |
1792 | |
1793 | *cs++ = MI_ARB_CHECK(((0x0) << 29) | (0x05) << 23 | (0)); |
1794 | |
1795 | /* |
1796 | * Transfer the result into the predicate register to be used for the |
1797 | * predicated jump. |
1798 | */ |
1799 | *cs++ = MI_LOAD_REGISTER_REG(((0x0) << 29) | (0x2A) << 23 | (1)) | (3 - 2); |
1800 | *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE)); |
1801 | *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1(RENDER_RING_BASE)((const i915_reg_t){ .reg = ((0x02000) + 0x41c) })); |
1802 | |
1803 | /* Predicate the jump. */ |
1804 | *cs++ = (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) < 8 ? |
1805 | MI_BATCH_BUFFER_START(((0x0) << 29) | (0x31) << 23 | (0)) : |
1806 | MI_BATCH_BUFFER_START_GEN8(((0x0) << 29) | (0x31) << 23 | (1))) | |
1807 | MI_BATCH_PREDICATE((u32)((1UL << (15)) + 0)); |
1808 | *cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4; |
1809 | *cs++ = 0; |
1810 | |
1811 | /* Restore registers. */ |
1812 | for (i = 0; i < N_CS_GPR; i++) |
1813 | cs = save_restore_register( |
1814 | stream, cs, false0 /* restore */, CS_GPR(i), |
1815 | INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2); |
1816 | cs = save_restore_register( |
1817 | stream, cs, false0 /* restore */, MI_PREDICATE_RESULT_1(RENDER_RING_BASE)((const i915_reg_t){ .reg = ((0x02000) + 0x41c) }), |
1818 | INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1); |
1819 | |
1820 | /* And return to the ring. */ |
1821 | *cs++ = MI_BATCH_BUFFER_END(((0x0) << 29) | (0x0a) << 23 | (0)); |
1822 | |
1823 | GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch))((void)0); |
1824 | |
1825 | i915_gem_object_flush_map(bo); |
1826 | __i915_gem_object_release_map(bo); |
1827 | |
1828 | stream->noa_wait = vma; |
1829 | goto out_ww; |
1830 | |
1831 | err_unpin: |
1832 | i915_vma_unpin_and_release(&vma, 0); |
1833 | out_ww: |
1834 | if (ret == -EDEADLK11) { |
1835 | ret = i915_gem_ww_ctx_backoff(&ww); |
1836 | if (!ret) |
1837 | goto retry; |
1838 | } |
1839 | i915_gem_ww_ctx_fini(&ww); |
1840 | if (ret) |
1841 | i915_gem_object_put(bo); |
1842 | return ret; |
1843 | } |
1844 | |
1845 | #endif |
1846 | |
1847 | static u32 *write_cs_mi_lri(u32 *cs, |
1848 | const struct i915_oa_reg *reg_data, |
1849 | u32 n_regs) |
1850 | { |
1851 | u32 i; |
1852 | |
1853 | for (i = 0; i < n_regs; i++) { |
1854 | if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS(126)) == 0) { |
1855 | u32 n_lri = min_t(u32,({ u32 __min_a = (n_regs - i); u32 __min_b = ((126)); __min_a < __min_b ? __min_a : __min_b; }) |
1856 | n_regs - i,({ u32 __min_a = (n_regs - i); u32 __min_b = ((126)); __min_a < __min_b ? __min_a : __min_b; }) |
1857 | MI_LOAD_REGISTER_IMM_MAX_REGS)({ u32 __min_a = (n_regs - i); u32 __min_b = ((126)); __min_a < __min_b ? __min_a : __min_b; }); |
1858 | |
1859 | *cs++ = MI_LOAD_REGISTER_IMM(n_lri)(((0x0) << 29) | (0x22) << 23 | (2*(n_lri)-1)); |
1860 | } |
1861 | *cs++ = i915_mmio_reg_offset(reg_data[i].addr); |
1862 | *cs++ = reg_data[i].value; |
1863 | } |
1864 | |
1865 | return cs; |
1866 | } |
1867 | |
1868 | static int num_lri_dwords(int num_regs) |
1869 | { |
1870 | int count = 0; |
1871 | |
1872 | if (num_regs > 0) { |
1873 | count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS)(((num_regs) + (((126)) - 1)) / ((126))); |
1874 | count += num_regs * 2; |
1875 | } |
1876 | |
1877 | return count; |
1878 | } |
1879 | |
1880 | static struct i915_oa_config_bo * |
1881 | alloc_oa_config_buffer(struct i915_perf_stream *stream, |
1882 | struct i915_oa_config *oa_config) |
1883 | { |
1884 | struct drm_i915_gem_object *obj; |
1885 | struct i915_oa_config_bo *oa_bo; |
1886 | struct i915_gem_ww_ctx ww; |
1887 | size_t config_length = 0; |
1888 | u32 *cs; |
1889 | int err; |
1890 | |
1891 | oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL(0x0001 | 0x0004)); |
1892 | if (!oa_bo) |
1893 | return ERR_PTR(-ENOMEM12); |
1894 | |
1895 | config_length += num_lri_dwords(oa_config->mux_regs_len); |
1896 | config_length += num_lri_dwords(oa_config->b_counter_regs_len); |
1897 | config_length += num_lri_dwords(oa_config->flex_regs_len); |
1898 | config_length += 3; /* MI_BATCH_BUFFER_START */ |
1899 | config_length = roundup2(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE)(((sizeof(u32) * config_length) + (((1ULL << (12))) - 1 )) & (~((__typeof(sizeof(u32) * config_length))((1ULL << (12))) - 1))); |
1900 | |
1901 | obj = i915_gem_object_create_shmem(stream->perf->i915, config_length); |
1902 | if (IS_ERR(obj)) { |
1903 | err = PTR_ERR(obj); |
1904 | goto err_free; |
1905 | } |
1906 | |
1907 | i915_gem_ww_ctx_init(&ww, true1); |
1908 | retry: |
1909 | err = i915_gem_object_lock(obj, &ww); |
1910 | if (err) |
1911 | goto out_ww; |
1912 | |
1913 | cs = i915_gem_object_pin_map(obj, I915_MAP_WB); |
1914 | if (IS_ERR(cs)) { |
1915 | err = PTR_ERR(cs); |
1916 | goto out_ww; |
1917 | } |
1918 | |
1919 | cs = write_cs_mi_lri(cs, |
1920 | oa_config->mux_regs, |
1921 | oa_config->mux_regs_len); |
1922 | cs = write_cs_mi_lri(cs, |
1923 | oa_config->b_counter_regs, |
1924 | oa_config->b_counter_regs_len); |
1925 | cs = write_cs_mi_lri(cs, |
1926 | oa_config->flex_regs, |
1927 | oa_config->flex_regs_len); |
1928 | |
1929 | /* Jump into the active wait. */ |
1930 | *cs++ = (GRAPHICS_VER(stream->perf->i915)((&(stream->perf->i915)->__runtime)->graphics .ip.ver) < 8 ? |
1931 | MI_BATCH_BUFFER_START(((0x0) << 29) | (0x31) << 23 | (0)) : |
1932 | MI_BATCH_BUFFER_START_GEN8(((0x0) << 29) | (0x31) << 23 | (1))); |
1933 | *cs++ = i915_ggtt_offset(stream->noa_wait); |
1934 | *cs++ = 0; |
1935 | |
1936 | i915_gem_object_flush_map(obj); |
1937 | __i915_gem_object_release_map(obj); |
1938 | |
1939 | oa_bo->vma = i915_vma_instance(obj, |
1940 | &stream->engine->gt->ggtt->vm, |
1941 | NULL((void *)0)); |
1942 | if (IS_ERR(oa_bo->vma)) { |
1943 | err = PTR_ERR(oa_bo->vma); |
1944 | goto out_ww; |
1945 | } |
1946 | |
1947 | oa_bo->oa_config = i915_oa_config_get(oa_config); |
1948 | llist_add(&oa_bo->node, &stream->oa_config_bos); |
1949 | |
1950 | out_ww: |
1951 | if (err == -EDEADLK11) { |
1952 | err = i915_gem_ww_ctx_backoff(&ww); |
1953 | if (!err) |
1954 | goto retry; |
1955 | } |
1956 | i915_gem_ww_ctx_fini(&ww); |
1957 | |
1958 | if (err) |
1959 | i915_gem_object_put(obj); |
1960 | err_free: |
1961 | if (err) { |
1962 | kfree(oa_bo); |
1963 | return ERR_PTR(err); |
1964 | } |
1965 | return oa_bo; |
1966 | } |
1967 | |
1968 | static struct i915_vma * |
1969 | get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config) |
1970 | { |
1971 | struct i915_oa_config_bo *oa_bo; |
1972 | |
1973 | /* |
1974 | * Look for the buffer in the already allocated BOs attached |
1975 | * to the stream. |
1976 | */ |
1977 | llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node)for ((oa_bo) = ({ const __typeof( ((__typeof(*(oa_bo)) *)0)-> node ) *__mptr = ((stream->oa_config_bos.first)); (__typeof (*(oa_bo)) *)( (char *)__mptr - __builtin_offsetof(__typeof(* (oa_bo)), node) );}); ((char *)(oa_bo) + __builtin_offsetof(typeof (*(oa_bo)), node)) != ((void *)0); (oa_bo) = ({ const __typeof ( ((__typeof(*(oa_bo)) *)0)->node ) *__mptr = ((oa_bo)-> node.next); (__typeof(*(oa_bo)) *)( (char *)__mptr - __builtin_offsetof (__typeof(*(oa_bo)), node) );})) { |
1978 | if (oa_bo->oa_config == oa_config && |
1979 | memcmp(oa_bo->oa_config->uuid,__builtin_memcmp((oa_bo->oa_config->uuid), (oa_config-> uuid), (sizeof(oa_config->uuid))) |
1980 | oa_config->uuid,__builtin_memcmp((oa_bo->oa_config->uuid), (oa_config-> uuid), (sizeof(oa_config->uuid))) |
1981 | sizeof(oa_config->uuid))__builtin_memcmp((oa_bo->oa_config->uuid), (oa_config-> uuid), (sizeof(oa_config->uuid))) == 0) |
1982 | goto out; |
1983 | } |
1984 | |
1985 | oa_bo = alloc_oa_config_buffer(stream, oa_config); |
1986 | if (IS_ERR(oa_bo)) |
1987 | return ERR_CAST(oa_bo); |
1988 | |
1989 | out: |
1990 | return i915_vma_get(oa_bo->vma); |
1991 | } |
1992 | |
1993 | static int |
1994 | emit_oa_config(struct i915_perf_stream *stream, |
1995 | struct i915_oa_config *oa_config, |
1996 | struct intel_context *ce, |
1997 | struct i915_active *active) |
1998 | { |
1999 | struct i915_request *rq; |
2000 | struct i915_vma *vma; |
2001 | struct i915_gem_ww_ctx ww; |
2002 | int err; |
2003 | |
2004 | vma = get_oa_vma(stream, oa_config); |
2005 | if (IS_ERR(vma)) |
2006 | return PTR_ERR(vma); |
2007 | |
2008 | i915_gem_ww_ctx_init(&ww, true1); |
2009 | retry: |
2010 | err = i915_gem_object_lock(vma->obj, &ww); |
2011 | if (err) |
2012 | goto err; |
2013 | |
2014 | err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL(1ULL << (10)) | PIN_HIGH(1ULL << (5))); |
2015 | if (err) |
2016 | goto err; |
2017 | |
2018 | intel_engine_pm_get(ce->engine); |
2019 | rq = i915_request_create(ce); |
2020 | intel_engine_pm_put(ce->engine); |
2021 | if (IS_ERR(rq)) { |
2022 | err = PTR_ERR(rq); |
2023 | goto err_vma_unpin; |
2024 | } |
2025 | |
2026 | if (!IS_ERR_OR_NULL(active)) { |
2027 | /* After all individual context modifications */ |
2028 | err = i915_request_await_active(rq, active, |
2029 | I915_ACTIVE_AWAIT_ACTIVE(1UL << (1))); |
2030 | if (err) |
2031 | goto err_add_request; |
2032 | |
2033 | err = i915_active_add_request(active, rq); |
2034 | if (err) |
2035 | goto err_add_request; |
2036 | } |
2037 | |
2038 | err = i915_request_await_object(rq, vma->obj, 0); |
2039 | if (!err) |
2040 | err = i915_vma_move_to_active(vma, rq, 0); |
2041 | if (err) |
2042 | goto err_add_request; |
2043 | |
2044 | err = rq->engine->emit_bb_start(rq, |
2045 | vma->node.start, 0, |
2046 | I915_DISPATCH_SECURE(1UL << (0))); |
2047 | if (err) |
2048 | goto err_add_request; |
2049 | |
2050 | err_add_request: |
2051 | i915_request_add(rq); |
2052 | err_vma_unpin: |
2053 | i915_vma_unpin(vma); |
2054 | err: |
2055 | if (err == -EDEADLK11) { |
2056 | err = i915_gem_ww_ctx_backoff(&ww); |
2057 | if (!err) |
2058 | goto retry; |
2059 | } |
2060 | |
2061 | i915_gem_ww_ctx_fini(&ww); |
2062 | i915_vma_put(vma); |
2063 | return err; |
2064 | } |
2065 | |
2066 | static struct intel_context *oa_context(struct i915_perf_stream *stream) |
2067 | { |
2068 | return stream->pinned_ctx ?: stream->engine->kernel_context; |
2069 | } |
2070 | |
2071 | static int |
2072 | hsw_enable_metric_set(struct i915_perf_stream *stream, |
2073 | struct i915_active *active) |
2074 | { |
2075 | struct intel_uncore *uncore = stream->uncore; |
2076 | |
2077 | /* |
2078 | * PRM: |
2079 | * |
2080 | * OA unit is using “crclk” for its functionality. When trunk |
2081 | * level clock gating takes place, OA clock would be gated, |
2082 | * unable to count the events from non-render clock domain. |
2083 | * Render clock gating must be disabled when OA is enabled to |
2084 | * count the events from non-render domain. Unit level clock |
2085 | * gating for RCS should also be disabled. |
2086 | */ |
2087 | intel_uncore_rmw(uncore, GEN7_MISCCPCTL((const i915_reg_t){ .reg = (0x9424) }), |
2088 | GEN7_DOP_CLOCK_GATE_ENABLE(1 << 0), 0); |
2089 | intel_uncore_rmw(uncore, GEN6_UCGCTL1((const i915_reg_t){ .reg = (0x9400) }), |
2090 | 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE(1 << 7)); |
2091 | |
2092 | return emit_oa_config(stream, |
2093 | stream->oa_config, oa_context(stream), |
2094 | active); |
2095 | } |
2096 | |
2097 | static void hsw_disable_metric_set(struct i915_perf_stream *stream) |
2098 | { |
2099 | struct intel_uncore *uncore = stream->uncore; |
2100 | |
2101 | intel_uncore_rmw(uncore, GEN6_UCGCTL1((const i915_reg_t){ .reg = (0x9400) }), |
2102 | GEN6_CSUNIT_CLOCK_GATE_DISABLE(1 << 7), 0); |
2103 | intel_uncore_rmw(uncore, GEN7_MISCCPCTL((const i915_reg_t){ .reg = (0x9424) }), |
2104 | 0, GEN7_DOP_CLOCK_GATE_ENABLE(1 << 0)); |
2105 | |
2106 | intel_uncore_rmw(uncore, GDT_CHICKEN_BITS((const i915_reg_t){ .reg = (0x9840) }), GT_NOA_ENABLE0x00000080, 0); |
2107 | } |
2108 | |
2109 | static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config, |
2110 | i915_reg_t reg) |
2111 | { |
2112 | u32 mmio = i915_mmio_reg_offset(reg); |
2113 | int i; |
2114 | |
2115 | /* |
2116 | * This arbitrary default will select the 'EU FPU0 Pipeline |
2117 | * Active' event. In the future it's anticipated that there |
2118 | * will be an explicit 'No Event' we can select, but not yet... |
2119 | */ |
2120 | if (!oa_config) |
2121 | return 0; |
2122 | |
2123 | for (i = 0; i < oa_config->flex_regs_len; i++) { |
2124 | if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio) |
2125 | return oa_config->flex_regs[i].value; |
2126 | } |
2127 | |
2128 | return 0; |
2129 | } |
2130 | /* |
2131 | * NB: It must always remain pointer safe to run this even if the OA unit |
2132 | * has been disabled. |
2133 | * |
2134 | * It's fine to put out-of-date values into these per-context registers |
2135 | * in the case that the OA unit has been disabled. |
2136 | */ |
2137 | static void |
2138 | gen8_update_reg_state_unlocked(const struct intel_context *ce, |
2139 | const struct i915_perf_stream *stream) |
2140 | { |
2141 | u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset; |
2142 | u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; |
2143 | /* The MMIO offsets for Flex EU registers aren't contiguous */ |
2144 | static const i915_reg_t flex_regs[] = { |
2145 | EU_PERF_CNTL0((const i915_reg_t){ .reg = (0xe458) }), |
2146 | EU_PERF_CNTL1((const i915_reg_t){ .reg = (0xe558) }), |
2147 | EU_PERF_CNTL2((const i915_reg_t){ .reg = (0xe658) }), |
2148 | EU_PERF_CNTL3((const i915_reg_t){ .reg = (0xe758) }), |
2149 | EU_PERF_CNTL4((const i915_reg_t){ .reg = (0xe45c) }), |
2150 | EU_PERF_CNTL5((const i915_reg_t){ .reg = (0xe55c) }), |
2151 | EU_PERF_CNTL6((const i915_reg_t){ .reg = (0xe65c) }), |
2152 | }; |
2153 | u32 *reg_state = ce->lrc_reg_state; |
2154 | int i; |
2155 | |
2156 | reg_state[ctx_oactxctrl + 1] = |
2157 | (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT2) | |
2158 | (stream->periodic ? GEN8_OA_TIMER_ENABLE(1 << 1) : 0) | |
2159 | GEN8_OA_COUNTER_RESUME(1 << 0); |
2160 | |
2161 | for (i = 0; i < ARRAY_SIZE(flex_regs)(sizeof((flex_regs)) / sizeof((flex_regs)[0])); i++) |
2162 | reg_state[ctx_flexeu0 + i * 2 + 1] = |
2163 | oa_config_flex_reg(stream->oa_config, flex_regs[i]); |
2164 | } |
2165 | |
2166 | struct flex { |
2167 | i915_reg_t reg; |
2168 | u32 offset; |
2169 | u32 value; |
2170 | }; |
2171 | |
2172 | static int |
2173 | gen8_store_flex(struct i915_request *rq, |
2174 | struct intel_context *ce, |
2175 | const struct flex *flex, unsigned int count) |
2176 | { |
2177 | u32 offset; |
2178 | u32 *cs; |
2179 | |
2180 | cs = intel_ring_begin(rq, 4 * count); |
2181 | if (IS_ERR(cs)) |
2182 | return PTR_ERR(cs); |
2183 | |
2184 | offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET(((0) + (1)) * (1 << 12)); |
2185 | do { |
2186 | *cs++ = MI_STORE_DWORD_IMM_GEN4(((0x0) << 29) | (0x20) << 23 | (2)) | MI_USE_GGTT(1 << 22); |
2187 | *cs++ = offset + flex->offset * sizeof(u32); |
2188 | *cs++ = 0; |
2189 | *cs++ = flex->value; |
2190 | } while (flex++, --count); |
2191 | |
2192 | intel_ring_advance(rq, cs); |
2193 | |
2194 | return 0; |
2195 | } |
2196 | |
2197 | static int |
2198 | gen8_load_flex(struct i915_request *rq, |
2199 | struct intel_context *ce, |
2200 | const struct flex *flex, unsigned int count) |
2201 | { |
2202 | u32 *cs; |
2203 | |
2204 | GEM_BUG_ON(!count || count > 63)((void)0); |
2205 | |
2206 | cs = intel_ring_begin(rq, 2 * count + 2); |
2207 | if (IS_ERR(cs)) |
2208 | return PTR_ERR(cs); |
2209 | |
2210 | *cs++ = MI_LOAD_REGISTER_IMM(count)(((0x0) << 29) | (0x22) << 23 | (2*(count)-1)); |
2211 | do { |
2212 | *cs++ = i915_mmio_reg_offset(flex->reg); |
2213 | *cs++ = flex->value; |
2214 | } while (flex++, --count); |
2215 | *cs++ = MI_NOOP(((0x0) << 29) | (0) << 23 | (0)); |
2216 | |
2217 | intel_ring_advance(rq, cs); |
2218 | |
2219 | return 0; |
2220 | } |
2221 | |
2222 | static int gen8_modify_context(struct intel_context *ce, |
2223 | const struct flex *flex, unsigned int count) |
2224 | { |
2225 | struct i915_request *rq; |
2226 | int err; |
2227 | |
2228 | rq = intel_engine_create_kernel_request(ce->engine); |
2229 | if (IS_ERR(rq)) |
2230 | return PTR_ERR(rq); |
2231 | |
2232 | /* Serialise with the remote context */ |
2233 | err = intel_context_prepare_remote_request(ce, rq); |
2234 | if (err == 0) |
2235 | err = gen8_store_flex(rq, ce, flex, count); |
2236 | |
2237 | i915_request_add(rq); |
2238 | return err; |
2239 | } |
2240 | |
2241 | static int |
2242 | gen8_modify_self(struct intel_context *ce, |
2243 | const struct flex *flex, unsigned int count, |
2244 | struct i915_active *active) |
2245 | { |
2246 | struct i915_request *rq; |
2247 | int err; |
2248 | |
2249 | intel_engine_pm_get(ce->engine); |
2250 | rq = i915_request_create(ce); |
2251 | intel_engine_pm_put(ce->engine); |
2252 | if (IS_ERR(rq)) |
2253 | return PTR_ERR(rq); |
2254 | |
2255 | if (!IS_ERR_OR_NULL(active)) { |
2256 | err = i915_active_add_request(active, rq); |
2257 | if (err) |
2258 | goto err_add_request; |
2259 | } |
2260 | |
2261 | err = gen8_load_flex(rq, ce, flex, count); |
2262 | if (err) |
2263 | goto err_add_request; |
2264 | |
2265 | err_add_request: |
2266 | i915_request_add(rq); |
2267 | return err; |
2268 | } |
2269 | |
2270 | static int gen8_configure_context(struct i915_gem_context *ctx, |
2271 | struct flex *flex, unsigned int count) |
2272 | { |
2273 | struct i915_gem_engines_iter it; |
2274 | struct intel_context *ce; |
2275 | int err = 0; |
2276 | |
2277 | for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)for (i915_gem_engines_iter_init(&(it), (i915_gem_context_lock_engines (ctx))); ((ce) = i915_gem_engines_iter_next(&(it)));) { |
2278 | GEM_BUG_ON(ce == ce->engine->kernel_context)((void)0); |
2279 | |
2280 | if (ce->engine->class != RENDER_CLASS0) |
2281 | continue; |
2282 | |
2283 | /* Otherwise OA settings will be set upon first use */ |
2284 | if (!intel_context_pin_if_active(ce)) |
2285 | continue; |
2286 | |
2287 | flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu); |
2288 | err = gen8_modify_context(ce, flex, count); |
2289 | |
2290 | intel_context_unpin(ce); |
2291 | if (err) |
2292 | break; |
2293 | } |
2294 | i915_gem_context_unlock_engines(ctx); |
2295 | |
2296 | return err; |
2297 | } |
2298 | |
2299 | static int gen12_configure_oar_context(struct i915_perf_stream *stream, |
2300 | struct i915_active *active) |
2301 | { |
2302 | int err; |
2303 | struct intel_context *ce = stream->pinned_ctx; |
2304 | u32 format = stream->oa_buffer.format; |
2305 | struct flex regs_context[] = { |
2306 | { |
2307 | GEN8_OACTXCONTROL((const i915_reg_t){ .reg = (0x2360) }), |
2308 | stream->perf->ctx_oactxctrl_offset + 1, |
2309 | active ? GEN8_OA_COUNTER_RESUME(1 << 0) : 0, |
2310 | }, |
2311 | }; |
2312 | /* Offsets in regs_lri are not used since this configuration is only |
2313 | * applied using LRI. Initialize the correct offsets for posterity. |
2314 | */ |
2315 | #define GEN12_OAR_OACONTROL_OFFSET0x5B0 0x5B0 |
2316 | struct flex regs_lri[] = { |
2317 | { |
2318 | GEN12_OAR_OACONTROL((const i915_reg_t){ .reg = (0x2960) }), |
2319 | GEN12_OAR_OACONTROL_OFFSET0x5B0 + 1, |
2320 | (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT1) | |
2321 | (active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE(1 << 0) : 0) |
2322 | }, |
2323 | { |
2324 | RING_CONTEXT_CONTROL(ce->engine->mmio_base)((const i915_reg_t){ .reg = ((ce->engine->mmio_base) + 0x244 ) }), |
2325 | CTX_CONTEXT_CONTROL(0x02 + 1), |
2326 | _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,({ if (__builtin_constant_p(((u32)((1UL << (8)) + 0)))) do { } while (0); if (__builtin_constant_p(active ? ((u32)(( 1UL << (8)) + 0)) : 0)) do { } while (0); if (__builtin_constant_p (((u32)((1UL << (8)) + 0))) && __builtin_constant_p (active ? ((u32)((1UL << (8)) + 0)) : 0)) do { } while ( 0); ((((u32)((1UL << (8)) + 0))) << 16 | (active ? ((u32)((1UL << (8)) + 0)) : 0)); }) |
2327 | active ?({ if (__builtin_constant_p(((u32)((1UL << (8)) + 0)))) do { } while (0); if (__builtin_constant_p(active ? ((u32)(( 1UL << (8)) + 0)) : 0)) do { } while (0); if (__builtin_constant_p (((u32)((1UL << (8)) + 0))) && __builtin_constant_p (active ? ((u32)((1UL << (8)) + 0)) : 0)) do { } while ( 0); ((((u32)((1UL << (8)) + 0))) << 16 | (active ? ((u32)((1UL << (8)) + 0)) : 0)); }) |
2328 | GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :({ if (__builtin_constant_p(((u32)((1UL << (8)) + 0)))) do { } while (0); if (__builtin_constant_p(active ? ((u32)(( 1UL << (8)) + 0)) : 0)) do { } while (0); if (__builtin_constant_p (((u32)((1UL << (8)) + 0))) && __builtin_constant_p (active ? ((u32)((1UL << (8)) + 0)) : 0)) do { } while ( 0); ((((u32)((1UL << (8)) + 0))) << 16 | (active ? ((u32)((1UL << (8)) + 0)) : 0)); }) |
2329 | 0)({ if (__builtin_constant_p(((u32)((1UL << (8)) + 0)))) do { } while (0); if (__builtin_constant_p(active ? ((u32)(( 1UL << (8)) + 0)) : 0)) do { } while (0); if (__builtin_constant_p (((u32)((1UL << (8)) + 0))) && __builtin_constant_p (active ? ((u32)((1UL << (8)) + 0)) : 0)) do { } while ( 0); ((((u32)((1UL << (8)) + 0))) << 16 | (active ? ((u32)((1UL << (8)) + 0)) : 0)); }) |
2330 | }, |
2331 | }; |
2332 | |
2333 | /* Modify the context image of pinned context with regs_context*/ |
2334 | err = intel_context_lock_pinned(ce); |
2335 | if (err) |
2336 | return err; |
2337 | |
2338 | err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context)(sizeof((regs_context)) / sizeof((regs_context)[0]))); |
2339 | intel_context_unlock_pinned(ce); |
2340 | if (err) |
2341 | return err; |
2342 | |
2343 | /* Apply regs_lri using LRI with pinned context */ |
2344 | return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri)(sizeof((regs_lri)) / sizeof((regs_lri)[0])), active); |
2345 | } |
2346 | |
2347 | /* |
2348 | * Manages updating the per-context aspects of the OA stream |
2349 | * configuration across all contexts. |
2350 | * |
2351 | * The awkward consideration here is that OACTXCONTROL controls the |
2352 | * exponent for periodic sampling which is primarily used for system |
2353 | * wide profiling where we'd like a consistent sampling period even in |
2354 | * the face of context switches. |
2355 | * |
2356 | * Our approach of updating the register state context (as opposed to |
2357 | * say using a workaround batch buffer) ensures that the hardware |
2358 | * won't automatically reload an out-of-date timer exponent even |
2359 | * transiently before a WA BB could be parsed. |
2360 | * |
2361 | * This function needs to: |
2362 | * - Ensure the currently running context's per-context OA state is |
2363 | * updated |
2364 | * - Ensure that all existing contexts will have the correct per-context |
2365 | * OA state if they are scheduled for use. |
2366 | * - Ensure any new contexts will be initialized with the correct |
2367 | * per-context OA state. |
2368 | * |
2369 | * Note: it's only the RCS/Render context that has any OA state. |
2370 | * Note: the first flex register passed must always be R_PWR_CLK_STATE |
2371 | */ |
2372 | static int |
2373 | oa_configure_all_contexts(struct i915_perf_stream *stream, |
2374 | struct flex *regs, |
2375 | size_t num_regs, |
2376 | struct i915_active *active) |
2377 | { |
2378 | struct drm_i915_privateinteldrm_softc *i915 = stream->perf->i915; |
2379 | struct intel_engine_cs *engine; |
2380 | struct i915_gem_context *ctx, *cn; |
2381 | int err; |
2382 | |
2383 | lockdep_assert_held(&stream->perf->lock)do { (void)(&stream->perf->lock); } while(0); |
2384 | |
2385 | /* |
2386 | * The OA register config is setup through the context image. This image |
2387 | * might be written to by the GPU on context switch (in particular on |
2388 | * lite-restore). This means we can't safely update a context's image, |
2389 | * if this context is scheduled/submitted to run on the GPU. |
2390 | * |
2391 | * We could emit the OA register config through the batch buffer but |
2392 | * this might leave small interval of time where the OA unit is |
2393 | * configured at an invalid sampling period. |
2394 | * |
2395 | * Note that since we emit all requests from a single ring, there |
2396 | * is still an implicit global barrier here that may cause a high |
2397 | * priority context to wait for an otherwise independent low priority |
2398 | * context. Contexts idle at the time of reconfiguration are not |
2399 | * trapped behind the barrier. |
2400 | */ |
2401 | spin_lock(&i915->gem.contexts.lock)mtx_enter(&i915->gem.contexts.lock); |
2402 | list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link)for (ctx = ({ const __typeof( ((__typeof(*ctx) *)0)->link ) *__mptr = ((&i915->gem.contexts.list)->next); (__typeof (*ctx) *)( (char *)__mptr - __builtin_offsetof(__typeof(*ctx) , link) );}), cn = ({ const __typeof( ((__typeof(*ctx) *)0)-> link ) *__mptr = (ctx->link.next); (__typeof(*ctx) *)( (char *)__mptr - __builtin_offsetof(__typeof(*ctx), link) );}); & ctx->link != (&i915->gem.contexts.list); ctx = cn, cn = ({ const __typeof( ((__typeof(*cn) *)0)->link ) *__mptr = (cn->link.next); (__typeof(*cn) *)( (char *)__mptr - __builtin_offsetof (__typeof(*cn), link) );})) { |
2403 | if (!kref_get_unless_zero(&ctx->ref)) |
2404 | continue; |
2405 | |
2406 | spin_unlock(&i915->gem.contexts.lock)mtx_leave(&i915->gem.contexts.lock); |
2407 | |
2408 | err = gen8_configure_context(ctx, regs, num_regs); |
2409 | if (err) { |
2410 | i915_gem_context_put(ctx); |
2411 | return err; |
2412 | } |
2413 | |
2414 | spin_lock(&i915->gem.contexts.lock)mtx_enter(&i915->gem.contexts.lock); |
2415 | list_safe_reset_next(ctx, cn, link)cn = ({ const __typeof( ((typeof(*(ctx)) *)0)->link ) *__mptr = (((ctx)->link.next)); (typeof(*(ctx)) *)( (char *)__mptr - __builtin_offsetof(typeof(*(ctx)), link) );}); |
2416 | i915_gem_context_put(ctx); |
2417 | } |
2418 | spin_unlock(&i915->gem.contexts.lock)mtx_leave(&i915->gem.contexts.lock); |
2419 | |
2420 | /* |
2421 | * After updating all other contexts, we need to modify ourselves. |
2422 | * If we don't modify the kernel_context, we do not get events while |
2423 | * idle. |
2424 | */ |
2425 | for_each_uabi_engine(engine, i915)for ((engine) = (linux_root_RB_MINMAX((struct linux_root *)(& (i915)->uabi_engines), -1) ? ({ const __typeof( ((struct intel_engine_cs *)0)->uabi_node ) *__mptr = (linux_root_RB_MINMAX((struct linux_root *)(&(i915)->uabi_engines), -1)); (struct intel_engine_cs *)( (char *)__mptr - __builtin_offsetof(struct intel_engine_cs , uabi_node) );}) : ((void *)0)); (engine); (engine) = (linux_root_RB_NEXT ((&(engine)->uabi_node)) ? ({ const __typeof( ((struct intel_engine_cs *)0)->uabi_node ) *__mptr = (linux_root_RB_NEXT ((&(engine)->uabi_node))); (struct intel_engine_cs *)( (char *)__mptr - __builtin_offsetof(struct intel_engine_cs, uabi_node ) );}) : ((void *)0))) { |
2426 | struct intel_context *ce = engine->kernel_context; |
2427 | |
2428 | if (engine->class != RENDER_CLASS0) |
2429 | continue; |
2430 | |
2431 | regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu); |
2432 | |
2433 | err = gen8_modify_self(ce, regs, num_regs, active); |
2434 | if (err) |
2435 | return err; |
2436 | } |
2437 | |
2438 | return 0; |
2439 | } |
2440 | |
2441 | static int |
2442 | gen12_configure_all_contexts(struct i915_perf_stream *stream, |
2443 | const struct i915_oa_config *oa_config, |
2444 | struct i915_active *active) |
2445 | { |
2446 | struct flex regs[] = { |
2447 | { |
2448 | GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE)((const i915_reg_t){ .reg = ((0x02000) + 0xc8) }), |
2449 | CTX_R_PWR_CLK_STATE(0x42 + 1), |
2450 | }, |
2451 | }; |
2452 | |
2453 | return oa_configure_all_contexts(stream, |
2454 | regs, ARRAY_SIZE(regs)(sizeof((regs)) / sizeof((regs)[0])), |
2455 | active); |
2456 | } |
2457 | |
2458 | static int |
2459 | lrc_configure_all_contexts(struct i915_perf_stream *stream, |
2460 | const struct i915_oa_config *oa_config, |
2461 | struct i915_active *active) |
2462 | { |
2463 | /* The MMIO offsets for Flex EU registers aren't contiguous */ |
2464 | const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; |
2465 | #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1) |
2466 | struct flex regs[] = { |
2467 | { |
2468 | GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE)((const i915_reg_t){ .reg = ((0x02000) + 0xc8) }), |
2469 | CTX_R_PWR_CLK_STATE(0x42 + 1), |
2470 | }, |
2471 | { |
2472 | GEN8_OACTXCONTROL((const i915_reg_t){ .reg = (0x2360) }), |
2473 | stream->perf->ctx_oactxctrl_offset + 1, |
2474 | }, |
2475 | { EU_PERF_CNTL0((const i915_reg_t){ .reg = (0xe458) }), ctx_flexeuN(0) }, |
2476 | { EU_PERF_CNTL1((const i915_reg_t){ .reg = (0xe558) }), ctx_flexeuN(1) }, |
2477 | { EU_PERF_CNTL2((const i915_reg_t){ .reg = (0xe658) }), ctx_flexeuN(2) }, |
2478 | { EU_PERF_CNTL3((const i915_reg_t){ .reg = (0xe758) }), ctx_flexeuN(3) }, |
2479 | { EU_PERF_CNTL4((const i915_reg_t){ .reg = (0xe45c) }), ctx_flexeuN(4) }, |
2480 | { EU_PERF_CNTL5((const i915_reg_t){ .reg = (0xe55c) }), ctx_flexeuN(5) }, |
2481 | { EU_PERF_CNTL6((const i915_reg_t){ .reg = (0xe65c) }), ctx_flexeuN(6) }, |
2482 | }; |
2483 | #undef ctx_flexeuN |
2484 | int i; |
2485 | |
2486 | regs[1].value = |
2487 | (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT2) | |
2488 | (stream->periodic ? GEN8_OA_TIMER_ENABLE(1 << 1) : 0) | |
2489 | GEN8_OA_COUNTER_RESUME(1 << 0); |
2490 | |
2491 | for (i = 2; i < ARRAY_SIZE(regs)(sizeof((regs)) / sizeof((regs)[0])); i++) |
2492 | regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg); |
2493 | |
2494 | return oa_configure_all_contexts(stream, |
2495 | regs, ARRAY_SIZE(regs)(sizeof((regs)) / sizeof((regs)[0])), |
2496 | active); |
2497 | } |
2498 | |
2499 | static int |
2500 | gen8_enable_metric_set(struct i915_perf_stream *stream, |
2501 | struct i915_active *active) |
2502 | { |
2503 | struct intel_uncore *uncore = stream->uncore; |
2504 | struct i915_oa_config *oa_config = stream->oa_config; |
2505 | int ret; |
2506 | |
2507 | /* |
2508 | * We disable slice/unslice clock ratio change reports on SKL since |
2509 | * they are too noisy. The HW generates a lot of redundant reports |
2510 | * where the ratio hasn't really changed causing a lot of redundant |
2511 | * work to processes and increasing the chances we'll hit buffer |
2512 | * overruns. |
2513 | * |
2514 | * Although we don't currently use the 'disable overrun' OABUFFER |
2515 | * feature it's worth noting that clock ratio reports have to be |
2516 | * disabled before considering to use that feature since the HW doesn't |
2517 | * correctly block these reports. |
2518 | * |
2519 | * Currently none of the high-level metrics we have depend on knowing |
2520 | * this ratio to normalize. |
2521 | * |
2522 | * Note: This register is not power context saved and restored, but |
2523 | * that's OK considering that we disable RC6 while the OA unit is |
2524 | * enabled. |
2525 | * |
2526 | * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to |
2527 | * be read back from automatically triggered reports, as part of the |
2528 | * RPT_ID field. |
2529 | */ |
2530 | if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)(((&(stream->perf->i915)->__runtime)->graphics .ip.ver) >= (9) && ((&(stream->perf->i915 )->__runtime)->graphics.ip.ver) <= (11))) { |
2531 | intel_uncore_write(uncore, GEN8_OA_DEBUG((const i915_reg_t){ .reg = (0x2B04) }), |
2532 | _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |({ typeof((1 << 5) | (1 << 6)) _a = ((1 << 5 ) | (1 << 6)); ({ if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p (_a) && __builtin_constant_p(_a)) do { } while (0); ( (_a) << 16 | (_a)); }); }) |
2533 | GEN9_OA_DEBUG_INCLUDE_CLK_RATIO)({ typeof((1 << 5) | (1 << 6)) _a = ((1 << 5 ) | (1 << 6)); ({ if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p (_a) && __builtin_constant_p(_a)) do { } while (0); ( (_a) << 16 | (_a)); }); })); |
2534 | } |
2535 | |
2536 | /* |
2537 | * Update all contexts prior writing the mux configurations as we need |
2538 | * to make sure all slices/subslices are ON before writing to NOA |
2539 | * registers. |
2540 | */ |
2541 | ret = lrc_configure_all_contexts(stream, oa_config, active); |
2542 | if (ret) |
2543 | return ret; |
2544 | |
2545 | return emit_oa_config(stream, |
2546 | stream->oa_config, oa_context(stream), |
2547 | active); |
2548 | } |
2549 | |
2550 | static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream) |
2551 | { |
2552 | return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,({ if (__builtin_constant_p((1 << 1))) do { } while (0) ; if (__builtin_constant_p((stream->sample_flags & (1<< 0)) ? 0 : (1 << 1))) do { } while (0); if (__builtin_constant_p ((1 << 1)) && __builtin_constant_p((stream-> sample_flags & (1<<0)) ? 0 : (1 << 1))) do { } while (0); (((1 << 1)) << 16 | ((stream->sample_flags & (1<<0)) ? 0 : (1 << 1))); }) |
2553 | (stream->sample_flags & SAMPLE_OA_REPORT) ?({ if (__builtin_constant_p((1 << 1))) do { } while (0) ; if (__builtin_constant_p((stream->sample_flags & (1<< 0)) ? 0 : (1 << 1))) do { } while (0); if (__builtin_constant_p ((1 << 1)) && __builtin_constant_p((stream-> sample_flags & (1<<0)) ? 0 : (1 << 1))) do { } while (0); (((1 << 1)) << 16 | ((stream->sample_flags & (1<<0)) ? 0 : (1 << 1))); }) |
2554 | 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS)({ if (__builtin_constant_p((1 << 1))) do { } while (0) ; if (__builtin_constant_p((stream->sample_flags & (1<< 0)) ? 0 : (1 << 1))) do { } while (0); if (__builtin_constant_p ((1 << 1)) && __builtin_constant_p((stream-> sample_flags & (1<<0)) ? 0 : (1 << 1))) do { } while (0); (((1 << 1)) << 16 | ((stream->sample_flags & (1<<0)) ? 0 : (1 << 1))); }); |
2555 | } |
2556 | |
2557 | static int |
2558 | gen12_enable_metric_set(struct i915_perf_stream *stream, |
2559 | struct i915_active *active) |
2560 | { |
2561 | struct intel_uncore *uncore = stream->uncore; |
2562 | struct i915_oa_config *oa_config = stream->oa_config; |
2563 | bool_Bool periodic = stream->periodic; |
2564 | u32 period_exponent = stream->period_exponent; |
2565 | int ret; |
2566 | |
2567 | intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG((const i915_reg_t){ .reg = (0xdaf8) }), |
2568 | /* Disable clk ratio reports, like previous Gens. */ |
2569 | _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |({ typeof((1 << 5) | (1 << 6)) _a = ((1 << 5 ) | (1 << 6)); ({ if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p (_a) && __builtin_constant_p(_a)) do { } while (0); ( (_a) << 16 | (_a)); }); }) |
2570 | GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO)({ typeof((1 << 5) | (1 << 6)) _a = ((1 << 5 ) | (1 << 6)); ({ if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p (_a) && __builtin_constant_p(_a)) do { } while (0); ( (_a) << 16 | (_a)); }); }) | |
2571 | /* |
2572 | * If the user didn't require OA reports, instruct |
2573 | * the hardware not to emit ctx switch reports. |
2574 | */ |
2575 | oag_report_ctx_switches(stream)); |
2576 | |
2577 | intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL((const i915_reg_t){ .reg = (0x2b28) }), periodic ? |
2578 | (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME(1 << 0) | |
2579 | GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE(1 << 1) | |
2580 | (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT2)) |
2581 | : 0); |
2582 | |
2583 | /* |
2584 | * Update all contexts prior writing the mux configurations as we need |
2585 | * to make sure all slices/subslices are ON before writing to NOA |
2586 | * registers. |
2587 | */ |
2588 | ret = gen12_configure_all_contexts(stream, oa_config, active); |
2589 | if (ret) |
2590 | return ret; |
2591 | |
2592 | /* |
2593 | * For Gen12, performance counters are context |
2594 | * saved/restored. Only enable it for the context that |
2595 | * requested this. |
2596 | */ |
2597 | if (stream->ctx) { |
2598 | ret = gen12_configure_oar_context(stream, active); |
2599 | if (ret) |
2600 | return ret; |
2601 | } |
2602 | |
2603 | return emit_oa_config(stream, |
2604 | stream->oa_config, oa_context(stream), |
2605 | active); |
2606 | } |
2607 | |
2608 | static void gen8_disable_metric_set(struct i915_perf_stream *stream) |
2609 | { |
2610 | struct intel_uncore *uncore = stream->uncore; |
2611 | |
2612 | /* Reset all contexts' slices/subslices configurations. */ |
2613 | lrc_configure_all_contexts(stream, NULL((void *)0), NULL((void *)0)); |
2614 | |
2615 | intel_uncore_rmw(uncore, GDT_CHICKEN_BITS((const i915_reg_t){ .reg = (0x9840) }), GT_NOA_ENABLE0x00000080, 0); |
2616 | } |
2617 | |
2618 | static void gen11_disable_metric_set(struct i915_perf_stream *stream) |
2619 | { |
2620 | struct intel_uncore *uncore = stream->uncore; |
2621 | |
2622 | /* Reset all contexts' slices/subslices configurations. */ |
2623 | lrc_configure_all_contexts(stream, NULL((void *)0), NULL((void *)0)); |
2624 | |
2625 | /* Make sure we disable noa to save power. */ |
2626 | intel_uncore_rmw(uncore, RPM_CONFIG1((const i915_reg_t){ .reg = (0xd04) }), GEN10_GT_NOA_ENABLE(1 << 9), 0); |
2627 | } |
2628 | |
2629 | static void gen12_disable_metric_set(struct i915_perf_stream *stream) |
2630 | { |
2631 | struct intel_uncore *uncore = stream->uncore; |
2632 | |
2633 | /* Reset all contexts' slices/subslices configurations. */ |
2634 | gen12_configure_all_contexts(stream, NULL((void *)0), NULL((void *)0)); |
2635 | |
2636 | /* disable the context save/restore or OAR counters */ |
2637 | if (stream->ctx) |
2638 | gen12_configure_oar_context(stream, NULL((void *)0)); |
2639 | |
2640 | /* Make sure we disable noa to save power. */ |
2641 | intel_uncore_rmw(uncore, RPM_CONFIG1((const i915_reg_t){ .reg = (0xd04) }), GEN10_GT_NOA_ENABLE(1 << 9), 0); |
2642 | } |
2643 | |
2644 | static void gen7_oa_enable(struct i915_perf_stream *stream) |
2645 | { |
2646 | struct intel_uncore *uncore = stream->uncore; |
2647 | struct i915_gem_context *ctx = stream->ctx; |
2648 | u32 ctx_id = stream->specific_ctx_id; |
2649 | bool_Bool periodic = stream->periodic; |
2650 | u32 period_exponent = stream->period_exponent; |
2651 | u32 report_format = stream->oa_buffer.format; |
2652 | |
2653 | /* |
2654 | * Reset buf pointers so we don't forward reports from before now. |
2655 | * |
2656 | * Think carefully if considering trying to avoid this, since it |
2657 | * also ensures status flags and the buffer itself are cleared |
2658 | * in error paths, and we have checks for invalid reports based |
2659 | * on the assumption that certain fields are written to zeroed |
2660 | * memory which this helps maintains. |
2661 | */ |
2662 | gen7_init_oa_buffer(stream); |
2663 | |
2664 | intel_uncore_write(uncore, GEN7_OACONTROL((const i915_reg_t){ .reg = (0x2360) }), |
2665 | (ctx_id & GEN7_OACONTROL_CTX_MASK0xFFFFF000) | |
2666 | (period_exponent << |
2667 | GEN7_OACONTROL_TIMER_PERIOD_SHIFT6) | |
2668 | (periodic ? GEN7_OACONTROL_TIMER_ENABLE(1 << 5) : 0) | |
2669 | (report_format << GEN7_OACONTROL_FORMAT_SHIFT2) | |
2670 | (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE(1 << 1) : 0) | |
2671 | GEN7_OACONTROL_ENABLE(1 << 0)); |
2672 | } |
2673 | |
2674 | static void gen8_oa_enable(struct i915_perf_stream *stream) |
2675 | { |
2676 | struct intel_uncore *uncore = stream->uncore; |
2677 | u32 report_format = stream->oa_buffer.format; |
2678 | |
2679 | /* |
2680 | * Reset buf pointers so we don't forward reports from before now. |
2681 | * |
2682 | * Think carefully if considering trying to avoid this, since it |
2683 | * also ensures status flags and the buffer itself are cleared |
2684 | * in error paths, and we have checks for invalid reports based |
2685 | * on the assumption that certain fields are written to zeroed |
2686 | * memory which this helps maintains. |
2687 | */ |
2688 | gen8_init_oa_buffer(stream); |
2689 | |
2690 | /* |
2691 | * Note: we don't rely on the hardware to perform single context |
2692 | * filtering and instead filter on the cpu based on the context-id |
2693 | * field of reports |
2694 | */ |
2695 | intel_uncore_write(uncore, GEN8_OACONTROL((const i915_reg_t){ .reg = (0x2B00) }), |
2696 | (report_format << GEN8_OA_REPORT_FORMAT_SHIFT2) | |
2697 | GEN8_OA_COUNTER_ENABLE(1 << 0)); |
2698 | } |
2699 | |
2700 | static void gen12_oa_enable(struct i915_perf_stream *stream) |
2701 | { |
2702 | struct intel_uncore *uncore = stream->uncore; |
2703 | u32 report_format = stream->oa_buffer.format; |
2704 | |
2705 | /* |
2706 | * If we don't want OA reports from the OA buffer, then we don't even |
2707 | * need to program the OAG unit. |
2708 | */ |
2709 | if (!(stream->sample_flags & SAMPLE_OA_REPORT(1<<0))) |
2710 | return; |
2711 | |
2712 | gen12_init_oa_buffer(stream); |
2713 | |
2714 | intel_uncore_write(uncore, GEN12_OAG_OACONTROL((const i915_reg_t){ .reg = (0xdaf4) }), |
2715 | (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT2) | |
2716 | GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE(1 << 0)); |
2717 | } |
2718 | |
2719 | #ifdef notyet |
2720 | |
2721 | /** |
2722 | * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream |
2723 | * @stream: An i915 perf stream opened for OA metrics |
2724 | * |
2725 | * [Re]enables hardware periodic sampling according to the period configured |
2726 | * when opening the stream. This also starts a hrtimer that will periodically |
2727 | * check for data in the circular OA buffer for notifying userspace (e.g. |
2728 | * during a read() or poll()). |
2729 | */ |
2730 | static void i915_oa_stream_enable(struct i915_perf_stream *stream) |
2731 | { |
2732 | stream->pollin = false0; |
2733 | |
2734 | stream->perf->ops.oa_enable(stream); |
2735 | |
2736 | if (stream->sample_flags & SAMPLE_OA_REPORT(1<<0)) |
2737 | hrtimer_start(&stream->poll_check_timer, |
2738 | ns_to_ktime(stream->poll_oa_period), |
2739 | HRTIMER_MODE_REL_PINNED); |
2740 | } |
2741 | |
2742 | #endif |
2743 | |
2744 | static void gen7_oa_disable(struct i915_perf_stream *stream) |
2745 | { |
2746 | struct intel_uncore *uncore = stream->uncore; |
2747 | |
2748 | intel_uncore_write(uncore, GEN7_OACONTROL((const i915_reg_t){ .reg = (0x2360) }), 0); |
2749 | if (intel_wait_for_register(uncore, |
2750 | GEN7_OACONTROL((const i915_reg_t){ .reg = (0x2360) }), GEN7_OACONTROL_ENABLE(1 << 0), 0, |
2751 | 50)) |
2752 | drm_err(&stream->perf->i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "wait for OA to be disabled timed out\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__) |
2753 | "wait for OA to be disabled timed out\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "wait for OA to be disabled timed out\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
2754 | } |
2755 | |
2756 | static void gen8_oa_disable(struct i915_perf_stream *stream) |
2757 | { |
2758 | struct intel_uncore *uncore = stream->uncore; |
2759 | |
2760 | intel_uncore_write(uncore, GEN8_OACONTROL((const i915_reg_t){ .reg = (0x2B00) }), 0); |
2761 | if (intel_wait_for_register(uncore, |
2762 | GEN8_OACONTROL((const i915_reg_t){ .reg = (0x2B00) }), GEN8_OA_COUNTER_ENABLE(1 << 0), 0, |
2763 | 50)) |
2764 | drm_err(&stream->perf->i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "wait for OA to be disabled timed out\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__) |
2765 | "wait for OA to be disabled timed out\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "wait for OA to be disabled timed out\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
2766 | } |
2767 | |
2768 | static void gen12_oa_disable(struct i915_perf_stream *stream) |
2769 | { |
2770 | struct intel_uncore *uncore = stream->uncore; |
2771 | |
2772 | intel_uncore_write(uncore, GEN12_OAG_OACONTROL((const i915_reg_t){ .reg = (0xdaf4) }), 0); |
2773 | if (intel_wait_for_register(uncore, |
2774 | GEN12_OAG_OACONTROL((const i915_reg_t){ .reg = (0xdaf4) }), |
2775 | GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE(1 << 0), 0, |
2776 | 50)) |
2777 | drm_err(&stream->perf->i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "wait for OA to be disabled timed out\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__) |
2778 | "wait for OA to be disabled timed out\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "wait for OA to be disabled timed out\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
2779 | |
2780 | intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR((const i915_reg_t){ .reg = (0xceec) }), 1); |
2781 | if (intel_wait_for_register(uncore, |
2782 | GEN12_OA_TLB_INV_CR((const i915_reg_t){ .reg = (0xceec) }), |
2783 | 1, 0, |
2784 | 50)) |
2785 | drm_err(&stream->perf->i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "wait for OA tlb invalidate timed out\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__) |
2786 | "wait for OA tlb invalidate timed out\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "wait for OA tlb invalidate timed out\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
2787 | } |
2788 | |
2789 | #ifdef notyet |
2790 | |
2791 | /** |
2792 | * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream |
2793 | * @stream: An i915 perf stream opened for OA metrics |
2794 | * |
2795 | * Stops the OA unit from periodically writing counter reports into the |
2796 | * circular OA buffer. This also stops the hrtimer that periodically checks for |
2797 | * data in the circular OA buffer, for notifying userspace. |
2798 | */ |
2799 | static void i915_oa_stream_disable(struct i915_perf_stream *stream) |
2800 | { |
2801 | stream->perf->ops.oa_disable(stream); |
2802 | |
2803 | if (stream->sample_flags & SAMPLE_OA_REPORT(1<<0)) |
2804 | hrtimer_cancel(&stream->poll_check_timer)timeout_del_barrier(&stream->poll_check_timer); |
2805 | } |
2806 | |
2807 | static const struct i915_perf_stream_ops i915_oa_stream_ops = { |
2808 | .destroy = i915_oa_stream_destroy, |
2809 | .enable = i915_oa_stream_enable, |
2810 | .disable = i915_oa_stream_disable, |
2811 | .wait_unlocked = i915_oa_wait_unlocked, |
2812 | .poll_wait = i915_oa_poll_wait, |
2813 | .read = i915_oa_read, |
2814 | }; |
2815 | |
2816 | static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream) |
2817 | { |
2818 | struct i915_active *active; |
2819 | int err; |
2820 | |
2821 | active = i915_active_create(); |
2822 | if (!active) |
2823 | return -ENOMEM12; |
2824 | |
2825 | err = stream->perf->ops.enable_metric_set(stream, active); |
2826 | if (err == 0) |
2827 | __i915_active_wait(active, TASK_UNINTERRUPTIBLE0); |
2828 | |
2829 | i915_active_put(active); |
2830 | return err; |
2831 | } |
2832 | |
2833 | static void |
2834 | get_default_sseu_config(struct intel_sseu *out_sseu, |
2835 | struct intel_engine_cs *engine) |
2836 | { |
2837 | const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu; |
2838 | |
2839 | *out_sseu = intel_sseu_from_device_info(devinfo_sseu); |
2840 | |
2841 | if (GRAPHICS_VER(engine->i915)((&(engine->i915)->__runtime)->graphics.ip.ver) == 11) { |
2842 | /* |
2843 | * We only need subslice count so it doesn't matter which ones |
2844 | * we select - just turn off low bits in the amount of half of |
2845 | * all available subslices per slice. |
2846 | */ |
2847 | out_sseu->subslice_mask = |
2848 | ~(~0 << (hweight8(out_sseu->subslice_mask) / 2)); |
2849 | out_sseu->slice_mask = 0x1; |
2850 | } |
2851 | } |
2852 | |
2853 | #endif |
2854 | |
2855 | static int |
2856 | get_sseu_config(struct intel_sseu *out_sseu, |
2857 | struct intel_engine_cs *engine, |
2858 | const struct drm_i915_gem_context_param_sseu *drm_sseu) |
2859 | { |
2860 | if (drm_sseu->engine.engine_class != engine->uabi_class || |
2861 | drm_sseu->engine.engine_instance != engine->uabi_instance) |
2862 | return -EINVAL22; |
2863 | |
2864 | return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu); |
2865 | } |
2866 | |
2867 | #ifdef notyet |
2868 | |
2869 | /** |
2870 | * i915_oa_stream_init - validate combined props for OA stream and init |
2871 | * @stream: An i915 perf stream |
2872 | * @param: The open parameters passed to `DRM_I915_PERF_OPEN` |
2873 | * @props: The property state that configures stream (individually validated) |
2874 | * |
2875 | * While read_properties_unlocked() validates properties in isolation it |
2876 | * doesn't ensure that the combination necessarily makes sense. |
2877 | * |
2878 | * At this point it has been determined that userspace wants a stream of |
2879 | * OA metrics, but still we need to further validate the combined |
2880 | * properties are OK. |
2881 | * |
2882 | * If the configuration makes sense then we can allocate memory for |
2883 | * a circular OA buffer and apply the requested metric set configuration. |
2884 | * |
2885 | * Returns: zero on success or a negative error code. |
2886 | */ |
2887 | static int i915_oa_stream_init(struct i915_perf_stream *stream, |
2888 | struct drm_i915_perf_open_param *param, |
2889 | struct perf_open_properties *props) |
2890 | { |
2891 | struct drm_i915_privateinteldrm_softc *i915 = stream->perf->i915; |
2892 | struct i915_perf *perf = stream->perf; |
2893 | int format_size; |
2894 | int ret; |
2895 | |
2896 | if (!props->engine) { |
2897 | drm_dbg(&stream->perf->i915->drm,__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA engine not specified\n") |
2898 | "OA engine not specified\n")__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA engine not specified\n"); |
2899 | return -EINVAL22; |
2900 | } |
2901 | |
2902 | /* |
2903 | * If the sysfs metrics/ directory wasn't registered for some |
2904 | * reason then don't let userspace try their luck with config |
2905 | * IDs |
2906 | */ |
2907 | if (!perf->metrics_kobj) { |
2908 | drm_dbg(&stream->perf->i915->drm,__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA metrics weren't advertised via sysfs\n" ) |
2909 | "OA metrics weren't advertised via sysfs\n")__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA metrics weren't advertised via sysfs\n" ); |
2910 | return -EINVAL22; |
2911 | } |
2912 | |
2913 | if (!(props->sample_flags & SAMPLE_OA_REPORT(1<<0)) && |
2914 | (GRAPHICS_VER(perf->i915)((&(perf->i915)->__runtime)->graphics.ip.ver) < 12 || !stream->ctx)) { |
2915 | drm_dbg(&stream->perf->i915->drm,__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Only OA report sampling supported\n") |
2916 | "Only OA report sampling supported\n")__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Only OA report sampling supported\n"); |
2917 | return -EINVAL22; |
2918 | } |
2919 | |
2920 | if (!perf->ops.enable_metric_set) { |
2921 | drm_dbg(&stream->perf->i915->drm,__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA unit not supported\n") |
2922 | "OA unit not supported\n")__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA unit not supported\n"); |
2923 | return -ENODEV19; |
2924 | } |
2925 | |
2926 | /* |
2927 | * To avoid the complexity of having to accurately filter |
2928 | * counter reports and marshal to the appropriate client |
2929 | * we currently only allow exclusive access |
2930 | */ |
2931 | if (perf->exclusive_stream) { |
2932 | drm_dbg(&stream->perf->i915->drm,__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA unit already in use\n") |
2933 | "OA unit already in use\n")__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA unit already in use\n"); |
2934 | return -EBUSY16; |
2935 | } |
2936 | |
2937 | if (!props->oa_format) { |
2938 | drm_dbg(&stream->perf->i915->drm,__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA report format not specified\n") |
2939 | "OA report format not specified\n")__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA report format not specified\n"); |
2940 | return -EINVAL22; |
2941 | } |
2942 | |
2943 | stream->engine = props->engine; |
2944 | stream->uncore = stream->engine->gt->uncore; |
2945 | |
2946 | stream->sample_size = sizeof(struct drm_i915_perf_record_header); |
2947 | |
2948 | format_size = perf->oa_formats[props->oa_format].size; |
2949 | |
2950 | stream->sample_flags = props->sample_flags; |
2951 | stream->sample_size += format_size; |
2952 | |
2953 | stream->oa_buffer.format_size = format_size; |
2954 | if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0)({ int __ret = !!((stream->oa_buffer.format_size == 0)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&i915 ->drm))->dev), "", "drm_WARN_ON(" "stream->oa_buffer.format_size == 0" ")"); __builtin_expect(!!(__ret), 0); })) |
2955 | return -EINVAL22; |
2956 | |
2957 | stream->hold_preemption = props->hold_preemption; |
2958 | |
2959 | stream->oa_buffer.format = |
2960 | perf->oa_formats[props->oa_format].format; |
2961 | |
2962 | stream->periodic = props->oa_periodic; |
2963 | if (stream->periodic) |
2964 | stream->period_exponent = props->oa_period_exponent; |
2965 | |
2966 | if (stream->ctx) { |
2967 | ret = oa_get_render_ctx_id(stream); |
2968 | if (ret) { |
2969 | drm_dbg(&stream->perf->i915->drm,__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid context id to filter with\n") |
2970 | "Invalid context id to filter with\n")__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid context id to filter with\n"); |
2971 | return ret; |
2972 | } |
2973 | } |
2974 | |
2975 | ret = alloc_noa_wait(stream); |
2976 | if (ret) { |
2977 | drm_dbg(&stream->perf->i915->drm,__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unable to allocate NOA wait batch buffer\n" ) |
2978 | "Unable to allocate NOA wait batch buffer\n")__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unable to allocate NOA wait batch buffer\n" ); |
2979 | goto err_noa_wait_alloc; |
2980 | } |
2981 | |
2982 | stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set); |
2983 | if (!stream->oa_config) { |
2984 | drm_dbg(&stream->perf->i915->drm,__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid OA config id=%i\n", props-> metrics_set) |
2985 | "Invalid OA config id=%i\n", props->metrics_set)__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid OA config id=%i\n", props-> metrics_set); |
2986 | ret = -EINVAL22; |
2987 | goto err_config; |
2988 | } |
2989 | |
2990 | /* PRM - observability performance counters: |
2991 | * |
2992 | * OACONTROL, performance counter enable, note: |
2993 | * |
2994 | * "When this bit is set, in order to have coherent counts, |
2995 | * RC6 power state and trunk clock gating must be disabled. |
2996 | * This can be achieved by programming MMIO registers as |
2997 | * 0xA094=0 and 0xA090[31]=1" |
2998 | * |
2999 | * In our case we are expecting that taking pm + FORCEWAKE |
3000 | * references will effectively disable RC6. |
3001 | */ |
3002 | intel_engine_pm_get(stream->engine); |
3003 | intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL); |
3004 | |
3005 | ret = alloc_oa_buffer(stream); |
3006 | if (ret) |
3007 | goto err_oa_buf_alloc; |
3008 | |
3009 | stream->ops = &i915_oa_stream_ops; |
3010 | |
3011 | perf->sseu = props->sseu; |
3012 | WRITE_ONCE(perf->exclusive_stream, stream)({ typeof(perf->exclusive_stream) __tmp = (stream); *(volatile typeof(perf->exclusive_stream) *)&(perf->exclusive_stream ) = __tmp; __tmp; }); |
3013 | |
3014 | ret = i915_perf_stream_enable_sync(stream); |
3015 | if (ret) { |
3016 | drm_dbg(&stream->perf->i915->drm,__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unable to enable metric set\n") |
3017 | "Unable to enable metric set\n")__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unable to enable metric set\n"); |
3018 | goto err_enable; |
3019 | } |
3020 | |
3021 | drm_dbg(&stream->perf->i915->drm,__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "opening stream oa config uuid=%s\n", stream ->oa_config->uuid) |
3022 | "opening stream oa config uuid=%s\n",__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "opening stream oa config uuid=%s\n", stream ->oa_config->uuid) |
3023 | stream->oa_config->uuid)__drm_dev_dbg(((void *)0), (&stream->perf->i915-> drm) ? (&stream->perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "opening stream oa config uuid=%s\n", stream ->oa_config->uuid); |
3024 | |
3025 | hrtimer_init(&stream->poll_check_timer, |
3026 | CLOCK_MONOTONIC3, HRTIMER_MODE_REL1); |
3027 | stream->poll_check_timer.function = oa_poll_check_timer_cb; |
3028 | init_waitqueue_head(&stream->poll_wq); |
3029 | mtx_init(&stream->oa_buffer.ptr_lock, IPL_TTY)do { (void)(((void *)0)); (void)(0); __mtx_init((&stream-> oa_buffer.ptr_lock), ((((0x9)) > 0x0 && ((0x9)) < 0x9) ? 0x9 : ((0x9)))); } while (0); |
3030 | |
3031 | return 0; |
3032 | |
3033 | err_enable: |
3034 | WRITE_ONCE(perf->exclusive_stream, NULL)({ typeof(perf->exclusive_stream) __tmp = (((void *)0)); * (volatile typeof(perf->exclusive_stream) *)&(perf-> exclusive_stream) = __tmp; __tmp; }); |
3035 | perf->ops.disable_metric_set(stream); |
3036 | |
3037 | free_oa_buffer(stream); |
3038 | |
3039 | err_oa_buf_alloc: |
3040 | free_oa_configs(stream); |
3041 | |
3042 | intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL); |
3043 | intel_engine_pm_put(stream->engine); |
3044 | |
3045 | err_config: |
3046 | free_noa_wait(stream); |
3047 | |
3048 | err_noa_wait_alloc: |
3049 | if (stream->ctx) |
3050 | oa_put_render_ctx_id(stream); |
3051 | |
3052 | return ret; |
3053 | } |
3054 | |
3055 | #endif |
3056 | |
3057 | void i915_oa_init_reg_state(const struct intel_context *ce, |
3058 | const struct intel_engine_cs *engine) |
3059 | { |
3060 | struct i915_perf_stream *stream; |
3061 | |
3062 | if (engine->class != RENDER_CLASS0) |
3063 | return; |
3064 | |
3065 | /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */ |
3066 | stream = READ_ONCE(engine->i915->perf.exclusive_stream)({ typeof(engine->i915->perf.exclusive_stream) __tmp = * (volatile typeof(engine->i915->perf.exclusive_stream) * )&(engine->i915->perf.exclusive_stream); membar_datadep_consumer (); __tmp; }); |
3067 | if (stream && GRAPHICS_VER(stream->perf->i915)((&(stream->perf->i915)->__runtime)->graphics .ip.ver) < 12) |
3068 | gen8_update_reg_state_unlocked(ce, stream); |
3069 | } |
3070 | |
3071 | #ifdef notyet |
3072 | |
3073 | /** |
3074 | * i915_perf_read - handles read() FOP for i915 perf stream FDs |
3075 | * @file: An i915 perf stream file |
3076 | * @buf: destination buffer given by userspace |
3077 | * @count: the number of bytes userspace wants to read |
3078 | * @ppos: (inout) file seek position (unused) |
3079 | * |
3080 | * The entry point for handling a read() on a stream file descriptor from |
3081 | * userspace. Most of the work is left to the i915_perf_read_locked() and |
3082 | * &i915_perf_stream_ops->read but to save having stream implementations (of |
3083 | * which we might have multiple later) we handle blocking read here. |
3084 | * |
3085 | * We can also consistently treat trying to read from a disabled stream |
3086 | * as an IO error so implementations can assume the stream is enabled |
3087 | * while reading. |
3088 | * |
3089 | * Returns: The number of bytes copied or a negative error code on failure. |
3090 | */ |
3091 | static ssize_t i915_perf_read(struct file *file, |
3092 | char __user *buf, |
3093 | size_t count, |
3094 | loff_t *ppos) |
3095 | { |
3096 | struct i915_perf_stream *stream = file->private_data; |
3097 | struct i915_perf *perf = stream->perf; |
3098 | size_t offset = 0; |
3099 | int ret; |
3100 | |
3101 | /* To ensure it's handled consistently we simply treat all reads of a |
3102 | * disabled stream as an error. In particular it might otherwise lead |
3103 | * to a deadlock for blocking file descriptors... |
3104 | */ |
3105 | if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT(1<<0))) |
3106 | return -EIO5; |
3107 | |
3108 | if (!(file->f_flags & O_NONBLOCK0x0004)) { |
3109 | /* There's the small chance of false positives from |
3110 | * stream->ops->wait_unlocked. |
3111 | * |
3112 | * E.g. with single context filtering since we only wait until |
3113 | * oabuffer has >= 1 report we don't immediately know whether |
3114 | * any reports really belong to the current context |
3115 | */ |
3116 | do { |
3117 | ret = stream->ops->wait_unlocked(stream); |
3118 | if (ret) |
3119 | return ret; |
3120 | |
3121 | mutex_lock(&perf->lock)rw_enter_write(&perf->lock); |
3122 | ret = stream->ops->read(stream, buf, count, &offset); |
3123 | mutex_unlock(&perf->lock)rw_exit_write(&perf->lock); |
3124 | } while (!offset && !ret); |
3125 | } else { |
3126 | mutex_lock(&perf->lock)rw_enter_write(&perf->lock); |
3127 | ret = stream->ops->read(stream, buf, count, &offset); |
3128 | mutex_unlock(&perf->lock)rw_exit_write(&perf->lock); |
3129 | } |
3130 | |
3131 | /* We allow the poll checking to sometimes report false positive EPOLLIN |
3132 | * events where we might actually report EAGAIN on read() if there's |
3133 | * not really any data available. In this situation though we don't |
3134 | * want to enter a busy loop between poll() reporting a EPOLLIN event |
3135 | * and read() returning -EAGAIN. Clearing the oa.pollin state here |
3136 | * effectively ensures we back off until the next hrtimer callback |
3137 | * before reporting another EPOLLIN event. |
3138 | * The exception to this is if ops->read() returned -ENOSPC which means |
3139 | * that more OA data is available than could fit in the user provided |
3140 | * buffer. In this case we want the next poll() call to not block. |
3141 | */ |
3142 | if (ret != -ENOSPC28) |
3143 | stream->pollin = false0; |
3144 | |
3145 | /* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */ |
3146 | return offset ?: (ret ?: -EAGAIN35); |
3147 | } |
3148 | |
3149 | static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) |
3150 | { |
3151 | struct i915_perf_stream *stream = |
3152 | container_of(hrtimer, typeof(*stream), poll_check_timer)({ const __typeof( ((typeof(*stream) *)0)->poll_check_timer ) *__mptr = (hrtimer); (typeof(*stream) *)( (char *)__mptr - __builtin_offsetof(typeof(*stream), poll_check_timer) );}); |
3153 | |
3154 | if (oa_buffer_check_unlocked(stream)) { |
3155 | stream->pollin = true1; |
3156 | wake_up(&stream->poll_wq); |
3157 | } |
3158 | |
3159 | hrtimer_forward_now(hrtimer, |
3160 | ns_to_ktime(stream->poll_oa_period)); |
3161 | |
3162 | return HRTIMER_RESTART; |
3163 | } |
3164 | |
3165 | /** |
3166 | * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream |
3167 | * @stream: An i915 perf stream |
3168 | * @file: An i915 perf stream file |
3169 | * @wait: poll() state table |
3170 | * |
3171 | * For handling userspace polling on an i915 perf stream, this calls through to |
3172 | * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that |
3173 | * will be woken for new stream data. |
3174 | * |
3175 | * Note: The &perf->lock mutex has been taken to serialize |
3176 | * with any non-file-operation driver hooks. |
3177 | * |
3178 | * Returns: any poll events that are ready without sleeping |
3179 | */ |
3180 | static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream, |
3181 | struct file *file, |
3182 | poll_table *wait) |
3183 | { |
3184 | __poll_t events = 0; |
3185 | |
3186 | stream->ops->poll_wait(stream, file, wait); |
3187 | |
3188 | /* Note: we don't explicitly check whether there's something to read |
3189 | * here since this path may be very hot depending on what else |
3190 | * userspace is polling, or on the timeout in use. We rely solely on |
3191 | * the hrtimer/oa_poll_check_timer_cb to notify us when there are |
3192 | * samples to read. |
3193 | */ |
3194 | if (stream->pollin) |
3195 | events |= EPOLLIN; |
3196 | |
3197 | return events; |
3198 | } |
3199 | |
3200 | /** |
3201 | * i915_perf_poll - call poll_wait() with a suitable wait queue for stream |
3202 | * @file: An i915 perf stream file |
3203 | * @wait: poll() state table |
3204 | * |
3205 | * For handling userspace polling on an i915 perf stream, this ensures |
3206 | * poll_wait() gets called with a wait queue that will be woken for new stream |
3207 | * data. |
3208 | * |
3209 | * Note: Implementation deferred to i915_perf_poll_locked() |
3210 | * |
3211 | * Returns: any poll events that are ready without sleeping |
3212 | */ |
3213 | static __poll_t i915_perf_poll(struct file *file, poll_table *wait) |
3214 | { |
3215 | struct i915_perf_stream *stream = file->private_data; |
3216 | struct i915_perf *perf = stream->perf; |
3217 | __poll_t ret; |
3218 | |
3219 | mutex_lock(&perf->lock)rw_enter_write(&perf->lock); |
3220 | ret = i915_perf_poll_locked(stream, file, wait); |
3221 | mutex_unlock(&perf->lock)rw_exit_write(&perf->lock); |
3222 | |
3223 | return ret; |
3224 | } |
3225 | |
3226 | /** |
3227 | * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl |
3228 | * @stream: A disabled i915 perf stream |
3229 | * |
3230 | * [Re]enables the associated capture of data for this stream. |
3231 | * |
3232 | * If a stream was previously enabled then there's currently no intention |
3233 | * to provide userspace any guarantee about the preservation of previously |
3234 | * buffered data. |
3235 | */ |
3236 | static void i915_perf_enable_locked(struct i915_perf_stream *stream) |
3237 | { |
3238 | if (stream->enabled) |
3239 | return; |
3240 | |
3241 | /* Allow stream->ops->enable() to refer to this */ |
3242 | stream->enabled = true1; |
3243 | |
3244 | if (stream->ops->enable) |
3245 | stream->ops->enable(stream); |
3246 | |
3247 | if (stream->hold_preemption) |
3248 | intel_context_set_nopreempt(stream->pinned_ctx); |
3249 | } |
3250 | |
3251 | /** |
3252 | * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl |
3253 | * @stream: An enabled i915 perf stream |
3254 | * |
3255 | * Disables the associated capture of data for this stream. |
3256 | * |
3257 | * The intention is that disabling an re-enabling a stream will ideally be |
3258 | * cheaper than destroying and re-opening a stream with the same configuration, |
3259 | * though there are no formal guarantees about what state or buffered data |
3260 | * must be retained between disabling and re-enabling a stream. |
3261 | * |
3262 | * Note: while a stream is disabled it's considered an error for userspace |
3263 | * to attempt to read from the stream (-EIO). |
3264 | */ |
3265 | static void i915_perf_disable_locked(struct i915_perf_stream *stream) |
3266 | { |
3267 | if (!stream->enabled) |
3268 | return; |
3269 | |
3270 | /* Allow stream->ops->disable() to refer to this */ |
3271 | stream->enabled = false0; |
3272 | |
3273 | if (stream->hold_preemption) |
3274 | intel_context_clear_nopreempt(stream->pinned_ctx); |
3275 | |
3276 | if (stream->ops->disable) |
3277 | stream->ops->disable(stream); |
3278 | } |
3279 | |
3280 | static long i915_perf_config_locked(struct i915_perf_stream *stream, |
3281 | unsigned long metrics_set) |
3282 | { |
3283 | struct i915_oa_config *config; |
3284 | long ret = stream->oa_config->id; |
3285 | |
3286 | config = i915_perf_get_oa_config(stream->perf, metrics_set); |
3287 | if (!config) |
3288 | return -EINVAL22; |
3289 | |
3290 | if (config != stream->oa_config) { |
3291 | int err; |
3292 | |
3293 | /* |
3294 | * If OA is bound to a specific context, emit the |
3295 | * reconfiguration inline from that context. The update |
3296 | * will then be ordered with respect to submission on that |
3297 | * context. |
3298 | * |
3299 | * When set globally, we use a low priority kernel context, |
3300 | * so it will effectively take effect when idle. |
3301 | */ |
3302 | err = emit_oa_config(stream, config, oa_context(stream), NULL((void *)0)); |
3303 | if (!err) |
3304 | config = xchg(&stream->oa_config, config)__sync_lock_test_and_set(&stream->oa_config, config); |
3305 | else |
3306 | ret = err; |
3307 | } |
3308 | |
3309 | i915_oa_config_put(config); |
3310 | |
3311 | return ret; |
3312 | } |
3313 | |
3314 | /** |
3315 | * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs |
3316 | * @stream: An i915 perf stream |
3317 | * @cmd: the ioctl request |
3318 | * @arg: the ioctl data |
3319 | * |
3320 | * Note: The &perf->lock mutex has been taken to serialize |
3321 | * with any non-file-operation driver hooks. |
3322 | * |
3323 | * Returns: zero on success or a negative error code. Returns -EINVAL for |
3324 | * an unknown ioctl request. |
3325 | */ |
3326 | static long i915_perf_ioctl_locked(struct i915_perf_stream *stream, |
3327 | unsigned int cmd, |
3328 | unsigned long arg) |
3329 | { |
3330 | switch (cmd) { |
3331 | case I915_PERF_IOCTL_ENABLE((unsigned long)0x20000000 | ((0 & 0x1fff) << 16) | ((('i')) << 8) | ((0x0))): |
3332 | i915_perf_enable_locked(stream); |
3333 | return 0; |
3334 | case I915_PERF_IOCTL_DISABLE((unsigned long)0x20000000 | ((0 & 0x1fff) << 16) | ((('i')) << 8) | ((0x1))): |
3335 | i915_perf_disable_locked(stream); |
3336 | return 0; |
3337 | case I915_PERF_IOCTL_CONFIG((unsigned long)0x20000000 | ((0 & 0x1fff) << 16) | ((('i')) << 8) | ((0x2))): |
3338 | return i915_perf_config_locked(stream, arg); |
3339 | } |
3340 | |
3341 | return -EINVAL22; |
3342 | } |
3343 | |
3344 | /** |
3345 | * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs |
3346 | * @file: An i915 perf stream file |
3347 | * @cmd: the ioctl request |
3348 | * @arg: the ioctl data |
3349 | * |
3350 | * Implementation deferred to i915_perf_ioctl_locked(). |
3351 | * |
3352 | * Returns: zero on success or a negative error code. Returns -EINVAL for |
3353 | * an unknown ioctl request. |
3354 | */ |
3355 | static long i915_perf_ioctl(struct file *file, |
3356 | unsigned int cmd, |
3357 | unsigned long arg) |
3358 | { |
3359 | struct i915_perf_stream *stream = file->private_data; |
3360 | struct i915_perf *perf = stream->perf; |
3361 | long ret; |
3362 | |
3363 | mutex_lock(&perf->lock)rw_enter_write(&perf->lock); |
3364 | ret = i915_perf_ioctl_locked(stream, cmd, arg); |
3365 | mutex_unlock(&perf->lock)rw_exit_write(&perf->lock); |
3366 | |
3367 | return ret; |
3368 | } |
3369 | |
3370 | /** |
3371 | * i915_perf_destroy_locked - destroy an i915 perf stream |
3372 | * @stream: An i915 perf stream |
3373 | * |
3374 | * Frees all resources associated with the given i915 perf @stream, disabling |
3375 | * any associated data capture in the process. |
3376 | * |
3377 | * Note: The &perf->lock mutex has been taken to serialize |
3378 | * with any non-file-operation driver hooks. |
3379 | */ |
3380 | static void i915_perf_destroy_locked(struct i915_perf_stream *stream) |
3381 | { |
3382 | if (stream->enabled) |
3383 | i915_perf_disable_locked(stream); |
3384 | |
3385 | if (stream->ops->destroy) |
3386 | stream->ops->destroy(stream); |
3387 | |
3388 | if (stream->ctx) |
3389 | i915_gem_context_put(stream->ctx); |
3390 | |
3391 | kfree(stream); |
3392 | } |
3393 | |
3394 | /** |
3395 | * i915_perf_release - handles userspace close() of a stream file |
3396 | * @inode: anonymous inode associated with file |
3397 | * @file: An i915 perf stream file |
3398 | * |
3399 | * Cleans up any resources associated with an open i915 perf stream file. |
3400 | * |
3401 | * NB: close() can't really fail from the userspace point of view. |
3402 | * |
3403 | * Returns: zero on success or a negative error code. |
3404 | */ |
3405 | static int i915_perf_release(struct inode *inode, struct file *file) |
3406 | { |
3407 | struct i915_perf_stream *stream = file->private_data; |
3408 | struct i915_perf *perf = stream->perf; |
3409 | |
3410 | mutex_lock(&perf->lock)rw_enter_write(&perf->lock); |
3411 | i915_perf_destroy_locked(stream); |
3412 | mutex_unlock(&perf->lock)rw_exit_write(&perf->lock); |
3413 | |
3414 | /* Release the reference the perf stream kept on the driver. */ |
3415 | drm_dev_put(&perf->i915->drm); |
3416 | |
3417 | return 0; |
3418 | } |
3419 | |
3420 | |
3421 | static const struct file_operations fops = { |
3422 | .owner = THIS_MODULE((void *)0), |
3423 | .llseek = no_llseek, |
3424 | .release = i915_perf_release, |
3425 | .poll = i915_perf_poll, |
3426 | .read = i915_perf_read, |
3427 | .unlocked_ioctl = i915_perf_ioctl, |
3428 | /* Our ioctl have no arguments, so it's safe to use the same function |
3429 | * to handle 32bits compatibility. |
3430 | */ |
3431 | .compat_ioctl = i915_perf_ioctl, |
3432 | }; |
3433 | |
3434 | #endif /* notyet */ |
3435 | |
3436 | /** |
3437 | * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD |
3438 | * @perf: i915 perf instance |
3439 | * @param: The open parameters passed to 'DRM_I915_PERF_OPEN` |
3440 | * @props: individually validated u64 property value pairs |
3441 | * @file: drm file |
3442 | * |
3443 | * See i915_perf_ioctl_open() for interface details. |
3444 | * |
3445 | * Implements further stream config validation and stream initialization on |
3446 | * behalf of i915_perf_open_ioctl() with the &perf->lock mutex |
3447 | * taken to serialize with any non-file-operation driver hooks. |
3448 | * |
3449 | * Note: at this point the @props have only been validated in isolation and |
3450 | * it's still necessary to validate that the combination of properties makes |
3451 | * sense. |
3452 | * |
3453 | * In the case where userspace is interested in OA unit metrics then further |
3454 | * config validation and stream initialization details will be handled by |
3455 | * i915_oa_stream_init(). The code here should only validate config state that |
3456 | * will be relevant to all stream types / backends. |
3457 | * |
3458 | * Returns: zero on success or a negative error code. |
3459 | */ |
3460 | static int |
3461 | i915_perf_open_ioctl_locked(struct i915_perf *perf, |
3462 | struct drm_i915_perf_open_param *param, |
3463 | struct perf_open_properties *props, |
3464 | struct drm_file *file) |
3465 | { |
3466 | STUB()do { printf("%s: stub\n", __func__); } while(0); |
3467 | return -ENOSYS78; |
3468 | #ifdef notyet |
3469 | struct i915_gem_context *specific_ctx = NULL((void *)0); |
3470 | struct i915_perf_stream *stream = NULL((void *)0); |
3471 | unsigned long f_flags = 0; |
3472 | bool_Bool privileged_op = true1; |
3473 | int stream_fd; |
3474 | int ret; |
3475 | |
3476 | if (props->single_context) { |
3477 | u32 ctx_handle = props->ctx_handle; |
3478 | struct drm_i915_file_private *file_priv = file->driver_priv; |
3479 | |
3480 | specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle); |
3481 | if (IS_ERR(specific_ctx)) { |
3482 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to look up context with ID %u for opening perf stream\n" , ctx_handle) |
3483 | "Failed to look up context with ID %u for opening perf stream\n",__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to look up context with ID %u for opening perf stream\n" , ctx_handle) |
3484 | ctx_handle)__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to look up context with ID %u for opening perf stream\n" , ctx_handle); |
3485 | ret = PTR_ERR(specific_ctx); |
3486 | goto err; |
3487 | } |
3488 | } |
3489 | |
3490 | /* |
3491 | * On Haswell the OA unit supports clock gating off for a specific |
3492 | * context and in this mode there's no visibility of metrics for the |
3493 | * rest of the system, which we consider acceptable for a |
3494 | * non-privileged client. |
3495 | * |
3496 | * For Gen8->11 the OA unit no longer supports clock gating off for a |
3497 | * specific context and the kernel can't securely stop the counters |
3498 | * from updating as system-wide / global values. Even though we can |
3499 | * filter reports based on the included context ID we can't block |
3500 | * clients from seeing the raw / global counter values via |
3501 | * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to |
3502 | * enable the OA unit by default. |
3503 | * |
3504 | * For Gen12+ we gain a new OAR unit that only monitors the RCS on a |
3505 | * per context basis. So we can relax requirements there if the user |
3506 | * doesn't request global stream access (i.e. query based sampling |
3507 | * using MI_RECORD_PERF_COUNT. |
3508 | */ |
3509 | if (IS_HASWELL(perf->i915)IS_PLATFORM(perf->i915, INTEL_HASWELL) && specific_ctx) |
3510 | privileged_op = false0; |
3511 | else if (GRAPHICS_VER(perf->i915)((&(perf->i915)->__runtime)->graphics.ip.ver) == 12 && specific_ctx && |
3512 | (props->sample_flags & SAMPLE_OA_REPORT(1<<0)) == 0) |
3513 | privileged_op = false0; |
3514 | |
3515 | if (props->hold_preemption) { |
3516 | if (!props->single_context) { |
3517 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "preemption disable with no context\n" ) |
3518 | "preemption disable with no context\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "preemption disable with no context\n" ); |
3519 | ret = -EINVAL22; |
3520 | goto err; |
3521 | } |
3522 | privileged_op = true1; |
3523 | } |
3524 | |
3525 | /* |
3526 | * Asking for SSEU configuration is a priviliged operation. |
3527 | */ |
3528 | if (props->has_sseu) |
3529 | privileged_op = true1; |
3530 | else |
3531 | get_default_sseu_config(&props->sseu, props->engine); |
3532 | |
3533 | /* Similar to perf's kernel.perf_paranoid_cpu sysctl option |
3534 | * we check a dev.i915.perf_stream_paranoid sysctl option |
3535 | * to determine if it's ok to access system wide OA counters |
3536 | * without CAP_PERFMON or CAP_SYS_ADMIN privileges. |
3537 | */ |
3538 | if (privileged_op && |
3539 | i915_perf_stream_paranoid && !perfmon_capable()) { |
3540 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Insufficient privileges to open i915 perf stream\n" ) |
3541 | "Insufficient privileges to open i915 perf stream\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Insufficient privileges to open i915 perf stream\n" ); |
3542 | ret = -EACCES13; |
3543 | goto err_ctx; |
3544 | } |
3545 | |
3546 | stream = kzalloc(sizeof(*stream), GFP_KERNEL(0x0001 | 0x0004)); |
3547 | if (!stream) { |
3548 | ret = -ENOMEM12; |
3549 | goto err_ctx; |
3550 | } |
3551 | |
3552 | stream->perf = perf; |
3553 | stream->ctx = specific_ctx; |
3554 | stream->poll_oa_period = props->poll_oa_period; |
3555 | |
3556 | ret = i915_oa_stream_init(stream, param, props); |
3557 | if (ret) |
3558 | goto err_alloc; |
3559 | |
3560 | /* we avoid simply assigning stream->sample_flags = props->sample_flags |
3561 | * to have _stream_init check the combination of sample flags more |
3562 | * thoroughly, but still this is the expected result at this point. |
3563 | */ |
3564 | if (WARN_ON(stream->sample_flags != props->sample_flags)({ int __ret = !!(stream->sample_flags != props->sample_flags ); if (__ret) printf("WARNING %s failed at %s:%d\n", "stream->sample_flags != props->sample_flags" , "/usr/src/sys/dev/pci/drm/i915/i915_perf.c", 3564); __builtin_expect (!!(__ret), 0); })) { |
3565 | ret = -ENODEV19; |
3566 | goto err_flags; |
3567 | } |
3568 | |
3569 | if (param->flags & I915_PERF_FLAG_FD_CLOEXEC(1<<0)) |
3570 | f_flags |= O_CLOEXEC0x10000; |
3571 | if (param->flags & I915_PERF_FLAG_FD_NONBLOCK(1<<1)) |
3572 | f_flags |= O_NONBLOCK0x0004; |
3573 | |
3574 | stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags); |
3575 | if (stream_fd < 0) { |
3576 | ret = stream_fd; |
3577 | goto err_flags; |
3578 | } |
3579 | |
3580 | if (!(param->flags & I915_PERF_FLAG_DISABLED(1<<2))) |
3581 | i915_perf_enable_locked(stream); |
3582 | |
3583 | /* Take a reference on the driver that will be kept with stream_fd |
3584 | * until its release. |
3585 | */ |
3586 | drm_dev_get(&perf->i915->drm); |
3587 | |
3588 | return stream_fd; |
3589 | |
3590 | err_flags: |
3591 | if (stream->ops->destroy) |
3592 | stream->ops->destroy(stream); |
3593 | err_alloc: |
3594 | kfree(stream); |
3595 | err_ctx: |
3596 | if (specific_ctx) |
3597 | i915_gem_context_put(specific_ctx); |
3598 | err: |
3599 | return ret; |
3600 | #endif |
3601 | } |
3602 | |
3603 | static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent) |
3604 | { |
3605 | return intel_gt_clock_interval_to_ns(to_gt(perf->i915), |
3606 | 2ULL << exponent); |
3607 | } |
3608 | |
3609 | static __always_inlineinline __attribute__((__always_inline__)) bool_Bool |
3610 | oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format) |
3611 | { |
3612 | return test_bit(format, perf->format_mask); |
3613 | } |
3614 | |
3615 | static __always_inlineinline __attribute__((__always_inline__)) void |
3616 | oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format) |
3617 | { |
3618 | __set_bit(format, perf->format_mask); |
3619 | } |
3620 | |
3621 | /** |
3622 | * read_properties_unlocked - validate + copy userspace stream open properties |
3623 | * @perf: i915 perf instance |
3624 | * @uprops: The array of u64 key value pairs given by userspace |
3625 | * @n_props: The number of key value pairs expected in @uprops |
3626 | * @props: The stream configuration built up while validating properties |
3627 | * |
3628 | * Note this function only validates properties in isolation it doesn't |
3629 | * validate that the combination of properties makes sense or that all |
3630 | * properties necessary for a particular kind of stream have been set. |
3631 | * |
3632 | * Note that there currently aren't any ordering requirements for properties so |
3633 | * we shouldn't validate or assume anything about ordering here. This doesn't |
3634 | * rule out defining new properties with ordering requirements in the future. |
3635 | */ |
3636 | static int read_properties_unlocked(struct i915_perf *perf, |
3637 | u64 __user *uprops, |
3638 | u32 n_props, |
3639 | struct perf_open_properties *props) |
3640 | { |
3641 | u64 __user *uprop = uprops; |
3642 | u32 i; |
3643 | int ret; |
3644 | |
3645 | memset(props, 0, sizeof(struct perf_open_properties))__builtin_memset((props), (0), (sizeof(struct perf_open_properties ))); |
3646 | props->poll_oa_period = DEFAULT_POLL_PERIOD_NS(1000000000L / 200); |
3647 | |
3648 | if (!n_props) { |
3649 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "No i915 perf properties given\n" ) |
3650 | "No i915 perf properties given\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "No i915 perf properties given\n" ); |
3651 | return -EINVAL22; |
3652 | } |
3653 | |
3654 | /* At the moment we only support using i915-perf on the RCS. */ |
3655 | props->engine = intel_engine_lookup_user(perf->i915, |
3656 | I915_ENGINE_CLASS_RENDER, |
3657 | 0); |
3658 | if (!props->engine) { |
3659 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "No RENDER-capable engines\n" ) |
3660 | "No RENDER-capable engines\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "No RENDER-capable engines\n" ); |
3661 | return -EINVAL22; |
3662 | } |
3663 | |
3664 | /* Considering that ID = 0 is reserved and assuming that we don't |
3665 | * (currently) expect any configurations to ever specify duplicate |
3666 | * values for a particular property ID then the last _PROP_MAX value is |
3667 | * one greater than the maximum number of properties we expect to get |
3668 | * from userspace. |
3669 | */ |
3670 | if (n_props >= DRM_I915_PERF_PROP_MAX) { |
3671 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "More i915 perf properties specified than exist\n" ) |
3672 | "More i915 perf properties specified than exist\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "More i915 perf properties specified than exist\n" ); |
3673 | return -EINVAL22; |
3674 | } |
3675 | |
3676 | for (i = 0; i < n_props; i++) { |
3677 | u64 oa_period, oa_freq_hz; |
3678 | u64 id, value; |
3679 | |
3680 | ret = get_user(id, uprop)-copyin(uprop, &(id), sizeof(id)); |
3681 | if (ret) |
3682 | return ret; |
3683 | |
3684 | ret = get_user(value, uprop + 1)-copyin(uprop + 1, &(value), sizeof(value)); |
3685 | if (ret) |
3686 | return ret; |
3687 | |
3688 | if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) { |
3689 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unknown i915 perf property ID\n" ) |
3690 | "Unknown i915 perf property ID\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unknown i915 perf property ID\n" ); |
3691 | return -EINVAL22; |
3692 | } |
3693 | |
3694 | switch ((enum drm_i915_perf_property_id)id) { |
3695 | case DRM_I915_PERF_PROP_CTX_HANDLE: |
3696 | props->single_context = 1; |
3697 | props->ctx_handle = value; |
3698 | break; |
3699 | case DRM_I915_PERF_PROP_SAMPLE_OA: |
3700 | if (value) |
3701 | props->sample_flags |= SAMPLE_OA_REPORT(1<<0); |
3702 | break; |
3703 | case DRM_I915_PERF_PROP_OA_METRICS_SET: |
3704 | if (value == 0) { |
3705 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unknown OA metric set ID\n" ) |
3706 | "Unknown OA metric set ID\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unknown OA metric set ID\n" ); |
3707 | return -EINVAL22; |
3708 | } |
3709 | props->metrics_set = value; |
3710 | break; |
3711 | case DRM_I915_PERF_PROP_OA_FORMAT: |
3712 | if (value == 0 || value >= I915_OA_FORMAT_MAX) { |
3713 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Out-of-range OA report format %llu\n" , value) |
3714 | "Out-of-range OA report format %llu\n",__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Out-of-range OA report format %llu\n" , value) |
3715 | value)__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Out-of-range OA report format %llu\n" , value); |
3716 | return -EINVAL22; |
3717 | } |
3718 | if (!oa_format_valid(perf, value)) { |
3719 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unsupported OA report format %llu\n" , value) |
3720 | "Unsupported OA report format %llu\n",__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unsupported OA report format %llu\n" , value) |
3721 | value)__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unsupported OA report format %llu\n" , value); |
3722 | return -EINVAL22; |
3723 | } |
3724 | props->oa_format = value; |
3725 | break; |
3726 | case DRM_I915_PERF_PROP_OA_EXPONENT: |
3727 | if (value > OA_EXPONENT_MAX31) { |
3728 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA timer exponent too high (> %u)\n" , 31) |
3729 | "OA timer exponent too high (> %u)\n",__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA timer exponent too high (> %u)\n" , 31) |
3730 | OA_EXPONENT_MAX)__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA timer exponent too high (> %u)\n" , 31); |
3731 | return -EINVAL22; |
3732 | } |
3733 | |
3734 | /* Theoretically we can program the OA unit to sample |
3735 | * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns |
3736 | * for BXT. We don't allow such high sampling |
3737 | * frequencies by default unless root. |
3738 | */ |
3739 | |
3740 | BUILD_BUG_ON(sizeof(oa_period) != 8)extern char _ctassert[(!(sizeof(oa_period) != 8)) ? 1 : -1 ] __attribute__ ((__unused__)); |
3741 | oa_period = oa_exponent_to_ns(perf, value); |
3742 | |
3743 | /* This check is primarily to ensure that oa_period <= |
3744 | * UINT32_MAX (before passing to do_div which only |
3745 | * accepts a u32 denominator), but we can also skip |
3746 | * checking anything < 1Hz which implicitly can't be |
3747 | * limited via an integer oa_max_sample_rate. |
3748 | */ |
3749 | if (oa_period <= NSEC_PER_SEC1000000000L) { |
3750 | u64 tmp = NSEC_PER_SEC1000000000L; |
3751 | do_div(tmp, oa_period)({ uint32_t __base = (oa_period); uint32_t __rem = ((uint64_t )(tmp)) % __base; (tmp) = ((uint64_t)(tmp)) / __base; __rem; } ); |
3752 | oa_freq_hz = tmp; |
3753 | } else |
3754 | oa_freq_hz = 0; |
3755 | |
3756 | if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) { |
3757 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n" , i915_oa_max_sample_rate) |
3758 | "OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n" , i915_oa_max_sample_rate) |
3759 | i915_oa_max_sample_rate)__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n" , i915_oa_max_sample_rate); |
3760 | return -EACCES13; |
3761 | } |
3762 | |
3763 | props->oa_periodic = true1; |
3764 | props->oa_period_exponent = value; |
3765 | break; |
3766 | case DRM_I915_PERF_PROP_HOLD_PREEMPTION: |
3767 | props->hold_preemption = !!value; |
3768 | break; |
3769 | case DRM_I915_PERF_PROP_GLOBAL_SSEU: { |
3770 | struct drm_i915_gem_context_param_sseu user_sseu; |
3771 | |
3772 | if (GRAPHICS_VER_FULL(perf->i915)(((&(perf->i915)->__runtime)->graphics.ip.ver) << 8 | ((&(perf->i915)->__runtime)->graphics.ip.rel )) >= IP_VER(12, 50)((12) << 8 | (50))) { |
3773 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "SSEU config not supported on gfx %x\n" , (((&(perf->i915)->__runtime)->graphics.ip.ver) << 8 | ((&(perf->i915)->__runtime)->graphics .ip.rel))) |
3774 | "SSEU config not supported on gfx %x\n",__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "SSEU config not supported on gfx %x\n" , (((&(perf->i915)->__runtime)->graphics.ip.ver) << 8 | ((&(perf->i915)->__runtime)->graphics .ip.rel))) |
3775 | GRAPHICS_VER_FULL(perf->i915))__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "SSEU config not supported on gfx %x\n" , (((&(perf->i915)->__runtime)->graphics.ip.ver) << 8 | ((&(perf->i915)->__runtime)->graphics .ip.rel))); |
3776 | return -ENODEV19; |
3777 | } |
3778 | |
3779 | if (copy_from_user(&user_sseu, |
3780 | u64_to_user_ptr(value)((void *)(uintptr_t)(value)), |
3781 | sizeof(user_sseu))) { |
3782 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unable to copy global sseu parameter\n" ) |
3783 | "Unable to copy global sseu parameter\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unable to copy global sseu parameter\n" ); |
3784 | return -EFAULT14; |
3785 | } |
3786 | |
3787 | ret = get_sseu_config(&props->sseu, props->engine, &user_sseu); |
3788 | if (ret) { |
3789 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid SSEU configuration\n" ) |
3790 | "Invalid SSEU configuration\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid SSEU configuration\n" ); |
3791 | return ret; |
3792 | } |
3793 | props->has_sseu = true1; |
3794 | break; |
3795 | } |
3796 | case DRM_I915_PERF_PROP_POLL_OA_PERIOD: |
3797 | if (value < 100000 /* 100us */) { |
3798 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA availability timer too small (%lluns < 100us)\n" , value) |
3799 | "OA availability timer too small (%lluns < 100us)\n",__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA availability timer too small (%lluns < 100us)\n" , value) |
3800 | value)__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA availability timer too small (%lluns < 100us)\n" , value); |
3801 | return -EINVAL22; |
3802 | } |
3803 | props->poll_oa_period = value; |
3804 | break; |
3805 | case DRM_I915_PERF_PROP_MAX: |
3806 | MISSING_CASE(id)({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n" , "id", (long)(id)); __builtin_expect(!!(__ret), 0); }); |
3807 | return -EINVAL22; |
3808 | } |
3809 | |
3810 | uprop += 2; |
3811 | } |
3812 | |
3813 | return 0; |
3814 | } |
3815 | |
3816 | /** |
3817 | * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD |
3818 | * @dev: drm device |
3819 | * @data: ioctl data copied from userspace (unvalidated) |
3820 | * @file: drm file |
3821 | * |
3822 | * Validates the stream open parameters given by userspace including flags |
3823 | * and an array of u64 key, value pair properties. |
3824 | * |
3825 | * Very little is assumed up front about the nature of the stream being |
3826 | * opened (for instance we don't assume it's for periodic OA unit metrics). An |
3827 | * i915-perf stream is expected to be a suitable interface for other forms of |
3828 | * buffered data written by the GPU besides periodic OA metrics. |
3829 | * |
3830 | * Note we copy the properties from userspace outside of the i915 perf |
3831 | * mutex to avoid an awkward lockdep with mmap_lock. |
3832 | * |
3833 | * Most of the implementation details are handled by |
3834 | * i915_perf_open_ioctl_locked() after taking the &perf->lock |
3835 | * mutex for serializing with any non-file-operation driver hooks. |
3836 | * |
3837 | * Return: A newly opened i915 Perf stream file descriptor or negative |
3838 | * error code on failure. |
3839 | */ |
3840 | int i915_perf_open_ioctl(struct drm_device *dev, void *data, |
3841 | struct drm_file *file) |
3842 | { |
3843 | struct i915_perf *perf = &to_i915(dev)->perf; |
3844 | struct drm_i915_perf_open_param *param = data; |
3845 | struct perf_open_properties props; |
3846 | u32 known_open_flags; |
3847 | int ret; |
3848 | |
3849 | if (!perf->i915) |
3850 | return -ENOTSUPP91; |
3851 | |
3852 | known_open_flags = I915_PERF_FLAG_FD_CLOEXEC(1<<0) | |
3853 | I915_PERF_FLAG_FD_NONBLOCK(1<<1) | |
3854 | I915_PERF_FLAG_DISABLED(1<<2); |
3855 | if (param->flags & ~known_open_flags) { |
3856 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unknown drm_i915_perf_open_param flag\n" ) |
3857 | "Unknown drm_i915_perf_open_param flag\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unknown drm_i915_perf_open_param flag\n" ); |
3858 | return -EINVAL22; |
3859 | } |
3860 | |
3861 | ret = read_properties_unlocked(perf, |
3862 | u64_to_user_ptr(param->properties_ptr)((void *)(uintptr_t)(param->properties_ptr)), |
3863 | param->num_properties, |
3864 | &props); |
3865 | if (ret) |
3866 | return ret; |
3867 | |
3868 | mutex_lock(&perf->lock)rw_enter_write(&perf->lock); |
3869 | ret = i915_perf_open_ioctl_locked(perf, param, &props, file); |
3870 | mutex_unlock(&perf->lock)rw_exit_write(&perf->lock); |
3871 | |
3872 | return ret; |
3873 | } |
3874 | |
3875 | /** |
3876 | * i915_perf_register - exposes i915-perf to userspace |
3877 | * @i915: i915 device instance |
3878 | * |
3879 | * In particular OA metric sets are advertised under a sysfs metrics/ |
3880 | * directory allowing userspace to enumerate valid IDs that can be |
3881 | * used to open an i915-perf stream. |
3882 | */ |
3883 | void i915_perf_register(struct drm_i915_privateinteldrm_softc *i915) |
3884 | { |
3885 | #ifdef __linux__ |
3886 | struct i915_perf *perf = &i915->perf; |
3887 | |
3888 | if (!perf->i915) |
3889 | return; |
3890 | |
3891 | /* To be sure we're synchronized with an attempted |
3892 | * i915_perf_open_ioctl(); considering that we register after |
3893 | * being exposed to userspace. |
3894 | */ |
3895 | mutex_lock(&perf->lock)rw_enter_write(&perf->lock); |
3896 | |
3897 | perf->metrics_kobj = |
3898 | kobject_create_and_add("metrics", |
3899 | &i915->drm.primary->kdev->kobj); |
3900 | |
3901 | mutex_unlock(&perf->lock)rw_exit_write(&perf->lock); |
3902 | #endif |
3903 | } |
3904 | |
3905 | /** |
3906 | * i915_perf_unregister - hide i915-perf from userspace |
3907 | * @i915: i915 device instance |
3908 | * |
3909 | * i915-perf state cleanup is split up into an 'unregister' and |
3910 | * 'deinit' phase where the interface is first hidden from |
3911 | * userspace by i915_perf_unregister() before cleaning up |
3912 | * remaining state in i915_perf_fini(). |
3913 | */ |
3914 | void i915_perf_unregister(struct drm_i915_privateinteldrm_softc *i915) |
3915 | { |
3916 | struct i915_perf *perf = &i915->perf; |
3917 | |
3918 | if (!perf->metrics_kobj) |
3919 | return; |
3920 | |
3921 | kobject_put(perf->metrics_kobj); |
3922 | perf->metrics_kobj = NULL((void *)0); |
3923 | } |
3924 | |
3925 | static bool_Bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr) |
3926 | { |
3927 | static const i915_reg_t flex_eu_regs[] = { |
3928 | EU_PERF_CNTL0((const i915_reg_t){ .reg = (0xe458) }), |
3929 | EU_PERF_CNTL1((const i915_reg_t){ .reg = (0xe558) }), |
3930 | EU_PERF_CNTL2((const i915_reg_t){ .reg = (0xe658) }), |
3931 | EU_PERF_CNTL3((const i915_reg_t){ .reg = (0xe758) }), |
3932 | EU_PERF_CNTL4((const i915_reg_t){ .reg = (0xe45c) }), |
3933 | EU_PERF_CNTL5((const i915_reg_t){ .reg = (0xe55c) }), |
3934 | EU_PERF_CNTL6((const i915_reg_t){ .reg = (0xe65c) }), |
3935 | }; |
3936 | int i; |
3937 | |
3938 | for (i = 0; i < ARRAY_SIZE(flex_eu_regs)(sizeof((flex_eu_regs)) / sizeof((flex_eu_regs)[0])); i++) { |
3939 | if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr) |
3940 | return true1; |
3941 | } |
3942 | return false0; |
3943 | } |
3944 | |
3945 | static bool_Bool reg_in_range_table(u32 addr, const struct i915_range *table) |
3946 | { |
3947 | while (table->start || table->end) { |
3948 | if (addr >= table->start && addr <= table->end) |
3949 | return true1; |
3950 | |
3951 | table++; |
3952 | } |
3953 | |
3954 | return false0; |
3955 | } |
3956 | |
3957 | #define REG_EQUAL(addr, mmio)((addr) == i915_mmio_reg_offset(mmio)) \ |
3958 | ((addr) == i915_mmio_reg_offset(mmio)) |
3959 | |
3960 | static const struct i915_range gen7_oa_b_counters[] = { |
3961 | { .start = 0x2710, .end = 0x272c }, /* OASTARTTRIG[1-8] */ |
3962 | { .start = 0x2740, .end = 0x275c }, /* OAREPORTTRIG[1-8] */ |
3963 | { .start = 0x2770, .end = 0x27ac }, /* OACEC[0-7][0-1] */ |
3964 | {} |
3965 | }; |
3966 | |
3967 | static const struct i915_range gen12_oa_b_counters[] = { |
3968 | { .start = 0x2b2c, .end = 0x2b2c }, /* GEN12_OAG_OA_PESS */ |
3969 | { .start = 0xd900, .end = 0xd91c }, /* GEN12_OAG_OASTARTTRIG[1-8] */ |
3970 | { .start = 0xd920, .end = 0xd93c }, /* GEN12_OAG_OAREPORTTRIG1[1-8] */ |
3971 | { .start = 0xd940, .end = 0xd97c }, /* GEN12_OAG_CEC[0-7][0-1] */ |
3972 | { .start = 0xdc00, .end = 0xdc3c }, /* GEN12_OAG_SCEC[0-7][0-1] */ |
3973 | { .start = 0xdc40, .end = 0xdc40 }, /* GEN12_OAG_SPCTR_CNF */ |
3974 | { .start = 0xdc44, .end = 0xdc44 }, /* GEN12_OAA_DBG_REG */ |
3975 | {} |
3976 | }; |
3977 | |
3978 | static const struct i915_range gen7_oa_mux_regs[] = { |
3979 | { .start = 0x91b8, .end = 0x91cc }, /* OA_PERFCNT[1-2], OA_PERFMATRIX */ |
3980 | { .start = 0x9800, .end = 0x9888 }, /* MICRO_BP0_0 - NOA_WRITE */ |
3981 | { .start = 0xe180, .end = 0xe180 }, /* HALF_SLICE_CHICKEN2 */ |
3982 | {} |
3983 | }; |
3984 | |
3985 | static const struct i915_range hsw_oa_mux_regs[] = { |
3986 | { .start = 0x09e80, .end = 0x09ea4 }, /* HSW_MBVID2_NOA[0-9] */ |
3987 | { .start = 0x09ec0, .end = 0x09ec0 }, /* HSW_MBVID2_MISR0 */ |
3988 | { .start = 0x25100, .end = 0x2ff90 }, |
3989 | {} |
3990 | }; |
3991 | |
3992 | static const struct i915_range chv_oa_mux_regs[] = { |
3993 | { .start = 0x182300, .end = 0x1823a4 }, |
3994 | {} |
3995 | }; |
3996 | |
3997 | static const struct i915_range gen8_oa_mux_regs[] = { |
3998 | { .start = 0x0d00, .end = 0x0d2c }, /* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */ |
3999 | { .start = 0x20cc, .end = 0x20cc }, /* WAIT_FOR_RC6_EXIT */ |
4000 | {} |
4001 | }; |
4002 | |
4003 | static const struct i915_range gen11_oa_mux_regs[] = { |
4004 | { .start = 0x91c8, .end = 0x91dc }, /* OA_PERFCNT[3-4] */ |
4005 | {} |
4006 | }; |
4007 | |
4008 | static const struct i915_range gen12_oa_mux_regs[] = { |
4009 | { .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */ |
4010 | { .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */ |
4011 | { .start = 0x9840, .end = 0x9840 }, /* GDT_CHICKEN_BITS */ |
4012 | { .start = 0x9884, .end = 0x9888 }, /* NOA_WRITE */ |
4013 | { .start = 0x20cc, .end = 0x20cc }, /* WAIT_FOR_RC6_EXIT */ |
4014 | {} |
4015 | }; |
4016 | |
4017 | static bool_Bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) |
4018 | { |
4019 | return reg_in_range_table(addr, gen7_oa_b_counters); |
4020 | } |
4021 | |
4022 | static bool_Bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr) |
4023 | { |
4024 | return reg_in_range_table(addr, gen7_oa_mux_regs) || |
4025 | reg_in_range_table(addr, gen8_oa_mux_regs); |
4026 | } |
4027 | |
4028 | static bool_Bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr) |
4029 | { |
4030 | return reg_in_range_table(addr, gen7_oa_mux_regs) || |
4031 | reg_in_range_table(addr, gen8_oa_mux_regs) || |
4032 | reg_in_range_table(addr, gen11_oa_mux_regs); |
4033 | } |
4034 | |
4035 | static bool_Bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr) |
4036 | { |
4037 | return reg_in_range_table(addr, gen7_oa_mux_regs) || |
4038 | reg_in_range_table(addr, hsw_oa_mux_regs); |
4039 | } |
4040 | |
4041 | static bool_Bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr) |
4042 | { |
4043 | return reg_in_range_table(addr, gen7_oa_mux_regs) || |
4044 | reg_in_range_table(addr, chv_oa_mux_regs); |
4045 | } |
4046 | |
4047 | static bool_Bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) |
4048 | { |
4049 | return reg_in_range_table(addr, gen12_oa_b_counters); |
4050 | } |
4051 | |
4052 | static bool_Bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr) |
4053 | { |
4054 | return reg_in_range_table(addr, gen12_oa_mux_regs); |
4055 | } |
4056 | |
4057 | #ifdef notyet |
4058 | |
4059 | static u32 mask_reg_value(u32 reg, u32 val) |
4060 | { |
4061 | /* HALF_SLICE_CHICKEN2 is programmed with a the |
4062 | * WaDisableSTUnitPowerOptimization workaround. Make sure the value |
4063 | * programmed by userspace doesn't change this. |
4064 | */ |
4065 | if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2)((reg) == i915_mmio_reg_offset(((const i915_reg_t){ .reg = (0xe180 ) })))) |
4066 | val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE)({ typeof((1 << 13)) _a = ((1 << 13)); ({ if (__builtin_constant_p (_a)) do { } while (0); if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p(_a) && __builtin_constant_p (_a)) do { } while (0); ((_a) << 16 | (_a)); }); }); |
4067 | |
4068 | /* WAIT_FOR_RC6_EXIT has only one bit fullfilling the function |
4069 | * indicated by its name and a bunch of selection fields used by OA |
4070 | * configs. |
4071 | */ |
4072 | if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT)((reg) == i915_mmio_reg_offset(((const i915_reg_t){ .reg = (0x20cc ) })))) |
4073 | val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE)({ typeof((1 << 0)) _a = ((1 << 0)); ({ if (__builtin_constant_p (_a)) do { } while (0); if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p(_a) && __builtin_constant_p (_a)) do { } while (0); ((_a) << 16 | (_a)); }); }); |
4074 | |
4075 | return val; |
4076 | } |
4077 | |
4078 | static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf, |
4079 | bool_Bool (*is_valid)(struct i915_perf *perf, u32 addr), |
4080 | u32 __user *regs, |
4081 | u32 n_regs) |
4082 | { |
4083 | struct i915_oa_reg *oa_regs; |
4084 | int err; |
4085 | u32 i; |
4086 | |
4087 | if (!n_regs) |
4088 | return NULL((void *)0); |
4089 | |
4090 | /* No is_valid function means we're not allowing any register to be programmed. */ |
4091 | GEM_BUG_ON(!is_valid)((void)0); |
4092 | if (!is_valid) |
4093 | return ERR_PTR(-EINVAL22); |
4094 | |
4095 | oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL(0x0001 | 0x0004)); |
4096 | if (!oa_regs) |
4097 | return ERR_PTR(-ENOMEM12); |
4098 | |
4099 | for (i = 0; i < n_regs; i++) { |
4100 | u32 addr, value; |
4101 | |
4102 | err = get_user(addr, regs)-copyin(regs, &(addr), sizeof(addr)); |
4103 | if (err) |
4104 | goto addr_err; |
4105 | |
4106 | if (!is_valid(perf, addr)) { |
4107 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid oa_reg address: %X\n" , addr) |
4108 | "Invalid oa_reg address: %X\n", addr)__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid oa_reg address: %X\n" , addr); |
4109 | err = -EINVAL22; |
4110 | goto addr_err; |
4111 | } |
4112 | |
4113 | err = get_user(value, regs + 1)-copyin(regs + 1, &(value), sizeof(value)); |
4114 | if (err) |
4115 | goto addr_err; |
4116 | |
4117 | oa_regs[i].addr = _MMIO(addr)((const i915_reg_t){ .reg = (addr) }); |
4118 | oa_regs[i].value = mask_reg_value(addr, value); |
4119 | |
4120 | regs += 2; |
4121 | } |
4122 | |
4123 | return oa_regs; |
4124 | |
4125 | addr_err: |
4126 | kfree(oa_regs); |
4127 | return ERR_PTR(err); |
4128 | } |
4129 | |
4130 | static ssize_t show_dynamic_id(struct kobject *kobj, |
4131 | struct kobj_attribute *attr, |
4132 | char *buf) |
4133 | { |
4134 | struct i915_oa_config *oa_config = |
4135 | container_of(attr, typeof(*oa_config), sysfs_metric_id)({ const __typeof( ((typeof(*oa_config) *)0)->sysfs_metric_id ) *__mptr = (attr); (typeof(*oa_config) *)( (char *)__mptr - __builtin_offsetof(typeof(*oa_config), sysfs_metric_id) );}); |
4136 | |
4137 | return sprintf(buf, "%d\n", oa_config->id); |
4138 | } |
4139 | |
4140 | static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf, |
4141 | struct i915_oa_config *oa_config) |
4142 | { |
4143 | sysfs_attr_init(&oa_config->sysfs_metric_id.attr); |
4144 | oa_config->sysfs_metric_id.attr.name = "id"; |
4145 | oa_config->sysfs_metric_id.attr.mode = S_IRUGO; |
4146 | oa_config->sysfs_metric_id.show = show_dynamic_id; |
4147 | oa_config->sysfs_metric_id.store = NULL((void *)0); |
4148 | |
4149 | oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr; |
4150 | oa_config->attrs[1] = NULL((void *)0); |
4151 | |
4152 | oa_config->sysfs_metric.name = oa_config->uuid; |
4153 | oa_config->sysfs_metric.attrs = oa_config->attrs; |
4154 | |
4155 | return sysfs_create_group(perf->metrics_kobj,0 |
4156 | &oa_config->sysfs_metric)0; |
4157 | } |
4158 | |
4159 | #endif |
4160 | |
4161 | /** |
4162 | * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config |
4163 | * @dev: drm device |
4164 | * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from |
4165 | * userspace (unvalidated) |
4166 | * @file: drm file |
4167 | * |
4168 | * Validates the submitted OA register to be saved into a new OA config that |
4169 | * can then be used for programming the OA unit and its NOA network. |
4170 | * |
4171 | * Returns: A new allocated config number to be used with the perf open ioctl |
4172 | * or a negative error code on failure. |
4173 | */ |
4174 | int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, |
4175 | struct drm_file *file) |
4176 | { |
4177 | STUB()do { printf("%s: stub\n", __func__); } while(0); |
4178 | return -ENOSYS78; |
4179 | #ifdef notyet |
4180 | struct i915_perf *perf = &to_i915(dev)->perf; |
4181 | struct drm_i915_perf_oa_config *args = data; |
4182 | struct i915_oa_config *oa_config, *tmp; |
4183 | struct i915_oa_reg *regs; |
4184 | int err, id; |
4185 | |
4186 | if (!perf->i915) |
4187 | return -ENOTSUPP91; |
4188 | |
4189 | if (!perf->metrics_kobj) { |
4190 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA metrics weren't advertised via sysfs\n" ) |
4191 | "OA metrics weren't advertised via sysfs\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA metrics weren't advertised via sysfs\n" ); |
4192 | return -EINVAL22; |
4193 | } |
4194 | |
4195 | if (i915_perf_stream_paranoid && !perfmon_capable()) { |
4196 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Insufficient privileges to add i915 OA config\n" ) |
4197 | "Insufficient privileges to add i915 OA config\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Insufficient privileges to add i915 OA config\n" ); |
4198 | return -EACCES13; |
4199 | } |
4200 | |
4201 | if ((!args->mux_regs_ptr || !args->n_mux_regs) && |
4202 | (!args->boolean_regs_ptr || !args->n_boolean_regs) && |
4203 | (!args->flex_regs_ptr || !args->n_flex_regs)) { |
4204 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "No OA registers given\n" ) |
4205 | "No OA registers given\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "No OA registers given\n" ); |
4206 | return -EINVAL22; |
4207 | } |
4208 | |
4209 | oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL(0x0001 | 0x0004)); |
4210 | if (!oa_config) { |
4211 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to allocate memory for the OA config\n" ) |
4212 | "Failed to allocate memory for the OA config\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to allocate memory for the OA config\n" ); |
4213 | return -ENOMEM12; |
4214 | } |
4215 | |
4216 | oa_config->perf = perf; |
4217 | kref_init(&oa_config->ref); |
4218 | |
4219 | if (!uuid_is_valid(args->uuid)) { |
4220 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid uuid format for OA config\n" ) |
4221 | "Invalid uuid format for OA config\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Invalid uuid format for OA config\n" ); |
4222 | err = -EINVAL22; |
4223 | goto reg_err; |
4224 | } |
4225 | |
4226 | /* Last character in oa_config->uuid will be 0 because oa_config is |
4227 | * kzalloc. |
4228 | */ |
4229 | memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid))__builtin_memcpy((oa_config->uuid), (args->uuid), (sizeof (args->uuid))); |
4230 | |
4231 | oa_config->mux_regs_len = args->n_mux_regs; |
4232 | regs = alloc_oa_regs(perf, |
4233 | perf->ops.is_valid_mux_reg, |
4234 | u64_to_user_ptr(args->mux_regs_ptr)((void *)(uintptr_t)(args->mux_regs_ptr)), |
4235 | args->n_mux_regs); |
4236 | |
4237 | if (IS_ERR(regs)) { |
4238 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to create OA config for mux_regs\n" ) |
4239 | "Failed to create OA config for mux_regs\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to create OA config for mux_regs\n" ); |
4240 | err = PTR_ERR(regs); |
4241 | goto reg_err; |
4242 | } |
4243 | oa_config->mux_regs = regs; |
4244 | |
4245 | oa_config->b_counter_regs_len = args->n_boolean_regs; |
4246 | regs = alloc_oa_regs(perf, |
4247 | perf->ops.is_valid_b_counter_reg, |
4248 | u64_to_user_ptr(args->boolean_regs_ptr)((void *)(uintptr_t)(args->boolean_regs_ptr)), |
4249 | args->n_boolean_regs); |
4250 | |
4251 | if (IS_ERR(regs)) { |
4252 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to create OA config for b_counter_regs\n" ) |
4253 | "Failed to create OA config for b_counter_regs\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to create OA config for b_counter_regs\n" ); |
4254 | err = PTR_ERR(regs); |
4255 | goto reg_err; |
4256 | } |
4257 | oa_config->b_counter_regs = regs; |
4258 | |
4259 | if (GRAPHICS_VER(perf->i915)((&(perf->i915)->__runtime)->graphics.ip.ver) < 8) { |
4260 | if (args->n_flex_regs != 0) { |
4261 | err = -EINVAL22; |
4262 | goto reg_err; |
4263 | } |
4264 | } else { |
4265 | oa_config->flex_regs_len = args->n_flex_regs; |
4266 | regs = alloc_oa_regs(perf, |
4267 | perf->ops.is_valid_flex_reg, |
4268 | u64_to_user_ptr(args->flex_regs_ptr)((void *)(uintptr_t)(args->flex_regs_ptr)), |
4269 | args->n_flex_regs); |
4270 | |
4271 | if (IS_ERR(regs)) { |
4272 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to create OA config for flex_regs\n" ) |
4273 | "Failed to create OA config for flex_regs\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to create OA config for flex_regs\n" ); |
4274 | err = PTR_ERR(regs); |
4275 | goto reg_err; |
4276 | } |
4277 | oa_config->flex_regs = regs; |
4278 | } |
4279 | |
4280 | err = mutex_lock_interruptible(&perf->metrics_lock); |
4281 | if (err) |
4282 | goto reg_err; |
4283 | |
4284 | /* We shouldn't have too many configs, so this iteration shouldn't be |
4285 | * too costly. |
4286 | */ |
4287 | idr_for_each_entry(&perf->metrics_idr, tmp, id)for (id = 0; ((tmp) = idr_get_next(&perf->metrics_idr, &(id))) != ((void *)0); id++) { |
4288 | if (!strcmp(tmp->uuid, oa_config->uuid)) { |
4289 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA config already exists with this uuid\n" ) |
4290 | "OA config already exists with this uuid\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "OA config already exists with this uuid\n" ); |
4291 | err = -EADDRINUSE48; |
4292 | goto sysfs_err; |
4293 | } |
4294 | } |
4295 | |
4296 | err = create_dynamic_oa_sysfs_entry(perf, oa_config); |
4297 | if (err) { |
4298 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to create sysfs entry for OA config\n" ) |
4299 | "Failed to create sysfs entry for OA config\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to create sysfs entry for OA config\n" ); |
4300 | goto sysfs_err; |
4301 | } |
4302 | |
4303 | /* Config id 0 is invalid, id 1 for kernel stored test config. */ |
4304 | oa_config->id = idr_alloc(&perf->metrics_idr, |
4305 | oa_config, 2, |
4306 | 0, GFP_KERNEL(0x0001 | 0x0004)); |
4307 | if (oa_config->id < 0) { |
4308 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to create sysfs entry for OA config\n" ) |
4309 | "Failed to create sysfs entry for OA config\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to create sysfs entry for OA config\n" ); |
4310 | err = oa_config->id; |
4311 | goto sysfs_err; |
4312 | } |
4313 | id = oa_config->id; |
4314 | |
4315 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Added config %s id=%i\n" , oa_config->uuid, oa_config->id) |
4316 | "Added config %s id=%i\n", oa_config->uuid, oa_config->id)__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Added config %s id=%i\n" , oa_config->uuid, oa_config->id); |
4317 | mutex_unlock(&perf->metrics_lock)rw_exit_write(&perf->metrics_lock); |
4318 | |
4319 | return id; |
4320 | |
4321 | sysfs_err: |
4322 | mutex_unlock(&perf->metrics_lock)rw_exit_write(&perf->metrics_lock); |
4323 | reg_err: |
4324 | i915_oa_config_put(oa_config); |
4325 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to add new OA config\n" ) |
4326 | "Failed to add new OA config\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to add new OA config\n" ); |
4327 | return err; |
4328 | #endif |
4329 | } |
4330 | |
4331 | /** |
4332 | * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config |
4333 | * @dev: drm device |
4334 | * @data: ioctl data (pointer to u64 integer) copied from userspace |
4335 | * @file: drm file |
4336 | * |
4337 | * Configs can be removed while being used, the will stop appearing in sysfs |
4338 | * and their content will be freed when the stream using the config is closed. |
4339 | * |
4340 | * Returns: 0 on success or a negative error code on failure. |
4341 | */ |
4342 | int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, |
4343 | struct drm_file *file) |
4344 | { |
4345 | struct i915_perf *perf = &to_i915(dev)->perf; |
4346 | u64 *arg = data; |
4347 | struct i915_oa_config *oa_config; |
4348 | int ret; |
4349 | |
4350 | if (!perf->i915) |
4351 | return -ENOTSUPP91; |
4352 | |
4353 | if (i915_perf_stream_paranoid && !perfmon_capable()) { |
4354 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Insufficient privileges to remove i915 OA config\n" ) |
4355 | "Insufficient privileges to remove i915 OA config\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Insufficient privileges to remove i915 OA config\n" ); |
4356 | return -EACCES13; |
4357 | } |
4358 | |
4359 | ret = mutex_lock_interruptible(&perf->metrics_lock); |
4360 | if (ret) |
4361 | return ret; |
4362 | |
4363 | oa_config = idr_find(&perf->metrics_idr, *arg); |
4364 | if (!oa_config) { |
4365 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to remove unknown OA config\n" ) |
4366 | "Failed to remove unknown OA config\n")__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Failed to remove unknown OA config\n" ); |
4367 | ret = -ENOENT2; |
4368 | goto err_unlock; |
4369 | } |
4370 | |
4371 | GEM_BUG_ON(*arg != oa_config->id)((void)0); |
4372 | |
4373 | sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric); |
4374 | |
4375 | idr_remove(&perf->metrics_idr, *arg); |
4376 | |
4377 | mutex_unlock(&perf->metrics_lock)rw_exit_write(&perf->metrics_lock); |
4378 | |
4379 | drm_dbg(&perf->i915->drm,__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Removed config %s id=%i\n" , oa_config->uuid, oa_config->id) |
4380 | "Removed config %s id=%i\n", oa_config->uuid, oa_config->id)__drm_dev_dbg(((void *)0), (&perf->i915->drm) ? (& perf->i915->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Removed config %s id=%i\n" , oa_config->uuid, oa_config->id); |
4381 | |
4382 | i915_oa_config_put(oa_config); |
4383 | |
4384 | return 0; |
4385 | |
4386 | err_unlock: |
4387 | mutex_unlock(&perf->metrics_lock)rw_exit_write(&perf->metrics_lock); |
4388 | return ret; |
4389 | } |
4390 | |
4391 | #ifdef notyet |
4392 | static struct ctl_table oa_table[] = { |
4393 | { |
4394 | .procname = "perf_stream_paranoid", |
4395 | .data = &i915_perf_stream_paranoid, |
4396 | .maxlen = sizeof(i915_perf_stream_paranoid), |
4397 | .mode = 0644, |
4398 | .proc_handler = proc_dointvec_minmax, |
4399 | .extra1 = SYSCTL_ZERO, |
4400 | .extra2 = SYSCTL_ONE, |
4401 | }, |
4402 | { |
4403 | .procname = "oa_max_sample_rate", |
4404 | .data = &i915_oa_max_sample_rate, |
4405 | .maxlen = sizeof(i915_oa_max_sample_rate), |
4406 | .mode = 0644, |
4407 | .proc_handler = proc_dointvec_minmax, |
4408 | .extra1 = SYSCTL_ZERO, |
4409 | .extra2 = &oa_sample_rate_hard_limit, |
4410 | }, |
4411 | {} |
4412 | }; |
4413 | #endif |
4414 | |
4415 | static void oa_init_supported_formats(struct i915_perf *perf) |
4416 | { |
4417 | struct drm_i915_privateinteldrm_softc *i915 = perf->i915; |
4418 | enum intel_platform platform = INTEL_INFO(i915)(&(i915)->__info)->platform; |
4419 | |
4420 | switch (platform) { |
4421 | case INTEL_HASWELL: |
4422 | oa_format_add(perf, I915_OA_FORMAT_A13); |
4423 | oa_format_add(perf, I915_OA_FORMAT_A13); |
4424 | oa_format_add(perf, I915_OA_FORMAT_A29); |
4425 | oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8); |
4426 | oa_format_add(perf, I915_OA_FORMAT_B4_C8); |
4427 | oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8); |
4428 | oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16); |
4429 | oa_format_add(perf, I915_OA_FORMAT_C4_B8); |
4430 | break; |
4431 | |
4432 | case INTEL_BROADWELL: |
4433 | case INTEL_CHERRYVIEW: |
4434 | case INTEL_SKYLAKE: |
4435 | case INTEL_BROXTON: |
4436 | case INTEL_KABYLAKE: |
4437 | case INTEL_GEMINILAKE: |
4438 | case INTEL_COFFEELAKE: |
4439 | case INTEL_COMETLAKE: |
4440 | case INTEL_ICELAKE: |
4441 | case INTEL_ELKHARTLAKE: |
4442 | case INTEL_JASPERLAKE: |
4443 | case INTEL_TIGERLAKE: |
4444 | case INTEL_ROCKETLAKE: |
4445 | case INTEL_DG1: |
4446 | case INTEL_ALDERLAKE_S: |
4447 | case INTEL_ALDERLAKE_P: |
4448 | oa_format_add(perf, I915_OA_FORMAT_A12); |
4449 | oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8); |
4450 | oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8); |
4451 | oa_format_add(perf, I915_OA_FORMAT_C4_B8); |
4452 | break; |
4453 | |
4454 | default: |
4455 | MISSING_CASE(platform)({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n" , "platform", (long)(platform)); __builtin_expect(!!(__ret), 0 ); }); |
4456 | } |
4457 | } |
4458 | |
4459 | /** |
4460 | * i915_perf_init - initialize i915-perf state on module bind |
4461 | * @i915: i915 device instance |
4462 | * |
4463 | * Initializes i915-perf state without exposing anything to userspace. |
4464 | * |
4465 | * Note: i915-perf initialization is split into an 'init' and 'register' |
4466 | * phase with the i915_perf_register() exposing state to userspace. |
4467 | */ |
4468 | void i915_perf_init(struct drm_i915_privateinteldrm_softc *i915) |
4469 | { |
4470 | struct i915_perf *perf = &i915->perf; |
4471 | |
4472 | /* XXX const struct i915_perf_ops! */ |
4473 | |
4474 | /* i915_perf is not enabled for DG2 yet */ |
4475 | if (IS_DG2(i915)IS_PLATFORM(i915, INTEL_DG2)) |
4476 | return; |
4477 | |
4478 | perf->oa_formats = oa_formats; |
4479 | if (IS_HASWELL(i915)IS_PLATFORM(i915, INTEL_HASWELL)) { |
4480 | perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr; |
4481 | perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr; |
4482 | perf->ops.is_valid_flex_reg = NULL((void *)0); |
4483 | perf->ops.enable_metric_set = hsw_enable_metric_set; |
4484 | perf->ops.disable_metric_set = hsw_disable_metric_set; |
4485 | perf->ops.oa_enable = gen7_oa_enable; |
4486 | perf->ops.oa_disable = gen7_oa_disable; |
4487 | perf->ops.read = gen7_oa_read; |
4488 | perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read; |
4489 | } else if (HAS_LOGICAL_RING_CONTEXTS(i915)((&(i915)->__info)->has_logical_ring_contexts)) { |
4490 | /* Note: that although we could theoretically also support the |
4491 | * legacy ringbuffer mode on BDW (and earlier iterations of |
4492 | * this driver, before upstreaming did this) it didn't seem |
4493 | * worth the complexity to maintain now that BDW+ enable |
4494 | * execlist mode by default. |
4495 | */ |
4496 | perf->ops.read = gen8_oa_read; |
4497 | |
4498 | if (IS_GRAPHICS_VER(i915, 8, 9)(((&(i915)->__runtime)->graphics.ip.ver) >= (8) && ((&(i915)->__runtime)->graphics.ip.ver) <= (9))) { |
4499 | perf->ops.is_valid_b_counter_reg = |
4500 | gen7_is_valid_b_counter_addr; |
4501 | perf->ops.is_valid_mux_reg = |
4502 | gen8_is_valid_mux_addr; |
4503 | perf->ops.is_valid_flex_reg = |
4504 | gen8_is_valid_flex_addr; |
4505 | |
4506 | if (IS_CHERRYVIEW(i915)IS_PLATFORM(i915, INTEL_CHERRYVIEW)) { |
4507 | perf->ops.is_valid_mux_reg = |
4508 | chv_is_valid_mux_addr; |
4509 | } |
4510 | |
4511 | perf->ops.oa_enable = gen8_oa_enable; |
4512 | perf->ops.oa_disable = gen8_oa_disable; |
4513 | perf->ops.enable_metric_set = gen8_enable_metric_set; |
4514 | perf->ops.disable_metric_set = gen8_disable_metric_set; |
4515 | perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read; |
4516 | |
4517 | if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 8) { |
4518 | perf->ctx_oactxctrl_offset = 0x120; |
4519 | perf->ctx_flexeu0_offset = 0x2ce; |
4520 | |
4521 | perf->gen8_valid_ctx_bit = BIT(25)(1UL << (25)); |
4522 | } else { |
4523 | perf->ctx_oactxctrl_offset = 0x128; |
4524 | perf->ctx_flexeu0_offset = 0x3de; |
4525 | |
4526 | perf->gen8_valid_ctx_bit = BIT(16)(1UL << (16)); |
4527 | } |
4528 | } else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 11) { |
4529 | perf->ops.is_valid_b_counter_reg = |
4530 | gen7_is_valid_b_counter_addr; |
4531 | perf->ops.is_valid_mux_reg = |
4532 | gen11_is_valid_mux_addr; |
4533 | perf->ops.is_valid_flex_reg = |
4534 | gen8_is_valid_flex_addr; |
4535 | |
4536 | perf->ops.oa_enable = gen8_oa_enable; |
4537 | perf->ops.oa_disable = gen8_oa_disable; |
4538 | perf->ops.enable_metric_set = gen8_enable_metric_set; |
4539 | perf->ops.disable_metric_set = gen11_disable_metric_set; |
4540 | perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read; |
4541 | |
4542 | perf->ctx_oactxctrl_offset = 0x124; |
4543 | perf->ctx_flexeu0_offset = 0x78e; |
4544 | |
4545 | perf->gen8_valid_ctx_bit = BIT(16)(1UL << (16)); |
4546 | } else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 12) { |
4547 | perf->ops.is_valid_b_counter_reg = |
4548 | gen12_is_valid_b_counter_addr; |
4549 | perf->ops.is_valid_mux_reg = |
4550 | gen12_is_valid_mux_addr; |
4551 | perf->ops.is_valid_flex_reg = |
4552 | gen8_is_valid_flex_addr; |
4553 | |
4554 | perf->ops.oa_enable = gen12_oa_enable; |
4555 | perf->ops.oa_disable = gen12_oa_disable; |
4556 | perf->ops.enable_metric_set = gen12_enable_metric_set; |
4557 | perf->ops.disable_metric_set = gen12_disable_metric_set; |
4558 | perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read; |
4559 | |
4560 | perf->ctx_flexeu0_offset = 0; |
4561 | perf->ctx_oactxctrl_offset = 0x144; |
4562 | } |
4563 | } |
4564 | |
4565 | if (perf->ops.enable_metric_set) { |
4566 | rw_init(&perf->lock, "perflk")_rw_init_flags(&perf->lock, "perflk", 0, ((void *)0)); |
4567 | |
4568 | /* Choose a representative limit */ |
4569 | oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2; |
4570 | |
4571 | rw_init(&perf->metrics_lock, "metricslk")_rw_init_flags(&perf->metrics_lock, "metricslk", 0, (( void *)0)); |
4572 | idr_init_base(&perf->metrics_idr, 1); |
4573 | |
4574 | /* We set up some ratelimit state to potentially throttle any |
4575 | * _NOTES about spurious, invalid OA reports which we don't |
4576 | * forward to userspace. |
4577 | * |
4578 | * We print a _NOTE about any throttling when closing the |
4579 | * stream instead of waiting until driver _fini which no one |
4580 | * would ever see. |
4581 | * |
4582 | * Using the same limiting factors as printk_ratelimit() |
4583 | */ |
4584 | ratelimit_state_init(&perf->spurious_report_rs, 5 * HZhz, 10); |
4585 | /* Since we use a DRM_NOTE for spurious reports it would be |
4586 | * inconsistent to let __ratelimit() automatically print a |
4587 | * warning for throttling. |
4588 | */ |
4589 | ratelimit_set_flags(&perf->spurious_report_rs, |
4590 | RATELIMIT_MSG_ON_RELEASE(1 << 0)); |
4591 | |
4592 | ratelimit_state_init(&perf->tail_pointer_race, |
4593 | 5 * HZhz, 10); |
4594 | ratelimit_set_flags(&perf->tail_pointer_race, |
4595 | RATELIMIT_MSG_ON_RELEASE(1 << 0)); |
4596 | |
4597 | atomic64_set(&perf->noa_programming_delay,({ typeof(*(&perf->noa_programming_delay)) __tmp = ((500 * 1000)); *(volatile typeof(*(&perf->noa_programming_delay )) *)&(*(&perf->noa_programming_delay)) = __tmp; __tmp ; }) |
4598 | 500 * 1000 /* 500us */)({ typeof(*(&perf->noa_programming_delay)) __tmp = ((500 * 1000)); *(volatile typeof(*(&perf->noa_programming_delay )) *)&(*(&perf->noa_programming_delay)) = __tmp; __tmp ; }); |
4599 | |
4600 | perf->i915 = i915; |
4601 | |
4602 | oa_init_supported_formats(perf); |
4603 | } |
4604 | } |
4605 | |
4606 | static int destroy_config(int id, void *p, void *data) |
4607 | { |
4608 | i915_oa_config_put(p); |
4609 | return 0; |
4610 | } |
4611 | |
4612 | int i915_perf_sysctl_register(void) |
4613 | { |
4614 | #ifdef notyet |
4615 | sysctl_header = register_sysctl("dev/i915", oa_table); |
4616 | #endif |
4617 | return 0; |
4618 | } |
4619 | |
4620 | void i915_perf_sysctl_unregister(void) |
4621 | { |
4622 | #ifdef notyet |
4623 | unregister_sysctl_table(sysctl_header); |
4624 | #endif |
4625 | } |
4626 | |
4627 | /** |
4628 | * i915_perf_fini - Counter part to i915_perf_init() |
4629 | * @i915: i915 device instance |
4630 | */ |
4631 | void i915_perf_fini(struct drm_i915_privateinteldrm_softc *i915) |
4632 | { |
4633 | struct i915_perf *perf = &i915->perf; |
4634 | |
4635 | if (!perf->i915) |
4636 | return; |
4637 | |
4638 | idr_for_each(&perf->metrics_idr, destroy_config, perf); |
4639 | idr_destroy(&perf->metrics_idr); |
4640 | |
4641 | memset(&perf->ops, 0, sizeof(perf->ops))__builtin_memset((&perf->ops), (0), (sizeof(perf->ops ))); |
4642 | perf->i915 = NULL((void *)0); |
4643 | } |
4644 | |
4645 | /** |
4646 | * i915_perf_ioctl_version - Version of the i915-perf subsystem |
4647 | * |
4648 | * This version number is used by userspace to detect available features. |
4649 | */ |
4650 | int i915_perf_ioctl_version(void) |
4651 | { |
4652 | /* |
4653 | * 1: Initial version |
4654 | * I915_PERF_IOCTL_ENABLE |
4655 | * I915_PERF_IOCTL_DISABLE |
4656 | * |
4657 | * 2: Added runtime modification of OA config. |
4658 | * I915_PERF_IOCTL_CONFIG |
4659 | * |
4660 | * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold |
4661 | * preemption on a particular context so that performance data is |
4662 | * accessible from a delta of MI_RPC reports without looking at the |
4663 | * OA buffer. |
4664 | * |
4665 | * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can |
4666 | * be run for the duration of the performance recording based on |
4667 | * their SSEU configuration. |
4668 | * |
4669 | * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the |
4670 | * interval for the hrtimer used to check for OA data. |
4671 | */ |
4672 | return 5; |
4673 | } |
4674 | |
4675 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)0 |
4676 | #include "selftests/i915_perf.c" |
4677 | #endif |