Bug Summary

File: dev/pci/drm/i915/intel_uncore.h
Warning: line 308, column 1
Passed-by-value struct argument contains uninitialized data (e.g., field: 'reg')
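
For orientation, the short sketch below reproduces the class of defect this
report describes: a small register-handle struct is passed by value while its
only field may still be uninitialized on some execution path. The names
(example_reg_t, read_reg, demo) are hypothetical and are not taken from the
i915 sources; the struct merely mirrors the single-field shape of i915_reg_t.

#include <stdint.h>

/* Stand-in for an i915_reg_t-like wrapper: one 32-bit register offset. */
typedef struct {
	uint32_t reg;
} example_reg_t;

/* The struct is passed by value, so every field must be initialized. */
static uint32_t read_reg(example_reg_t r)
{
	return r.reg;
}

uint32_t demo(int pick_b)
{
	example_reg_t r;            /* 'reg' not written yet */

	if (pick_b)
		r.reg = 0x61140;    /* only this branch initializes it */

	/* On the !pick_b path 'r.reg' is still uninitialized at this call,
	 * which is the situation the warning above points at. */
	return read_reg(r);
}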

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name intel_dvo.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/i915/display/intel_dvo.c

/usr/src/sys/dev/pci/drm/i915/display/intel_dvo.c

1/*
2 * Copyright 2006 Dave Airlie <airlied@linux.ie>
3 * Copyright © 2006-2007 Intel Corporation
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Eric Anholt <eric@anholt.net>
26 */
27
28#include <linux/i2c.h>
29#include <linux/slab.h>
30
31#include <drm/drm_atomic_helper.h>
32#include <drm/drm_crtc.h>
33
34#include "i915_drv.h"
35#include "intel_connector.h"
36#include "intel_display_types.h"
37#include "intel_dvo.h"
38#include "intel_dvo_dev.h"
39#include "intel_gmbus.h"
40#include "intel_panel.h"
41
42#define INTEL_DVO_CHIP_NONE0 0
43#define INTEL_DVO_CHIP_LVDS1 1
44#define INTEL_DVO_CHIP_TMDS2 2
45#define INTEL_DVO_CHIP_TVOUT4 4
46#define INTEL_DVO_CHIP_LVDS_NO_FIXED5 5
47
48#define SIL164_ADDR0x38 0x38
49#define CH7xxx_ADDR0x76 0x76
50#define TFP410_ADDR0x38 0x38
51#define NS2501_ADDR0x38 0x38
52
53static const struct intel_dvo_device intel_dvo_devices[] = {
54 {
55 .type = INTEL_DVO_CHIP_TMDS2,
56 .name = "sil164",
57 .dvo_reg = DVOC((const i915_reg_t){ .reg = (0x61160) }),
58 .dvo_srcdim_reg = DVOC_SRCDIM((const i915_reg_t){ .reg = (0x61164) }),
59 .slave_addr = SIL164_ADDR0x38,
60 .dev_ops = &sil164_ops,
61 },
62 {
63 .type = INTEL_DVO_CHIP_TMDS2,
64 .name = "ch7xxx",
65 .dvo_reg = DVOC((const i915_reg_t){ .reg = (0x61160) }),
66 .dvo_srcdim_reg = DVOC_SRCDIM((const i915_reg_t){ .reg = (0x61164) }),
67 .slave_addr = CH7xxx_ADDR0x76,
68 .dev_ops = &ch7xxx_ops,
69 },
70 {
71 .type = INTEL_DVO_CHIP_TMDS2,
72 .name = "ch7xxx",
73 .dvo_reg = DVOC((const i915_reg_t){ .reg = (0x61160) }),
74 .dvo_srcdim_reg = DVOC_SRCDIM((const i915_reg_t){ .reg = (0x61164) }),
75 .slave_addr = 0x75, /* For some ch7010 */
76 .dev_ops = &ch7xxx_ops,
77 },
78 {
79 .type = INTEL_DVO_CHIP_LVDS1,
80 .name = "ivch",
81 .dvo_reg = DVOA((const i915_reg_t){ .reg = (0x61120) }),
82 .dvo_srcdim_reg = DVOA_SRCDIM((const i915_reg_t){ .reg = (0x61124) }),
83 .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
84 .dev_ops = &ivch_ops,
85 },
86 {
87 .type = INTEL_DVO_CHIP_TMDS2,
88 .name = "tfp410",
89 .dvo_reg = DVOC((const i915_reg_t){ .reg = (0x61160) }),
90 .dvo_srcdim_reg = DVOC_SRCDIM((const i915_reg_t){ .reg = (0x61164) }),
91 .slave_addr = TFP410_ADDR0x38,
92 .dev_ops = &tfp410_ops,
93 },
94 {
95 .type = INTEL_DVO_CHIP_LVDS1,
96 .name = "ch7017",
97 .dvo_reg = DVOC((const i915_reg_t){ .reg = (0x61160) }),
98 .dvo_srcdim_reg = DVOC_SRCDIM((const i915_reg_t){ .reg = (0x61164) }),
99 .slave_addr = 0x75,
100 .gpio = GMBUS_PIN_DPB5,
101 .dev_ops = &ch7017_ops,
102 },
103 {
104 .type = INTEL_DVO_CHIP_LVDS_NO_FIXED5,
105 .name = "ns2501",
106 .dvo_reg = DVOB((const i915_reg_t){ .reg = (0x61140) }),
107 .dvo_srcdim_reg = DVOB_SRCDIM((const i915_reg_t){ .reg = (0x61144) }),
108 .slave_addr = NS2501_ADDR0x38,
109 .dev_ops = &ns2501_ops,
110 },
111};
112
113struct intel_dvo {
114 struct intel_encoder base;
115
116 struct intel_dvo_device dev;
117
118 struct intel_connector *attached_connector;
119
120 bool_Bool panel_wants_dither;
121};
122
123static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
124{
125 return container_of(encoder, struct intel_dvo, base)({ const __typeof( ((struct intel_dvo *)0)->base ) *__mptr
= (encoder); (struct intel_dvo *)( (char *)__mptr - __builtin_offsetof
(struct intel_dvo, base) );})
;
126}
127
128static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector)
129{
130 return enc_to_dvo(intel_attached_encoder(connector));
131}
132
133static bool_Bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
134{
135 struct drm_device *dev = connector->base.dev;
136 struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(dev);
137 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
138 u32 tmp;
139
140 tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
141
142 if (!(tmp & DVO_ENABLE(1 << 31)))
143 return false0;
144
145 return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
146}
147
148static bool_Bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
149 enum pipe *pipe)
150{
151 struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(encoder->base.dev);
152 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
153 u32 tmp;
154
155 tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
156
157 *pipe = (tmp & DVO_PIPE_SEL_MASK(1 << 30)) >> DVO_PIPE_SEL_SHIFT30;
158
159 return tmp & DVO_ENABLE(1 << 31);
160}
161
162static void intel_dvo_get_config(struct intel_encoder *encoder,
163 struct intel_crtc_state *pipe_config)
164{
165 struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(encoder->base.dev);
166 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
167 u32 tmp, flags = 0;
168
169 pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO)(1UL << (INTEL_OUTPUT_DVO));
170
171 tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
172 if (tmp & DVO_HSYNC_ACTIVE_HIGH(1 << 3))
173 flags |= DRM_MODE_FLAG_PHSYNC(1<<0);
174 else
175 flags |= DRM_MODE_FLAG_NHSYNC(1<<1);
176 if (tmp & DVO_VSYNC_ACTIVE_HIGH(1 << 4))
177 flags |= DRM_MODE_FLAG_PVSYNC(1<<2);
178 else
179 flags |= DRM_MODE_FLAG_NVSYNC(1<<3);
180
181 pipe_config->hw.adjusted_mode.flags |= flags;
182
183 pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
184}
185
186static void intel_disable_dvo(struct intel_atomic_state *state,
187 struct intel_encoder *encoder,
188 const struct intel_crtc_state *old_crtc_state,
189 const struct drm_connector_state *old_conn_state)
190{
191 struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(encoder->base.dev);
192 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
193 i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
194 u32 temp = intel_de_read(dev_priv, dvo_reg);
195
196 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false0);
197 intel_de_write(dev_priv, dvo_reg, temp & ~DVO_ENABLE(1 << 31));
198 intel_de_read(dev_priv, dvo_reg);
199}
200
201static void intel_enable_dvo(struct intel_atomic_state *state,
202 struct intel_encoder *encoder,
203 const struct intel_crtc_state *pipe_config,
204 const struct drm_connector_state *conn_state)
205{
206 struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(encoder->base.dev);
207 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
208 i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
209 u32 temp = intel_de_read(dev_priv, dvo_reg);
210
211 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
212 &pipe_config->hw.mode,
213 &pipe_config->hw.adjusted_mode);
214
215 intel_de_write(dev_priv, dvo_reg, temp | DVO_ENABLE(1 << 31));
216 intel_de_read(dev_priv, dvo_reg);
217
218 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true1);
219}
220
221static enum drm_mode_status
222intel_dvo_mode_valid(struct drm_connector *connector,
223 struct drm_display_mode *mode)
224{
225 struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector)({ const __typeof( ((struct intel_connector *)0)->base ) *
__mptr = (connector); (struct intel_connector *)( (char *)__mptr
- __builtin_offsetof(struct intel_connector, base) );})
);
226 const struct drm_display_mode *fixed_mode =
227 to_intel_connector(connector)({ const __typeof( ((struct intel_connector *)0)->base ) *
__mptr = (connector); (struct intel_connector *)( (char *)__mptr
- __builtin_offsetof(struct intel_connector, base) );})
->panel.fixed_mode;
228 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
229 int target_clock = mode->clock;
230
231 if (mode->flags & DRM_MODE_FLAG_DBLSCAN(1<<5))
232 return MODE_NO_DBLESCAN;
233
234 /* XXX: Validate clock range */
235
236 if (fixed_mode) {
237 if (mode->hdisplay > fixed_mode->hdisplay)
238 return MODE_PANEL;
239 if (mode->vdisplay > fixed_mode->vdisplay)
240 return MODE_PANEL;
241
242 target_clock = fixed_mode->clock;
243 }
244
245 if (target_clock > max_dotclk)
246 return MODE_CLOCK_HIGH;
247
248 return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
249}
250
251static int intel_dvo_compute_config(struct intel_encoder *encoder,
252 struct intel_crtc_state *pipe_config,
253 struct drm_connector_state *conn_state)
254{
255 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
256 const struct drm_display_mode *fixed_mode =
257 intel_dvo->attached_connector->panel.fixed_mode;
258 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
259
260 /*
261 * If we have timings from the BIOS for the panel, put them in
262 * to the adjusted mode. The CRTC will be set up for this mode,
263 * with the panel scaling set up to source from the H/VDisplay
264 * of the original mode.
265 */
266 if (fixed_mode)
267 intel_fixed_panel_mode(fixed_mode, adjusted_mode);
268
269 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN(1<<5))
270 return -EINVAL22;
271
272 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
273
274 return 0;
275}
276
277static void intel_dvo_pre_enable(struct intel_atomic_state *state,
278 struct intel_encoder *encoder,
279 const struct intel_crtc_state *pipe_config,
280 const struct drm_connector_state *conn_state)
281{
282 struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(encoder->base.dev);
283 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc)({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr
= (pipe_config->uapi.crtc); (struct intel_crtc *)( (char *
)__mptr - __builtin_offsetof(struct intel_crtc, base) );})
;
284 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
285 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
286 enum pipe pipe = crtc->pipe;
287 u32 dvo_val;
288 i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
289 i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;
290
291 /* Save the data order, since I don't know what it should be set to. */
292 dvo_val = intel_de_read(dev_priv, dvo_reg) &
293 (DVO_PRESERVE_MASK(0x7 << 24) | DVO_DATA_ORDER_GBRG(1 << 6));
294 dvo_val |= DVO_DATA_ORDER_FP(1 << 14) | DVO_BORDER_ENABLE(1 << 7) |
295 DVO_BLANK_ACTIVE_HIGH(1 << 2);
296
297 dvo_val |= DVO_PIPE_SEL(pipe)((pipe) << 30);
298 dvo_val |= DVO_PIPE_STALL(1 << 28);
299 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC(1<<0))
300 dvo_val |= DVO_HSYNC_ACTIVE_HIGH(1 << 3);
301 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC(1<<2))
302 dvo_val |= DVO_VSYNC_ACTIVE_HIGH(1 << 4);
303
304 /*I915_WRITE(DVOB_SRCDIM,
305 (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
306 (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
307 intel_de_write(dev_priv, dvo_srcdim_reg,
308 (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT12) | (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT0));
309 /*I915_WRITE(DVOB, dvo_val);*/
310 intel_de_write(dev_priv, dvo_reg, dvo_val);
311}
312
313static enum drm_connector_status
314intel_dvo_detect(struct drm_connector *connector, bool_Bool force)
315{
316 struct drm_i915_privateinteldrm_softc *i915 = to_i915(connector->dev);
317 struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector)({ const __typeof( ((struct intel_connector *)0)->base ) *
__mptr = (connector); (struct intel_connector *)( (char *)__mptr
- __builtin_offsetof(struct intel_connector, base) );})
);
318
319 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",__drm_dbg(DRM_UT_KMS, "[CONNECTOR:%d:%s]\n", connector->base
.id, connector->name)
320 connector->base.id, connector->name)__drm_dbg(DRM_UT_KMS, "[CONNECTOR:%d:%s]\n", connector->base
.id, connector->name)
;
321
322 if (!INTEL_DISPLAY_ENABLED(i915)(({ int __ret = !!((!((&(i915)->__info)->pipe_mask !=
0))); if (__ret) printf("%s %s: " "%s", dev_driver_string(((
&(i915)->drm))->dev), "", "drm_WARN_ON(" "!((&(i915)->__info)->pipe_mask != 0)"
")"); __builtin_expect(!!(__ret), 0); }), !(i915)->params
.disable_display)
)
323 return connector_status_disconnected;
324
325 return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
326}
327
328static int intel_dvo_get_modes(struct drm_connector *connector)
329{
330 struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(connector->dev);
331 const struct drm_display_mode *fixed_mode =
332 to_intel_connector(connector)({ const __typeof( ((struct intel_connector *)0)->base ) *
__mptr = (connector); (struct intel_connector *)( (char *)__mptr
- __builtin_offsetof(struct intel_connector, base) );})
->panel.fixed_mode;
333 int num_modes;
334
335 /*
336 * We should probably have an i2c driver get_modes function for those
337 * devices which will have a fixed set of modes determined by the chip
338 * (TV-out, for example), but for now with just TMDS and LVDS,
339 * that's not the case.
340 */
341 num_modes = intel_ddc_get_modes(connector,
342 intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC4));
343 if (num_modes)
344 return num_modes;
345
346 if (fixed_mode) {
347 struct drm_display_mode *mode;
348
349 mode = drm_mode_duplicate(connector->dev, fixed_mode);
350 if (mode) {
351 drm_mode_probed_add(connector, mode);
352 num_modes++;
353 }
354 }
355
356 return num_modes;
357}
358
359static const struct drm_connector_funcs intel_dvo_connector_funcs = {
360 .detect = intel_dvo_detect,
361 .late_register = intel_connector_register,
362 .early_unregister = intel_connector_unregister,
363 .destroy = intel_connector_destroy,
364 .fill_modes = drm_helper_probe_single_connector_modes,
365 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
366 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
367};
368
369static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
370 .mode_valid = intel_dvo_mode_valid,
371 .get_modes = intel_dvo_get_modes,
372};
373
374static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
375{
376 struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder)({ const __typeof( ((struct intel_encoder *)0)->base ) *__mptr
= (encoder); (struct intel_encoder *)( (char *)__mptr - __builtin_offsetof
(struct intel_encoder, base) );})
);
377
378 if (intel_dvo->dev.dev_ops->destroy)
379 intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
380
381 intel_encoder_destroy(encoder);
382}
383
384static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
385 .destroy = intel_dvo_enc_destroy,
386};
387
388/*
389 * Attempts to get a fixed panel timing for LVDS (currently only the i830).
390 *
391 * Other chips with DVO LVDS will need to extend this to deal with the LVDS
392 * chip being on DVOB/C and having multiple pipes.
393 */
394static struct drm_display_mode *
395intel_dvo_get_current_mode(struct intel_encoder *encoder)
396{
397 struct drm_display_mode *mode;
398
399 mode = intel_encoder_current_mode(encoder);
400 if (mode) {
401 DRM_DEBUG_KMS("using current (BIOS) mode: ")__drm_dbg(DRM_UT_KMS, "using current (BIOS) mode: ");
402 drm_mode_debug_printmodeline(mode);
403 mode->type |= DRM_MODE_TYPE_PREFERRED(1<<3);
404 }
405
406 return mode;
407}
408
409static enum port intel_dvo_port(i915_reg_t dvo_reg)
410{
411 if (i915_mmio_reg_equal(dvo_reg, DVOA((const i915_reg_t){ .reg = (0x61120) })))
412 return PORT_A;
413 else if (i915_mmio_reg_equal(dvo_reg, DVOB((const i915_reg_t){ .reg = (0x61140) })))
414 return PORT_B;
415 else
416 return PORT_C;
417}
418
419void intel_dvo_init(struct drm_i915_privateinteldrm_softc *dev_priv)
420{
421 struct intel_encoder *intel_encoder;
422 struct intel_dvo *intel_dvo;
423 struct intel_connector *intel_connector;
424 int i;
425 int encoder_type = DRM_MODE_ENCODER_NONE0;
426
427 intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL(0x0001 | 0x0004));
428 if (!intel_dvo)
1. Assuming 'intel_dvo' is non-null
2. Taking false branch
429 return;
430
431 intel_connector = intel_connector_alloc();
432 if (!intel_connector) {
3. Assuming 'intel_connector' is non-null
4. Taking false branch
433 kfree(intel_dvo);
434 return;
435 }
436
437 intel_dvo->attached_connector = intel_connector;
438
439 intel_encoder = &intel_dvo->base;
440
441 intel_encoder->disable = intel_disable_dvo;
442 intel_encoder->enable = intel_enable_dvo;
443 intel_encoder->get_hw_state = intel_dvo_get_hw_state;
444 intel_encoder->get_config = intel_dvo_get_config;
445 intel_encoder->compute_config = intel_dvo_compute_config;
446 intel_encoder->pre_enable = intel_dvo_pre_enable;
447 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
448
449 /* Now, try to find a controller */
450 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices)(sizeof((intel_dvo_devices)) / sizeof((intel_dvo_devices)[0])
)
; i++) {
5. Loop condition is true. Entering loop body
451 struct drm_connector *connector = &intel_connector->base;
452 const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
453 struct i2c_adapter *i2c;
454 int gpio;
455 bool_Bool dvoinit;
456 enum pipe pipe;
457 u32 dpll[I915_MAX_PIPES];
458 enum port port;
459
460 /*
461 * Allow the I2C driver info to specify the GPIO to be used in
462 * special cases, but otherwise default to what's defined
463 * in the spec.
464 */
465 if (intel_gmbus_is_valid_pin(dev_priv, dvo->gpio))
6. Assuming the condition is true
7. Taking true branch
466 gpio = dvo->gpio;
467 else if (dvo->type == INTEL_DVO_CHIP_LVDS1)
468 gpio = GMBUS_PIN_SSC1;
469 else
470 gpio = GMBUS_PIN_DPB5;
471
472 /*
473 * Set up the I2C bus necessary for the chip we're probing.
474 * It appears that everything is on GPIOE except for panels
475 * on i830 laptops, which are on GPIOB (DVOA).
476 */
477 i2c = intel_gmbus_get_adapter(dev_priv, gpio);
478
479 intel_dvo->dev = *dvo;
480
481 /*
482 * GMBUS NAK handling seems to be unstable, hence let the
483 * transmitter detection run in bit banging mode for now.
484 */
485 intel_gmbus_force_bit(i2c, true1);
486
487 /*
488 * ns2501 requires the DVO 2x clock before it will
489 * respond to i2c accesses, so make sure we
490 * have the clock enabled before we attempt to
491 * initialize the device.
492 */
493 for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!(
(&(dev_priv)->__info)->pipe_mask & (1UL <<
(pipe)))) {} else
{
8. Loop condition is true. Entering loop body
9. Assuming the condition is true
10. Taking true branch
11. Loop condition is true. Entering loop body
12. Assuming the condition is true
13. Taking true branch
14. Loop condition is true. Entering loop body
15. Assuming the condition is true
16. Taking true branch
17. Loop condition is true. Entering loop body
18. Assuming the condition is false
19. Taking false branch
494 dpll[pipe] = intel_de_read(dev_priv, DPLL(pipe)((const i915_reg_t){ .reg = ((((const u32 []){ (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6014), (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6018), (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6030) })[(pipe)]))
})
)
;
20. Calling 'intel_de_read'
495 intel_de_write(dev_priv, DPLL(pipe)((const i915_reg_t){ .reg = ((((const u32 []){ (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6014), (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6018), (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6030) })[(pipe)]))
})
,
496 dpll[pipe] | DPLL_DVO_2X_MODE(1 << 30));
497 }
498
499 dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
500
501 /* restore the DVO 2x clock state to original */
502 for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!(
(&(dev_priv)->__info)->pipe_mask & (1UL <<
(pipe)))) {} else
{
503 intel_de_write(dev_priv, DPLL(pipe)((const i915_reg_t){ .reg = ((((const u32 []){ (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6014), (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6018), (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6030) })[(pipe)]))
})
, dpll[pipe]);
504 }
505
506 intel_gmbus_force_bit(i2c, false0);
507
508 if (!dvoinit)
509 continue;
510
511 port = intel_dvo_port(dvo->dvo_reg);
512 drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
513 &intel_dvo_enc_funcs, encoder_type,
514 "DVO %c", port_name(port)((port) + 'A'));
515
516 intel_encoder->type = INTEL_OUTPUT_DVO;
517 intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
518 intel_encoder->port = port;
519 intel_encoder->pipe_mask = ~0;
520
521 if (dvo->type != INTEL_DVO_CHIP_LVDS1)
522 intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
523 (1 << INTEL_OUTPUT_DVO);
524
525 switch (dvo->type) {
526 case INTEL_DVO_CHIP_TMDS2:
527 intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT(1 << 1) |
528 DRM_CONNECTOR_POLL_DISCONNECT(1 << 2);
529 drm_connector_init(&dev_priv->drm, connector,
530 &intel_dvo_connector_funcs,
531 DRM_MODE_CONNECTOR_DVII2);
532 encoder_type = DRM_MODE_ENCODER_TMDS2;
533 break;
534 case INTEL_DVO_CHIP_LVDS_NO_FIXED5:
535 case INTEL_DVO_CHIP_LVDS1:
536 drm_connector_init(&dev_priv->drm, connector,
537 &intel_dvo_connector_funcs,
538 DRM_MODE_CONNECTOR_LVDS7);
539 encoder_type = DRM_MODE_ENCODER_LVDS3;
540 break;
541 }
542
543 drm_connector_helper_add(connector,
544 &intel_dvo_connector_helper_funcs);
545 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
546 connector->interlace_allowed = false0;
547 connector->doublescan_allowed = false0;
548
549 intel_connector_attach_encoder(intel_connector, intel_encoder);
550 if (dvo->type == INTEL_DVO_CHIP_LVDS1) {
551 /*
552 * For our LVDS chipsets, we should hopefully be able
553 * to dig the fixed panel mode out of the BIOS data.
554 * However, it's in a different format from the BIOS
555 * data on chipsets with integrated LVDS (stored in AIM
556 * headers, likely), so for now, just get the current
557 * mode being output through DVO.
558 */
559 intel_panel_init(&intel_connector->panel,
560 intel_dvo_get_current_mode(intel_encoder),
561 NULL((void *)0));
562 intel_dvo->panel_wants_dither = true1;
563 }
564
565 return;
566 }
567
568 kfree(intel_dvo);
569 kfree(intel_connector);
570}

/usr/src/sys/dev/pci/drm/i915/display/intel_de.h

1/* SPDX-License-Identifier: MIT */
2/*
3 * Copyright © 2019 Intel Corporation
4 */
5
6#ifndef __INTEL_DE_H__
7#define __INTEL_DE_H__
8
9#include "i915_drv.h"
10#include "i915_reg.h"
11#include "intel_uncore.h"
12
13static inline u32
14intel_de_read(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg)
15{
16 return intel_uncore_read(&i915->uncore, reg);
21. Calling 'intel_uncore_read'
17}
18
19static inline void
20intel_de_posting_read(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg)
21{
22 intel_uncore_posting_read(&i915->uncore, reg)((void)intel_uncore_read_notrace(&i915->uncore, reg));
23}
24
25/* Note: read the warnings for intel_uncore_*_fw() functions! */
26static inline u32
27intel_de_read_fw(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg)
28{
29 return intel_uncore_read_fw(&i915->uncore, reg)__raw_uncore_read32(&i915->uncore, reg);
30}
31
32static inline void
33intel_de_write(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg, u32 val)
34{
35 intel_uncore_write(&i915->uncore, reg, val);
36}
37
38/* Note: read the warnings for intel_uncore_*_fw() functions! */
39static inline void
40intel_de_write_fw(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg, u32 val)
41{
42 intel_uncore_write_fw(&i915->uncore, reg, val)__raw_uncore_write32(&i915->uncore, reg, val);
43}
44
45static inline void
46intel_de_rmw(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg, u32 clear, u32 set)
47{
48 intel_uncore_rmw(&i915->uncore, reg, clear, set);
49}
50
51static inline int
52intel_de_wait_for_register(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg,
53 u32 mask, u32 value, unsigned int timeout)
54{
55 return intel_wait_for_register(&i915->uncore, reg, mask, value, timeout);
56}
57
58static inline int
59intel_de_wait_for_set(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg,
60 u32 mask, unsigned int timeout)
61{
62 return intel_de_wait_for_register(i915, reg, mask, mask, timeout);
63}
64
65static inline int
66intel_de_wait_for_clear(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg,
67 u32 mask, unsigned int timeout)
68{
69 return intel_de_wait_for_register(i915, reg, mask, 0, timeout);
70}
71
72#endif /* __INTEL_DE_H__ */

/usr/src/sys/dev/pci/drm/i915/intel_uncore.h

1/*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#ifndef __INTEL_UNCORE_H__
26#define __INTEL_UNCORE_H__
27
28#include <linux/spinlock.h>
29#include <linux/notifier.h>
30#include <linux/hrtimer.h>
31#include <linux/io-64-nonatomic-lo-hi.h>
32
33#include "i915_reg.h"
34
35struct drm_i915_privateinteldrm_softc;
36struct intel_runtime_pm;
37struct intel_uncore;
38struct intel_gt;
39
40struct intel_uncore_mmio_debug {
41 spinlock_t lock; /** lock is also taken in irq contexts. */
42 int unclaimed_mmio_check;
43 int saved_mmio_check;
44 u32 suspend_count;
45};
46
47enum forcewake_domain_id {
48 FW_DOMAIN_ID_RENDER = 0,
49 FW_DOMAIN_ID_BLITTER,
50 FW_DOMAIN_ID_MEDIA,
51 FW_DOMAIN_ID_MEDIA_VDBOX0,
52 FW_DOMAIN_ID_MEDIA_VDBOX1,
53 FW_DOMAIN_ID_MEDIA_VDBOX2,
54 FW_DOMAIN_ID_MEDIA_VDBOX3,
55 FW_DOMAIN_ID_MEDIA_VEBOX0,
56 FW_DOMAIN_ID_MEDIA_VEBOX1,
57
58 FW_DOMAIN_ID_COUNT
59};
60
61enum forcewake_domains {
62 FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER)(1UL << (FW_DOMAIN_ID_RENDER)),
63 FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER)(1UL << (FW_DOMAIN_ID_BLITTER)),
64 FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA)(1UL << (FW_DOMAIN_ID_MEDIA)),
65 FORCEWAKE_MEDIA_VDBOX0 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX0)(1UL << (FW_DOMAIN_ID_MEDIA_VDBOX0)),
66 FORCEWAKE_MEDIA_VDBOX1 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX1)(1UL << (FW_DOMAIN_ID_MEDIA_VDBOX1)),
67 FORCEWAKE_MEDIA_VDBOX2 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX2)(1UL << (FW_DOMAIN_ID_MEDIA_VDBOX2)),
68 FORCEWAKE_MEDIA_VDBOX3 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX3)(1UL << (FW_DOMAIN_ID_MEDIA_VDBOX3)),
69 FORCEWAKE_MEDIA_VEBOX0 = BIT(FW_DOMAIN_ID_MEDIA_VEBOX0)(1UL << (FW_DOMAIN_ID_MEDIA_VEBOX0)),
70 FORCEWAKE_MEDIA_VEBOX1 = BIT(FW_DOMAIN_ID_MEDIA_VEBOX1)(1UL << (FW_DOMAIN_ID_MEDIA_VEBOX1)),
71
72 FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT)(1UL << (FW_DOMAIN_ID_COUNT)) - 1
73};
74
75struct intel_uncore_funcs {
76 void (*force_wake_get)(struct intel_uncore *uncore,
77 enum forcewake_domains domains);
78 void (*force_wake_put)(struct intel_uncore *uncore,
79 enum forcewake_domains domains);
80
81 enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore,
82 i915_reg_t r);
83 enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore,
84 i915_reg_t r);
85
86 u8 (*mmio_readb)(struct intel_uncore *uncore,
87 i915_reg_t r, bool_Bool trace);
88 u16 (*mmio_readw)(struct intel_uncore *uncore,
89 i915_reg_t r, bool_Bool trace);
90 u32 (*mmio_readl)(struct intel_uncore *uncore,
91 i915_reg_t r, bool_Bool trace);
92 u64 (*mmio_readq)(struct intel_uncore *uncore,
93 i915_reg_t r, bool_Bool trace);
94
95 void (*mmio_writeb)(struct intel_uncore *uncore,
96 i915_reg_t r, u8 val, bool_Bool trace);
97 void (*mmio_writew)(struct intel_uncore *uncore,
98 i915_reg_t r, u16 val, bool_Bool trace);
99 void (*mmio_writel)(struct intel_uncore *uncore,
100 i915_reg_t r, u32 val, bool_Bool trace);
101};
102
103struct intel_forcewake_range {
104 u32 start;
105 u32 end;
106
107 enum forcewake_domains domains;
108};
109
110struct intel_uncore {
111 void __iomem *regs;
112
113 struct drm_i915_privateinteldrm_softc *i915;
114 struct intel_runtime_pm *rpm;
115
116 spinlock_t lock; /** lock is also taken in irq contexts. */
117
118 unsigned int flags;
119#define UNCORE_HAS_FORCEWAKE(1UL << (0)) BIT(0)(1UL << (0))
120#define UNCORE_HAS_FPGA_DBG_UNCLAIMED(1UL << (1)) BIT(1)(1UL << (1))
121#define UNCORE_HAS_DBG_UNCLAIMED(1UL << (2)) BIT(2)(1UL << (2))
122#define UNCORE_HAS_FIFO(1UL << (3)) BIT(3)(1UL << (3))
123
124 const struct intel_forcewake_range *fw_domains_table;
125 unsigned int fw_domains_table_entries;
126
127 struct notifier_block pmic_bus_access_nb;
128 struct intel_uncore_funcs funcs;
129
130 unsigned int fifo_count;
131
132 enum forcewake_domains fw_domains;
133 enum forcewake_domains fw_domains_active;
134 enum forcewake_domains fw_domains_timer;
135 enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
136
137 struct intel_uncore_forcewake_domain {
138 struct intel_uncore *uncore;
139 enum forcewake_domain_id id;
140 enum forcewake_domains mask;
141 unsigned int wake_count;
142 bool_Bool active;
143 struct timeout timer;
144 u32 __iomem *reg_set;
145 u32 __iomem *reg_ack;
146 } *fw_domain[FW_DOMAIN_ID_COUNT];
147
148 unsigned int user_forcewake_count;
149
150 struct intel_uncore_mmio_debug *debug;
151};
152
153/* Iterate over initialised fw domains */
154#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__)for (tmp__ = (mask__); tmp__ ;) if (!(domain__ = (uncore__)->
fw_domain[({ int __idx = ffs(tmp__) - 1; tmp__ &= ~(1UL <<
(__idx)); __idx; })])) {} else
\
155 for (tmp__ = (mask__); tmp__ ;) \
156 for_each_if(domain__ = (uncore__)->fw_domain[__mask_next_bit(tmp__)])if (!(domain__ = (uncore__)->fw_domain[({ int __idx = ffs(
tmp__) - 1; tmp__ &= ~(1UL << (__idx)); __idx; })])
) {} else
157
158#define for_each_fw_domain(domain__, uncore__, tmp__)for (tmp__ = ((uncore__)->fw_domains); tmp__ ;) if (!(domain__
= (uncore__)->fw_domain[({ int __idx = ffs(tmp__) - 1; tmp__
&= ~(1UL << (__idx)); __idx; })])) {} else
\
159 for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)for (tmp__ = ((uncore__)->fw_domains); tmp__ ;) if (!(domain__
= (uncore__)->fw_domain[({ int __idx = ffs(tmp__) - 1; tmp__
&= ~(1UL << (__idx)); __idx; })])) {} else
160
161static inline bool_Bool
162intel_uncore_has_forcewake(const struct intel_uncore *uncore)
163{
164 return uncore->flags & UNCORE_HAS_FORCEWAKE(1UL << (0));
165}
166
167static inline bool_Bool
168intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore)
169{
170 return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED(1UL << (1));
171}
172
173static inline bool_Bool
174intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore)
175{
176 return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED(1UL << (2));
177}
178
179static inline bool_Bool
180intel_uncore_has_fifo(const struct intel_uncore *uncore)
181{
182 return uncore->flags & UNCORE_HAS_FIFO(1UL << (3));
183}
184
185void
186intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
187void intel_uncore_init_early(struct intel_uncore *uncore,
188 struct drm_i915_privateinteldrm_softc *i915);
189int intel_uncore_init_mmio(struct intel_uncore *uncore);
190void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
191 struct intel_gt *gt);
192bool_Bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
193bool_Bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
194void intel_uncore_fini_mmio(struct intel_uncore *uncore);
195void intel_uncore_suspend(struct intel_uncore *uncore);
196void intel_uncore_resume_early(struct intel_uncore *uncore);
197void intel_uncore_runtime_resume(struct intel_uncore *uncore);
198
199void assert_forcewakes_inactive(struct intel_uncore *uncore);
200void assert_forcewakes_active(struct intel_uncore *uncore,
201 enum forcewake_domains fw_domains);
202const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
203
204enum forcewake_domains
205intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
206 i915_reg_t reg, unsigned int op);
207#define FW_REG_READ(1) (1)
208#define FW_REG_WRITE(2) (2)
209
210void intel_uncore_forcewake_get(struct intel_uncore *uncore,
211 enum forcewake_domains domains);
212void intel_uncore_forcewake_put(struct intel_uncore *uncore,
213 enum forcewake_domains domains);
214void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
215 enum forcewake_domains fw_domains);
216
217/*
218 * Like above but the caller must manage the uncore.lock itself.
219 * Must be used with I915_READ_FW and friends.
220 */
221void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
222 enum forcewake_domains domains);
223void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
224 enum forcewake_domains domains);
225
226void intel_uncore_forcewake_user_get(struct intel_uncore *uncore);
227void intel_uncore_forcewake_user_put(struct intel_uncore *uncore);
228
229int __intel_wait_for_register(struct intel_uncore *uncore,
230 i915_reg_t reg,
231 u32 mask,
232 u32 value,
233 unsigned int fast_timeout_us,
234 unsigned int slow_timeout_ms,
235 u32 *out_value);
236static inline int
237intel_wait_for_register(struct intel_uncore *uncore,
238 i915_reg_t reg,
239 u32 mask,
240 u32 value,
241 unsigned int timeout_ms)
242{
243 return __intel_wait_for_register(uncore, reg, mask, value, 2,
244 timeout_ms, NULL((void *)0));
245}
246
247int __intel_wait_for_register_fw(struct intel_uncore *uncore,
248 i915_reg_t reg,
249 u32 mask,
250 u32 value,
251 unsigned int fast_timeout_us,
252 unsigned int slow_timeout_ms,
253 u32 *out_value);
254static inline int
255intel_wait_for_register_fw(struct intel_uncore *uncore,
256 i915_reg_t reg,
257 u32 mask,
258 u32 value,
259 unsigned int timeout_ms)
260{
261 return __intel_wait_for_register_fw(uncore, reg, mask, value,
262 2, timeout_ms, NULL((void *)0));
263}
264
265/* register access functions */
266#define __raw_read(x__, s__) \
267static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
268 i915_reg_t reg) \
269{ \
270 return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
271}
272
273#define __raw_write(x__, s__) \
274static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
275 i915_reg_t reg, u##x__ val) \
276{ \
277 write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
278}
279__raw_read(8, b)
280__raw_read(16, w)
281__raw_read(32, l)
282__raw_read(64, q)
283
284__raw_write(8, b)
285__raw_write(16, w)
286__raw_write(32, l)
287__raw_write(64, q)
288
289#undef __raw_read
290#undef __raw_write
291
292#define __uncore_read(name__, x__, s__, trace__) \
293static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \
294 i915_reg_t reg) \
295{ \
296 return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \
297}
298
299#define __uncore_write(name__, x__, s__, trace__) \
300static inline void intel_uncore_##name__(struct intel_uncore *uncore, \
301 i915_reg_t reg, u##x__ val) \
302{ \
303 uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \
304}
305
306__uncore_read(read8, 8, b, true1)
307__uncore_read(read16, 16, w, true1)
308__uncore_read(read, 32, l, true1)
22. Passed-by-value struct argument contains uninitialized data (e.g., field: 'reg')
309__uncore_read(read16_notrace, 16, w, false0)
310__uncore_read(read_notrace, 32, l, false0)
311
312__uncore_write(write8, 8, b, true1)
313__uncore_write(write16, 16, w, true1)
314__uncore_write(write, 32, l, true1)
315__uncore_write(write_notrace, 32, l, false0)
316
317/* Be very careful with read/write 64-bit values. On 32-bit machines, they
318 * will be implemented using 2 32-bit writes in an arbitrary order with
319 * an arbitrary delay between them. This can cause the hardware to
320 * act upon the intermediate value, possibly leading to corruption and
321 * machine death. For this reason we do not support I915_WRITE64, or
322 * uncore->funcs.mmio_writeq.
323 *
324 * When reading a 64-bit value as two 32-bit values, the delay may cause
325 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
326 * occasionally a 64-bit register does not actually support a full readq
327 * and must be read using two 32-bit reads.
328 *
329 * You have been warned.
330 */
331__uncore_read(read64, 64, q, true1)
332
333static inline u64
334intel_uncore_read64_2x32(struct intel_uncore *uncore,
335 i915_reg_t lower_reg, i915_reg_t upper_reg)
336{
337 u32 upper, lower, old_upper, loop = 0;
338 upper = intel_uncore_read(uncore, upper_reg);
339 do {
340 old_upper = upper;
341 lower = intel_uncore_read(uncore, lower_reg);
342 upper = intel_uncore_read(uncore, upper_reg);
343 } while (upper != old_upper && loop++ < 2);
344 return (u64)upper << 32 | lower;
345}
346
347#define intel_uncore_posting_read(...)((void)intel_uncore_read_notrace(...)) ((void)intel_uncore_read_notrace(__VA_ARGS__))
348#define intel_uncore_posting_read16(...)((void)intel_uncore_read16_notrace(...)) ((void)intel_uncore_read16_notrace(__VA_ARGS__))
349
350#undef __uncore_read
351#undef __uncore_write
352
353/* These are untraced mmio-accessors that are only valid to be used inside
354 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
355 * controlled.
356 *
357 * Think twice, and think again, before using these.
358 *
359 * As an example, these accessors can possibly be used between:
360 *
361 * spin_lock_irq(&uncore->lock);
362 * intel_uncore_forcewake_get__locked();
363 *
364 * and
365 *
366 * intel_uncore_forcewake_put__locked();
367 * spin_unlock_irq(&uncore->lock);
368 *
369 *
370 * Note: some registers may not need forcewake held, so
371 * intel_uncore_forcewake_{get,put} can be omitted, see
372 * intel_uncore_forcewake_for_reg().
373 *
374 * Certain architectures will die if the same cacheline is concurrently accessed
375 * by different clients (e.g. on Ivybridge). Access to registers should
376 * therefore generally be serialised, by either the dev_priv->uncore.lock or
377 * a more localised lock guarding all access to that bank of registers.
378 */
379#define intel_uncore_read_fw(...)__raw_uncore_read32(...) __raw_uncore_read32(__VA_ARGS__)
380#define intel_uncore_write_fw(...)__raw_uncore_write32(...) __raw_uncore_write32(__VA_ARGS__)
381#define intel_uncore_write64_fw(...)__raw_uncore_write64(...) __raw_uncore_write64(__VA_ARGS__)
382#define intel_uncore_posting_read_fw(...)((void)__raw_uncore_read32(...)) ((void)intel_uncore_read_fw(__VA_ARGS__)__raw_uncore_read32(__VA_ARGS__))
383
384static inline void intel_uncore_rmw(struct intel_uncore *uncore,
385 i915_reg_t reg, u32 clear, u32 set)
386{
387 u32 old, val;
388
389 old = intel_uncore_read(uncore, reg);
390 val = (old & ~clear) | set;
391 if (val != old)
392 intel_uncore_write(uncore, reg, val);
393}
394
395static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
396 i915_reg_t reg, u32 clear, u32 set)
397{
398 u32 old, val;
399
400 old = intel_uncore_read_fw(uncore, reg)__raw_uncore_read32(uncore, reg);
401 val = (old & ~clear) | set;
402 if (val != old)
403 intel_uncore_write_fw(uncore, reg, val)__raw_uncore_write32(uncore, reg, val);
404}
405
406static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
407 i915_reg_t reg, u32 val,
408 u32 mask, u32 expected_val)
409{
410 u32 reg_val;
411
412 intel_uncore_write(uncore, reg, val);
413 reg_val = intel_uncore_read(uncore, reg);
414
415 return (reg_val & mask) != expected_val ? -EINVAL22 : 0;
416}
417
418#define raw_reg_read(base, reg)ioread32(base + i915_mmio_reg_offset(reg)) \
419 readl(base + i915_mmio_reg_offset(reg))ioread32(base + i915_mmio_reg_offset(reg))
420#define raw_reg_write(base, reg, value)iowrite32(value, base + i915_mmio_reg_offset(reg)) \
421 writel(value, base + i915_mmio_reg_offset(reg))iowrite32(value, base + i915_mmio_reg_offset(reg))
422
423#endif /* !__INTEL_UNCORE_H__ */
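
For reference, the instance flagged at line 308 above, __uncore_read(read, 32, l, true),
expands through the __uncore_read() macro defined at lines 292-297 into the
inline function sketched below (reconstructed by hand from that macro, not a
verbatim preprocessor dump). Its by-value i915_reg_t 'reg' parameter is the
struct argument the analyzer reports as carrying uninitialized data once the
path intel_dvo_init() -> intel_de_read() -> intel_uncore_read() reaches this
call.

static inline u32 intel_uncore_read(struct intel_uncore *uncore,
				    i915_reg_t reg)
{
	/* 32-bit ("l") MMIO read with tracing enabled (trace__ == true). */
	return uncore->funcs.mmio_readl(uncore, reg, (true));
}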