Bug Summary

File:     dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c
Warning:  line 7819, column 3
          Access to field 'plane_count' results in a dereference of a null pointer (loaded from variable 'status')

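The flagged line (7819) lies beyond the excerpt reproduced below. For orientation, the defect has the usual "unchecked stream status" shape: dc_stream_get_status() can return NULL when a stream has no status in the current dc_state, and the code then reads status->plane_count. A minimal sketch of the guarded pattern, assuming that idiom (safe_plane_count() is a hypothetical helper, not code from this file):

    static int safe_plane_count(struct dc_stream_state *stream)
    {
        struct dc_stream_status *status = dc_stream_get_status(stream);

        /* dc_stream_get_status() returns NULL when the stream has no
         * status in the current dc_state; without this guard the
         * status->plane_count access below is the null dereference the
         * analyzer reports at line 7819. */
        if (status == NULL)
            return 0;

        return status->plane_count;
    }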
Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name amdgpu_dm.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c
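
The invocation above records how this translation unit was compiled for analysis. Reports of this form come from running the build under clang's scan-build wrapper; assuming scan-build is installed, something like the following regenerates them into the directory given by -o:

    $ cd /usr/src/sys/arch/amd64/compile/GENERIC.MP
    $ scan-build -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build make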

/usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN1)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
    switch (link->dpcd_caps.dongle_type) {
    case DISPLAY_DONGLE_NONE:
        return DRM_MODE_SUBCONNECTOR_Native;
    case DISPLAY_DONGLE_DP_VGA_CONVERTER:
        return DRM_MODE_SUBCONNECTOR_VGA;
    case DISPLAY_DONGLE_DP_DVI_CONVERTER:
    case DISPLAY_DONGLE_DP_DVI_DONGLE:
        return DRM_MODE_SUBCONNECTOR_DVID;
    case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
    case DISPLAY_DONGLE_DP_HDMI_DONGLE:
        return DRM_MODE_SUBCONNECTOR_HDMIA;
    case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
    default:
        return DRM_MODE_SUBCONNECTOR_Unknown;
    }
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
    struct dc_link *link = aconnector->dc_link;
    struct drm_connector *connector = &aconnector->base;
    enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

    if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
        return;

    if (aconnector->dc_sink)
        subconnector = get_subconnector_type(link);

    drm_object_property_set_value(&connector->base,
            connector->dev->mode_config.dp_subconnector_property,
            subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
                                   struct drm_atomic_state *state,
                                   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
    if (crtc >= adev->mode_info.num_crtc)
        return 0;
    else {
        struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

        if (acrtc->dm_irq_params.stream == NULL) {
            DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                      crtc);
            return 0;
        }

        return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
    }
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
    uint32_t v_blank_start, v_blank_end, h_position, v_position;

    if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
        return -EINVAL;
    else {
        struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

        if (acrtc->dm_irq_params.stream == NULL) {
            DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                      crtc);
            return 0;
        }

        /*
         * TODO rework base driver to use values directly.
         * for now parse it back into reg-format
         */
        dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                 &v_blank_start,
                                 &v_blank_end,
                                 &h_position,
                                 &v_position);

        *position = v_position | (h_position << 16);
        *vbl = v_blank_start | (v_blank_end << 16);
    }

    return 0;
}

static bool dm_is_idle(void *handle)
{
    /* XXX todo */
    return true;
}

static int dm_wait_for_idle(void *handle)
{
    /* XXX todo */
    return 0;
}

static bool dm_check_soft_reset(void *handle)
{
    return false;
}

static int dm_soft_reset(void *handle)
{
    /* XXX todo */
    return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
    struct drm_device *dev = adev_to_drm(adev);
    struct drm_crtc *crtc;
    struct amdgpu_crtc *amdgpu_crtc;

    if (otg_inst == -1) {
        WARN_ON(1);
        return adev->mode_info.crtcs[0];
    }

    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
        amdgpu_crtc = to_amdgpu_crtc(crtc);

        if (amdgpu_crtc->otg_inst == otg_inst)
            return amdgpu_crtc;
    }

    return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
    return acrtc->dm_irq_params.freesync_config.state ==
               VRR_STATE_ACTIVE_VARIABLE ||
           acrtc->dm_irq_params.freesync_config.state ==
               VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
    return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
           dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
    struct amdgpu_crtc *amdgpu_crtc;
    struct common_irq_params *irq_params = interrupt_params;
    struct amdgpu_device *adev = irq_params->adev;
    unsigned long flags;
    struct drm_pending_vblank_event *e;
    uint32_t vpos, hpos, v_blank_start, v_blank_end;
    bool vrr_active;

    amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

    /* IRQ could occur when in initial stage */
    /* TODO work and BO cleanup */
    if (amdgpu_crtc == NULL) {
        DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
        return;
    }

    spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

    if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
        DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
                         amdgpu_crtc->pflip_status,
                         AMDGPU_FLIP_SUBMITTED,
                         amdgpu_crtc->crtc_id,
                         amdgpu_crtc);
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
        return;
    }

    /* page flip completed. */
    e = amdgpu_crtc->event;
    amdgpu_crtc->event = NULL;

    if (!e)
        WARN_ON(1);

    vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

    /* Fixed refresh rate, or VRR scanout position outside front-porch? */
    if (!vrr_active ||
        !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                  &v_blank_end, &hpos, &vpos) ||
        (vpos < v_blank_start)) {
        /* Update to correct count and vblank timestamp if racing with
         * vblank irq. This also updates to the correct vblank timestamp
         * even in VRR mode, as scanout is past the front-porch atm.
         */
        drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

        /* Wake up userspace by sending the pageflip event with proper
         * count and timestamp of vblank of flip completion.
         */
        if (e) {
            drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

            /* Event sent, so done with vblank for this flip */
            drm_crtc_vblank_put(&amdgpu_crtc->base);
        }
    } else if (e) {
        /* VRR active and inside front-porch: vblank count and
         * timestamp for pageflip event will only be up to date after
         * drm_crtc_handle_vblank() has been executed from late vblank
         * irq handler after start of back-porch (vline 0). We queue the
         * pageflip event for send-out by drm_crtc_handle_vblank() with
         * updated timestamp and count, once it runs after us.
         *
         * We need to open-code this instead of using the helper
         * drm_crtc_arm_vblank_event(), as that helper would
         * call drm_crtc_accurate_vblank_count(), which we must
         * not call in VRR mode while we are in front-porch!
         */

        /* sequence will be replaced by real count during send-out. */
        e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
        e->pipe = amdgpu_crtc->crtc_id;

        list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
        e = NULL;
    }

    /* Keep track of vblank of this flip for flip throttling. We use the
     * cooked hw counter, as that one incremented at start of this vblank
     * of pageflip completion, so last_flip_vblank is the forbidden count
     * for queueing new pageflips if vsync + VRR is enabled.
     */
    amdgpu_crtc->dm_irq_params.last_flip_vblank =
        amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

    amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
    spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

    DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
    struct common_irq_params *irq_params = interrupt_params;
    struct amdgpu_device *adev = irq_params->adev;
    struct amdgpu_crtc *acrtc;
    unsigned long flags;
    int vrr_active;

    acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

    if (acrtc) {
        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
                      acrtc->crtc_id,
                      vrr_active);

        /* Core vblank handling is done here after end of front-porch in
         * vrr mode, as vblank timestamping will give valid results
         * while now done after front-porch. This will also deliver
         * page-flip completion events that have been queued to us
         * if a pageflip happened inside front-porch.
         */
        if (vrr_active) {
            drm_crtc_handle_vblank(&acrtc->base);

            /* BTR processing for pre-DCE12 ASICs */
            if (acrtc->dm_irq_params.stream &&
                adev->family < AMDGPU_FAMILY_AI) {
                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                mod_freesync_handle_v_update(
                    adev->dm.freesync_module,
                    acrtc->dm_irq_params.stream,
                    &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(
                    adev->dm.dc,
                    acrtc->dm_irq_params.stream,
                    &acrtc->dm_irq_params.vrr_params.adjust);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
            }
        }
    }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
    struct common_irq_params *irq_params = interrupt_params;
    struct amdgpu_device *adev = irq_params->adev;
    struct amdgpu_crtc *acrtc;
    unsigned long flags;
    int vrr_active;

    acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
    if (!acrtc)
        return;

    vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

    DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                  vrr_active, acrtc->dm_irq_params.active_planes);

    /**
     * Core vblank handling at start of front-porch is only possible
     * in non-vrr mode, as only there vblank timestamping will give
     * valid results while done in front-porch. Otherwise defer it
     * to dm_vupdate_high_irq after end of front-porch.
     */
    if (!vrr_active)
        drm_crtc_handle_vblank(&acrtc->base);

    /**
     * Following stuff must happen at start of vblank, for crc
     * computation and below-the-range btr support in vrr mode.
     */
    amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

    /* BTR updates need to happen before VUPDATE on Vega and above. */
    if (adev->family < AMDGPU_FAMILY_AI)
        return;

    spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

    if (acrtc->dm_irq_params.stream &&
        acrtc->dm_irq_params.vrr_params.supported &&
        acrtc->dm_irq_params.freesync_config.state ==
            VRR_STATE_ACTIVE_VARIABLE) {
        mod_freesync_handle_v_update(adev->dm.freesync_module,
                                     acrtc->dm_irq_params.stream,
                                     &acrtc->dm_irq_params.vrr_params);

        dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                   &acrtc->dm_irq_params.vrr_params.adjust);
    }

    /*
     * If there aren't any active_planes then DCH HUBP may be clock-gated.
     * In that case, pageflip completion interrupts won't fire and pageflip
     * completion events won't get delivered. Prevent this by sending
     * pending pageflip events from here if a flip is still pending.
     *
     * If any planes are enabled, use dm_pflip_high_irq() instead, to
     * avoid race conditions between flip programming and completion,
     * which could cause too early flip completion events.
     */
    if (adev->family >= AMDGPU_FAMILY_RV &&
        acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
        acrtc->dm_irq_params.active_planes == 0) {
        if (acrtc->event) {
            drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
            acrtc->event = NULL;
            drm_crtc_vblank_put(&acrtc->base);
        }
        acrtc->pflip_status = AMDGPU_FLIP_NONE;
    }

    spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
                                    enum amd_clockgating_state state)
{
    return 0;
}

static int dm_set_powergating_state(void *handle,
                                    enum amd_powergating_state state)
{
    return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
    struct drm_device *dev = connector->dev;
    struct amdgpu_device *adev = drm_to_adev(dev);
    struct dm_compressor_info *compressor = &adev->dm.compressor;
    struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
    struct drm_display_mode *mode;
    unsigned long max_size = 0;

    if (adev->dm.dc->fbc_compressor == NULL)
        return;

    if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
        return;

    if (compressor->bo_ptr)
        return;


    list_for_each_entry(mode, &connector->modes, head) {
        if (max_size < mode->htotal * mode->vtotal)
            max_size = mode->htotal * mode->vtotal;
    }

    if (max_size) {
        int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                    &compressor->gpu_addr, &compressor->cpu_addr);

        if (r)
            DRM_ERROR("DM: Failed to initialize FBC\n");
        else {
            adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
            DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
        }

    }

}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
    struct drm_device *dev = dev_get_drvdata(kdev);
    struct amdgpu_device *adev = drm_to_adev(dev);
    struct drm_connector *connector;
    struct drm_connector_list_iter conn_iter;
    struct amdgpu_dm_connector *aconnector;
    int ret = 0;

    *enabled = false;

    mutex_lock(&adev->dm.audio_lock);

    drm_connector_list_iter_begin(dev, &conn_iter);
    drm_for_each_connector_iter(connector, &conn_iter) {
        aconnector = to_amdgpu_dm_connector(connector);
        if (aconnector->audio_inst != port)
            continue;

        *enabled = true;
        ret = drm_eld_size(connector->eld);
        memcpy(buf, connector->eld, min(max_bytes, ret));

        break;
    }
    drm_connector_list_iter_end(&conn_iter);

    mutex_unlock(&adev->dm.audio_lock);

    DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

    return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
    .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
    struct drm_device *dev = dev_get_drvdata(kdev);
    struct amdgpu_device *adev = drm_to_adev(dev);
    struct drm_audio_component *acomp = data;

    acomp->ops = &amdgpu_dm_audio_component_ops;
    acomp->dev = kdev;
    adev->dm.audio_component = acomp;

    return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
    struct drm_device *dev = dev_get_drvdata(kdev);
    struct amdgpu_device *adev = drm_to_adev(dev);
    struct drm_audio_component *acomp = data;

    acomp->ops = NULL;
    acomp->dev = NULL;
    adev->dm.audio_component = NULL;
}

#ifdef notyet
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
    .bind   = amdgpu_dm_audio_component_bind,
    .unbind = amdgpu_dm_audio_component_unbind,
};
#endif

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
    int i, ret;

    if (!amdgpu_audio)
        return 0;

    adev->mode_info.audio.enabled = true;

    adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

    for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
        adev->mode_info.audio.pin[i].channels = -1;
        adev->mode_info.audio.pin[i].rate = -1;
        adev->mode_info.audio.pin[i].bits_per_sample = -1;
        adev->mode_info.audio.pin[i].status_bits = 0;
        adev->mode_info.audio.pin[i].category_code = 0;
        adev->mode_info.audio.pin[i].connected = false;
        adev->mode_info.audio.pin[i].id =
            adev->dm.dc->res_pool->audios[i]->inst;
        adev->mode_info.audio.pin[i].offset = 0;
    }

    ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
    if (ret < 0)
        return ret;

    adev->dm.audio_registered = true;

    return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
    if (!amdgpu_audio)
        return;

    if (!adev->mode_info.audio.enabled)
        return;

    if (adev->dm.audio_registered) {
        component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        adev->dm.audio_registered = false;
    }

    /* TODO: Disable audio? */

    adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
    struct drm_audio_component *acomp = adev->dm.audio_component;

    if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
        DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

        acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                         pin, -1);
    }
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
    const struct dmcub_firmware_header_v1_0 *hdr;
    struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
    struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
    const struct firmware *dmub_fw = adev->dm.dmub_fw;
    struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
    struct abm *abm = adev->dm.dc->res_pool->abm;
    struct dmub_srv_hw_params hw_params;
    enum dmub_status status;
    const unsigned char *fw_inst_const, *fw_bss_data;
    uint32_t i, fw_inst_const_size, fw_bss_data_size;
    bool has_hw_support;

    if (!dmub_srv)
        /* DMUB isn't supported on the ASIC. */
        return 0;

    if (!fb_info) {
        DRM_ERROR("No framebuffer info for DMUB service.\n");
        return -EINVAL;
    }

    if (!dmub_fw) {
        /* Firmware required for DMUB support. */
        DRM_ERROR("No firmware provided for DMUB.\n");
        return -EINVAL;
    }

    status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
    if (status != DMUB_STATUS_OK) {
        DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
        return -EINVAL;
    }

    if (!has_hw_support) {
        DRM_INFO("DMUB unsupported on ASIC\n");
        return 0;
    }

    hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

    fw_inst_const = dmub_fw->data +
                    le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                    PSP_HEADER_BYTES;

    fw_bss_data = dmub_fw->data +
                  le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                  le32_to_cpu(hdr->inst_const_bytes);

    /* Copy firmware and bios info into FB memory. */
    fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

    fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

    /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
     * amdgpu_ucode_init_single_fw will load dmub firmware
     * fw_inst_const part to cw0; otherwise, the firmware back door load
     * will be done by dm_dmub_hw_init
     */
    if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
        memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
               fw_inst_const_size);
    }

    if (fw_bss_data_size)
        memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
               fw_bss_data, fw_bss_data_size);

    /* Copy firmware bios info into FB memory. */
    memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
           adev->bios_size);

    /* Reset regions that need to be reset. */
    memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
           fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

    memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
           fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

    memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
           fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

    /* Initialize hardware. */
    memset(&hw_params, 0, sizeof(hw_params));
    hw_params.fb_base = adev->gmc.fb_start;
    hw_params.fb_offset = adev->gmc.aper_base;

    /* backdoor load firmware and trigger dmub running */
    if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
        hw_params.load_inst_const = true;

    if (dmcu)
        hw_params.psp_version = dmcu->psp_version;

    for (i = 0; i < fb_info->num_fb; ++i)
        hw_params.fb[i] = &fb_info->fb[i];

    status = dmub_srv_hw_init(dmub_srv, &hw_params);
    if (status != DMUB_STATUS_OK) {
        DRM_ERROR("Error initializing DMUB HW: %d\n", status);
        return -EINVAL;
    }

    /* Wait for firmware load to finish. */
    status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
    if (status != DMUB_STATUS_OK)
        DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

    /* Init DMCU and ABM if available. */
    if (dmcu && abm) {
        dmcu->funcs->dmcu_init(dmcu);
        abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
    }

    if (!adev->dm.dc->ctx->dmub_srv)
        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
    if (!adev->dm.dc->ctx->dmub_srv) {
        DRM_ERROR("Couldn't allocate DC DMUB server!\n");
        return -ENOMEM;
    }

    DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
             adev->dm.dmcub_fw_version);

    return 0;
}

static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
                                                           struct drm_atomic_state *state)
{
    struct drm_connector *connector;
    struct drm_crtc *crtc;
    struct amdgpu_dm_connector *amdgpu_dm_connector;
    struct drm_connector_state *conn_state;
    struct dm_crtc_state *acrtc_state;
    struct drm_crtc_state *crtc_state;
    struct dc_stream_state *stream;
    struct drm_device *dev = adev_to_drm(adev);

    list_for_each_entry(connector, &dev->mode_config.connector_list, head) {

        amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
        conn_state = connector->state;

        if (!(conn_state && conn_state->crtc))
            continue;

        crtc = conn_state->crtc;
        acrtc_state = to_dm_crtc_state(crtc->state);

        if (!(acrtc_state && acrtc_state->stream))
            continue;

        stream = acrtc_state->stream;

        if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
            amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
            amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
            amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
            conn_state = drm_atomic_get_connector_state(state, connector);
            crtc_state = drm_atomic_get_crtc_state(state, crtc);
            crtc_state->mode_changed = true;
        }
    }
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
    struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
    struct dc_callback_init init_params;
#endif
    int r;

    adev->dm.ddev = adev_to_drm(adev);
    adev->dm.adev = adev;

    /* Zero all the fields */
    memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
    memset(&init_params, 0, sizeof(init_params));
#endif

    rw_init(&adev->dm.dc_lock, "dmdc");
    rw_init(&adev->dm.audio_lock, "dmaud");

    if (amdgpu_dm_irq_init(adev)) {
        DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
        goto error;
    }

    init_data.asic_id.chip_family = adev->family;

    init_data.asic_id.pci_revision_id = adev->pdev->revision;
    init_data.asic_id.hw_internal_rev = adev->external_rev_id;
    init_data.asic_id.chip_id = adev->pdev->device;

    init_data.asic_id.vram_width = adev->gmc.vram_width;
    /* TODO: initialize init_data.asic_id.vram_type here!!!! */
    init_data.asic_id.atombios_base_address =
        adev->mode_info.atom_context->bios;

    init_data.driver = adev;

    adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

    if (!adev->dm.cgs_device) {
        DRM_ERROR("amdgpu: failed to create cgs device.\n");
        goto error;
    }

    init_data.cgs_device = adev->dm.cgs_device;

    init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

    switch (adev->asic_type) {
    case CHIP_CARRIZO:
    case CHIP_STONEY:
    case CHIP_RAVEN:
    case CHIP_RENOIR:
        init_data.flags.gpu_vm_support = true;
        if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
            init_data.flags.disable_dmcu = true;
        break;
    default:
        break;
    }

    if (amdgpu_dc_feature_mask & DC_FBC_MASK)
        init_data.flags.fbc_support = true;

    if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
        init_data.flags.multi_mon_pp_mclk_switch = true;

    if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
        init_data.flags.disable_fractional_pwm = true;

    init_data.flags.power_down_display_on_boot = true;

    init_data.soc_bounding_box = adev->dm.soc_bounding_box;

    /* Display Core create. */
    adev->dm.dc = dc_create(&init_data);

    if (adev->dm.dc) {
        DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
    } else {
        DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
        goto error;
    }

    if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
        adev->dm.dc->debug.force_single_disp_pipe_split = false;
        adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
    }

    if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
        adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

    if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
        adev->dm.dc->debug.disable_stutter = true;

    if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
        adev->dm.dc->debug.disable_dsc = true;

    if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
        adev->dm.dc->debug.disable_clock_gate = true;

    r = dm_dmub_hw_init(adev);
    if (r) {
        DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
        goto error;
    }

    dc_hardware_init(adev->dm.dc);

    adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
    if (!adev->dm.freesync_module) {
        DRM_ERROR(
        "amdgpu: failed to initialize freesync_module.\n");
    } else
        DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                adev->dm.freesync_module);

    amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
    if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
        adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

        if (!adev->dm.hdcp_workqueue)
            DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
        else
            DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

        dc_init_callbacks(adev->dm.dc, &init_params);
    }
#endif
    if (amdgpu_dm_initialize_drm_device(adev)) {
        DRM_ERROR(
        "amdgpu: failed to initialize sw for display support.\n");
        goto error;
    }

    /* create fake encoders for MST */
    dm_dp_create_fake_mst_encoders(adev);

    /* TODO: Add_display_info? */

    /* TODO use dynamic cursor width */
    adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
    adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

    if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
        DRM_ERROR(
        "amdgpu: failed to initialize sw for display support.\n");
        goto error;
    }

    DRM_DEBUG_DRIVER("KMS initialized.\n");

    return 0;
error:
    amdgpu_dm_fini(adev);

    return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
    int i;

    for (i = 0; i < adev->dm.display_indexes_num; i++) {
        drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
    }

    amdgpu_dm_audio_fini(adev);

    amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
    if (adev->dm.hdcp_workqueue) {
        hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
        adev->dm.hdcp_workqueue = NULL;
    }

    if (adev->dm.dc)
        dc_deinit_callbacks(adev->dm.dc);
#endif
    if (adev->dm.dc->ctx->dmub_srv) {
        dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
        adev->dm.dc->ctx->dmub_srv = NULL;
    }

    if (adev->dm.dmub_bo)
        amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                              &adev->dm.dmub_bo_gpu_addr,
                              &adev->dm.dmub_bo_cpu_addr);

    /* DC Destroy TODO: Replace destroy DAL */
    if (adev->dm.dc)
        dc_destroy(&adev->dm.dc);
    /*
     * TODO: pageflip, vlank interrupt
     *
     * amdgpu_dm_irq_fini(adev);
     */

    if (adev->dm.cgs_device) {
        amdgpu_cgs_destroy_device(adev->dm.cgs_device);
        adev->dm.cgs_device = NULL;
    }
    if (adev->dm.freesync_module) {
        mod_freesync_destroy(adev->dm.freesync_module);
        adev->dm.freesync_module = NULL;
    }

    mutex_destroy(&adev->dm.audio_lock);
    mutex_destroy(&adev->dm.dc_lock);

    return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
    const char *fw_name_dmcu = NULL;
    int r;
    const struct dmcu_firmware_header_v1_0 *hdr;

    switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
    case CHIP_TAHITI:
    case CHIP_PITCAIRN:
    case CHIP_VERDE:
    case CHIP_OLAND:
#endif
    case CHIP_BONAIRE:
    case CHIP_HAWAII:
    case CHIP_KAVERI:
    case CHIP_KABINI:
    case CHIP_MULLINS:
    case CHIP_TONGA:
    case CHIP_FIJI:
    case CHIP_CARRIZO:
    case CHIP_STONEY:
    case CHIP_POLARIS11:
    case CHIP_POLARIS10:
    case CHIP_POLARIS12:
    case CHIP_VEGAM:
    case CHIP_VEGA10:
    case CHIP_VEGA12:
    case CHIP_VEGA20:
    case CHIP_NAVI10:
    case CHIP_NAVI14:
    case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
    case CHIP_SIENNA_CICHLID:
    case CHIP_NAVY_FLOUNDER:
#endif
        return 0;
    case CHIP_NAVI12:
        fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
        break;
    case CHIP_RAVEN:
        if (ASICREV_IS_PICASSO(adev->external_rev_id))
            fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
        else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
            fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
        else
            return 0;
        break;
    default:
        DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
        return -EINVAL;
    }

    if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
        DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
        return 0;
    }

    r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
    if (r == -ENOENT) {
        /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
        DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
        adev->dm.fw_dmcu = NULL;
        return 0;
    }
    if (r) {
        dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
            fw_name_dmcu);
        return r;
    }

    r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
    if (r) {
        dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
            fw_name_dmcu);
        release_firmware(adev->dm.fw_dmcu);
        adev->dm.fw_dmcu = NULL;
        return r;
    }

    hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
    adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
    adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
    adev->firmware.fw_size +=
        roundup2(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

    adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
    adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
    adev->firmware.fw_size +=
        roundup2(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

    adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

    DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

    return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
    struct amdgpu_device *adev = ctx;

    return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
                                     uint32_t value)
{
    struct amdgpu_device *adev = ctx;

    return dm_write_reg(adev->dm.dc->ctx, address, value);
}

1257static int dm_dmub_sw_init(struct amdgpu_device *adev)
1258{
1259 struct dmub_srv_create_params create_params;
1260 struct dmub_srv_region_params region_params;
1261 struct dmub_srv_region_info region_info;
1262 struct dmub_srv_fb_params fb_params;
1263 struct dmub_srv_fb_info *fb_info;
1264 struct dmub_srv *dmub_srv;
1265 const struct dmcub_firmware_header_v1_0 *hdr;
1266 const char *fw_name_dmub;
1267 enum dmub_asic dmub_asic;
1268 enum dmub_status status;
1269 int r;
1270
1271 switch (adev->asic_type) {
1272 case CHIP_RENOIR:
1273 dmub_asic = DMUB_ASIC_DCN21;
1274 fw_name_dmub = FIRMWARE_RENOIR_DMUB"amdgpu/renoir_dmcub.bin";
1275 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)((adev->external_rev_id >= 0xA1) && (adev->external_rev_id
< 0xFF))
)
1276 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB"amdgpu/green_sardine_dmcub.bin";
1277 break;
1278#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
1279 case CHIP_SIENNA_CICHLID:
1280 dmub_asic = DMUB_ASIC_DCN30;
1281 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB"amdgpu/sienna_cichlid_dmcub.bin";
1282 break;
1283 case CHIP_NAVY_FLOUNDER:
1284 dmub_asic = DMUB_ASIC_DCN30;
1285 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB"amdgpu/navy_flounder_dmcub.bin";
1286 break;
1287#endif
1288
1289 default:
1290 /* ASIC doesn't support DMUB. */
1291 return 0;
1292 }
1293
1294 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1295 if (r) {
1296 DRM_ERROR("DMUB firmware loading failed: %d\n", r)__drm_err("DMUB firmware loading failed: %d\n", r);
1297 return 0;
1298 }
1299
1300 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1301 if (r) {
1302 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r)__drm_err("Couldn't validate DMUB firmware: %d\n", r);
1303 return 0;
1304 }
1305
1306 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1307 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version)((__uint32_t)(hdr->header.ucode_version));
1308
1309 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1310 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1311 AMDGPU_UCODE_ID_DMCUB;
1312 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1313 adev->dm.dmub_fw;
1314 adev->firmware.fw_size +=
1315 roundup2(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1316
1317 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",printk("\0016" "[" "drm" "] " "Loading DMUB firmware via PSP: version=0x%08X\n"
, adev->dm.dmcub_fw_version)
1318 adev->dm.dmcub_fw_version)printk("\0016" "[" "drm" "] " "Loading DMUB firmware via PSP: version=0x%08X\n"
, adev->dm.dmcub_fw_version)
;
1319 }
1320
1321
1322 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL(0x0001 | 0x0004));
1323 dmub_srv = adev->dm.dmub_srv;
1324
1325 if (!dmub_srv) {
1326 DRM_ERROR("Failed to allocate DMUB service!\n")__drm_err("Failed to allocate DMUB service!\n");
1327 return -ENOMEM12;
1328 }
1329
1330 memset(&create_params, 0, sizeof(create_params));
1331 create_params.user_ctx = adev;
1332 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1333 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1334 create_params.asic = dmub_asic;
1335
1336 /* Create the DMUB service. */
1337 status = dmub_srv_create(dmub_srv, &create_params);
1338 if (status != DMUB_STATUS_OK) {
1339 DRM_ERROR("Error creating DMUB service: %d\n", status)__drm_err("Error creating DMUB service: %d\n", status);
1340 return -EINVAL22;
1341 }
1342
1343 /* Calculate the size of all the regions for the DMUB service. */
1344 memset(&region_params, 0, sizeof(region_params));
1345
1346 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes)((__uint32_t)(hdr->inst_const_bytes)) -
1347 PSP_HEADER_BYTES0x100 - PSP_FOOTER_BYTES0x100;
1348 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes)((__uint32_t)(hdr->bss_data_bytes));
1349 region_params.vbios_size = adev->bios_size;
1350 region_params.fw_bss_data = region_params.bss_data_size ?
1351 adev->dm.dmub_fw->data +
1352 le32_to_cpu(hdr->header.ucode_array_offset_bytes)((__uint32_t)(hdr->header.ucode_array_offset_bytes)) +
1353 le32_to_cpu(hdr->inst_const_bytes)((__uint32_t)(hdr->inst_const_bytes)) : NULL((void *)0);
1354 region_params.fw_inst_const =
1355 adev->dm.dmub_fw->data +
1356 le32_to_cpu(hdr->header.ucode_array_offset_bytes)((__uint32_t)(hdr->header.ucode_array_offset_bytes)) +
1357 PSP_HEADER_BYTES0x100;
1358
1359 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1360 &region_info);
1361
1362 if (status != DMUB_STATUS_OK) {
1363 DRM_ERROR("Error calculating DMUB region info: %d\n", status)__drm_err("Error calculating DMUB region info: %d\n", status);
1364 return -EINVAL22;
1365 }
1366
1367 /*
1368 * Allocate a framebuffer based on the total size of all the regions.
1369 * TODO: Move this into GART.
1370 */
1371 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE(1 << 12),
1372 AMDGPU_GEM_DOMAIN_VRAM0x4, &adev->dm.dmub_bo,
1373 &adev->dm.dmub_bo_gpu_addr,
1374 &adev->dm.dmub_bo_cpu_addr);
1375 if (r)
1376 return r;
1377
1378 /* Rebase the regions on the framebuffer address. */
1379 memset(&fb_params, 0, sizeof(fb_params))__builtin_memset((&fb_params), (0), (sizeof(fb_params)));
1380 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1381 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1382 fb_params.region_info = &region_info;
1383
1384 adev->dm.dmub_fb_info =
1385 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL(0x0001 | 0x0004));
1386 fb_info = adev->dm.dmub_fb_info;
1387
1388 if (!fb_info) {
1389 DRM_ERROR(
1390 "Failed to allocate framebuffer info for DMUB service!\n");
1391 return -ENOMEM12;
1392 }
1393
1394 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1395 if (status != DMUB_STATUS_OK) {
1396 DRM_ERROR("Error calculating DMUB FB info: %d\n", status)__drm_err("Error calculating DMUB FB info: %d\n", status);
1397 return -EINVAL22;
1398 }
1399
1400 return 0;
1401}
1402
1403static int dm_sw_init(void *handle)
1404{
1405 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1406 int r;
1407
1408 r = dm_dmub_sw_init(adev);
1409 if (r)
1410 return r;
1411
1412 return load_dmcu_fw(adev);
1413}
1414
1415static int dm_sw_fini(void *handle)
1416{
1417 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1418
1419 kfree(adev->dm.dmub_fb_info);
1420 adev->dm.dmub_fb_info = NULL((void *)0);
1421
1422 if (adev->dm.dmub_srv) {
1423 dmub_srv_destroy(adev->dm.dmub_srv);
1424 adev->dm.dmub_srv = NULL((void *)0);
1425 }
1426
1427 release_firmware(adev->dm.dmub_fw);
1428 adev->dm.dmub_fw = NULL((void *)0);
1429
1430 release_firmware(adev->dm.fw_dmcu);
1431 adev->dm.fw_dmcu = NULL((void *)0);
1432
1433 return 0;
1434}
1435
1436static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1437{
1438 struct amdgpu_dm_connector *aconnector;
1439 struct drm_connector *connector;
1440 struct drm_connector_list_iter iter;
1441 int ret = 0;
1442
1443 drm_connector_list_iter_begin(dev, &iter);
1444 drm_for_each_connector_iter(connector, &iter)while ((connector = drm_connector_list_iter_next(&iter))) {
1445 aconnector = to_amdgpu_dm_connector(connector);
1446 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1447 aconnector->mst_mgr.aux) {
1448 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",__drm_dbg(DRM_UT_DRIVER, "DM_MST: starting TM on aconnector: %p [id: %d]\n"
, aconnector, aconnector->base.base.id)
1449 aconnector,__drm_dbg(DRM_UT_DRIVER, "DM_MST: starting TM on aconnector: %p [id: %d]\n"
, aconnector, aconnector->base.base.id)
1450 aconnector->base.base.id)__drm_dbg(DRM_UT_DRIVER, "DM_MST: starting TM on aconnector: %p [id: %d]\n"
, aconnector, aconnector->base.base.id)
;
1451
1452 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true1);
1453 if (ret < 0) {
1454 DRM_ERROR("DM_MST: Failed to start MST\n")__drm_err("DM_MST: Failed to start MST\n");
1455 aconnector->dc_link->type =
1456 dc_connection_single;
1457 break;
1458 }
1459 }
1460 }
1461 drm_connector_list_iter_end(&iter);
1462
1463 return ret;
1464}
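
detect_mst_link_for_all_connectors() uses the standard DRM connector-list iterator, where the begin/end calls must always be paired. A minimal, hedged sketch of the idiom with a hypothetical visitor function (illustrative, not driver source):

/* Sketch of the drm_connector_list_iter idiom used above. */
static void visit_all_connectors(struct drm_device *dev)
{
	struct drm_connector_list_iter iter;
	struct drm_connector *connector;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		/* inspect or act on each connector here */
	}
	drm_connector_list_iter_end(&iter);	/* always pair with _begin */
}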
1465
1466static int dm_late_init(void *handle)
1467{
1468 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1469
1470 struct dmcu_iram_parameters params;
1471 unsigned int linear_lut[16];
1472 int i;
1473 struct dmcu *dmcu = NULL((void *)0);
1474 bool_Bool ret = true1;
1475
1476 dmcu = adev->dm.dc->res_pool->dmcu;
1477
1478 for (i = 0; i < 16; i++)
1479 linear_lut[i] = 0xFFFF * i / 15;
1480
1481 params.set = 0;
1482 params.backlight_ramping_start = 0xCCCC;
1483 params.backlight_ramping_reduction = 0xCCCCCCCC;
1484 params.backlight_lut_array_size = 16;
1485 params.backlight_lut_array = linear_lut;
1486
1487 /* Min backlight level after ABM reduction; don't allow below 1%:
1488 * 0xFFFF * 0.01 = 0x28F
1489 */
1490 params.min_abm_backlight = 0x28F;
1491
1492 /* In the case where ABM is implemented on dmcub, the
1493 * dmcu object will be NULL.
1494 * ABM 2.4 and up are implemented on dmcub.
1495 */
1496 if (dmcu)
1497 ret = dmcu_load_iram(dmcu, params);
1498 else if (adev->dm.dc->ctx->dmub_srv)
1499 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1500
1501 if (!ret)
1502 return -EINVAL22;
1503
1504 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1505}
1506
1507static void s3_handle_mst(struct drm_device *dev, bool_Bool suspend)
1508{
1509 struct amdgpu_dm_connector *aconnector;
1510 struct drm_connector *connector;
1511 struct drm_connector_list_iter iter;
1512 struct drm_dp_mst_topology_mgr *mgr;
1513 int ret;
1514 bool_Bool need_hotplug = false0;
1515
1516 drm_connector_list_iter_begin(dev, &iter);
1517 drm_for_each_connector_iter(connector, &iter)while ((connector = drm_connector_list_iter_next(&iter))) {
1518 aconnector = to_amdgpu_dm_connector(connector);
1519 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1520 aconnector->mst_port)
1521 continue;
1522
1523 mgr = &aconnector->mst_mgr;
1524
1525 if (suspend) {
1526 drm_dp_mst_topology_mgr_suspend(mgr);
1527 } else {
1528 ret = drm_dp_mst_topology_mgr_resume(mgr, true1);
1529 if (ret < 0) {
1530 drm_dp_mst_topology_mgr_set_mst(mgr, false0);
1531 need_hotplug = true1;
1532 }
1533 }
1534 }
1535 drm_connector_list_iter_end(&iter);
1536
1537 if (need_hotplug)
1538 drm_kms_helper_hotplug_event(dev);
1539}
1540
1541static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1542{
1543 struct smu_context *smu = &adev->smu;
1544 int ret = 0;
1545
1546 if (!is_support_sw_smu(adev))
1547 return 0;
1548
1549 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1550 * on the Windows driver dc implementation.
1551 * For Navi1x, clock settings of dcn watermarks are fixed; the settings
1552 * should be passed to smu during boot up and resume from s3.
1553 * boot up: dc calculates dcn watermark clock settings within dc_create,
1554 * dcn20_resource_construct
1555 * then calls the pplib functions below to pass the settings to smu:
1556 * smu_set_watermarks_for_clock_ranges
1557 * smu_set_watermarks_table
1558 * navi10_set_watermarks_table
1559 * smu_write_watermarks_table
1560 *
1561 * For Renoir, clock settings of dcn watermarks are also fixed values.
1562 * dc has implemented a different flow for the Windows driver:
1563 * dc_hardware_init / dc_set_power_state
1564 * dcn10_init_hw
1565 * notify_wm_ranges
1566 * set_wm_ranges
1567 * -- Linux
1568 * smu_set_watermarks_for_clock_ranges
1569 * renoir_set_watermarks_table
1570 * smu_write_watermarks_table
1571 *
1572 * For Linux,
1573 * dc_hardware_init -> amdgpu_dm_init
1574 * dc_set_power_state --> dm_resume
1575 *
1576 * Therefore, this function applies to navi10/12/14 but not to Renoir.
1577 *
1578 */
1579 switch(adev->asic_type) {
1580 case CHIP_NAVI10:
1581 case CHIP_NAVI14:
1582 case CHIP_NAVI12:
1583 break;
1584 default:
1585 return 0;
1586 }
1587
1588 ret = smu_write_watermarks_table(smu);
1589 if (ret) {
1590 DRM_ERROR("Failed to update WMTABLE!\n")__drm_err("Failed to update WMTABLE!\n");
1591 return ret;
1592 }
1593
1594 return 0;
1595}
1596
1597/**
1598 * dm_hw_init() - Initialize DC device
1599 * @handle: The base driver device containing the amdgpu_dm device.
1600 *
1601 * Initialize the &struct amdgpu_display_manager device. This involves calling
1602 * the initializers of each DM component, then populating the struct with them.
1603 *
1604 * Although the function implies hardware initialization, both hardware and
1605 * software are initialized here. Splitting them out to their relevant init
1606 * hooks is a future TODO item.
1607 *
1608 * Some notable things that are initialized here:
1609 *
1610 * - Display Core, both software and hardware
1611 * - DC modules that we need (freesync and color management)
1612 * - DRM software states
1613 * - Interrupt sources and handlers
1614 * - Vblank support
1615 * - Debug FS entries, if enabled
1616 */
1617static int dm_hw_init(void *handle)
1618{
1619 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1620 /* Create DAL display manager */
1621 amdgpu_dm_init(adev);
1622 amdgpu_dm_hpd_init(adev);
1623
1624 return 0;
1625}
1626
1627/**
1628 * dm_hw_fini() - Teardown DC device
1629 * @handle: The base driver device containing the amdgpu_dm device.
1630 *
1631 * Teardown components within &struct amdgpu_display_manager that require
1632 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1633 * were loaded. Also flush IRQ workqueues and disable them.
1634 */
1635static int dm_hw_fini(void *handle)
1636{
1637 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1638
1639 amdgpu_dm_hpd_fini(adev);
1640
1641 amdgpu_dm_irq_fini(adev);
1642 amdgpu_dm_fini(adev);
1643 return 0;
1644}
1645
1646
1647static int dm_enable_vblank(struct drm_crtc *crtc);
1648static void dm_disable_vblank(struct drm_crtc *crtc);
1649
1650static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1651 struct dc_state *state, bool_Bool enable)
1652{
1653 enum dc_irq_source irq_source;
1654 struct amdgpu_crtc *acrtc;
1655 int rc = -EBUSY16;
1656 int i = 0;
1657
1658 for (i = 0; i < state->stream_count; i++) {
1659 acrtc = get_crtc_by_otg_inst(
1660 adev, state->stream_status[i].primary_otg_inst);
1661
1662 if (acrtc && state->stream_status[i].plane_count != 0) {
1663 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1664 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY16;
1665 DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",__drm_dbg(DRM_UT_CORE, "crtc %d - vupdate irq %sabling: r=%d\n"
, acrtc->crtc_id, enable ? "en" : "dis", rc)
1666 acrtc->crtc_id, enable ? "en" : "dis", rc)__drm_dbg(DRM_UT_CORE, "crtc %d - vupdate irq %sabling: r=%d\n"
, acrtc->crtc_id, enable ? "en" : "dis", rc)
;
1667 if (rc)
1668 DRM_WARN("Failed to %s pflip interrupts\n",printk("\0014" "[" "drm" "] " "Failed to %s pflip interrupts\n"
, enable ? "enable" : "disable")
1669 enable ? "enable" : "disable")printk("\0014" "[" "drm" "] " "Failed to %s pflip interrupts\n"
, enable ? "enable" : "disable")
;
1670
1671 if (enable) {
1672 rc = dm_enable_vblank(&acrtc->base);
1673 if (rc)
1674 DRM_WARN("Failed to enable vblank interrupts\n")printk("\0014" "[" "drm" "] " "Failed to enable vblank interrupts\n"
)
;
1675 } else {
1676 dm_disable_vblank(&acrtc->base);
1677 }
1678
1679 }
1680 }
1681
1682}
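
dm_gpureset_toggle_interrupts() derives the page-flip IRQ source by adding the CRTC's OTG instance to the pflip base. A hedged sketch of toggling a single pipe, mirroring the loop body above (illustrative only, not part of the analyzed source):

/* Sketch: toggle one pipe's pflip interrupt, as done per stream above. */
static int toggle_pflip_irq(struct amdgpu_device *adev,
			    struct amdgpu_crtc *acrtc, bool enable)
{
	enum dc_irq_source src = IRQ_TYPE_PFLIP + acrtc->otg_inst;

	/* dc_interrupt_set() returns true on success. */
	return dc_interrupt_set(adev->dm.dc, src, enable) ? 0 : -EBUSY;
}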
1683
1684static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1685{
1686 struct dc_state *context = NULL((void *)0);
1687 enum dc_status res = DC_ERROR_UNEXPECTED;
1688 int i;
1689 struct dc_stream_state *del_streams[MAX_PIPES6];
1690 int del_streams_count = 0;
1691
1692 memset(del_streams, 0, sizeof(del_streams))__builtin_memset((del_streams), (0), (sizeof(del_streams)));
1693
1694 context = dc_create_state(dc);
1695 if (context == NULL((void *)0))
1696 goto context_alloc_fail;
1697
1698 dc_resource_state_copy_construct_current(dc, context);
1699
1700 /* First remove from context all streams */
1701 for (i = 0; i < context->stream_count; i++) {
1702 struct dc_stream_state *stream = context->streams[i];
1703
1704 del_streams[del_streams_count++] = stream;
1705 }
1706
1707 /* Remove all planes for removed streams and then remove the streams */
1708 for (i = 0; i < del_streams_count; i++) {
1709 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1710 res = DC_FAIL_DETACH_SURFACES;
1711 goto fail;
1712 }
1713
1714 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1715 if (res != DC_OK)
1716 goto fail;
1717 }
1718
1719
1720 res = dc_validate_global_state(dc, context, false0);
1721
1722 if (res != DC_OK) {
1723 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res)__drm_err("%s:resource validation failed, dc_status:%d\n", __func__
, res)
;
1724 goto fail;
1725 }
1726
1727 res = dc_commit_state(dc, context);
1728
1729fail:
1730 dc_release_state(context);
1731
1732context_alloc_fail:
1733 return res;
1734}
1735
1736static int dm_suspend(void *handle)
1737{
1738 struct amdgpu_device *adev = handle;
1739 struct amdgpu_display_manager *dm = &adev->dm;
1740 int ret = 0;
1741
1742 if (amdgpu_in_reset(adev)) {
1743 mutex_lock(&dm->dc_lock)rw_enter_write(&dm->dc_lock);
1744 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1745
1746 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false0);
1747
1748 amdgpu_dm_commit_zero_streams(dm->dc);
1749
1750 amdgpu_dm_irq_suspend(adev);
1751
1752 return ret;
1753 }
1754
1755 WARN_ON(adev->dm.cached_state);
1756 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1757
1758 s3_handle_mst(adev_to_drm(adev), true1);
1759
1760 amdgpu_dm_irq_suspend(adev);
1761
1762 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1763
1764 return 0;
1765}
1766
1767static struct amdgpu_dm_connector *
1768amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1769 struct drm_crtc *crtc)
1770{
1771 uint32_t i;
1772 struct drm_connector_state *new_con_state;
1773 struct drm_connector *connector;
1774 struct drm_crtc *crtc_from_state;
1775
1776 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1777 crtc_from_state = new_con_state->crtc;
1778
1779 if (crtc_from_state == crtc)
1780 return to_amdgpu_dm_connector(connector);
1781 }
1782
1783 return NULL((void *)0);
1784}
1785
1786static void emulated_link_detect(struct dc_link *link)
1787{
1788 struct dc_sink_init_data sink_init_data = { 0 };
1789 struct display_sink_capability sink_caps = { 0 };
1790 enum dc_edid_status edid_status;
1791 struct dc_context *dc_ctx = link->ctx;
1792 struct dc_sink *sink = NULL((void *)0);
1793 struct dc_sink *prev_sink = NULL((void *)0);
1794
1795 link->type = dc_connection_none;
1796 prev_sink = link->local_sink;
1797
1798 if (prev_sink)
1799 dc_sink_release(prev_sink);
1800
1801 switch (link->connector_signal) {
1802 case SIGNAL_TYPE_HDMI_TYPE_A: {
1803 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1804 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1805 break;
1806 }
1807
1808 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1809 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1810 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1811 break;
1812 }
1813
1814 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1815 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1816 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1817 break;
1818 }
1819
1820 case SIGNAL_TYPE_LVDS: {
1821 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1822 sink_caps.signal = SIGNAL_TYPE_LVDS;
1823 break;
1824 }
1825
1826 case SIGNAL_TYPE_EDP: {
1827 sink_caps.transaction_type =
1828 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1829 sink_caps.signal = SIGNAL_TYPE_EDP;
1830 break;
1831 }
1832
1833 case SIGNAL_TYPE_DISPLAY_PORT: {
1834 sink_caps.transaction_type =
1835 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1836 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1837 break;
1838 }
1839
1840 default:
1841 DC_ERROR("Invalid connector type! signal:%d\n",do { (void)(dc_ctx); __drm_err("Invalid connector type! signal:%d\n"
, link->connector_signal); } while (0)
1842 link->connector_signal)do { (void)(dc_ctx); __drm_err("Invalid connector type! signal:%d\n"
, link->connector_signal); } while (0)
;
1843 return;
1844 }
1845
1846 sink_init_data.link = link;
1847 sink_init_data.sink_signal = sink_caps.signal;
1848
1849 sink = dc_sink_create(&sink_init_data);
1850 if (!sink) {
1851 DC_ERROR("Failed to create sink!\n")do { (void)(dc_ctx); __drm_err("Failed to create sink!\n"); }
while (0)
;
1852 return;
1853 }
1854
1855 /* dc_sink_create returns a new reference */
1856 link->local_sink = sink;
1857
1858 edid_status = dm_helpers_read_local_edid(
1859 link->ctx,
1860 link,
1861 sink);
1862
1863 if (edid_status != EDID_OK)
1864 DC_ERROR("Failed to read EDID")do { (void)(dc_ctx); __drm_err("Failed to read EDID"); } while
(0)
;
1865
1866}
1867
1868static void dm_gpureset_commit_state(struct dc_state *dc_state,
1869 struct amdgpu_display_manager *dm)
1870{
1871 struct {
1872 struct dc_surface_update surface_updates[MAX_SURFACES3];
1873 struct dc_plane_info plane_infos[MAX_SURFACES3];
1874 struct dc_scaling_info scaling_infos[MAX_SURFACES3];
1875 struct dc_flip_addrs flip_addrs[MAX_SURFACES3];
1876 struct dc_stream_update stream_update;
1877 } * bundle;
1878 int k, m;
1879
1880 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL(0x0001 | 0x0004));
1881
1882 if (!bundle) {
1883 dm_error("Failed to allocate update bundle\n")__drm_err("Failed to allocate update bundle\n");
1884 goto cleanup;
1885 }
1886
1887 for (k = 0; k < dc_state->stream_count; k++) {
1888 bundle->stream_update.stream = dc_state->streams[k];
1889
1890 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1891 bundle->surface_updates[m].surface =
1892 dc_state->stream_status->plane_states[m];
1893 bundle->surface_updates[m].surface->force_full_update =
1894 true1;
1895 }
1896 dc_commit_updates_for_stream(
1897 dm->dc, bundle->surface_updates,
1898 dc_state->stream_status->plane_count,
1899 dc_state->streams[k], &bundle->stream_update, dc_state);
1900 }
1901
1902cleanup:
1903 kfree(bundle);
1904
1905 return;
1906}
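
Note that dm_gpureset_commit_state() reads dc_state->stream_status (always the first element) for every stream k, the same shape of access the analyzer flags as a NULL dereference of 'status->plane_count' elsewhere in this file. A hedged sketch of the guarded pattern, assuming dc_stream_get_status() as the per-stream lookup (illustrative, not the upstream fix):

/* Sketch: the guarded form of a plane_count access on a stream status. */
static void update_planes_guarded(struct dc_stream_state *stream,
				  struct dc_surface_update *updates)
{
	const struct dc_stream_status *status = dc_stream_get_status(stream);
	int m;

	if (status == NULL)	/* no status for this stream: nothing to update */
		return;

	for (m = 0; m < status->plane_count; m++)
		updates[m].surface = status->plane_states[m];
}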
1907
1908static void dm_set_dpms_off(struct dc_link *link)
1909{
1910 struct dc_stream_state *stream_state;
1911 struct amdgpu_dm_connector *aconnector = link->priv;
1912 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1913 struct dc_stream_update stream_update;
1914 bool_Bool dpms_off = true1;
1915
1916 memset(&stream_update, 0, sizeof(stream_update));
1917 stream_update.dpms_off = &dpms_off;
1918
1919 mutex_lock(&adev->dm.dc_lock)rw_enter_write(&adev->dm.dc_lock);
1920 stream_state = dc_stream_find_from_link(link);
1921
1922 if (stream_state == NULL((void *)0)) {
1923 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n")__drm_dbg(DRM_UT_DRIVER, "Error finding stream state associated with link!\n"
)
;
1924 mutex_unlock(&adev->dm.dc_lock)rw_exit_write(&adev->dm.dc_lock);
1925 return;
1926 }
1927
1928 stream_update.stream = stream_state;
1929 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL((void *)0), 0,
1930 stream_state, &stream_update,
1931 stream_state->ctx->dc->current_state);
1932 mutex_unlock(&adev->dm.dc_lock)rw_exit_write(&adev->dm.dc_lock);
1933}
1934
1935static int dm_resume(void *handle)
1936{
1937 struct amdgpu_device *adev = handle;
1938 struct drm_device *ddev = adev_to_drm(adev);
1939 struct amdgpu_display_manager *dm = &adev->dm;
1940 struct amdgpu_dm_connector *aconnector;
1941 struct drm_connector *connector;
1942 struct drm_connector_list_iter iter;
1943 struct drm_crtc *crtc;
1944 struct drm_crtc_state *new_crtc_state;
1945 struct dm_crtc_state *dm_new_crtc_state;
1946 struct drm_plane *plane;
1947 struct drm_plane_state *new_plane_state;
1948 struct dm_plane_state *dm_new_plane_state;
1949 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1950 enum dc_connection_type new_connection_type = dc_connection_none;
1951 struct dc_state *dc_state;
1952 int i, r, j;
1953
1954 if (amdgpu_in_reset(adev)) {
1955 dc_state = dm->cached_dc_state;
1956
1957 r = dm_dmub_hw_init(adev);
1958 if (r)
1959 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r)__drm_err("DMUB interface failed to initialize: status=%d\n",
r)
;
1960
1961 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1962 dc_resume(dm->dc);
1963
1964 amdgpu_dm_irq_resume_early(adev);
1965
1966 for (i = 0; i < dc_state->stream_count; i++) {
1967 dc_state->streams[i]->mode_changed = true1;
1968 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
1969 dc_state->stream_status[i].plane_states[j]->update_flags.raw
1970 = 0xffffffff;
1971 }
1972 }
1973
1974 WARN_ON(!dc_commit_state(dm->dc, dc_state));
1975
1976 dm_gpureset_commit_state(dm->cached_dc_state, dm);
1977
1978 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true1);
1979
1980 dc_release_state(dm->cached_dc_state);
1981 dm->cached_dc_state = NULL((void *)0);
1982
1983 amdgpu_dm_irq_resume_late(adev);
1984
1985 mutex_unlock(&dm->dc_lock)rw_exit_write(&dm->dc_lock);
1986
1987 return 0;
1988 }
1989 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1990 dc_release_state(dm_state->context);
1991 dm_state->context = dc_create_state(dm->dc);
1992 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1993 dc_resource_state_construct(dm->dc, dm_state->context);
1994
1995 /* Before powering on DC we need to re-initialize DMUB. */
1996 r = dm_dmub_hw_init(adev);
1997 if (r)
1998 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r)__drm_err("DMUB interface failed to initialize: status=%d\n",
r)
;
1999
2000 /* power on hardware */
2001 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2002
2003 /* program HPD filter */
2004 dc_resume(dm->dc);
2005
2006 /*
2007 * Enable HPD Rx IRQ early; this should be done before set mode, as
2008 * short-pulse interrupts are used for MST.
2009 */
2010 amdgpu_dm_irq_resume_early(adev);
2011
2012 /* On resume we need to rewrite the MSTM control bits to enable MST */
2013 s3_handle_mst(ddev, false0);
2014
2015 /* Do detection */
2016 drm_connector_list_iter_begin(ddev, &iter);
2017 drm_for_each_connector_iter(connector, &iter)while ((connector = drm_connector_list_iter_next(&iter))) {
2018 aconnector = to_amdgpu_dm_connector(connector);
2019
2020 /*
2021 * This is the case when traversing through already created
2022 * MST connectors; they should be skipped.
2023 */
2024 if (aconnector->mst_port)
2025 continue;
2026
2027 mutex_lock(&aconnector->hpd_lock)rw_enter_write(&aconnector->hpd_lock);
2028 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2029 DRM_ERROR("KMS: Failed to detect connector\n")__drm_err("KMS: Failed to detect connector\n");
2030
2031 if (aconnector->base.force && new_connection_type == dc_connection_none)
2032 emulated_link_detect(aconnector->dc_link);
2033 else
2034 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2035
2036 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2037 aconnector->fake_enable = false0;
2038
2039 if (aconnector->dc_sink)
2040 dc_sink_release(aconnector->dc_sink);
2041 aconnector->dc_sink = NULL((void *)0);
2042 amdgpu_dm_update_connector_after_detect(aconnector);
2043 mutex_unlock(&aconnector->hpd_lock)rw_exit_write(&aconnector->hpd_lock);
2044 }
2045 drm_connector_list_iter_end(&iter);
2046
2047 /* Force mode set in atomic commit */
2048 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2049 new_crtc_state->active_changed = true1;
2050
2051 /*
2052 * atomic_check is expected to create the dc states. We need to release
2053 * them here, since they were duplicated as part of the suspend
2054 * procedure.
2055 */
2056 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2057 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2058 if (dm_new_crtc_state->stream) {
2059 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2060 dc_stream_release(dm_new_crtc_state->stream);
2061 dm_new_crtc_state->stream = NULL((void *)0);
2062 }
2063 }
2064
2065 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2066 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2067 if (dm_new_plane_state->dc_state) {
2068 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2069 dc_plane_state_release(dm_new_plane_state->dc_state);
2070 dm_new_plane_state->dc_state = NULL((void *)0);
2071 }
2072 }
2073
2074 drm_atomic_helper_resume(ddev, dm->cached_state);
2075
2076 dm->cached_state = NULL((void *)0);
2077
2078 amdgpu_dm_irq_resume_late(adev);
2079
2080 amdgpu_dm_smu_write_watermarks_table(adev);
2081
2082 return 0;
2083}
2084
2085/**
2086 * DOC: DM Lifecycle
2087 *
2088 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2089 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2090 * the base driver's device list to be initialized and torn down accordingly.
2091 *
2092 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2093 */
2094
2095static const struct amd_ip_funcs amdgpu_dm_funcs = {
2096 .name = "dm",
2097 .early_init = dm_early_init,
2098 .late_init = dm_late_init,
2099 .sw_init = dm_sw_init,
2100 .sw_fini = dm_sw_fini,
2101 .hw_init = dm_hw_init,
2102 .hw_fini = dm_hw_fini,
2103 .suspend = dm_suspend,
2104 .resume = dm_resume,
2105 .is_idle = dm_is_idle,
2106 .wait_for_idle = dm_wait_for_idle,
2107 .check_soft_reset = dm_check_soft_reset,
2108 .soft_reset = dm_soft_reset,
2109 .set_clockgating_state = dm_set_clockgating_state,
2110 .set_powergating_state = dm_set_powergating_state,
2111};
2112
2113const struct amdgpu_ip_block_version dm_ip_block =
2114{
2115 .type = AMD_IP_BLOCK_TYPE_DCE,
2116 .major = 1,
2117 .minor = 0,
2118 .rev = 0,
2119 .funcs = &amdgpu_dm_funcs,
2120};
2121
2122
2123/**
2124 * DOC: atomic
2125 *
2126 * *WIP*
2127 */
2128
2129static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2130 .fb_create = amdgpu_display_user_framebuffer_create,
2131 .output_poll_changed = drm_fb_helper_output_poll_changed,
2132 .atomic_check = amdgpu_dm_atomic_check,
2133 .atomic_commit = amdgpu_dm_atomic_commit,
2134};
2135
2136static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2137 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2138};
2139
2140static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2141{
2142 u32 max_cll, min_cll, max, min, q, r;
2143 struct amdgpu_dm_backlight_caps *caps;
2144 struct amdgpu_display_manager *dm;
2145 struct drm_connector *conn_base;
2146 struct amdgpu_device *adev;
2147 struct dc_link *link = NULL((void *)0);
2148 static const u8 pre_computed_values[] = {
2149 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2150 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2151
2152 if (!aconnector || !aconnector->dc_link)
2153 return;
2154
2155 link = aconnector->dc_link;
2156 if (link->connector_signal != SIGNAL_TYPE_EDP)
2157 return;
2158
2159 conn_base = &aconnector->base;
2160 adev = drm_to_adev(conn_base->dev);
2161 dm = &adev->dm;
2162 caps = &dm->backlight_caps;
2163 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2164 caps->aux_support = false0;
2165 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2166 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2167
2168 if (caps->ext_caps->bits.oled == 1 /*||
2169 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2170 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2171 caps->aux_support = true1;
2172
2173 if (amdgpu_backlight == 0)
2174 caps->aux_support = false0;
2175 else if (amdgpu_backlight == 1)
2176 caps->aux_support = true1;
2177
2178 /* From the specification (CTA-861-G), for calculating the maximum
2179 * luminance we need to use:
2180 * Luminance = 50*2**(CV/32)
2181 * Where CV is a one-byte value.
2182 * Calculating this expression directly would need floating-point
2183 * precision; to avoid that complexity, we take advantage of the fact
2184 * that CV is divided by a constant. From Euclid's division algorithm,
2185 * we know that CV can be written as: CV = 32*q + r. Next, we replace
2186 * CV in the Luminance expression and get 50*(2**q)*(2**(r/32)), hence we
2187 * only need to pre-compute the value of 2**(r/32). The values were
2188 * pre-computed with the following Ruby line:
2189 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2190 * The results of the above expression can be verified against
2191 * pre_computed_values.
2192 */
2193 q = max_cll >> 5;
2194 r = max_cll % 32;
2195 max = (1 << q) * pre_computed_values[r];
2196
2197 // min luminance: maxLum * (CV/255)^2 / 100
2198 q = DIV_ROUND_CLOSEST(min_cll, 255)(((min_cll) + ((255) / 2)) / (255));
2199 min = max * DIV_ROUND_CLOSEST((q * q), 100)((((q * q)) + ((100) / 2)) / (100));
2200
2201 caps->aux_max_input_signal = max;
2202 caps->aux_min_input_signal = min;
2203}
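
A worked instance of the comment's integer approximation (values illustrative): for max_cll = 70, q = 70 >> 5 = 2 and r = 70 % 32 = 6, so max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, close to the exact 50 * 2**(70/32) ≈ 227.8. As a self-contained sketch (not part of the analyzed source):

/* Sketch of the integer approximation of Luminance = 50 * 2**(CV/32). */
static unsigned int max_luminance(unsigned int max_cll)
{
	static const unsigned char pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98 };
	unsigned int q = max_cll >> 5;	/* quotient of CV / 32 */
	unsigned int r = max_cll % 32;	/* remainder: table holds 50 * 2**(r/32) */

	return (1 << q) * pre_computed_values[r];	/* max_luminance(70) == 228 */
}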
2204
2205void amdgpu_dm_update_connector_after_detect(
2206 struct amdgpu_dm_connector *aconnector)
2207{
2208 struct drm_connector *connector = &aconnector->base;
2209 struct drm_device *dev = connector->dev;
2210 struct dc_sink *sink;
2211
2212 /* MST handled by drm_mst framework */
2213 if (aconnector->mst_mgr.mst_state == true1)
2214 return;
2215
2216 sink = aconnector->dc_link->local_sink;
2217 if (sink)
2218 dc_sink_retain(sink);
2219
2220 /*
2221 * An EDID-managed connector gets its first update only in the mode_valid
2222 * hook; the connector sink is then set to either a fake or a physical sink,
2223 * depending on link status. Skip if already done during boot.
2224 */
2225 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2226 && aconnector->dc_em_sink) {
2227
2228 /*
2229 * For headless S3 resume, use the emulated sink (eml_sink) to fake the stream,
2230 * because on resume connector->sink is set to NULL
2231 */
2232 mutex_lock(&dev->mode_config.mutex)rw_enter_write(&dev->mode_config.mutex);
2233
2234 if (sink) {
2235 if (aconnector->dc_sink) {
2236 amdgpu_dm_update_freesync_caps(connector, NULL((void *)0));
2237 /*
2238 * The retain and release below are used to
2239 * bump up the refcount for the sink, because the link doesn't point
2240 * to it anymore after disconnect, so on the next crtc-to-connector
2241 * reshuffle by UMD we would otherwise get an unwanted dc_sink release.
2242 */
2243 dc_sink_release(aconnector->dc_sink);
2244 }
2245 aconnector->dc_sink = sink;
2246 dc_sink_retain(aconnector->dc_sink);
2247 amdgpu_dm_update_freesync_caps(connector,
2248 aconnector->edid);
2249 } else {
2250 amdgpu_dm_update_freesync_caps(connector, NULL((void *)0));
2251 if (!aconnector->dc_sink) {
2252 aconnector->dc_sink = aconnector->dc_em_sink;
2253 dc_sink_retain(aconnector->dc_sink);
2254 }
2255 }
2256
2257 mutex_unlock(&dev->mode_config.mutex)rw_exit_write(&dev->mode_config.mutex);
2258
2259 if (sink)
2260 dc_sink_release(sink);
2261 return;
2262 }
2263
2264 /*
2265 * TODO: temporary guard until a proper fix is found.
2266 * If this sink is an MST sink, we should not do anything.
2267 */
2268 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2269 dc_sink_release(sink);
2270 return;
2271 }
2272
2273 if (aconnector->dc_sink == sink) {
2274 /*
2275 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2276 * Do nothing!!
2277 */
2278 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",__drm_dbg(DRM_UT_DRIVER, "DCHPD: connector_id=%d: dc_sink didn't change.\n"
, aconnector->connector_id)
2279 aconnector->connector_id)__drm_dbg(DRM_UT_DRIVER, "DCHPD: connector_id=%d: dc_sink didn't change.\n"
, aconnector->connector_id)
;
2280 if (sink)
2281 dc_sink_release(sink);
2282 return;
2283 }
2284
2285 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",__drm_dbg(DRM_UT_DRIVER, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n"
, aconnector->connector_id, aconnector->dc_sink, sink)
2286 aconnector->connector_id, aconnector->dc_sink, sink)__drm_dbg(DRM_UT_DRIVER, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n"
, aconnector->connector_id, aconnector->dc_sink, sink)
;
2287
2288 mutex_lock(&dev->mode_config.mutex)rw_enter_write(&dev->mode_config.mutex);
2289
2290 /*
2291 * 1. Update status of the drm connector
2292 * 2. Send an event and let userspace tell us what to do
2293 */
2294 if (sink) {
2295 /*
2296 * TODO: check if we still need the S3 mode update workaround.
2297 * If yes, put it here.
2298 */
2299 if (aconnector->dc_sink) {
2300 amdgpu_dm_update_freesync_caps(connector, NULL((void *)0));
2301 dc_sink_release(aconnector->dc_sink);
2302 }
2303
2304 aconnector->dc_sink = sink;
2305 dc_sink_retain(aconnector->dc_sink);
2306 if (sink->dc_edid.length == 0) {
2307 aconnector->edid = NULL((void *)0);
2308 if (aconnector->dc_link->aux_mode) {
2309 drm_dp_cec_unset_edid(
2310 &aconnector->dm_dp_aux.aux);
2311 }
2312 } else {
2313 aconnector->edid =
2314 (struct edid *)sink->dc_edid.raw_edid;
2315
2316 drm_connector_update_edid_property(connector,
2317 aconnector->edid);
2318 if (aconnector->dc_link->aux_mode)
2319 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2320 aconnector->edid);
2321 }
2322
2323 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2324 update_connector_ext_caps(aconnector);
2325 } else {
2326 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2327 amdgpu_dm_update_freesync_caps(connector, NULL((void *)0));
2328 drm_connector_update_edid_property(connector, NULL((void *)0));
2329 aconnector->num_modes = 0;
2330 dc_sink_release(aconnector->dc_sink);
2331 aconnector->dc_sink = NULL((void *)0);
2332 aconnector->edid = NULL((void *)0);
2333#ifdef CONFIG_DRM_AMD_DC_HDCP
2334 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2335 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED2)
2336 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED1;
2337#endif
2338 }
2339
2340 mutex_unlock(&dev->mode_config.mutex)rw_exit_write(&dev->mode_config.mutex);
2341
2342 update_subconnector_property(aconnector);
2343
2344 if (sink)
2345 dc_sink_release(sink);
2346}
2347
2348static void handle_hpd_irq(void *param)
2349{
2350 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2351 struct drm_connector *connector = &aconnector->base;
2352 struct drm_device *dev = connector->dev;
2353 enum dc_connection_type new_connection_type = dc_connection_none;
2354#ifdef CONFIG_DRM_AMD_DC_HDCP
2355 struct amdgpu_device *adev = drm_to_adev(dev);
2356#endif
2357
2358 /*
2359 * In case of failure or MST, there is no need to update the connector status
2360 * or notify the OS, since (in the MST case) MST does this in its own context.
2361 */
2362 mutex_lock(&aconnector->hpd_lock)rw_enter_write(&aconnector->hpd_lock);
2363
2364#ifdef CONFIG_DRM_AMD_DC_HDCP
2365 if (adev->dm.hdcp_workqueue)
2366 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2367#endif
2368 if (aconnector->fake_enable)
2369 aconnector->fake_enable = false0;
2370
2371 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2372 DRM_ERROR("KMS: Failed to detect connector\n")__drm_err("KMS: Failed to detect connector\n");
2373
2374 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2375 emulated_link_detect(aconnector->dc_link);
2376
2377
2378 drm_modeset_lock_all(dev);
2379 dm_restore_drm_connector_state(dev, connector);
2380 drm_modeset_unlock_all(dev);
2381
2382 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2383 drm_kms_helper_hotplug_event(dev);
2384
2385 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2386 if (new_connection_type == dc_connection_none &&
2387 aconnector->dc_link->type == dc_connection_none)
2388 dm_set_dpms_off(aconnector->dc_link);
2389
2390 amdgpu_dm_update_connector_after_detect(aconnector);
2391
2392 drm_modeset_lock_all(dev);
2393 dm_restore_drm_connector_state(dev, connector);
2394 drm_modeset_unlock_all(dev);
2395
2396 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2397 drm_kms_helper_hotplug_event(dev);
2398 }
2399 mutex_unlock(&aconnector->hpd_lock)rw_exit_write(&aconnector->hpd_lock);
2400
2401}
2402
2403static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2404{
2405 uint8_t esi[DP_PSR_ERROR_STATUS0x2006 - DP_SINK_COUNT_ESI0x2002] = { 0 };
2406 uint8_t dret;
2407 bool_Bool new_irq_handled = false0;
2408 int dpcd_addr;
2409 int dpcd_bytes_to_read;
2410
2411 const int max_process_count = 30;
2412 int process_count = 0;
2413
2414 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2415
2416 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2417 dpcd_bytes_to_read = DP_LANE0_1_STATUS0x202 - DP_SINK_COUNT0x200;
2418 /* DPCD 0x200 - 0x201 for downstream IRQ */
2419 dpcd_addr = DP_SINK_COUNT0x200;
2420 } else {
2421 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS0x2006 - DP_SINK_COUNT_ESI0x2002;
2422 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2423 dpcd_addr = DP_SINK_COUNT_ESI0x2002;
2424 }
2425
2426 dret = drm_dp_dpcd_read(
2427 &aconnector->dm_dp_aux.aux,
2428 dpcd_addr,
2429 esi,
2430 dpcd_bytes_to_read);
2431
2432 while (dret == dpcd_bytes_to_read &&
2433 process_count < max_process_count) {
2434 uint8_t retry;
2435 dret = 0;
2436
2437 process_count++;
2438
2439 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2])__drm_dbg(DRM_UT_DRIVER, "ESI %02x %02x %02x\n", esi[0], esi[
1], esi[2])
;
2440 /* handle HPD short pulse irq */
2441 if (aconnector->mst_mgr.mst_state)
2442 drm_dp_mst_hpd_irq(
2443 &aconnector->mst_mgr,
2444 esi,
2445 &new_irq_handled);
2446
2447 if (new_irq_handled) {
2448 /* ACK at DPCD to notify downstream */
2449 const int ack_dpcd_bytes_to_write =
2450 dpcd_bytes_to_read - 1;
2451
2452 for (retry = 0; retry < 3; retry++) {
2453 uint8_t wret;
2454
2455 wret = drm_dp_dpcd_write(
2456 &aconnector->dm_dp_aux.aux,
2457 dpcd_addr + 1,
2458 &esi[1],
2459 ack_dpcd_bytes_to_write);
2460 if (wret == ack_dpcd_bytes_to_write)
2461 break;
2462 }
2463
2464 /* check if there is new irq to be handled */
2465 dret = drm_dp_dpcd_read(
2466 &aconnector->dm_dp_aux.aux,
2467 dpcd_addr,
2468 esi,
2469 dpcd_bytes_to_read);
2470
2471 new_irq_handled = false0;
2472 } else {
2473 break;
2474 }
2475 }
2476
2477 if (process_count == max_process_count)
2478 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n")__drm_dbg(DRM_UT_DRIVER, "Loop exceeded max iterations\n");
2479}
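
The byte counts in dm_handle_hpd_rx_irq() fall straight out of the DPCD register map; a hedged sketch of the arithmetic, using the addresses from the expansions above (illustrative only; dpcd_rev stands in for link_status->dpcd_caps->dpcd_rev.raw):

/* Sketch: DPCD ranges read by dm_handle_hpd_rx_irq(). */
int dpcd_addr, dpcd_bytes_to_read;

if (dpcd_rev < 0x12) {				/* sinks below DPCD 1.2 */
	dpcd_addr = 0x200;			/* DP_SINK_COUNT */
	dpcd_bytes_to_read = 0x202 - 0x200;	/* 2 bytes: 0x200 - 0x201 */
} else {
	dpcd_addr = 0x2002;			/* DP_SINK_COUNT_ESI */
	dpcd_bytes_to_read = 0x2006 - 0x2002;	/* 4 bytes: 0x2002 - 0x2005 */
}
/* The ACK writes dpcd_bytes_to_read - 1 bytes, starting at dpcd_addr + 1. */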
2480
2481static void handle_hpd_rx_irq(void *param)
2482{
2483 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2484 struct drm_connector *connector = &aconnector->base;
2485 struct drm_device *dev = connector->dev;
2486 struct dc_link *dc_link = aconnector->dc_link;
2487 bool_Bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2488 enum dc_connection_type new_connection_type = dc_connection_none;
2489#ifdef CONFIG_DRM_AMD_DC_HDCP
2490 union hpd_irq_data hpd_irq_data;
2491 struct amdgpu_device *adev = drm_to_adev(dev);
2492
2493 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2494#endif
2495
2496 /*
2497 * TODO: Temporarily add a mutex to protect the hpd interrupt from gpio
2498 * conflicts; after the i2c helper is implemented, this mutex should be
2499 * retired.
2500 */
2501 if (dc_link->type != dc_connection_mst_branch)
2502 mutex_lock(&aconnector->hpd_lock)rw_enter_write(&aconnector->hpd_lock);
2503
2504
2505#ifdef CONFIG_DRM_AMD_DC_HDCP
2506 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL((void *)0)) &&
2507#else
2508 if (dc_link_handle_hpd_rx_irq(dc_link, NULL((void *)0), NULL((void *)0)) &&
2509#endif
2510 !is_mst_root_connector) {
2511 /* Downstream Port status changed. */
2512 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2513 DRM_ERROR("KMS: Failed to detect connector\n")__drm_err("KMS: Failed to detect connector\n");
2514
2515 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2516 emulated_link_detect(dc_link);
2517
2518 if (aconnector->fake_enable)
2519 aconnector->fake_enable = false0;
2520
2521 amdgpu_dm_update_connector_after_detect(aconnector);
2522
2523
2524 drm_modeset_lock_all(dev);
2525 dm_restore_drm_connector_state(dev, connector);
2526 drm_modeset_unlock_all(dev);
2527
2528 drm_kms_helper_hotplug_event(dev);
2529 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2530
2531 if (aconnector->fake_enable)
2532 aconnector->fake_enable = false0;
2533
2534 amdgpu_dm_update_connector_after_detect(aconnector);
2535
2536
2537 drm_modeset_lock_all(dev);
2538 dm_restore_drm_connector_state(dev, connector);
2539 drm_modeset_unlock_all(dev);
2540
2541 drm_kms_helper_hotplug_event(dev);
2542 }
2543 }
2544#ifdef CONFIG_DRM_AMD_DC_HDCP
2545 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2546 if (adev->dm.hdcp_workqueue)
2547 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2548 }
2549#endif
2550 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2551 (dc_link->type == dc_connection_mst_branch))
2552 dm_handle_hpd_rx_irq(aconnector);
2553
2554 if (dc_link->type != dc_connection_mst_branch) {
2555 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2556 mutex_unlock(&aconnector->hpd_lock)rw_exit_write(&aconnector->hpd_lock);
2557 }
2558}
2559
2560static void register_hpd_handlers(struct amdgpu_device *adev)
2561{
2562 struct drm_device *dev = adev_to_drm(adev);
2563 struct drm_connector *connector;
2564 struct amdgpu_dm_connector *aconnector;
2565 const struct dc_link *dc_link;
2566 struct dc_interrupt_params int_params = {0};
2567
2568 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2569 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2570
2571 list_for_each_entry(connector,
2572 &dev->mode_config.connector_list, head) {
2573
2574 aconnector = to_amdgpu_dm_connector(connector);
2575 dc_link = aconnector->dc_link;
2576
2577 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2578 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2579 int_params.irq_source = dc_link->irq_source_hpd;
2580
2581 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2582 handle_hpd_irq,
2583 (void *) aconnector);
2584 }
2585
2586 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2587
2588 /* Also register for DP short pulse (hpd_rx). */
2589 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2590 int_params.irq_source = dc_link->irq_source_hpd_rx;
2591
2592 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2593 handle_hpd_rx_irq,
2594 (void *) aconnector);
2595 }
2596 }
2597}
2598
2599#if defined(CONFIG_DRM_AMD_DC_SI)
2600/* Register IRQ sources and initialize IRQ callbacks */
2601static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2602{
2603 struct dc *dc = adev->dm.dc;
2604 struct common_irq_params *c_irq_params;
2605 struct dc_interrupt_params int_params = {0};
2606 int r;
2607 int i;
2608 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY0;
2609
2610 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2611 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2612
2613 /*
2614 * Actions of amdgpu_irq_add_id():
2615 * 1. Register a set() function with base driver.
2616 * Base driver will call set() function to enable/disable an
2617 * interrupt in DC hardware.
2618 * 2. Register amdgpu_dm_irq_handler().
2619 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2620 * coming from DC hardware.
2621 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2622 * for acknowledging and handling. */
2623
2624 /* Use VBLANK interrupt */
2625 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2626 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2627 if (r) {
2628 DRM_ERROR("Failed to add crtc irq id!\n")__drm_err("Failed to add crtc irq id!\n");
2629 return r;
2630 }
2631
2632 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2633 int_params.irq_source =
2634 dc_interrupt_to_irq_source(dc, i+1 , 0);
2635
2636 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2637
2638 c_irq_params->adev = adev;
2639 c_irq_params->irq_src = int_params.irq_source;
2640
2641 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2642 dm_crtc_high_irq, c_irq_params);
2643 }
2644
2645 /* Use GRPH_PFLIP interrupt */
2646 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP8;
2647 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP18; i += 2) {
2648 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2649 if (r) {
2650 DRM_ERROR("Failed to add page flip irq id!\n")__drm_err("Failed to add page flip irq id!\n");
2651 return r;
2652 }
2653
2654 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2655 int_params.irq_source =
2656 dc_interrupt_to_irq_source(dc, i, 0);
2657
2658 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2659
2660 c_irq_params->adev = adev;
2661 c_irq_params->irq_src = int_params.irq_source;
2662
2663 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2664 dm_pflip_high_irq, c_irq_params);
2665
2666 }
2667
2668 /* HPD */
2669 r = amdgpu_irq_add_id(adev, client_id,
2670 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A42, &adev->hpd_irq);
2671 if (r) {
2672 DRM_ERROR("Failed to add hpd irq id!\n")__drm_err("Failed to add hpd irq id!\n");
2673 return r;
2674 }
2675
2676 register_hpd_handlers(adev);
2677
2678 return 0;
2679}
2680#endif
2681
2682/* Register IRQ sources and initialize IRQ callbacks */
2683static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2684{
2685 struct dc *dc = adev->dm.dc;
2686 struct common_irq_params *c_irq_params;
2687 struct dc_interrupt_params int_params = {0};
2688 int r;
2689 int i;
2690 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY0;
2691
2692 if (adev->asic_type >= CHIP_VEGA10)
2693 client_id = SOC15_IH_CLIENTID_DCE;
2694
2695 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2696 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2697
2698 /*
2699 * Actions of amdgpu_irq_add_id():
2700 * 1. Register a set() function with base driver.
2701 * Base driver will call set() function to enable/disable an
2702 * interrupt in DC hardware.
2703 * 2. Register amdgpu_dm_irq_handler().
2704 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2705 * coming from DC hardware.
2706 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2707 * for acknowledging and handling. */
2708
2709 /* Use VBLANK interrupt */
2710 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT019; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT024; i++) {
2711 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2712 if (r) {
2713 DRM_ERROR("Failed to add crtc irq id!\n")__drm_err("Failed to add crtc irq id!\n");
2714 return r;
2715 }
2716
2717 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2718 int_params.irq_source =
2719 dc_interrupt_to_irq_source(dc, i, 0);
2720
2721 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2722
2723 c_irq_params->adev = adev;
2724 c_irq_params->irq_src = int_params.irq_source;
2725
2726 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2727 dm_crtc_high_irq, c_irq_params);
2728 }
2729
2730 /* Use VUPDATE interrupt */
2731 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT7; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT17; i += 2) {
2732 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2733 if (r) {
2734 DRM_ERROR("Failed to add vupdate irq id!\n")__drm_err("Failed to add vupdate irq id!\n");
2735 return r;
2736 }
2737
2738 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2739 int_params.irq_source =
2740 dc_interrupt_to_irq_source(dc, i, 0);
2741
2742 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2743
2744 c_irq_params->adev = adev;
2745 c_irq_params->irq_src = int_params.irq_source;
2746
2747 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2748 dm_vupdate_high_irq, c_irq_params);
2749 }
2750
2751 /* Use GRPH_PFLIP interrupt */
2752 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP8;
2753 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP18; i += 2) {
2754 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2755 if (r) {
2756 DRM_ERROR("Failed to add page flip irq id!\n")__drm_err("Failed to add page flip irq id!\n");
2757 return r;
2758 }
2759
2760 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2761 int_params.irq_source =
2762 dc_interrupt_to_irq_source(dc, i, 0);
2763
2764 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2765
2766 c_irq_params->adev = adev;
2767 c_irq_params->irq_src = int_params.irq_source;
2768
2769 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2770 dm_pflip_high_irq, c_irq_params);
2771
2772 }
2773
2774 /* HPD */
2775 r = amdgpu_irq_add_id(adev, client_id,
2776 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A42, &adev->hpd_irq);
2777 if (r) {
2778 DRM_ERROR("Failed to add hpd irq id!\n")__drm_err("Failed to add hpd irq id!\n");
2779 return r;
2780 }
2781
2782 register_hpd_handlers(adev);
2783
2784 return 0;
2785}
2786
2787#if defined(CONFIG_DRM_AMD_DC_DCN1)
2788/* Register IRQ sources and initialize IRQ callbacks */
2789static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2790{
2791 struct dc *dc = adev->dm.dc;
2792 struct common_irq_params *c_irq_params;
2793 struct dc_interrupt_params int_params = {0};
2794 int r;
2795 int i;
2796
2797 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2798 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2799
2800 /*
2801 * Actions of amdgpu_irq_add_id():
2802 * 1. Register a set() function with base driver.
2803 * Base driver will call set() function to enable/disable an
2804 * interrupt in DC hardware.
2805 * 2. Register amdgpu_dm_irq_handler().
2806 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2807 * coming from DC hardware.
2808 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2809 * for acknowledging and handling.
2810 */
2811
2812 /* Use VSTARTUP interrupt */
2813 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP0x3C;
2814 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP0x3C + adev->mode_info.num_crtc - 1;
2815 i++) {
2816 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2817
2818 if (r) {
2819 DRM_ERROR("Failed to add crtc irq id!\n")__drm_err("Failed to add crtc irq id!\n");
2820 return r;
2821 }
2822
2823 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2824 int_params.irq_source =
2825 dc_interrupt_to_irq_source(dc, i, 0);
2826
2827 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2828
2829 c_irq_params->adev = adev;
2830 c_irq_params->irq_src = int_params.irq_source;
2831
2832 amdgpu_dm_irq_register_interrupt(
2833 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2834 }
2835
2836 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2837 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2838 * to trigger at end of each vblank, regardless of state of the lock,
2839 * matching DCE behaviour.
2840 */
2841 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT0x57;
2842 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT0x57 + adev->mode_info.num_crtc - 1;
2843 i++) {
2844 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2845
2846 if (r) {
2847 DRM_ERROR("Failed to add vupdate irq id!\n")__drm_err("Failed to add vupdate irq id!\n");
2848 return r;
2849 }
2850
2851 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2852 int_params.irq_source =
2853 dc_interrupt_to_irq_source(dc, i, 0);
2854
2855 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2856
2857 c_irq_params->adev = adev;
2858 c_irq_params->irq_src = int_params.irq_source;
2859
2860 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2861 dm_vupdate_high_irq, c_irq_params);
2862 }
2863
2864 /* Use GRPH_PFLIP interrupt */
2865 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT0x4F;
2866 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT0x4F + adev->mode_info.num_crtc - 1;
2867 i++) {
2868 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2869 if (r) {
2870 DRM_ERROR("Failed to add page flip irq id!\n")__drm_err("Failed to add page flip irq id!\n");
2871 return r;
2872 }
2873
2874 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2875 int_params.irq_source =
2876 dc_interrupt_to_irq_source(dc, i, 0);
2877
2878 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2879
2880 c_irq_params->adev = adev;
2881 c_irq_params->irq_src = int_params.irq_source;
2882
2883 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2884 dm_pflip_high_irq, c_irq_params);
2885
2886 }
2887
2888 /* HPD */
2889 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT9,
2890 &adev->hpd_irq);
2891 if (r) {
2892 DRM_ERROR("Failed to add hpd irq id!\n")__drm_err("Failed to add hpd irq id!\n");
2893 return r;
2894 }
2895
2896 register_hpd_handlers(adev);
2897
2898 return 0;
2899}
2900#endif
2901
2902/*
2903 * Acquires the lock for the atomic state object and returns
2904 * the new atomic state.
2905 *
2906 * This should only be called during atomic check.
2907 */
2908static int dm_atomic_get_state(struct drm_atomic_state *state,
2909 struct dm_atomic_state **dm_state)
2910{
2911 struct drm_device *dev = state->dev;
2912 struct amdgpu_device *adev = drm_to_adev(dev);
2913 struct amdgpu_display_manager *dm = &adev->dm;
2914 struct drm_private_state *priv_state;
2915
2916 if (*dm_state)
2917 return 0;
2918
2919 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2920 if (IS_ERR(priv_state))
2921 return PTR_ERR(priv_state);
2922
2923 *dm_state = to_dm_atomic_state(priv_state);
2924
2925 return 0;
2926}
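dm_atomic_get_state() is a lazy accessor: the caller hands in a pointer that starts out NULL, the first call takes the private-object lock via drm_atomic_get_private_obj_state() and caches the result, and every later call returns immediately. A minimal usage sketch of that pattern (the variable names are illustrative, not taken from a specific caller):

    struct dm_atomic_state *dm_state = NULL;
    int ret;

    /* First call locks dm->atomic_obj and fills in dm_state... */
    ret = dm_atomic_get_state(state, &dm_state);
    if (ret)
            return ret;
    /* ...subsequent calls see *dm_state != NULL and return 0 at once. */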
2927
2928static struct dm_atomic_state *
2929dm_atomic_get_new_state(struct drm_atomic_state *state)
2930{
2931 struct drm_device *dev = state->dev;
2932 struct amdgpu_device *adev = drm_to_adev(dev);
2933 struct amdgpu_display_manager *dm = &adev->dm;
2934 struct drm_private_obj *obj;
2935 struct drm_private_state *new_obj_state;
2936 int i;
2937
2938 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2939 if (obj->funcs == dm->atomic_obj.funcs)
2940 return to_dm_atomic_state(new_obj_state);
2941 }
2942
2943 return NULL((void *)0);
2944}
2945
2946static struct drm_private_state *
2947dm_atomic_duplicate_state(struct drm_private_obj *obj)
2948{
2949 struct dm_atomic_state *old_state, *new_state;
2950
2951 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL(0x0001 | 0x0004));
2952 if (!new_state)
2953 return NULL((void *)0);
2954
2955 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2956
2957 old_state = to_dm_atomic_state(obj->state);
2958
2959 if (old_state && old_state->context)
2960 new_state->context = dc_copy_state(old_state->context);
2961
2962 if (!new_state->context) {
2963 kfree(new_state);
2964 return NULL((void *)0);
2965 }
2966
2967 return &new_state->base;
2968}
2969
2970static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2971 struct drm_private_state *state)
2972{
2973 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2974
2975 if (dm_state && dm_state->context)
2976 dc_release_state(dm_state->context);
2977
2978 kfree(dm_state);
2979}
2980
2981static struct drm_private_state_funcs dm_atomic_state_funcs = {
2982 .atomic_duplicate_state = dm_atomic_duplicate_state,
2983 .atomic_destroy_state = dm_atomic_destroy_state,
2984};
2985
2986static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2987{
2988 struct dm_atomic_state *state;
2989 int r;
2990
2991 adev->mode_info.mode_config_initialized = true1;
2992
2993 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2994 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2995
2996 adev_to_drm(adev)->mode_config.max_width = 16384;
2997 adev_to_drm(adev)->mode_config.max_height = 16384;
2998
2999 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3000 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3001 /* indicates support for immediate flip */
3002 adev_to_drm(adev)->mode_config.async_page_flip = true1;
3003
3004 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3005
3006 state = kzalloc(sizeof(*state), GFP_KERNEL(0x0001 | 0x0004));
3007 if (!state)
3008 return -ENOMEM12;
3009
3010 state->context = dc_create_state(adev->dm.dc);
3011 if (!state->context) {
3012 kfree(state);
3013 return -ENOMEM12;
3014 }
3015
3016 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3017
3018 drm_atomic_private_obj_init(adev_to_drm(adev),
3019 &adev->dm.atomic_obj,
3020 &state->base,
3021 &dm_atomic_state_funcs);
3022
3023 r = amdgpu_display_modeset_create_props(adev);
3024 if (r) {
3025 dc_release_state(state->context);
3026 kfree(state);
3027 return r;
3028 }
3029
3030 r = amdgpu_dm_audio_init(adev);
3031 if (r) {
3032 dc_release_state(state->context);
3033 kfree(state);
3034 return r;
3035 }
3036
3037 return 0;
3038}
3039
3040#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT12 12
3041#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT255 255
3042#define AUX_BL_DEFAULT_TRANSITION_TIME_MS50 50
3043
3044#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE1) ||\
3045 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3046
3047static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3048{
3049#if defined(CONFIG_ACPI1)
3050 struct amdgpu_dm_backlight_caps caps;
3051
3052 memset(&caps, 0, sizeof(caps))__builtin_memset((&caps), (0), (sizeof(caps)));
3053
3054 if (dm->backlight_caps.caps_valid)
3055 return;
3056
3057 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3058 if (caps.caps_valid) {
3059 dm->backlight_caps.caps_valid = true1;
3060 if (caps.aux_support)
3061 return;
3062 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3063 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3064 } else {
3065 dm->backlight_caps.min_input_signal =
3066 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT12;
3067 dm->backlight_caps.max_input_signal =
3068 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT255;
3069 }
3070#else
3071 if (dm->backlight_caps.aux_support)
3072 return;
3073
3074 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT12;
3075 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT255;
3076#endif
3077}
3078
3079static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3080 unsigned *min, unsigned *max)
3081{
3082 if (!caps)
3083 return 0;
3084
3085 if (caps->aux_support) {
3086 // Firmware limits are in nits, DC API wants millinits.
3087 *max = 1000 * caps->aux_max_input_signal;
3088 *min = 1000 * caps->aux_min_input_signal;
3089 } else {
3090 // Firmware limits are 8-bit, PWM control is 16-bit.
3091 *max = 0x101 * caps->max_input_signal;
3092 *min = 0x101 * caps->min_input_signal;
3093 }
3094 return 1;
3095}
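The two branches above report the range in different units, matching what the corresponding dc_link_set_backlight_level* call expects. A worked example with hypothetical firmware caps:

    /* AUX panel:  aux_min/aux_max_input_signal = 5..500 nits
     *             -> *min = 5000, *max = 500000 millinits (DC's AUX unit).
     * PWM panel:  min/max_input_signal = 12..255 (8-bit firmware limits)
     *             -> *min = 0x101 * 12 = 3084, *max = 0x101 * 255 = 65535;
     *                multiplying by 0x101 replicates the byte into both
     *                halves of the 16-bit PWM value (0xAB -> 0xABAB). */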
3096
3097static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3098 uint32_t brightness)
3099{
3100 unsigned min, max;
3101
3102 if (!get_brightness_range(caps, &min, &max))
3103 return brightness;
3104
3105 // Rescale 0..255 to min..max
3106 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3107 AMDGPU_MAX_BL_LEVEL);
3108}
3109
3110static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3111 uint32_t brightness)
3112{
3113 unsigned min, max;
3114
3115 if (!get_brightness_range(caps, &min, &max))
3116 return brightness;
3117
3118 if (brightness < min)
3119 return 0;
3120 // Rescale min..max to 0..255
3121 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3122 max - min);
3123}
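For reference, the two conversions round-trip cleanly thanks to the round-to-closest division. Continuing the hypothetical PWM range from above (min = 3084, max = 65535, so max - min = 62451):

    /* from_user: 128   -> 3084 + (62451 * 128 + 127) / 255       = 34432
     * to_user:   34432 -> (255 * (34432 - 3084) + 31225) / 62451 = 128  */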
3124
3125static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3126{
3127 struct amdgpu_display_manager *dm = bl_get_data(bd)(bd)->data;
3128 struct amdgpu_dm_backlight_caps caps;
3129 struct dc_link *link = NULL((void *)0);
3130 u32 brightness;
3131 bool_Bool rc;
3132
3133 amdgpu_dm_update_backlight_caps(dm);
3134 caps = dm->backlight_caps;
3135
3136 link = (struct dc_link *)dm->backlight_link;
3137
3138 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3139 // Change brightness based on AUX property
3140 if (caps.aux_support)
3141 rc = dc_link_set_backlight_level_nits(link, true1, brightness,
3142 AUX_BL_DEFAULT_TRANSITION_TIME_MS50);
3143 else
3144 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3145
3146 return rc ? 0 : 1;
3147}
3148
3149static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3150{
3151 struct amdgpu_display_manager *dm = bl_get_data(bd)(bd)->data;
3152 struct amdgpu_dm_backlight_caps caps;
3153
3154 amdgpu_dm_update_backlight_caps(dm);
3155 caps = dm->backlight_caps;
3156
3157 if (caps.aux_support) {
3158 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3159 u32 avg, peak;
3160 bool_Bool rc;
3161
3162 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3163 if (!rc)
3164 return bd->props.brightness;
3165 return convert_brightness_to_user(&caps, avg);
3166 } else {
3167 int ret = dc_link_get_backlight_level(dm->backlight_link);
3168
3169 if (ret == DC_ERROR_UNEXPECTED)
3170 return bd->props.brightness;
3171 return convert_brightness_to_user(&caps, ret);
3172 }
3173}
3174
3175static const struct backlight_ops amdgpu_dm_backlight_ops = {
3176 .options = BL_CORE_SUSPENDRESUME1,
3177 .get_brightness = amdgpu_dm_backlight_get_brightness,
3178 .update_status = amdgpu_dm_backlight_update_status,
3179};
3180
3181static void
3182amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3183{
3184 char bl_name[16];
3185 struct backlight_properties props = { 0 };
3186
3187 amdgpu_dm_update_backlight_caps(dm);
3188
3189 props.max_brightness = AMDGPU_MAX_BL_LEVEL0xFF;
3190 props.brightness = AMDGPU_MAX_BL_LEVEL0xFF;
3191 props.type = BACKLIGHT_RAW0;
3192
3193 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3194 adev_to_drm(dm->adev)->primary->index);
3195
3196 dm->backlight_dev = backlight_device_register(bl_name,
3197 adev_to_drm(dm->adev)->dev,
3198 dm,
3199 &amdgpu_dm_backlight_ops,
3200 &props);
3201
3202 if (IS_ERR(dm->backlight_dev))
3203 DRM_ERROR("DM: Backlight registration failed!\n")__drm_err("DM: Backlight registration failed!\n");
3204 else
3205 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3206}
3207
3208#endif
3209
3210static int initialize_plane(struct amdgpu_display_manager *dm,
3211 struct amdgpu_mode_info *mode_info, int plane_id,
3212 enum drm_plane_type plane_type,
3213 const struct dc_plane_cap *plane_cap)
3214{
3215 struct drm_plane *plane;
3216 unsigned long possible_crtcs;
3217 int ret = 0;
3218
3219 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL(0x0001 | 0x0004));
3220 if (!plane) {
3221 DRM_ERROR("KMS: Failed to allocate plane\n")__drm_err("KMS: Failed to allocate plane\n");
3222 return -ENOMEM12;
3223 }
3224 plane->type = plane_type;
3225
3226 /*
3227 * HACK: IGT tests expect that the primary plane for a CRTC
3228 * can only have one possible CRTC. Only expose support for
3229 * any CRTC on planes that are not going to be used as a
3230 * primary plane - i.e. overlay or underlay planes.
3231 */
3232 possible_crtcs = 1 << plane_id;
3233 if (plane_id >= dm->dc->caps.max_streams)
3234 possible_crtcs = 0xff;
3235
3236 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3237
3238 if (ret) {
3239 DRM_ERROR("KMS: Failed to initialize plane\n")__drm_err("KMS: Failed to initialize plane\n");
3240 kfree(plane);
3241 return ret;
3242 }
3243
3244 if (mode_info)
3245 mode_info->planes[plane_id] = plane;
3246
3247 return ret;
3248}
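The possible_crtcs mask computed above ties each primary plane to exactly one CRTC, while planes with an id at or beyond max_streams (the overlays) may be placed on any CRTC. A sketch of the resulting masks, assuming dc->caps.max_streams == 4:

    /* plane_id 0..3 (primaries) -> possible_crtcs = 0x01, 0x02, 0x04, 0x08
     * plane_id >= 4 (overlay)   -> possible_crtcs = 0xff (any CRTC)      */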
3249
3250
3251static void register_backlight_device(struct amdgpu_display_manager *dm,
3252 struct dc_link *link)
3253{
3254#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE1) ||\
3255 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3256
3257 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3258 link->type != dc_connection_none) {
3259 /*
3260 * Even if registration fails, we should continue with
3261 * DM initialization, because not having backlight control
3262 * is better than a black screen.
3263 */
3264 amdgpu_dm_register_backlight_device(dm);
3265
3266 if (dm->backlight_dev)
3267 dm->backlight_link = link;
3268 }
3269#endif
3270}
3271
3272
3273/*
3274 * In this architecture, the association
3275 * connector -> encoder -> crtc
3276 * is not really required. The crtc and connector will hold the
3277 * display_index as an abstraction to use with the DAL component.
3278 *
3279 * Returns 0 on success
3280 */
3281static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3282{
3283 struct amdgpu_display_manager *dm = &adev->dm;
3284 int32_t i;
3285 struct amdgpu_dm_connector *aconnector = NULL((void *)0);
3286 struct amdgpu_encoder *aencoder = NULL((void *)0);
3287 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3288 uint32_t link_cnt;
3289 int32_t primary_planes;
3290 enum dc_connection_type new_connection_type = dc_connection_none;
3291 const struct dc_plane_cap *plane;
3292
3293 dm->display_indexes_num = dm->dc->caps.max_streams;
3294 /* Update the actual number of CRTCs in use */
3295 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3296
3297 link_cnt = dm->dc->caps.max_links;
3298 if (amdgpu_dm_mode_config_init(dm->adev)) {
3299 DRM_ERROR("DM: Failed to initialize mode config\n")__drm_err("DM: Failed to initialize mode config\n");
3300 return -EINVAL22;
3301 }
3302
3303 /* There is one primary plane per CRTC */
3304 primary_planes = dm->dc->caps.max_streams;
3305 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3306
3307 /*
3308 * Initialize primary planes, implicit planes for legacy IOCTLS.
3309 * Order is reversed to match iteration order in atomic check.
3310 */
3311 for (i = (primary_planes - 1); i >= 0; i--) {
3312 plane = &dm->dc->caps.planes[i];
3313
3314 if (initialize_plane(dm, mode_info, i,
3315 DRM_PLANE_TYPE_PRIMARY, plane)) {
3316 DRM_ERROR("KMS: Failed to initialize primary plane\n")__drm_err("KMS: Failed to initialize primary plane\n");
3317 goto fail;
3318 }
3319 }
3320
3321 /*
3322 * Initialize overlay planes, index starting after primary planes.
3323 * These planes have a higher DRM index than the primary planes since
3324 * they should be considered as having a higher z-order.
3325 * Order is reversed to match iteration order in atomic check.
3326 *
3327 * Only support DCN for now, and only expose one so we don't encourage
3328 * userspace to use up all the pipes.
3329 */
3330 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3331 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3332
3333 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3334 continue;
3335
3336 if (!plane->blends_with_above || !plane->blends_with_below)
3337 continue;
3338
3339 if (!plane->pixel_format_support.argb8888)
3340 continue;
3341
3342 if (initialize_plane(dm, NULL((void *)0), primary_planes + i,
3343 DRM_PLANE_TYPE_OVERLAY, plane)) {
3344 DRM_ERROR("KMS: Failed to initialize overlay plane\n")__drm_err("KMS: Failed to initialize overlay plane\n");
3345 goto fail;
3346 }
3347
3348 /* Only create one overlay plane. */
3349 break;
3350 }
3351
3352 for (i = 0; i < dm->dc->caps.max_streams; i++)
3353 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3354 DRM_ERROR("KMS: Failed to initialize crtc\n")__drm_err("KMS: Failed to initialize crtc\n");
3355 goto fail;
3356 }
3357
3358 /* Loop over all connectors on the board */
3359 for (i = 0; i < link_cnt; i++) {
3360 struct dc_link *link = NULL((void *)0);
3361
3362 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX31) {
3363 DRM_ERROR(
3364 "KMS: Cannot support more than %d display indexes\n",
3365 AMDGPU_DM_MAX_DISPLAY_INDEX);
3366 continue;
3367 }
3368
3369 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL(0x0001 | 0x0004));
3370 if (!aconnector)
3371 goto fail;
3372
3373 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL(0x0001 | 0x0004));
3374 if (!aencoder)
3375 goto fail;
3376
3377 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3378 DRM_ERROR("KMS: Failed to initialize encoder\n")__drm_err("KMS: Failed to initialize encoder\n");
3379 goto fail;
3380 }
3381
3382 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3383 DRM_ERROR("KMS: Failed to initialize connector\n")__drm_err("KMS: Failed to initialize connector\n");
3384 goto fail;
3385 }
3386
3387 link = dc_get_link_at_index(dm->dc, i);
3388
3389 if (!dc_link_detect_sink(link, &new_connection_type))
3390 DRM_ERROR("KMS: Failed to detect connector\n")__drm_err("KMS: Failed to detect connector\n");
3391
3392 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3393 emulated_link_detect(link);
3394 amdgpu_dm_update_connector_after_detect(aconnector);
3395
3396 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3397 amdgpu_dm_update_connector_after_detect(aconnector);
3398 register_backlight_device(dm, link);
3399 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3400 amdgpu_dm_set_psr_caps(link);
3401 }
3402
3403
3404 }
3405
3406 /* Software is initialized. Now we can register interrupt handlers. */
3407 switch (adev->asic_type) {
3408#if defined(CONFIG_DRM_AMD_DC_SI)
3409 case CHIP_TAHITI:
3410 case CHIP_PITCAIRN:
3411 case CHIP_VERDE:
3412 case CHIP_OLAND:
3413 if (dce60_register_irq_handlers(dm->adev)) {
3414 DRM_ERROR("DM: Failed to initialize IRQ\n")__drm_err("DM: Failed to initialize IRQ\n");
3415 goto fail;
3416 }
3417 break;
3418#endif
3419 case CHIP_BONAIRE:
3420 case CHIP_HAWAII:
3421 case CHIP_KAVERI:
3422 case CHIP_KABINI:
3423 case CHIP_MULLINS:
3424 case CHIP_TONGA:
3425 case CHIP_FIJI:
3426 case CHIP_CARRIZO:
3427 case CHIP_STONEY:
3428 case CHIP_POLARIS11:
3429 case CHIP_POLARIS10:
3430 case CHIP_POLARIS12:
3431 case CHIP_VEGAM:
3432 case CHIP_VEGA10:
3433 case CHIP_VEGA12:
3434 case CHIP_VEGA20:
3435 if (dce110_register_irq_handlers(dm->adev)) {
3436 DRM_ERROR("DM: Failed to initialize IRQ\n")__drm_err("DM: Failed to initialize IRQ\n");
3437 goto fail;
3438 }
3439 break;
3440#if defined(CONFIG_DRM_AMD_DC_DCN1)
3441 case CHIP_RAVEN:
3442 case CHIP_NAVI12:
3443 case CHIP_NAVI10:
3444 case CHIP_NAVI14:
3445 case CHIP_RENOIR:
3446#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
3447 case CHIP_SIENNA_CICHLID:
3448 case CHIP_NAVY_FLOUNDER:
3449#endif
3450 if (dcn10_register_irq_handlers(dm->adev)) {
3451 DRM_ERROR("DM: Failed to initialize IRQ\n")__drm_err("DM: Failed to initialize IRQ\n");
3452 goto fail;
3453 }
3454 break;
3455#endif
3456 default:
3457 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type)__drm_err("Unsupported ASIC type: 0x%X\n", adev->asic_type
)
;
3458 goto fail;
3459 }
3460
3461 return 0;
3462fail:
3463 kfree(aencoder);
3464 kfree(aconnector);
3465
3466 return -EINVAL22;
3467}
3468
3469static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3470{
3471 drm_mode_config_cleanup(dm->ddev);
3472 drm_atomic_private_obj_fini(&dm->atomic_obj);
3473 return;
3474}
3475
3476/******************************************************************************
3477 * amdgpu_display_funcs functions
3478 *****************************************************************************/
3479
3480/*
3481 * dm_bandwidth_update - program display watermarks
3482 *
3483 * @adev: amdgpu_device pointer
3484 *
3485 * Calculate and program the display watermarks and line buffer allocation.
3486 */
3487static void dm_bandwidth_update(struct amdgpu_device *adev)
3488{
3489 /* TODO: implement later */
3490}
3491
3492static const struct amdgpu_display_funcs dm_display_funcs = {
3493 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3494 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3495 .backlight_set_level = NULL((void *)0), /* never called for DC */
3496 .backlight_get_level = NULL((void *)0), /* never called for DC */
3497 .hpd_sense = NULL((void *)0),/* called unconditionally */
3498 .hpd_set_polarity = NULL((void *)0), /* called unconditionally */
3499 .hpd_get_gpio_reg = NULL((void *)0), /* VBIOS parsing. DAL does it. */
3500 .page_flip_get_scanoutpos =
3501 dm_crtc_get_scanoutpos,/* called unconditionally */
3502 .add_encoder = NULL((void *)0), /* VBIOS parsing. DAL does it. */
3503 .add_connector = NULL((void *)0), /* VBIOS parsing. DAL does it. */
3504};
3505
3506#if defined(CONFIG_DEBUG_KERNEL_DC)
3507
3508static ssize_t s3_debug_store(struct device *device,
3509 struct device_attribute *attr,
3510 const char *buf,
3511 size_t count)
3512{
3513 int ret;
3514 int s3_state;
3515 struct drm_device *drm_dev = dev_get_drvdata(device)((void *)0);
3516 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3517
3518 ret = kstrtoint(buf, 0, &s3_state);
3519
3520 if (ret == 0) {
3521 if (s3_state) {
3522 dm_resume(adev);
3523 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3524 } else
3525 dm_suspend(adev);
3526 }
3527
3528 return ret == 0 ? count : 0;
3529}
3530
3531DEVICE_ATTR_WO(s3_debug);
3532
3533#endif
3534
3535static int dm_early_init(void *handle)
3536{
3537 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3538
3539 switch (adev->asic_type) {
3540#if defined(CONFIG_DRM_AMD_DC_SI)
3541 case CHIP_TAHITI:
3542 case CHIP_PITCAIRN:
3543 case CHIP_VERDE:
3544 adev->mode_info.num_crtc = 6;
3545 adev->mode_info.num_hpd = 6;
3546 adev->mode_info.num_dig = 6;
3547 break;
3548 case CHIP_OLAND:
3549 adev->mode_info.num_crtc = 2;
3550 adev->mode_info.num_hpd = 2;
3551 adev->mode_info.num_dig = 2;
3552 break;
3553#endif
3554 case CHIP_BONAIRE:
3555 case CHIP_HAWAII:
3556 adev->mode_info.num_crtc = 6;
3557 adev->mode_info.num_hpd = 6;
3558 adev->mode_info.num_dig = 6;
3559 break;
3560 case CHIP_KAVERI:
3561 adev->mode_info.num_crtc = 4;
3562 adev->mode_info.num_hpd = 6;
3563 adev->mode_info.num_dig = 7;
3564 break;
3565 case CHIP_KABINI:
3566 case CHIP_MULLINS:
3567 adev->mode_info.num_crtc = 2;
3568 adev->mode_info.num_hpd = 6;
3569 adev->mode_info.num_dig = 6;
3570 break;
3571 case CHIP_FIJI:
3572 case CHIP_TONGA:
3573 adev->mode_info.num_crtc = 6;
3574 adev->mode_info.num_hpd = 6;
3575 adev->mode_info.num_dig = 7;
3576 break;
3577 case CHIP_CARRIZO:
3578 adev->mode_info.num_crtc = 3;
3579 adev->mode_info.num_hpd = 6;
3580 adev->mode_info.num_dig = 9;
3581 break;
3582 case CHIP_STONEY:
3583 adev->mode_info.num_crtc = 2;
3584 adev->mode_info.num_hpd = 6;
3585 adev->mode_info.num_dig = 9;
3586 break;
3587 case CHIP_POLARIS11:
3588 case CHIP_POLARIS12:
3589 adev->mode_info.num_crtc = 5;
3590 adev->mode_info.num_hpd = 5;
3591 adev->mode_info.num_dig = 5;
3592 break;
3593 case CHIP_POLARIS10:
3594 case CHIP_VEGAM:
3595 adev->mode_info.num_crtc = 6;
3596 adev->mode_info.num_hpd = 6;
3597 adev->mode_info.num_dig = 6;
3598 break;
3599 case CHIP_VEGA10:
3600 case CHIP_VEGA12:
3601 case CHIP_VEGA20:
3602 adev->mode_info.num_crtc = 6;
3603 adev->mode_info.num_hpd = 6;
3604 adev->mode_info.num_dig = 6;
3605 break;
3606#if defined(CONFIG_DRM_AMD_DC_DCN1)
3607 case CHIP_RAVEN:
3608 adev->mode_info.num_crtc = 4;
3609 adev->mode_info.num_hpd = 4;
3610 adev->mode_info.num_dig = 4;
3611 break;
3612#endif
3613 case CHIP_NAVI10:
3614 case CHIP_NAVI12:
3615#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
3616 case CHIP_SIENNA_CICHLID:
3617 case CHIP_NAVY_FLOUNDER:
3618#endif
3619 adev->mode_info.num_crtc = 6;
3620 adev->mode_info.num_hpd = 6;
3621 adev->mode_info.num_dig = 6;
3622 break;
3623 case CHIP_NAVI14:
3624 adev->mode_info.num_crtc = 5;
3625 adev->mode_info.num_hpd = 5;
3626 adev->mode_info.num_dig = 5;
3627 break;
3628 case CHIP_RENOIR:
3629 adev->mode_info.num_crtc = 4;
3630 adev->mode_info.num_hpd = 4;
3631 adev->mode_info.num_dig = 4;
3632 break;
3633 default:
3634 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type)__drm_err("Unsupported ASIC type: 0x%X\n", adev->asic_type
)
;
3635 return -EINVAL22;
3636 }
3637
3638 amdgpu_dm_set_irq_funcs(adev);
3639
3640 if (adev->mode_info.funcs == NULL((void *)0))
3641 adev->mode_info.funcs = &dm_display_funcs;
3642
3643 /*
3644 * Note: Do NOT change adev->audio_endpt_rreg and
3645 * adev->audio_endpt_wreg because they are initialised in
3646 * amdgpu_device_init()
3647 */
3648#if defined(CONFIG_DEBUG_KERNEL_DC)
3649 device_create_file(
3650 adev_to_drm(adev)->dev,
3651 &dev_attr_s3_debug);
3652#endif
3653
3654 return 0;
3655}
3656
3657static bool_Bool modeset_required(struct drm_crtc_state *crtc_state,
3658 struct dc_stream_state *new_stream,
3659 struct dc_stream_state *old_stream)
3660{
3661 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3662}
3663
3664static bool_Bool modereset_required(struct drm_crtc_state *crtc_state)
3665{
3666 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3667}
3668
3669static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3670{
3671 drm_encoder_cleanup(encoder);
3672 kfree(encoder);
3673}
3674
3675static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3676 .destroy = amdgpu_dm_encoder_destroy,
3677};
3678
3679
3680static int fill_dc_scaling_info(const struct drm_plane_state *state,
3681 struct dc_scaling_info *scaling_info)
3682{
3683 int scale_w, scale_h;
3684
3685 memset(scaling_info, 0, sizeof(*scaling_info));
3686
3687 /* Source is 16.16 fixed point; we ignore the fractional part for now... */
3688 scaling_info->src_rect.x = state->src_x >> 16;
3689 scaling_info->src_rect.y = state->src_y >> 16;
3690
3691 /*
3692 * For reasons we don't (yet) fully understand, a non-zero
3693 * src_y coordinate into an NV12 buffer can cause a
3694 * system hang. To avoid hangs (and maybe be overly cautious)
3695 * let's reject both non-zero src_x and src_y.
3696 *
3697 * We currently know of only one use-case to reproduce a
3698 * scenario with non-zero src_x and src_y for NV12, which
3699 * is to gesture the YouTube Android app into full screen
3700 * on ChromeOS.
3701 */
3702 if (state->fb &&
3703 state->fb->format->format == DRM_FORMAT_NV12 &&
3704 (scaling_info->src_rect.x != 0 ||
3705 scaling_info->src_rect.y != 0))
3706 return -EINVAL22;
3707
3708 /*
3709 * For reasons we don't (yet) fully understand, a non-zero
3710 * src_y coordinate into an NV12 buffer can cause a
3711 * system hang. To avoid hangs (and maybe be overly cautious)
3712 * let's reject both non-zero src_x and src_y.
3713 *
3714 * We currently know of only one use-case to reproduce a
3715 * scenario with non-zero src_x and src_y for NV12, which
3716 * is to gesture the YouTube Android app into full screen
3717 * on ChromeOS.
3718 */
3719 if (state->fb &&
3720 state->fb->format->format == DRM_FORMAT_NV12 &&
3721 (scaling_info->src_rect.x != 0 ||
3722 scaling_info->src_rect.y != 0))
3723 return -EINVAL22;
3724
3725 scaling_info->src_rect.width = state->src_w >> 16;
3726 if (scaling_info->src_rect.width == 0)
3727 return -EINVAL22;
3728
3729 scaling_info->src_rect.height = state->src_h >> 16;
3730 if (scaling_info->src_rect.height == 0)
3731 return -EINVAL22;
3732
3733 scaling_info->dst_rect.x = state->crtc_x;
3734 scaling_info->dst_rect.y = state->crtc_y;
3735
3736 if (state->crtc_w == 0)
3737 return -EINVAL22;
3738
3739 scaling_info->dst_rect.width = state->crtc_w;
3740
3741 if (state->crtc_h == 0)
3742 return -EINVAL22;
3743
3744 scaling_info->dst_rect.height = state->crtc_h;
3745
3746 /* DRM doesn't specify clipping on destination output. */
3747 scaling_info->clip_rect = scaling_info->dst_rect;
3748
3749 /* TODO: Validate scaling per-format with DC plane caps */
3750 scale_w = scaling_info->dst_rect.width * 1000 /
3751 scaling_info->src_rect.width;
3752
3753 if (scale_w < 250 || scale_w > 16000)
3754 return -EINVAL22;
3755
3756 scale_h = scaling_info->dst_rect.height * 1000 /
3757 scaling_info->src_rect.height;
3758
3759 if (scale_h < 250 || scale_h > 16000)
3760 return -EINVAL22;
3761
3762 /*
3763 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3764 * assume reasonable defaults based on the format.
3765 */
3766
3767 return 0;
3768}
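The source rectangle arrives in 16.16 fixed point (hence the >> 16), and the ratio checks bound the scale factor to 0.25x..16x, expressed in units of 1/1000. A worked example with hypothetical plane-state values:

    /* src_w = 1920 << 16, crtc_w = 800:
     *     scale_w = 800 * 1000 / 1920 = 416, within [250, 16000] -> accepted.
     * crtc_w = 400 would give 400 * 1000 / 1920 = 208 < 250      -> -EINVAL. */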
3769
3770static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3771 uint64_t *tiling_flags, bool_Bool *tmz_surface)
3772{
3773 struct amdgpu_bo *rbo;
3774 int r;
3775
3776 if (!amdgpu_fb) {
3777 *tiling_flags = 0;
3778 *tmz_surface = false0;
3779 return 0;
3780 }
3781
3782 rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3783 r = amdgpu_bo_reserve(rbo, false0);
3784
3785 if (unlikely(r)__builtin_expect(!!(r), 0)) {
3786 /* Don't show error message when returning -ERESTARTSYS */
3787 if (r != -ERESTARTSYS4)
3788 DRM_ERROR("Unable to reserve buffer: %d\n", r)__drm_err("Unable to reserve buffer: %d\n", r);
3789 return r;
3790 }
3791
3792 if (tiling_flags)
3793 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3794
3795 if (tmz_surface)
3796 *tmz_surface = amdgpu_bo_encrypted(rbo);
3797
3798 amdgpu_bo_unreserve(rbo);
3799
3800 return r;
3801}
3802
3803static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3804{
3805 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B)(((__u64)(tiling_flags) >> 5) & 0xFFFFFF);
3806
3807 return offset ? (address + offset * 256) : 0;
3808}
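DCC_OFFSET_256B stores the offset of the DCC metadata in 256-byte units, with zero meaning the buffer carries no DCC surface. For example, with hypothetical tiling flags:

    /* offset = 0x100 -> metadata at address + 0x100 * 256 = address + 0x10000
     * offset = 0     -> get_dcc_address() returns 0, i.e. DCC disabled.    */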
3809
3810static int
3811fill_plane_dcc_attributes(struct amdgpu_device *adev,
3812 const struct amdgpu_framebuffer *afb,
3813 const enum surface_pixel_format format,
3814 const enum dc_rotation_angle rotation,
3815 const struct plane_size *plane_size,
3816 const union dc_tiling_info *tiling_info,
3817 const uint64_t info,
3818 struct dc_plane_dcc_param *dcc,
3819 struct dc_plane_address *address,
3820 bool_Bool force_disable_dcc)
3821{
3822 struct dc *dc = adev->dm.dc;
3823 struct dc_dcc_surface_param input;
3824 struct dc_surface_dcc_cap output;
3825 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B)(((__u64)(info) >> 5) & 0xFFFFFF);
3826 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B)(((__u64)(info) >> 43) & 0x1) != 0;
3827 uint64_t dcc_address;
3828
3829 memset(&input, 0, sizeof(input))__builtin_memset((&input), (0), (sizeof(input)));
3830 memset(&output, 0, sizeof(output))__builtin_memset((&output), (0), (sizeof(output)));
3831
3832 if (force_disable_dcc)
3833 return 0;
3834
3835 if (!offset)
3836 return 0;
3837
3838 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3839 return 0;
3840
3841 if (!dc->cap_funcs.get_dcc_compression_cap)
3842 return -EINVAL22;
3843
3844 input.format = format;
3845 input.surface_size.width = plane_size->surface_size.width;
3846 input.surface_size.height = plane_size->surface_size.height;
3847 input.swizzle_mode = tiling_info->gfx9.swizzle;
3848
3849 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3850 input.scan = SCAN_DIRECTION_HORIZONTAL;
3851 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3852 input.scan = SCAN_DIRECTION_VERTICAL;
3853
3854 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3855 return -EINVAL22;
3856
3857 if (!output.capable)
3858 return -EINVAL22;
3859
3860 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3861 return -EINVAL22;
3862
3863 dcc->enable = 1;
3864 dcc->meta_pitch =
3865 AMDGPU_TILING_GET(info, DCC_PITCH_MAX)(((__u64)(info) >> 29) & 0x3FFF) + 1;
3866 dcc->independent_64b_blks = i64b;
3867
3868 dcc_address = get_dcc_address(afb->address, info);
3869 address->grph.meta_addr.low_part = lower_32_bits(dcc_address)((u32)(dcc_address));
3870 address->grph.meta_addr.high_part = upper_32_bits(dcc_address)((u32)(((dcc_address) >> 16) >> 16));
3871
3872 return 0;
3873}
3874
3875static int
3876fill_plane_buffer_attributes(struct amdgpu_device *adev,
3877 const struct amdgpu_framebuffer *afb,
3878 const enum surface_pixel_format format,
3879 const enum dc_rotation_angle rotation,
3880 const uint64_t tiling_flags,
3881 union dc_tiling_info *tiling_info,
3882 struct plane_size *plane_size,
3883 struct dc_plane_dcc_param *dcc,
3884 struct dc_plane_address *address,
3885 bool_Bool tmz_surface,
3886 bool_Bool force_disable_dcc)
3887{
3888 const struct drm_framebuffer *fb = &afb->base;
3889 int ret;
3890
3891 memset(tiling_info, 0, sizeof(*tiling_info))__builtin_memset((tiling_info), (0), (sizeof(*tiling_info)));
3892 memset(plane_size, 0, sizeof(*plane_size))__builtin_memset((plane_size), (0), (sizeof(*plane_size)));
3893 memset(dcc, 0, sizeof(*dcc))__builtin_memset((dcc), (0), (sizeof(*dcc)));
3894 memset(address, 0, sizeof(*address))__builtin_memset((address), (0), (sizeof(*address)));
3895
3896 address->tmz_surface = tmz_surface;
3897
3898 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3899 plane_size->surface_size.x = 0;
3900 plane_size->surface_size.y = 0;
3901 plane_size->surface_size.width = fb->width;
3902 plane_size->surface_size.height = fb->height;
3903 plane_size->surface_pitch =
3904 fb->pitches[0] / fb->format->cpp[0];
3905
3906 address->type = PLN_ADDR_TYPE_GRAPHICS;
3907 address->grph.addr.low_part = lower_32_bits(afb->address)((u32)(afb->address));
3908 address->grph.addr.high_part = upper_32_bits(afb->address)((u32)(((afb->address) >> 16) >> 16));
3909 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3910 uint64_t chroma_addr = afb->address + fb->offsets[1];
3911
3912 plane_size->surface_size.x = 0;
3913 plane_size->surface_size.y = 0;
3914 plane_size->surface_size.width = fb->width;
3915 plane_size->surface_size.height = fb->height;
3916 plane_size->surface_pitch =
3917 fb->pitches[0] / fb->format->cpp[0];
3918
3919 plane_size->chroma_size.x = 0;
3920 plane_size->chroma_size.y = 0;
3921 /* TODO: set these based on surface format */
3922 plane_size->chroma_size.width = fb->width / 2;
3923 plane_size->chroma_size.height = fb->height / 2;
3924
3925 plane_size->chroma_pitch =
3926 fb->pitches[1] / fb->format->cpp[1];
3927
3928 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3929 address->video_progressive.luma_addr.low_part =
3930 lower_32_bits(afb->address)((u32)(afb->address));
3931 address->video_progressive.luma_addr.high_part =
3932 upper_32_bits(afb->address)((u32)(((afb->address) >> 16) >> 16));
3933 address->video_progressive.chroma_addr.low_part =
3934 lower_32_bits(chroma_addr)((u32)(chroma_addr));
3935 address->video_progressive.chroma_addr.high_part =
3936 upper_32_bits(chroma_addr)((u32)(((chroma_addr) >> 16) >> 16));
3937 }
3938
3939 /* Fill GFX8 params */
3940 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)(((__u64)(tiling_flags) >> 0) & 0xf) == DC_ARRAY_2D_TILED_THIN1) {
3941 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3942
3943 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH)(((__u64)(tiling_flags) >> 15) & 0x3);
3944 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT)(((__u64)(tiling_flags) >> 17) & 0x3);
3945 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT)(((__u64)(tiling_flags) >> 19) & 0x3);
3946 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT)(((__u64)(tiling_flags) >> 9) & 0x7);
3947 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS)(((__u64)(tiling_flags) >> 21) & 0x3);
3948
3949 /* XXX fix me for VI */
3950 tiling_info->gfx8.num_banks = num_banks;
3951 tiling_info->gfx8.array_mode =
3952 DC_ARRAY_2D_TILED_THIN1;
3953 tiling_info->gfx8.tile_split = tile_split;
3954 tiling_info->gfx8.bank_width = bankw;
3955 tiling_info->gfx8.bank_height = bankh;
3956 tiling_info->gfx8.tile_aspect = mtaspect;
3957 tiling_info->gfx8.tile_mode =
3958 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3959 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)(((__u64)(tiling_flags) >> 0) & 0xf)
3960 == DC_ARRAY_1D_TILED_THIN1) {
3961 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3962 }
3963
3964 tiling_info->gfx8.pipe_config =
3965 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG)(((__u64)(tiling_flags) >> 4) & 0x1f);
3966
3967 if (adev->asic_type == CHIP_VEGA10 ||
3968 adev->asic_type == CHIP_VEGA12 ||
3969 adev->asic_type == CHIP_VEGA20 ||
3970 adev->asic_type == CHIP_NAVI10 ||
3971 adev->asic_type == CHIP_NAVI14 ||
3972 adev->asic_type == CHIP_NAVI12 ||
3973#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
3974 adev->asic_type == CHIP_SIENNA_CICHLID ||
3975 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3976#endif
3977 adev->asic_type == CHIP_RENOIR ||
3978 adev->asic_type == CHIP_RAVEN) {
3979 /* Fill GFX9 params */
3980 tiling_info->gfx9.num_pipes =
3981 adev->gfx.config.gb_addr_config_fields.num_pipes;
3982 tiling_info->gfx9.num_banks =
3983 adev->gfx.config.gb_addr_config_fields.num_banks;
3984 tiling_info->gfx9.pipe_interleave =
3985 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3986 tiling_info->gfx9.num_shader_engines =
3987 adev->gfx.config.gb_addr_config_fields.num_se;
3988 tiling_info->gfx9.max_compressed_frags =
3989 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3990 tiling_info->gfx9.num_rb_per_se =
3991 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3992 tiling_info->gfx9.swizzle =
3993 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE)(((__u64)(tiling_flags) >> 0) & 0x1f);
3994 tiling_info->gfx9.shaderEnable = 1;
3995
3996#ifdef CONFIG_DRM_AMD_DC_DCN3_01
3997 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3998 adev->asic_type == CHIP_NAVY_FLOUNDER)
3999 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4000#endif
4001 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
4002 plane_size, tiling_info,
4003 tiling_flags, dcc, address,
4004 force_disable_dcc);
4005 if (ret)
4006 return ret;
4007 }
4008
4009 return 0;
4010}
4011
4012static void
4013fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4014 bool_Bool *per_pixel_alpha, bool_Bool *global_alpha,
4015 int *global_alpha_value)
4016{
4017 *per_pixel_alpha = false0;
4018 *global_alpha = false0;
4019 *global_alpha_value = 0xff;
4020
4021 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4022 return;
4023
4024 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI0) {
4025 static const uint32_t alpha_formats[] = {
4026 DRM_FORMAT_ARGB8888,
4027 DRM_FORMAT_RGBA8888,
4028 DRM_FORMAT_ABGR8888,
4029 };
4030 uint32_t format = plane_state->fb->format->format;
4031 unsigned int i;
4032
4033 for (i = 0; i < ARRAY_SIZE(alpha_formats)(sizeof((alpha_formats)) / sizeof((alpha_formats)[0])); ++i) {
4034 if (format == alpha_formats[i]) {
4035 *per_pixel_alpha = true1;
4036 break;
4037 }
4038 }
4039 }
4040
4041 if (plane_state->alpha < 0xffff) {
4042 *global_alpha = true1;
4043 *global_alpha_value = plane_state->alpha >> 8;
4044 }
4045}
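DRM's plane alpha property is 16-bit while DC takes an 8-bit global alpha, hence the >> 8 above. Two example inputs:

    /* plane alpha 0xffff -> global_alpha stays false (fully opaque, no blend);
     * plane alpha 0x8080 -> global_alpha = true,
     *                       global_alpha_value = 0x8080 >> 8 = 0x80 (~50%). */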
4046
4047static int
4048fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4049 const enum surface_pixel_format format,
4050 enum dc_color_space *color_space)
4051{
4052 bool_Bool full_range;
4053
4054 *color_space = COLOR_SPACE_SRGB;
4055
4056 /* DRM color properties only affect non-RGB formats. */
4057 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4058 return 0;
4059
4060 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4061
4062 switch (plane_state->color_encoding) {
4063 case DRM_COLOR_YCBCR_BT601:
4064 if (full_range)
4065 *color_space = COLOR_SPACE_YCBCR601;
4066 else
4067 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4068 break;
4069
4070 case DRM_COLOR_YCBCR_BT709:
4071 if (full_range)
4072 *color_space = COLOR_SPACE_YCBCR709;
4073 else
4074 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4075 break;
4076
4077 case DRM_COLOR_YCBCR_BT2020:
4078 if (full_range)
4079 *color_space = COLOR_SPACE_2020_YCBCR;
4080 else
4081 return -EINVAL22;
4082 break;
4083
4084 default:
4085 return -EINVAL22;
4086 }
4087
4088 return 0;
4089}
4090
4091static int
4092fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4093 const struct drm_plane_state *plane_state,
4094 const uint64_t tiling_flags,
4095 struct dc_plane_info *plane_info,
4096 struct dc_plane_address *address,
4097 bool_Bool tmz_surface,
4098 bool_Bool force_disable_dcc)
4099{
4100 const struct drm_framebuffer *fb = plane_state->fb;
4101 const struct amdgpu_framebuffer *afb =
4102 to_amdgpu_framebuffer(plane_state->fb);
4103 struct drm_format_name_buf format_name;
4104 int ret;
4105
4106 memset(plane_info, 0, sizeof(*plane_info))__builtin_memset((plane_info), (0), (sizeof(*plane_info)));
4107
4108 switch (fb->format->format) {
4109 case DRM_FORMAT_C8:
4110 plane_info->format =
4111 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4112 break;
4113 case DRM_FORMAT_RGB565:
4114 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4115 break;
4116 case DRM_FORMAT_XRGB8888:
4117 case DRM_FORMAT_ARGB8888:
4118 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4119 break;
4120 case DRM_FORMAT_XRGB2101010:
4121 case DRM_FORMAT_ARGB2101010:
4122 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4123 break;
4124 case DRM_FORMAT_XBGR2101010:
4125 case DRM_FORMAT_ABGR2101010:
4126 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4127 break;
4128 case DRM_FORMAT_XBGR8888:
4129 case DRM_FORMAT_ABGR8888:
4130 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4131 break;
4132 case DRM_FORMAT_NV21:
4133 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4134 break;
4135 case DRM_FORMAT_NV12:
4136 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4137 break;
4138 case DRM_FORMAT_P010:
4139 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4140 break;
4141 case DRM_FORMAT_XRGB16161616F:
4142 case DRM_FORMAT_ARGB16161616F:
4143 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4144 break;
4145 case DRM_FORMAT_XBGR16161616F:
4146 case DRM_FORMAT_ABGR16161616F:
4147 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4148 break;
4149 default:
4150 DRM_ERROR(
4151 "Unsupported screen format %s\n",
4152 drm_get_format_name(fb->format->format, &format_name));
4153 return -EINVAL22;
4154 }
4155
4156 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK( (1<<0) | (1<<1) | (1<<2) | (1<<3))) {
4157 case DRM_MODE_ROTATE_0(1<<0):
4158 plane_info->rotation = ROTATION_ANGLE_0;
4159 break;
4160 case DRM_MODE_ROTATE_90(1<<1):
4161 plane_info->rotation = ROTATION_ANGLE_90;
4162 break;
4163 case DRM_MODE_ROTATE_180(1<<2):
4164 plane_info->rotation = ROTATION_ANGLE_180;
4165 break;
4166 case DRM_MODE_ROTATE_270(1<<3):
4167 plane_info->rotation = ROTATION_ANGLE_270;
4168 break;
4169 default:
4170 plane_info->rotation = ROTATION_ANGLE_0;
4171 break;
4172 }
4173
4174 plane_info->visible = true1;
4175 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4176
4177 plane_info->layer_index = 0;
4178
4179 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4180 &plane_info->color_space);
4181 if (ret)
4182 return ret;
4183
4184 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4185 plane_info->rotation, tiling_flags,
4186 &plane_info->tiling_info,
4187 &plane_info->plane_size,
4188 &plane_info->dcc, address, tmz_surface,
4189 force_disable_dcc);
4190 if (ret)
4191 return ret;
4192
4193 fill_blending_from_plane_state(
4194 plane_state, &plane_info->per_pixel_alpha,
4195 &plane_info->global_alpha, &plane_info->global_alpha_value);
4196
4197 return 0;
4198}
4199
4200static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4201 struct dc_plane_state *dc_plane_state,
4202 struct drm_plane_state *plane_state,
4203 struct drm_crtc_state *crtc_state)
4204{
4205 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4206 struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4207 struct dc_scaling_info scaling_info;
4208 struct dc_plane_info plane_info;
4209 int ret;
4210 bool_Bool force_disable_dcc = false0;
4211
4212 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4213 if (ret)
4214 return ret;
4215
4216 dc_plane_state->src_rect = scaling_info.src_rect;
4217 dc_plane_state->dst_rect = scaling_info.dst_rect;
4218 dc_plane_state->clip_rect = scaling_info.clip_rect;
4219 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4220
4221 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4222 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4223 dm_plane_state->tiling_flags,
4224 &plane_info,
4225 &dc_plane_state->address,
4226 dm_plane_state->tmz_surface,
4227 force_disable_dcc);
4228 if (ret)
4229 return ret;
4230
4231 dc_plane_state->format = plane_info.format;
4232 dc_plane_state->color_space = plane_info.color_space;
4233 dc_plane_state->format = plane_info.format;
4234 dc_plane_state->plane_size = plane_info.plane_size;
4235 dc_plane_state->rotation = plane_info.rotation;
4236 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4237 dc_plane_state->stereo_format = plane_info.stereo_format;
4238 dc_plane_state->tiling_info = plane_info.tiling_info;
4239 dc_plane_state->visible = plane_info.visible;
4240 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4241 dc_plane_state->global_alpha = plane_info.global_alpha;
4242 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4243 dc_plane_state->dcc = plane_info.dcc;
4244 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4245
4246 /*
4247 * Always set input transfer function, since plane state is refreshed
4248 * every time.
4249 */
4250 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4251 if (ret)
4252 return ret;
4253
4254 return 0;
4255}
4256
4257static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4258 const struct dm_connector_state *dm_state,
4259 struct dc_stream_state *stream)
4260{
4261 enum amdgpu_rmx_type rmx_type;
4262
4263 struct rect src = { 0 }; /* viewport in composition space*/
4264 struct rect dst = { 0 }; /* stream addressable area */
4265
4266 /* no mode. nothing to be done */
4267 if (!mode)
4268 return;
4269
4270 /* Full screen scaling by default */
4271 src.width = mode->hdisplay;
4272 src.height = mode->vdisplay;
4273 dst.width = stream->timing.h_addressable;
4274 dst.height = stream->timing.v_addressable;
4275
4276 if (dm_state) {
4277 rmx_type = dm_state->scaling;
4278 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4279 if (src.width * dst.height <
4280 src.height * dst.width) {
4281 /* height needs less upscaling/more downscaling */
4282 dst.width = src.width *
4283 dst.height / src.height;
4284 } else {
4285 /* width needs less upscaling/more downscaling */
4286 dst.height = src.height *
4287 dst.width / src.width;
4288 }
4289 } else if (rmx_type == RMX_CENTER) {
4290 dst = src;
4291 }
4292
4293 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4294 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4295
4296 if (dm_state->underscan_enable) {
4297 dst.x += dm_state->underscan_hborder / 2;
4298 dst.y += dm_state->underscan_vborder / 2;
4299 dst.width -= dm_state->underscan_hborder;
4300 dst.height -= dm_state->underscan_vborder;
4301 }
4302 }
4303
4304 stream->src = src;
4305 stream->dst = dst;
4306
4307 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",__drm_dbg(DRM_UT_DRIVER, "Destination Rectangle x:%d y:%d width:%d height:%d\n"
, dst.x, dst.y, dst.width, dst.height)
4308 dst.x, dst.y, dst.width, dst.height)__drm_dbg(DRM_UT_DRIVER, "Destination Rectangle x:%d y:%d width:%d height:%d\n"
, dst.x, dst.y, dst.width, dst.height)
;
4309
4310}
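The RMX_ASPECT/RMX_OFF branch shrinks whichever destination dimension would otherwise need more upscaling, then centers the result, which letterboxes or pillarboxes as needed. A worked example, scaling a 1280x1024 (5:4) mode onto a 1920x1080 stream:

    /* src.width * dst.height = 1280 * 1080 = 1382400
     * src.height * dst.width = 1024 * 1920 = 1966080 -> width branch taken:
     *     dst.width = 1280 * 1080 / 1024 = 1350
     *     dst.x = (1920 - 1350) / 2 = 285, dst.y = 0  (pillarboxed). */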
4311
4312static enum dc_color_depth
4313convert_color_depth_from_display_info(const struct drm_connector *connector,
4314 bool_Bool is_y420, int requested_bpc)
4315{
4316 uint8_t bpc;
4317
4318 if (is_y420) {
4319 bpc = 8;
4320
4321 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4322 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48(1 << 2))
4323 bpc = 16;
4324 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36(1 << 1))
4325 bpc = 12;
4326 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30(1 << 0))
4327 bpc = 10;
4328 } else {
4329 bpc = (uint8_t)connector->display_info.bpc;
4330 /* Assume 8 bpc by default if no bpc is specified. */
4331 bpc = bpc ? bpc : 8;
4332 }
4333
4334 if (requested_bpc > 0) {
4335 /*
4336 * Cap display bpc based on the user requested value.
4337 *
4338 * The value for state->max_bpc may not be correctly updated
4339 * depending on when the connector gets added to the state
4340 * or if this was called outside of atomic check, so it
4341 * can't be used directly.
4342 */
4343 bpc = min_t(u8, bpc, requested_bpc);
4344
4345 /* Round down to the nearest even number. */
4346 bpc = bpc - (bpc & 1);
4347 }
4348
4349 switch (bpc) {
4350 case 0:
4351 /*
4352 * Temporary workaround: DRM doesn't parse color depth for
4353 * EDID revisions before 1.4.
4354 * TODO: Fix edid parsing
4355 */
4356 return COLOR_DEPTH_888;
4357 case 6:
4358 return COLOR_DEPTH_666;
4359 case 8:
4360 return COLOR_DEPTH_888;
4361 case 10:
4362 return COLOR_DEPTH_101010;
4363 case 12:
4364 return COLOR_DEPTH_121212;
4365 case 14:
4366 return COLOR_DEPTH_141414;
4367 case 16:
4368 return COLOR_DEPTH_161616;
4369 default:
4370 return COLOR_DEPTH_UNDEFINED;
4371 }
4372}
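requested_bpc caps the depth the sink advertises, and the result is rounded down to an even value since odd depths aren't meaningful for these encodings. Two example inputs:

    /* sink bpc 12, requested_bpc 10 -> min(12, 10) = 10   -> COLOR_DEPTH_101010
     * sink bpc 12, requested_bpc 11 -> 11 - (11 & 1) = 10 -> COLOR_DEPTH_101010 */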
4373
4374static enum dc_aspect_ratio
4375get_aspect_ratio(const struct drm_display_mode *mode_in)
4376{
4377 /* 1-1 mapping, since both enums follow the HDMI spec. */
4378 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4379}
4380
4381static enum dc_color_space
4382get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4383{
4384 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4385
4386 switch (dc_crtc_timing->pixel_encoding) {
4387 case PIXEL_ENCODING_YCBCR422:
4388 case PIXEL_ENCODING_YCBCR444:
4389 case PIXEL_ENCODING_YCBCR420:
4390 {
4391 /*
4392 * 27030 kHz is the separation point between HDTV and SDTV
4393 * according to the HDMI spec; we use YCbCr709 and YCbCr601
4394 * respectively.
4395 */
4396 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4397 if (dc_crtc_timing->flags.Y_ONLY)
4398 color_space =
4399 COLOR_SPACE_YCBCR709_LIMITED;
4400 else
4401 color_space = COLOR_SPACE_YCBCR709;
4402 } else {
4403 if (dc_crtc_timing->flags.Y_ONLY)
4404 color_space =
4405 COLOR_SPACE_YCBCR601_LIMITED;
4406 else
4407 color_space = COLOR_SPACE_YCBCR601;
4408 }
4409
4410 }
4411 break;
4412 case PIXEL_ENCODING_RGB:
4413 color_space = COLOR_SPACE_SRGB;
4414 break;
4415
4416 default:
4417 WARN_ON(1);
4418 break;
4419 }
4420
4421 return color_space;
4422}
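Note that pix_clk_100hz is in units of 100 Hz, so the 270300 threshold corresponds to 27.03 MHz. A small sketch with two common CEA pixel clocks (assumed values, not taken from the driver):

#include <stdio.h>

int main(void)
{
	/* pix_clk_100hz is in units of 100 Hz, so 270300 = 27.03 MHz. */
	int sd_480p = 270000; /* 27.000 MHz: at or below the threshold -> YCbCr601 */
	int hd_720p = 742500; /* 74.250 MHz: above the threshold -> YCbCr709 */

	printf("480p -> BT.%s\n", sd_480p > 270300 ? "709" : "601");
	printf("720p -> BT.%s\n", hd_720p > 270300 ? "709" : "601");
	return 0;
}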
4423
4424static bool adjust_colour_depth_from_display_info(
4425 struct dc_crtc_timing *timing_out,
4426 const struct drm_display_info *info)
4427{
4428 enum dc_color_depth depth = timing_out->display_color_depth;
4429 int normalized_clk;
4430 do {
4431 normalized_clk = timing_out->pix_clk_100hz / 10;
4432 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4433 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4434 normalized_clk /= 2;
4435 /* Adjusting pix clock following on HDMI spec based on colour depth */
4436 switch (depth) {
4437 case COLOR_DEPTH_888:
4438 break;
4439 case COLOR_DEPTH_101010:
4440 normalized_clk = (normalized_clk * 30) / 24;
4441 break;
4442 case COLOR_DEPTH_121212:
4443 normalized_clk = (normalized_clk * 36) / 24;
4444 break;
4445 case COLOR_DEPTH_161616:
4446 normalized_clk = (normalized_clk * 48) / 24;
4447 break;
4448 default:
4449 /* The above depths are the only ones valid for HDMI. */
4450 return false;
4451 }
4452 if (normalized_clk <= info->max_tmds_clock) {
4453 timing_out->display_color_depth = depth;
4454 return true;
4455 }
4456 } while (--depth > COLOR_DEPTH_666);
4457 return false;
4458}
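The 30/24, 36/24 and 48/24 factors scale the 8-bit pixel clock by the extra bits per component. A worked example with assumed numbers (a 594 MHz mode against a 600 MHz TMDS limit), showing why the loop would step back from 10-bit to COLOR_DEPTH_888:

#include <stdio.h>

int main(void)
{
	int max_tmds_clock = 600000;  /* kHz, assumed sink limit        */
	int clk = 5940000 / 10;       /* pix_clk_100hz -> 594000 kHz    */

	int at_10bpc = clk * 30 / 24; /* 742500 kHz: over the limit     */
	int at_8bpc = clk;            /* 594000 kHz: fits               */

	printf("10 bpc %s, 8 bpc %s\n",
	    at_10bpc <= max_tmds_clock ? "fits" : "exceeds",
	    at_8bpc <= max_tmds_clock ? "fits" : "exceeds");
	return 0;
}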
4459
4460static void fill_stream_properties_from_drm_display_mode(
4461 struct dc_stream_state *stream,
4462 const struct drm_display_mode *mode_in,
4463 const struct drm_connector *connector,
4464 const struct drm_connector_state *connector_state,
4465 const struct dc_stream_state *old_stream,
4466 int requested_bpc)
4467{
4468 struct dc_crtc_timing *timing_out = &stream->timing;
4469 const struct drm_display_info *info = &connector->display_info;
4470 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4471 struct hdmi_vendor_infoframe hv_frame;
4472 struct hdmi_avi_infoframe avi_frame;
4473
4474 memset(&hv_frame, 0, sizeof(hv_frame));
4475 memset(&avi_frame, 0, sizeof(avi_frame));
4476
4477 timing_out->h_border_left = 0;
4478 timing_out->h_border_right = 0;
4479 timing_out->v_border_top = 0;
4480 timing_out->v_border_bottom = 0;
4481 /* TODO: un-hardcode */
4482 if (drm_mode_is_420_only(info, mode_in)
4483 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4484 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4485 else if (drm_mode_is_420_also(info, mode_in)
4486 && aconnector->force_yuv420_output)
4487 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4488 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4489 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4490 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4491 else
4492 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4493
4494 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4495 timing_out->display_color_depth = convert_color_depth_from_display_info(
4496 connector,
4497 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4498 requested_bpc);
4499 timing_out->scan_type = SCANNING_TYPE_NODATA;
4500 timing_out->hdmi_vic = 0;
4501
4502 if (old_stream) {
4503 timing_out->vic = old_stream->timing.vic;
4504 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4505 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4506 } else {
4507 timing_out->vic = drm_match_cea_mode(mode_in);
4508 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4509 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4510 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4511 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4512 }
4513
4514 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4515 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4516 timing_out->vic = avi_frame.video_code;
4517 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4518 timing_out->hdmi_vic = hv_frame.vic;
4519 }
4520
4521 timing_out->h_addressable = mode_in->crtc_hdisplay;
4522 timing_out->h_total = mode_in->crtc_htotal;
4523 timing_out->h_sync_width =
4524 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4525 timing_out->h_front_porch =
4526 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4527 timing_out->v_total = mode_in->crtc_vtotal;
4528 timing_out->v_addressable = mode_in->crtc_vdisplay;
4529 timing_out->v_front_porch =
4530 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4531 timing_out->v_sync_width =
4532 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4533 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4534 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4535
4536 stream->output_color_space = get_output_color_space(timing_out);
4537
4538 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4539 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4540 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4541 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4542 drm_mode_is_420_also(info, mode_in) &&
4543 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4544 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4545 adjust_colour_depth_from_display_info(timing_out, info);
4546 }
4547 }
4548}
4549
4550static void fill_audio_info(struct audio_info *audio_info,
4551 const struct drm_connector *drm_connector,
4552 const struct dc_sink *dc_sink)
4553{
4554 int i = 0;
4555 int cea_revision = 0;
4556 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4557
4558 audio_info->manufacture_id = edid_caps->manufacturer_id;
4559 audio_info->product_id = edid_caps->product_id;
4560
4561 cea_revision = drm_connector->display_info.cea_rev;
4562
4563#ifdef __linux__
4564 strscpy(audio_info->display_name,
4565 edid_caps->display_name,
4566 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4567#else
4568 strncpy(audio_info->display_name,
4569 edid_caps->display_name,
4570 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
4571#endif
4572
4573 if (cea_revision >= 3) {
4574 audio_info->mode_count = edid_caps->audio_mode_count;
4575
4576 for (i = 0; i < audio_info->mode_count; ++i) {
4577 audio_info->modes[i].format_code =
4578 (enum audio_format_code)
4579 (edid_caps->audio_modes[i].format_code);
4580 audio_info->modes[i].channel_count =
4581 edid_caps->audio_modes[i].channel_count;
4582 audio_info->modes[i].sample_rates.all =
4583 edid_caps->audio_modes[i].sample_rate;
4584 audio_info->modes[i].sample_size =
4585 edid_caps->audio_modes[i].sample_size;
4586 }
4587 }
4588
4589 audio_info->flags.all = edid_caps->speaker_flags;
4590
4591 /* TODO: We only check for the progressive mode, check for interlace mode too */
4592 if (drm_connector->latency_present[0]) {
4593 audio_info->video_latency = drm_connector->video_latency[0];
4594 audio_info->audio_latency = drm_connector->audio_latency[0];
4595 }
4596
4597 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4598
4599}
4600
4601static void
4602copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4603 struct drm_display_mode *dst_mode)
4604{
4605 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4606 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4607 dst_mode->crtc_clock = src_mode->crtc_clock;
4608 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4609 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4610 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4611 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4612 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4613 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4614 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4615 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4616 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4617 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4618 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4619}
4620
4621static void
4622decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4623 const struct drm_display_mode *native_mode,
4624 bool scale_enabled)
4625{
4626 if (scale_enabled) {
4627 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4628 } else if (native_mode->clock == drm_mode->clock &&
4629 native_mode->htotal == drm_mode->htotal &&
4630 native_mode->vtotal == drm_mode->vtotal) {
4631 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4632 } else {
4633 /* neither scaling nor an amdgpu-inserted mode: no need to patch */
4634 }
4635}
4636
4637static struct dc_sink *
4638create_fake_sink(struct amdgpu_dm_connector *aconnector)
4639{
4640 struct dc_sink_init_data sink_init_data = { 0 };
4641 struct dc_sink *sink = NULL;
4642 sink_init_data.link = aconnector->dc_link;
4643 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4644
4645 sink = dc_sink_create(&sink_init_data);
4646 if (!sink) {
4647 DRM_ERROR("Failed to create sink!\n")__drm_err("Failed to create sink!\n");
4648 return NULL((void *)0);
4649 }
4650 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4651
4652 return sink;
4653}
4654
4655static void set_multisync_trigger_params(
4656 struct dc_stream_state *stream)
4657{
4658 if (stream->triggered_crtc_reset.enabled) {
4659 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4660 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4661 }
4662}
4663
4664static void set_master_stream(struct dc_stream_state *stream_set[],
4665 int stream_count)
4666{
4667 int j, highest_rfr = 0, master_stream = 0;
4668
4669 for (j = 0; j < stream_count; j++) {
4670 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4671 int refresh_rate = 0;
4672
4673 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4674 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4675 if (refresh_rate > highest_rfr) {
4676 highest_rfr = refresh_rate;
4677 master_stream = j;
4678 }
4679 }
4680 }
4681 for (j = 0; j < stream_count; j++) {
4682 if (stream_set[j])
4683 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4684 }
4685}
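The refresh-rate expression above is integer arithmetic over pixel clock and total raster size; for CEA 1080p60 timings it comes out exact. A sketch with assumed values:

#include <stdio.h>

int main(void)
{
	int pix_clk_100hz = 1485000; /* 148.5 MHz in units of 100 Hz */
	int h_total = 2200, v_total = 1125;

	/* Same arithmetic as set_master_stream(): *100 converts the
	 * 100 Hz units to Hz before the integer divide. */
	int rfr = (pix_clk_100hz * 100) / (h_total * v_total);

	printf("%d Hz\n", rfr); /* prints 60 */
	return 0;
}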
4686
4687static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4688{
4689 int i = 0;
4690
4691 if (context->stream_count < 2)
4692 return;
4693 for (i = 0; i < context->stream_count ; i++) {
4694 if (!context->streams[i])
4695 continue;
4696 /*
4697 * TODO: add a function to read AMD VSDB bits and set
4698 * crtc_sync_master.multi_sync_enabled flag
4699 * For now it's set to false
4700 */
4701 set_multisync_trigger_params(context->streams[i]);
4702 }
4703 set_master_stream(context->streams, context->stream_count);
4704}
4705
4706static struct dc_stream_state *
4707create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4708 const struct drm_display_mode *drm_mode,
4709 const struct dm_connector_state *dm_state,
4710 const struct dc_stream_state *old_stream,
4711 int requested_bpc)
4712{
4713 struct drm_display_mode *preferred_mode = NULL;
4714 struct drm_connector *drm_connector;
4715 const struct drm_connector_state *con_state =
4716 dm_state ? &dm_state->base : NULL;
4717 struct dc_stream_state *stream = NULL;
4718 struct drm_display_mode mode = *drm_mode;
4719 bool native_mode_found = false;
4720 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4721 int mode_refresh;
4722 int preferred_refresh = 0;
4723#if defined(CONFIG_DRM_AMD_DC_DCN)
4724 struct dsc_dec_dpcd_caps dsc_caps;
4725#endif
4726 uint32_t link_bandwidth_kbps;
4727
4728 struct dc_sink *sink = NULL;
4729 if (aconnector == NULL) {
4730 DRM_ERROR("aconnector is NULL!\n");
4731 return stream;
4732 }
4733
4734 drm_connector = &aconnector->base;
4735
4736 if (!aconnector->dc_sink) {
4737 sink = create_fake_sink(aconnector);
4738 if (!sink)
4739 return stream;
4740 } else {
4741 sink = aconnector->dc_sink;
4742 dc_sink_retain(sink);
4743 }
4744
4745 stream = dc_create_stream_for_sink(sink);
4746
4747 if (stream == NULL) {
4748 DRM_ERROR("Failed to create stream for sink!\n");
4749 goto finish;
4750 }
4751
4752 stream->dm_stream_context = aconnector;
4753
4754 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4755 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4756
4757 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4758 /* Search for preferred mode */
4759 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4760 native_mode_found = true;
4761 break;
4762 }
4763 }
4764 if (!native_mode_found)
4765 preferred_mode = list_first_entry_or_null(
4766 &aconnector->base.modes,
4767 struct drm_display_mode,
4768 head);
4770 mode_refresh = drm_mode_vrefresh(&mode);
4771
4772 if (preferred_mode == NULL) {
4773 /*
4774 * This may not be an error, the use case is when we have no
4775 * usermode calls to reset and set mode upon hotplug. In this
4776 * case, we call set mode ourselves to restore the previous mode
4778 * and the mode list may not be filled in time.
4778 */
4779 DRM_DEBUG_DRIVER("No preferred mode found\n")__drm_dbg(DRM_UT_DRIVER, "No preferred mode found\n");
4780 } else {
4781 decide_crtc_timing_for_drm_display_mode(
4782 &mode, preferred_mode,
4783 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4784 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4785 }
4786
4787 if (!dm_state)
4788 drm_mode_set_crtcinfo(&mode, 0);
4789
4790 /*
4791 * If scaling is enabled and refresh rate didn't change
4792 * we copy the vic and polarities of the old timings
4793 */
4794 if (!scale || mode_refresh != preferred_refresh)
4795 fill_stream_properties_from_drm_display_mode(stream,
4796 &mode, &aconnector->base, con_state, NULL, requested_bpc);
4797 else
4798 fill_stream_properties_from_drm_display_mode(stream,
4799 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4800
4801 stream->timing.flags.DSC = 0;
4802
4803 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4804#if defined(CONFIG_DRM_AMD_DC_DCN)
4805 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4806 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4807 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4808 &dsc_caps);
4809#endif
4810 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4811 dc_link_get_link_cap(aconnector->dc_link));
4812
4813#if defined(CONFIG_DRM_AMD_DC_DCN)
4814 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4815 /* Set DSC policy according to dsc_clock_en */
4816 dc_dsc_policy_set_enable_dsc_when_not_needed(
4817 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4818
4819 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4820 &dsc_caps,
4821 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4822 link_bandwidth_kbps,
4823 &stream->timing,
4824 &stream->timing.dsc_cfg))
4825 stream->timing.flags.DSC = 1;
4826 /* Overwrite the stream flag if DSC is enabled through debugfs */
4827 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4828 stream->timing.flags.DSC = 1;
4829
4830 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4831 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4832
4833 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4834 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4835
4836 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4837 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4838 }
4839#endif
4840 }
4841
4842 update_stream_scaling_settings(&mode, dm_state, stream);
4843
4844 fill_audio_info(
4845 &stream->audio_info,
4846 drm_connector,
4847 sink);
4848
4849 update_stream_signal(stream, sink);
4850
4851 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4852 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4853
4854 if (stream->link->psr_settings.psr_feature_enabled) {
4855 //
4856 // should decide stream support vsc sdp colorimetry capability
4857 // before building vsc info packet
4858 //
4859 stream->use_vsc_sdp_for_colorimetry = false;
4860 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4861 stream->use_vsc_sdp_for_colorimetry =
4862 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4863 } else {
4864 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4865 stream->use_vsc_sdp_for_colorimetry = true;
4866 }
4867 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4868 }
4869finish:
4870 dc_sink_release(sink);
4871
4872 return stream;
4873}
4874
4875static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4876{
4877 drm_crtc_cleanup(crtc);
4878 kfree(crtc);
4879}
4880
4881static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4882 struct drm_crtc_state *state)
4883{
4884 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4885
4886 /* TODO: Destroy dc_stream objects once the stream object is flattened */
4887 if (cur->stream)
4888 dc_stream_release(cur->stream);
4889
4890
4891 __drm_atomic_helper_crtc_destroy_state(state);
4892
4893
4894 kfree(state);
4895}
4896
4897static void dm_crtc_reset_state(struct drm_crtc *crtc)
4898{
4899 struct dm_crtc_state *state;
4900
4901 if (crtc->state)
4902 dm_crtc_destroy_state(crtc, crtc->state);
4903
4904 state = kzalloc(sizeof(*state), GFP_KERNEL);
4905 if (WARN_ON(!state))
4906 return;
4907
4908 __drm_atomic_helper_crtc_reset(crtc, &state->base);
4909}
4910
4911static struct drm_crtc_state *
4912dm_crtc_duplicate_state(struct drm_crtc *crtc)
4913{
4914 struct dm_crtc_state *state, *cur;
4915
4916 cur = to_dm_crtc_state(crtc->state);
4917
4918 if (WARN_ON(!crtc->state))
4919 return NULL;
4920
4921 state = kzalloc(sizeof(*state), GFP_KERNEL);
4922 if (!state)
4923 return NULL;
4924
4925 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4926
4927 if (cur->stream) {
4928 state->stream = cur->stream;
4929 dc_stream_retain(state->stream);
4930 }
4931
4932 state->active_planes = cur->active_planes;
4933 state->vrr_infopacket = cur->vrr_infopacket;
4934 state->abm_level = cur->abm_level;
4935 state->vrr_supported = cur->vrr_supported;
4936 state->freesync_config = cur->freesync_config;
4937 state->crc_src = cur->crc_src;
4938 state->cm_has_degamma = cur->cm_has_degamma;
4939 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4940
4941 /* TODO: Duplicate the dc_stream once the stream object is flattened */
4942
4943 return &state->base;
4944}
4945
4946static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4947{
4948 enum dc_irq_source irq_source;
4949 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4950 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4951 int rc;
4952
4953 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4954
4955 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4956
4957 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",__drm_dbg(DRM_UT_DRIVER, "crtc %d - vupdate irq %sabling: r=%d\n"
, acrtc->crtc_id, enable ? "en" : "dis", rc)
4958 acrtc->crtc_id, enable ? "en" : "dis", rc)__drm_dbg(DRM_UT_DRIVER, "crtc %d - vupdate irq %sabling: r=%d\n"
, acrtc->crtc_id, enable ? "en" : "dis", rc)
;
4959 return rc;
4960}
4961
4962static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4963{
4964 enum dc_irq_source irq_source;
4965 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4966 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4967 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4968 int rc = 0;
4969
4970 if (enable) {
4971 /* vblank irq on -> Only need vupdate irq in vrr mode */
4972 if (amdgpu_dm_vrr_active(acrtc_state))
4973 rc = dm_set_vupdate_irq(crtc, true);
4974 } else {
4975 /* vblank irq off -> vupdate irq off */
4976 rc = dm_set_vupdate_irq(crtc, false);
4977 }
4978
4979 if (rc)
4980 return rc;
4981
4982 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4983 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4984}
4985
4986static int dm_enable_vblank(struct drm_crtc *crtc)
4987{
4988 return dm_set_vblank(crtc, true);
4989}
4990
4991static void dm_disable_vblank(struct drm_crtc *crtc)
4992{
4993 dm_set_vblank(crtc, false);
4994}
4995
4996 /* Implemented only the options currently available for the driver */
4997static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4998 .reset = dm_crtc_reset_state,
4999 .destroy = amdgpu_dm_crtc_destroy,
5000 .gamma_set = drm_atomic_helper_legacy_gamma_set,
5001 .set_config = drm_atomic_helper_set_config,
5002 .page_flip = drm_atomic_helper_page_flip,
5003 .atomic_duplicate_state = dm_crtc_duplicate_state,
5004 .atomic_destroy_state = dm_crtc_destroy_state,
5005 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5006 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5007 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5008 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5009 .enable_vblank = dm_enable_vblank,
5010 .disable_vblank = dm_disable_vblank,
5011 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5012};
5013
5014static enum drm_connector_status
5015amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5016{
5017 bool connected;
5018 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5019
5020 /*
5021 * Notes:
5022 * 1. This interface is NOT called in context of HPD irq.
5023 * 2. This interface *is called* in context of user-mode ioctl. Which
5024 * makes it a bad place for *any* MST-related activity.
5025 */
5026
5027 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5028 !aconnector->fake_enable)
5029 connected = (aconnector->dc_sink != NULL);
5030 else
5031 connected = (aconnector->base.force == DRM_FORCE_ON);
5032
5033 update_subconnector_property(aconnector);
5034
5035 return (connected ? connector_status_connected :
5036 connector_status_disconnected);
5037}
5038
5039int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5040 struct drm_connector_state *connector_state,
5041 struct drm_property *property,
5042 uint64_t val)
5043{
5044 struct drm_device *dev = connector->dev;
5045 struct amdgpu_device *adev = drm_to_adev(dev);
5046 struct dm_connector_state *dm_old_state =
5047 to_dm_connector_state(connector->state);
5048 struct dm_connector_state *dm_new_state =
5049 to_dm_connector_state(connector_state);
5050
5051 int ret = -EINVAL;
5052
5053 if (property == dev->mode_config.scaling_mode_property) {
5054 enum amdgpu_rmx_type rmx_type;
5055
5056 switch (val) {
5057 case DRM_MODE_SCALE_CENTER:
5058 rmx_type = RMX_CENTER;
5059 break;
5060 case DRM_MODE_SCALE_ASPECT:
5061 rmx_type = RMX_ASPECT;
5062 break;
5063 case DRM_MODE_SCALE_FULLSCREEN:
5064 rmx_type = RMX_FULL;
5065 break;
5066 case DRM_MODE_SCALE_NONE:
5067 default:
5068 rmx_type = RMX_OFF;
5069 break;
5070 }
5071
5072 if (dm_old_state->scaling == rmx_type)
5073 return 0;
5074
5075 dm_new_state->scaling = rmx_type;
5076 ret = 0;
5077 } else if (property == adev->mode_info.underscan_hborder_property) {
5078 dm_new_state->underscan_hborder = val;
5079 ret = 0;
5080 } else if (property == adev->mode_info.underscan_vborder_property) {
5081 dm_new_state->underscan_vborder = val;
5082 ret = 0;
5083 } else if (property == adev->mode_info.underscan_property) {
5084 dm_new_state->underscan_enable = val;
5085 ret = 0;
5086 } else if (property == adev->mode_info.abm_level_property) {
5087 dm_new_state->abm_level = val;
5088 ret = 0;
5089 }
5090
5091 return ret;
5092}
5093
5094int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5095 const struct drm_connector_state *state,
5096 struct drm_property *property,
5097 uint64_t *val)
5098{
5099 struct drm_device *dev = connector->dev;
5100 struct amdgpu_device *adev = drm_to_adev(dev);
5101 struct dm_connector_state *dm_state =
5102 to_dm_connector_state(state);
5103 int ret = -EINVAL;
5104
5105 if (property == dev->mode_config.scaling_mode_property) {
5106 switch (dm_state->scaling) {
5107 case RMX_CENTER:
5108 *val = DRM_MODE_SCALE_CENTER;
5109 break;
5110 case RMX_ASPECT:
5111 *val = DRM_MODE_SCALE_ASPECT;
5112 break;
5113 case RMX_FULL:
5114 *val = DRM_MODE_SCALE_FULLSCREEN;
5115 break;
5116 case RMX_OFF:
5117 default:
5118 *val = DRM_MODE_SCALE_NONE;
5119 break;
5120 }
5121 ret = 0;
5122 } else if (property == adev->mode_info.underscan_hborder_property) {
5123 *val = dm_state->underscan_hborder;
5124 ret = 0;
5125 } else if (property == adev->mode_info.underscan_vborder_property) {
5126 *val = dm_state->underscan_vborder;
5127 ret = 0;
5128 } else if (property == adev->mode_info.underscan_property) {
5129 *val = dm_state->underscan_enable;
5130 ret = 0;
5131 } else if (property == adev->mode_info.abm_level_property) {
5132 *val = dm_state->abm_level;
5133 ret = 0;
5134 }
5135
5136 return ret;
5137}
5138
5139static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5140{
5141 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5142
5143 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5144}
5145
5146static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5147{
5148 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5149 const struct dc_link *link = aconnector->dc_link;
5150 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5151 struct amdgpu_display_manager *dm = &adev->dm;
5152
5153 /*
5154 * Call only if mst_mgr was initialized before, since it's not done
5155 * for all connector types.
5156 */
5157 if (aconnector->mst_mgr.dev)
5158 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5159
5160#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5161 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5162
5163 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5164 link->type != dc_connection_none &&
5165 dm->backlight_dev) {
5166 backlight_device_unregister(dm->backlight_dev);
5167 dm->backlight_dev = NULL;
5168 }
5169#endif
5170
5171 if (aconnector->dc_em_sink)
5172 dc_sink_release(aconnector->dc_em_sink);
5173 aconnector->dc_em_sink = NULL;
5174 if (aconnector->dc_sink)
5175 dc_sink_release(aconnector->dc_sink);
5176 aconnector->dc_sink = NULL;
5177
5178 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5179 drm_connector_unregister(connector);
5180 drm_connector_cleanup(connector);
5181 if (aconnector->i2c) {
5182 i2c_del_adapter(&aconnector->i2c->base);
5183 kfree(aconnector->i2c);
5184 }
5185 kfree(aconnector->dm_dp_aux.aux.name);
5186
5187 kfree(connector);
5188}
5189
5190void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5191{
5192 struct dm_connector_state *state =
5193 to_dm_connector_state(connector->state);
5194
5195 if (connector->state)
5196 __drm_atomic_helper_connector_destroy_state(connector->state);
5197
5198 kfree(state);
5199
5200 state = kzalloc(sizeof(*state), GFP_KERNEL);
5201
5202 if (state) {
5203 state->scaling = RMX_OFF;
5204 state->underscan_enable = false;
5205 state->underscan_hborder = 0;
5206 state->underscan_vborder = 0;
5207 state->base.max_requested_bpc = 8;
5208 state->vcpi_slots = 0;
5209 state->pbn = 0;
5210 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5211 state->abm_level = amdgpu_dm_abm_level;
5212
5213 __drm_atomic_helper_connector_reset(connector, &state->base);
5214 }
5215}
5216
5217struct drm_connector_state *
5218amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5219{
5220 struct dm_connector_state *state =
5221 to_dm_connector_state(connector->state);
5222
5223 struct dm_connector_state *new_state =
5224 kmemdup(state, sizeof(*state), GFP_KERNEL);
5225
5226 if (!new_state)
5227 return NULL;
5228
5229 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5230
5231 new_state->freesync_capable = state->freesync_capable;
5232 new_state->abm_level = state->abm_level;
5233 new_state->scaling = state->scaling;
5234 new_state->underscan_enable = state->underscan_enable;
5235 new_state->underscan_hborder = state->underscan_hborder;
5236 new_state->underscan_vborder = state->underscan_vborder;
5237 new_state->vcpi_slots = state->vcpi_slots;
5238 new_state->pbn = state->pbn;
5239 return &new_state->base;
5240}
5241
5242static int
5243amdgpu_dm_connector_late_register(struct drm_connector *connector)
5244{
5245 struct amdgpu_dm_connector *amdgpu_dm_connector =
5246 to_amdgpu_dm_connector(connector);
5247 int r;
5248
5249 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5250 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5251 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5252 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5253 if (r)
5254 return r;
5255 }
5256
5257#if defined(CONFIG_DEBUG_FS)
5258 connector_debugfs_init(amdgpu_dm_connector);
5259#endif
5260
5261 return 0;
5262}
5263
5264static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5265 .reset = amdgpu_dm_connector_funcs_reset,
5266 .detect = amdgpu_dm_connector_detect,
5267 .fill_modes = drm_helper_probe_single_connector_modes,
5268 .destroy = amdgpu_dm_connector_destroy,
5269 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5270 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5271 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5272 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5273 .late_register = amdgpu_dm_connector_late_register,
5274 .early_unregister = amdgpu_dm_connector_unregister
5275};
5276
5277static int get_modes(struct drm_connector *connector)
5278{
5279 return amdgpu_dm_connector_get_modes(connector);
5280}
5281
5282static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5283{
5284 struct dc_sink_init_data init_params = {
5285 .link = aconnector->dc_link,
5286 .sink_signal = SIGNAL_TYPE_VIRTUAL
5287 };
5288 struct edid *edid;
5289
5290 if (!aconnector->base.edid_blob_ptr) {
5291 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",__drm_err("No EDID firmware found on connector: %s ,forcing to OFF!\n"
, aconnector->base.name)
5292 aconnector->base.name)__drm_err("No EDID firmware found on connector: %s ,forcing to OFF!\n"
, aconnector->base.name)
;
5293
5294 aconnector->base.force = DRM_FORCE_OFF;
5295 aconnector->base.override_edid = false;
5296 return;
5297 }
5298
5299 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5300
5301 aconnector->edid = edid;
5302
5303 aconnector->dc_em_sink = dc_link_add_remote_sink(
5304 aconnector->dc_link,
5305 (uint8_t *)edid,
5306 (edid->extensions + 1) * EDID_LENGTH,
5307 &init_params);
5308
5309 if (aconnector->base.force == DRM_FORCE_ON) {
5310 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5311 aconnector->dc_link->local_sink :
5312 aconnector->dc_em_sink;
5313 dc_sink_retain(aconnector->dc_sink);
5314 }
5315}
5316
5317static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5318{
5319 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5320
5321 /*
5322 * In case of headless boot with force on for a DP managed connector,
5323 * those settings have to be != 0 to get an initial modeset
5324 */
5325 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5326 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5327 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5328 }
5329
5330
5331 aconnector->base.override_edid = true;
5332 create_eml_sink(aconnector);
5333}
5334
5335static struct dc_stream_state *
5336create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5337 const struct drm_display_mode *drm_mode,
5338 const struct dm_connector_state *dm_state,
5339 const struct dc_stream_state *old_stream)
5340{
5341 struct drm_connector *connector = &aconnector->base;
5342 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5343 struct dc_stream_state *stream;
5344 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5345 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5346 enum dc_status dc_result = DC_OK;
5347
5348 do {
5349 stream = create_stream_for_sink(aconnector, drm_mode,
5350 dm_state, old_stream,
5351 requested_bpc);
5352 if (stream == NULL) {
5353 DRM_ERROR("Failed to create stream for sink!\n");
5354 break;
5355 }
5356
5357 dc_result = dc_validate_stream(adev->dm.dc, stream);
5358
5359 if (dc_result != DC_OK) {
5360 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",__drm_dbg(DRM_UT_KMS, "Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n"
, drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->
clock, dc_result, dc_status_to_str(dc_result))
5361 drm_mode->hdisplay,__drm_dbg(DRM_UT_KMS, "Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n"
, drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->
clock, dc_result, dc_status_to_str(dc_result))
5362 drm_mode->vdisplay,__drm_dbg(DRM_UT_KMS, "Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n"
, drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->
clock, dc_result, dc_status_to_str(dc_result))
5363 drm_mode->clock,__drm_dbg(DRM_UT_KMS, "Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n"
, drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->
clock, dc_result, dc_status_to_str(dc_result))
5364 dc_result,__drm_dbg(DRM_UT_KMS, "Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n"
, drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->
clock, dc_result, dc_status_to_str(dc_result))
5365 dc_status_to_str(dc_result))__drm_dbg(DRM_UT_KMS, "Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n"
, drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->
clock, dc_result, dc_status_to_str(dc_result))
;
5366
5367 dc_stream_release(stream);
5368 stream = NULL((void *)0);
5369 requested_bpc -= 2; /* lower bpc to retry validation */
5370 }
5371
5372 } while (stream == NULL((void *)0) && requested_bpc >= 6);
5373
5374 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
5375 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
5376
5377 aconnector->force_yuv420_output = true;
5378 stream = create_validate_stream_for_sink(aconnector, drm_mode,
5379 dm_state, old_stream);
5380 aconnector->force_yuv420_output = false;
5381 }
5382
5383 return stream;
5384}
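The retry ladder above steps the requested depth down by 2 until validation passes or 6 bpc is reached, then (on DC_FAIL_ENC_VALIDATE) makes one more pass forcing YCbCr420. A sketch of the attempt order, assuming max_requested_bpc = 10:

#include <stdio.h>

int main(void)
{
	/* Mirrors the do/while above: 10 -> 8 -> 6 bpc. */
	for (int bpc = 10; bpc >= 6; bpc -= 2)
		printf("validate at %d bpc\n", bpc);
	printf("then optionally retry once with YCbCr420 forced\n");
	return 0;
}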
5385
5386enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5387 struct drm_display_mode *mode)
5388{
5389 int result = MODE_ERROR;
5390 struct dc_sink *dc_sink;
5391 /* TODO: Unhardcode stream count */
5392 struct dc_stream_state *stream;
5393 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5394
5395 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5396 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5397 return result;
5398
5399 /*
5400 * Only run this the first time mode_valid is called to initialize
5401 * EDID mgmt
5402 */
5403 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5404 !aconnector->dc_em_sink)
5405 handle_edid_mgmt(aconnector);
5406
5407 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5408
5409 if (dc_sink == NULL) {
5410 DRM_ERROR("dc_sink is NULL!\n");
5411 goto fail;
5412 }
5413
5414 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5415 if (stream) {
5416 dc_stream_release(stream);
5417 result = MODE_OK;
5418 }
5419
5420fail:
5421 /* TODO: error handling */
5422 return result;
5423}
5424
5425static int fill_hdr_info_packet(const struct drm_connector_state *state,
5426 struct dc_info_packet *out)
5427{
5428 struct hdmi_drm_infoframe frame;
5429 unsigned char buf[30]; /* 26 + 4 */
5430 ssize_t len;
5431 int ret, i;
5432
5433 memset(out, 0, sizeof(*out));
5434
5435 if (!state->hdr_output_metadata)
5436 return 0;
5437
5438 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5439 if (ret)
5440 return ret;
5441
5442 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5443 if (len < 0)
5444 return (int)len;
5445
5446 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5447 if (len != 30)
5448 return -EINVAL;
5449
5450 /* Prepare the infopacket for DC. */
5451 switch (state->connector->connector_type) {
5452 case DRM_MODE_CONNECTOR_HDMIA:
5453 out->hb0 = 0x87; /* type */
5454 out->hb1 = 0x01; /* version */
5455 out->hb2 = 0x1A; /* length */
5456 out->sb[0] = buf[3]; /* checksum */
5457 i = 1;
5458 break;
5459
5460 case DRM_MODE_CONNECTOR_DisplayPort:
5461 case DRM_MODE_CONNECTOR_eDP:
5462 out->hb0 = 0x00; /* sdp id, zero */
5463 out->hb1 = 0x87; /* type */
5464 out->hb2 = 0x1D; /* payload len - 1 */
5465 out->hb3 = (0x13 << 2); /* sdp version */
5466 out->sb[0] = 0x01; /* version */
5467 out->sb[1] = 0x1A; /* length */
5468 i = 2;
5469 break;
5470
5471 default:
5472 return -EINVAL;
5473 }
5474
5475 memcpy(&out->sb[i], &buf[4], 26);
5476 out->valid = true;
5477
5478 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5479 sizeof(out->sb), false);
5480
5481 return 0;
5482}
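The header bytes written above follow the CTA-861 Dynamic Range and Mastering infoframe for HDMI and the DP 1.4 SDP encapsulation of the same 26-byte payload. A small sketch reproducing just the two layouts, with values copied from the switch above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* HDMI: infoframe type, version, payload length (26 bytes of
	 * static metadata); sb[0] then carries the checksum. */
	uint8_t hdmi_hb[3] = { 0x87, 0x01, 0x1A };

	/* DP SDP: id, packet type, payload length - 1, and the SDP
	 * version in bits 7:2 of hb3. */
	uint8_t dp_hb[4] = { 0x00, 0x87, 0x1D, 0x13 << 2 };

	printf("hdmi type 0x%02x, dp sdp rev 0x%02x\n",
	    hdmi_hb[0], dp_hb[3] >> 2);
	return 0;
}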
5483
5484static bool
5485is_hdr_metadata_different(const struct drm_connector_state *old_state,
5486 const struct drm_connector_state *new_state)
5487{
5488 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5489 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5490
5491 if (old_blob != new_blob) {
5492 if (old_blob && new_blob &&
5493 old_blob->length == new_blob->length)
5494 return memcmp(old_blob->data, new_blob->data,
5495 old_blob->length);
5496
5497 return true;
5498 }
5499
5500 return false;
5501}
5502
5503static int
5504amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5505 struct drm_atomic_state *state)
5506{
5507 struct drm_connector_state *new_con_state =
5508 drm_atomic_get_new_connector_state(state, conn);
5509 struct drm_connector_state *old_con_state =
5510 drm_atomic_get_old_connector_state(state, conn);
5511 struct drm_crtc *crtc = new_con_state->crtc;
5512 struct drm_crtc_state *new_crtc_state;
5513 int ret;
5514
5515 if (!crtc)
5516 return 0;
5517
5518 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5519 struct dc_info_packet hdr_infopacket;
5520
5521 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5522 if (ret)
5523 return ret;
5524
5525 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5526 if (IS_ERR(new_crtc_state))
5527 return PTR_ERR(new_crtc_state);
5528
5529 /*
5530 * DC considers the stream backends changed if the
5531 * static metadata changes. Forcing the modeset also
5532 * gives a simple way for userspace to switch from
5533 * 8bpc to 10bpc when setting the metadata to enter
5534 * or exit HDR.
5535 *
5536 * Changing the static metadata after it's been
5537 * set is permissible, however. So only force a
5538 * modeset if we're entering or exiting HDR.
5539 */
5540 new_crtc_state->mode_changed =
5541 !old_con_state->hdr_output_metadata ||
5542 !new_con_state->hdr_output_metadata;
5543 }
5544
5545 return 0;
5546}
5547
5548static const struct drm_connector_helper_funcs
5549amdgpu_dm_connector_helper_funcs = {
5550 /*
5551 * If hotplugging a second, bigger display in FB console mode, bigger-resolution
5552 * modes will be filtered by drm_mode_validate_size(), and those modes
5553 * are missing after the user starts lightdm. So we need to renew the modes list
5554 * in the get_modes callback, not just return the modes count.
5555 */
5556 .get_modes = get_modes,
5557 .mode_valid = amdgpu_dm_connector_mode_valid,
5558 .atomic_check = amdgpu_dm_connector_atomic_check,
5559};
5560
5561static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5562{
5563}
5564
5565static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5566{
5567 struct drm_atomic_state *state = new_crtc_state->state;
5568 struct drm_plane *plane;
5569 int num_active = 0;
5570
5571 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5572 struct drm_plane_state *new_plane_state;
5573
5574 /* Cursor planes are "fake". */
5575 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5576 continue;
5577
5578 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5579
5580 if (!new_plane_state) {
5581 /*
5582 * The plane is enabled on the CRTC and hasn't changed
5583 * state. This means that it previously passed
5584 * validation and is therefore enabled.
5585 */
5586 num_active += 1;
5587 continue;
5588 }
5589
5590 /* We need a framebuffer to be considered enabled. */
5591 num_active += (new_plane_state->fb != NULL);
5592 }
5593
5594 return num_active;
5595}
5596
5597static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5598 struct drm_crtc_state *new_crtc_state)
5599{
5600 struct dm_crtc_state *dm_new_crtc_state =
5601 to_dm_crtc_state(new_crtc_state);
5602
5603 dm_new_crtc_state->active_planes = 0;
5604
5605 if (!dm_new_crtc_state->stream)
5606 return;
5607
5608 dm_new_crtc_state->active_planes =
5609 count_crtc_active_planes(new_crtc_state);
5610}
5611
5612static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5613 struct drm_crtc_state *state)
5614{
5615 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5616 struct dc *dc = adev->dm.dc;
5617 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5618 int ret = -EINVAL;
5619
5620 dm_update_crtc_active_planes(crtc, state);
5621
5622 if (unlikely(!dm_crtc_state->stream &&
5623 modeset_required(state, NULL, dm_crtc_state->stream))) {
5624 WARN_ON(1);
5625 return ret;
5626 }
5627
5628 /*
5629 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5630 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5631 * planes are disabled, which is not supported by the hardware. And there is legacy
5632 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5633 */
5634 if (state->enable &&
5635 !(state->plane_mask & drm_plane_mask(crtc->primary)))
5636 return -EINVAL;
5637
5638 /* In some use cases, like reset, no stream is attached */
5639 if (!dm_crtc_state->stream)
5640 return 0;
5641
5642 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5643 return 0;
5644
5645 return ret;
5646}
5647
5648static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5649 const struct drm_display_mode *mode,
5650 struct drm_display_mode *adjusted_mode)
5651{
5652 return true;
5653}
5654
5655static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5656 .disable = dm_crtc_helper_disable,
5657 .atomic_check = dm_crtc_helper_atomic_check,
5658 .mode_fixup = dm_crtc_helper_mode_fixup,
5659 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5660};
5661
5662static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5663{
5664
5665}
5666
5667static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5668{
5669 switch (display_color_depth) {
5670 case COLOR_DEPTH_666:
5671 return 6;
5672 case COLOR_DEPTH_888:
5673 return 8;
5674 case COLOR_DEPTH_101010:
5675 return 10;
5676 case COLOR_DEPTH_121212:
5677 return 12;
5678 case COLOR_DEPTH_141414:
5679 return 14;
5680 case COLOR_DEPTH_161616:
5681 return 16;
5682 default:
5683 break;
5684 }
5685 return 0;
5686}
5687
5688static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5689 struct drm_crtc_state *crtc_state,
5690 struct drm_connector_state *conn_state)
5691{
5692 struct drm_atomic_state *state = crtc_state->state;
5693 struct drm_connector *connector = conn_state->connector;
5694 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5695 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5696 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5697 struct drm_dp_mst_topology_mgr *mst_mgr;
5698 struct drm_dp_mst_port *mst_port;
5699 enum dc_color_depth color_depth;
5700 int clock, bpp = 0;
5701 bool is_y420 = false;
5702
5703 if (!aconnector->port || !aconnector->dc_sink)
5704 return 0;
5705
5706 mst_port = aconnector->port;
5707 mst_mgr = &aconnector->mst_port->mst_mgr;
5708
5709 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5710 return 0;
5711
5712 if (!state->duplicated) {
5713 int max_bpc = conn_state->max_requested_bpc;
5714 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5715 aconnector->force_yuv420_output;
5716 color_depth = convert_color_depth_from_display_info(connector,
5717 is_y420,
5718 max_bpc);
5719 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5720 clock = adjusted_mode->clock;
5721 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5722 }
5723 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5724 mst_mgr,
5725 mst_port,
5726 dm_new_connector_state->pbn,
5727 dm_mst_get_pbn_divider(aconnector->dc_link));
5728 if (dm_new_connector_state->vcpi_slots < 0) {
5729 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots)__drm_dbg(DRM_UT_ATOMIC, "failed finding vcpi slots: %d\n", (
int)dm_new_connector_state->vcpi_slots)
;
5730 return dm_new_connector_state->vcpi_slots;
5731 }
5732 return 0;
5733}
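The bpp handed to drm_dp_calc_pbn_mode() above is simply three colour components times the depth negotiated from the display info. A sketch with assumed mode numbers (the clock and bpc values are hypothetical):

#include <stdio.h>

int main(void)
{
	int bpc = 10;       /* from convert_dc_color_depth_into_bpc() */
	int bpp = bpc * 3;  /* 30 bits per pixel for 3 components     */
	int clock = 297000; /* adjusted_mode->clock in kHz, assumed   */

	/* The driver then calls
	 *   drm_dp_calc_pbn_mode(clock, bpp, false);
	 * to turn this into a PBN value for VCPI allocation. */
	printf("clock %d kHz at %d bpp\n", clock, bpp);
	return 0;
}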
5734
5735const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5736 .disable = dm_encoder_helper_disable,
5737 .atomic_check = dm_encoder_helper_atomic_check
5738};
5739
5740#if defined(CONFIG_DRM_AMD_DC_DCN)
5741static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5742 struct dc_state *dc_state)
5743{
5744 struct dc_stream_state *stream = NULL;
5745 struct drm_connector *connector;
5746 struct drm_connector_state *new_con_state, *old_con_state;
5747 struct amdgpu_dm_connector *aconnector;
5748 struct dm_connector_state *dm_conn_state;
5749 int i, j, clock, bpp;
5750 int vcpi, pbn_div, pbn = 0;
5751
5752 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5753
5754 aconnector = to_amdgpu_dm_connector(connector);
5755
5756 if (!aconnector->port)
5757 continue;
5758
5759 if (!new_con_state || !new_con_state->crtc)
5760 continue;
5761
5762 dm_conn_state = to_dm_connector_state(new_con_state);
5763
5764 for (j = 0; j < dc_state->stream_count; j++) {
5765 stream = dc_state->streams[j];
5766 if (!stream)
5767 continue;
5768
5769 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5770 break;
5771
5772 stream = NULL;
5773 }
5774
5775 if (!stream)
5776 continue;
5777
5778 if (stream->timing.flags.DSC != 1) {
5779 drm_dp_mst_atomic_enable_dsc(state,
5780 aconnector->port,
5781 dm_conn_state->pbn,
5782 0,
5783 false);
5784 continue;
5785 }
5786
5787 pbn_div = dm_mst_get_pbn_divider(stream->link);
5788 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5789 clock = stream->timing.pix_clk_100hz / 10;
5790 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5791 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5792 aconnector->port,
5793 pbn, pbn_div,
5794 true);
5795 if (vcpi < 0)
5796 return vcpi;
5797
5798 dm_conn_state->pbn = pbn;
5799 dm_conn_state->vcpi_slots = vcpi;
5800 }
5801 return 0;
5802}
5803#endif
5804
5805static void dm_drm_plane_reset(struct drm_plane *plane)
5806{
5807 struct dm_plane_state *amdgpu_state = NULL;
5808
5809 if (plane->state)
5810 plane->funcs->atomic_destroy_state(plane, plane->state);
5811
5812 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5813 WARN_ON(amdgpu_state == NULL);
5814
5815 if (amdgpu_state)
5816 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5817}
5818
5819static struct drm_plane_state *
5820dm_drm_plane_duplicate_state(struct drm_plane *plane)
5821{
5822 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5823
5824 old_dm_plane_state = to_dm_plane_state(plane->state);
5825 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5826 if (!dm_plane_state)
5827 return NULL;
5828
5829 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5830
5831 if (old_dm_plane_state->dc_state) {
5832 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5833 dc_plane_state_retain(dm_plane_state->dc_state);
5834 }
5835
5836 /* Framebuffer hasn't been updated yet, so retain old flags. */
5837 dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5838 dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5839
5840 return &dm_plane_state->base;
5841}
5842
5843static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5844 struct drm_plane_state *state)
5845{
5846 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state)({ const __typeof( ((struct dm_plane_state *)0)->base ) *__mptr
= (state); (struct dm_plane_state *)( (char *)__mptr - __builtin_offsetof
(struct dm_plane_state, base) );})
;
5847
5848 if (dm_plane_state->dc_state)
5849 dc_plane_state_release(dm_plane_state->dc_state);
5850
5851 drm_atomic_helper_plane_destroy_state(plane, state);
5852}
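
The duplicate/destroy pair above shares the dc_state between the old and new DRM plane states by reference counting: duplicate_state retains, destroy_state releases, and the last release frees. A minimal standalone model of that discipline, with illustrative names only (this is not DC's implementation):

    #include <assert.h>
    #include <stdlib.h>

    struct fake_dc_plane_state {
            int refcount;
    };

    static void retain(struct fake_dc_plane_state *s)  { s->refcount++; }
    static void release(struct fake_dc_plane_state *s)
    {
            assert(s->refcount > 0);
            if (--s->refcount == 0)
                    free(s);        /* last reference frees the state */
    }

    int main(void)
    {
            struct fake_dc_plane_state *s = calloc(1, sizeof(*s));

            if (!s)
                    return 1;
            retain(s);      /* creation reference */
            retain(s);      /* duplicate_state: shared with the new DRM state */
            release(s);     /* destroy_state on the old DRM state */
            release(s);     /* destroy_state on the duplicate: frees */
            return 0;
    }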
5853
5854static const struct drm_plane_funcs dm_plane_funcs = {
5855 .update_plane = drm_atomic_helper_update_plane,
5856 .disable_plane = drm_atomic_helper_disable_plane,
5857 .destroy = drm_primary_helper_destroy,
5858 .reset = dm_drm_plane_reset,
5859 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5860 .atomic_destroy_state = dm_drm_plane_destroy_state,
5861};
5862
5863static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5864 struct drm_plane_state *new_state)
5865{
5866 struct amdgpu_framebuffer *afb;
5867 struct drm_gem_object *obj;
5868 struct amdgpu_device *adev;
5869 struct amdgpu_bo *rbo;
5870 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5871 struct list_head list;
5872 struct ttm_validate_buffer tv;
5873 struct ww_acquire_ctx ticket;
5874 uint32_t domain;
5875 int r;
5876
5877 if (!new_state->fb) {
5878 DRM_DEBUG_DRIVER("No FB bound\n")__drm_dbg(DRM_UT_DRIVER, "No FB bound\n");
5879 return 0;
5880 }
5881
5882 afb = to_amdgpu_framebuffer(new_state->fb)({ const __typeof( ((struct amdgpu_framebuffer *)0)->base )
*__mptr = (new_state->fb); (struct amdgpu_framebuffer *)(
(char *)__mptr - __builtin_offsetof(struct amdgpu_framebuffer
, base) );})
;
5883 obj = new_state->fb->obj[0];
5884 rbo = gem_to_amdgpu_bo(obj)({ const __typeof( ((struct amdgpu_bo *)0)->tbo.base ) *__mptr
= ((obj)); (struct amdgpu_bo *)( (char *)__mptr - __builtin_offsetof
(struct amdgpu_bo, tbo.base) );})
;
5885 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5886 INIT_LIST_HEAD(&list);
5887
5888 tv.bo = &rbo->tbo;
5889 tv.num_shared = 1;
5890 list_add(&tv.head, &list);
5891
5892 r = ttm_eu_reserve_buffers(&ticket, &list, false0, NULL((void *)0));
5893 if (r) {
5894 dev_err(adev->dev, "fail to reserve bo (%d)\n", r)printf("drm:pid%d:%s *ERROR* " "fail to reserve bo (%d)\n", (
{struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
5895 return r;
5896 }
5897
5898 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5899 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5900 else
5901 domain = AMDGPU_GEM_DOMAIN_VRAM0x4;
5902
5903 r = amdgpu_bo_pin(rbo, domain);
5904 if (unlikely(r != 0)__builtin_expect(!!(r != 0), 0)) {
5905 if (r != -ERESTARTSYS4)
5906 DRM_ERROR("Failed to pin framebuffer with error %d\n", r)__drm_err("Failed to pin framebuffer with error %d\n", r);
5907 ttm_eu_backoff_reservation(&ticket, &list);
5908 return r;
5909 }
5910
5911 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5912 if (unlikely(r != 0)__builtin_expect(!!(r != 0), 0)) {
5913 amdgpu_bo_unpin(rbo);
5914 ttm_eu_backoff_reservation(&ticket, &list);
5915 DRM_ERROR("%p bind failed\n", rbo)__drm_err("%p bind failed\n", rbo);
5916 return r;
5917 }
5918
5919 ttm_eu_backoff_reservation(&ticket, &list);
5920
5921 afb->address = amdgpu_bo_gpu_offset(rbo);
5922
5923 amdgpu_bo_ref(rbo);
5924
5925 /**
5926 * We don't do surface updates on planes that have been newly created,
5927 * but we also don't have the afb->address during atomic check.
5928 *
5929 * Fill in buffer attributes depending on the address here, but only on
5930 * newly created planes since they're not being used by DC yet and this
5931 * won't modify global state.
5932 */
5933 dm_plane_state_old = to_dm_plane_state(plane->state)({ const __typeof( ((struct dm_plane_state *)0)->base ) *__mptr
= (plane->state); (struct dm_plane_state *)( (char *)__mptr
- __builtin_offsetof(struct dm_plane_state, base) );})
;
5934 dm_plane_state_new = to_dm_plane_state(new_state)({ const __typeof( ((struct dm_plane_state *)0)->base ) *__mptr
= (new_state); (struct dm_plane_state *)( (char *)__mptr - __builtin_offsetof
(struct dm_plane_state, base) );})
;
5935
5936 if (dm_plane_state_new->dc_state &&
5937 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5938 struct dc_plane_state *plane_state =
5939 dm_plane_state_new->dc_state;
5940 bool_Bool force_disable_dcc = !plane_state->dcc.enable;
5941
5942 fill_plane_buffer_attributes(
5943 adev, afb, plane_state->format, plane_state->rotation,
5944 dm_plane_state_new->tiling_flags,
5945 &plane_state->tiling_info, &plane_state->plane_size,
5946 &plane_state->dcc, &plane_state->address,
5947 dm_plane_state_new->tmz_surface, force_disable_dcc);
5948 }
5949
5950 return 0;
5951}
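
Note the unwind order in the error paths above: reserve, then pin, then GART bind, with each failure undoing only the steps that succeeded, and the reservation dropped on success as well. A generic sketch of that pattern with hypothetical step functions (not the amdgpu/TTM API):

    #include <stdio.h>

    static int step_reserve(void) { return 0; }
    static int step_pin(void)     { return 0; }
    static int step_bind(void)    { return 0; }
    static void undo_pin(void)     { puts("unpin"); }
    static void undo_reserve(void) { puts("backoff reservation"); }

    static int prepare(void)
    {
            int r;

            if ((r = step_reserve()))
                    return r;               /* nothing to undo yet */
            if ((r = step_pin())) {
                    undo_reserve();         /* only the reservation exists */
                    return r;
            }
            if ((r = step_bind())) {
                    undo_pin();             /* undo pin, then the reservation */
                    undo_reserve();
                    return r;
            }
            undo_reserve();                 /* success path drops it too */
            return 0;
    }

    int main(void) { return prepare(); }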
5952
5953static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5954 struct drm_plane_state *old_state)
5955{
5956 struct amdgpu_bo *rbo;
5957 int r;
5958
5959 if (!old_state->fb)
5960 return;
5961
5962 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0])({ const __typeof( ((struct amdgpu_bo *)0)->tbo.base ) *__mptr
= ((old_state->fb->obj[0])); (struct amdgpu_bo *)( (char
*)__mptr - __builtin_offsetof(struct amdgpu_bo, tbo.base) );
})
;
5963 r = amdgpu_bo_reserve(rbo, false0);
5964 if (unlikely(r)__builtin_expect(!!(r), 0)) {
5965 DRM_ERROR("failed to reserve rbo before unpin\n")__drm_err("failed to reserve rbo before unpin\n");
5966 return;
5967 }
5968
5969 amdgpu_bo_unpin(rbo);
5970 amdgpu_bo_unreserve(rbo);
5971 amdgpu_bo_unref(&rbo);
5972}
5973
5974static int dm_plane_helper_check_state(struct drm_plane_state *state,
5975 struct drm_crtc_state *new_crtc_state)
5976{
5977 int max_downscale = 0;
5978 int max_upscale = INT_MAX0x7fffffff;
5979
5980 /* TODO: These should be checked against DC plane caps */
5981 return drm_atomic_helper_check_plane_state(
5982 state, new_crtc_state, max_downscale, max_upscale, true1, true1);
5983}
5984
5985static int dm_plane_atomic_check(struct drm_plane *plane,
5986 struct drm_plane_state *state)
5987{
5988 struct amdgpu_device *adev = drm_to_adev(plane->dev);
5989 struct dc *dc = adev->dm.dc;
5990 struct dm_plane_state *dm_plane_state;
5991 struct dc_scaling_info scaling_info;
5992 struct drm_crtc_state *new_crtc_state;
5993 int ret;
5994
5995 dm_plane_state = to_dm_plane_state(state)({ const __typeof( ((struct dm_plane_state *)0)->base ) *__mptr
= (state); (struct dm_plane_state *)( (char *)__mptr - __builtin_offsetof
(struct dm_plane_state, base) );})
;
5996
5997 if (!dm_plane_state->dc_state)
5998 return 0;
5999
6000 new_crtc_state =
6001 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6002 if (!new_crtc_state)
6003 return -EINVAL22;
6004
6005 ret = dm_plane_helper_check_state(state, new_crtc_state);
6006 if (ret)
6007 return ret;
6008
6009 ret = fill_dc_scaling_info(state, &scaling_info);
6010 if (ret)
6011 return ret;
6012
6013 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6014 return 0;
6015
6016 return -EINVAL22;
6017}
6018
6019static int dm_plane_atomic_async_check(struct drm_plane *plane,
6020 struct drm_plane_state *new_plane_state)
6021{
6022 /* Only support async updates on cursor planes. */
6023 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6024 return -EINVAL22;
6025
6026 return 0;
6027}
6028
6029static void dm_plane_atomic_async_update(struct drm_plane *plane,
6030 struct drm_plane_state *new_state)
6031{
6032 struct drm_plane_state *old_state =
6033 drm_atomic_get_old_plane_state(new_state->state, plane);
6034
6035 swap(plane->state->fb, new_state->fb)do { __typeof(plane->state->fb) __tmp = (plane->state
->fb); (plane->state->fb) = (new_state->fb); (new_state
->fb) = __tmp; } while(0)
;
6036
6037 plane->state->src_x = new_state->src_x;
6038 plane->state->src_y = new_state->src_y;
6039 plane->state->src_w = new_state->src_w;
6040 plane->state->src_h = new_state->src_h;
6041 plane->state->crtc_x = new_state->crtc_x;
6042 plane->state->crtc_y = new_state->crtc_y;
6043 plane->state->crtc_w = new_state->crtc_w;
6044 plane->state->crtc_h = new_state->crtc_h;
6045
6046 handle_cursor_update(plane, old_state);
6047}
6048
6049static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6050 .prepare_fb = dm_plane_helper_prepare_fb,
6051 .cleanup_fb = dm_plane_helper_cleanup_fb,
6052 .atomic_check = dm_plane_atomic_check,
6053 .atomic_async_check = dm_plane_atomic_async_check,
6054 .atomic_async_update = dm_plane_atomic_async_update
6055};
6056
6057/*
6058 * TODO: these are currently initialized to RGB formats only.
6059 * For future use cases we should either initialize them dynamically based on
6060 * plane capabilities, or initialize this array to all formats, so the internal
6061 * drm check will succeed, and let DC implement the proper check.
6062 */
6063static const uint32_t rgb_formats[] = {
6064 DRM_FORMAT_XRGB8888((__u32)('X') | ((__u32)('R') << 8) | ((__u32)('2') <<
16) | ((__u32)('4') << 24))
,
6065 DRM_FORMAT_ARGB8888((__u32)('A') | ((__u32)('R') << 8) | ((__u32)('2') <<
16) | ((__u32)('4') << 24))
,
6066 DRM_FORMAT_RGBA8888((__u32)('R') | ((__u32)('A') << 8) | ((__u32)('2') <<
16) | ((__u32)('4') << 24))
,
6067 DRM_FORMAT_XRGB2101010((__u32)('X') | ((__u32)('R') << 8) | ((__u32)('3') <<
16) | ((__u32)('0') << 24))
,
6068 DRM_FORMAT_XBGR2101010((__u32)('X') | ((__u32)('B') << 8) | ((__u32)('3') <<
16) | ((__u32)('0') << 24))
,
6069 DRM_FORMAT_ARGB2101010((__u32)('A') | ((__u32)('R') << 8) | ((__u32)('3') <<
16) | ((__u32)('0') << 24))
,
6070 DRM_FORMAT_ABGR2101010((__u32)('A') | ((__u32)('B') << 8) | ((__u32)('3') <<
16) | ((__u32)('0') << 24))
,
6071 DRM_FORMAT_XBGR8888((__u32)('X') | ((__u32)('B') << 8) | ((__u32)('2') <<
16) | ((__u32)('4') << 24))
,
6072 DRM_FORMAT_ABGR8888((__u32)('A') | ((__u32)('B') << 8) | ((__u32)('2') <<
16) | ((__u32)('4') << 24))
,
6073 DRM_FORMAT_RGB565((__u32)('R') | ((__u32)('G') << 8) | ((__u32)('1') <<
16) | ((__u32)('6') << 24))
,
6074};
6075
6076static const uint32_t overlay_formats[] = {
6077 DRM_FORMAT_XRGB8888((__u32)('X') | ((__u32)('R') << 8) | ((__u32)('2') <<
16) | ((__u32)('4') << 24))
,
6078 DRM_FORMAT_ARGB8888((__u32)('A') | ((__u32)('R') << 8) | ((__u32)('2') <<
16) | ((__u32)('4') << 24))
,
6079 DRM_FORMAT_RGBA8888((__u32)('R') | ((__u32)('A') << 8) | ((__u32)('2') <<
16) | ((__u32)('4') << 24))
,
6080 DRM_FORMAT_XBGR8888((__u32)('X') | ((__u32)('B') << 8) | ((__u32)('2') <<
16) | ((__u32)('4') << 24))
,
6081 DRM_FORMAT_ABGR8888((__u32)('A') | ((__u32)('B') << 8) | ((__u32)('2') <<
16) | ((__u32)('4') << 24))
,
6082 DRM_FORMAT_RGB565((__u32)('R') | ((__u32)('G') << 8) | ((__u32)('1') <<
16) | ((__u32)('6') << 24))
6083};
6084
6085static const u32 cursor_formats[] = {
6086 DRM_FORMAT_ARGB8888((__u32)('A') | ((__u32)('R') << 8) | ((__u32)('2') <<
16) | ((__u32)('4') << 24))
6087};
6088
6089static int get_plane_formats(const struct drm_plane *plane,
6090 const struct dc_plane_cap *plane_cap,
6091 uint32_t *formats, int max_formats)
6092{
6093 int i, num_formats = 0;
6094
6095 /*
6096 * TODO: Query support for each group of formats directly from
6097 * DC plane caps. This will require adding more formats to the
6098 * caps list.
6099 */
6100
6101 switch (plane->type) {
6102 case DRM_PLANE_TYPE_PRIMARY:
6103 for (i = 0; i < ARRAY_SIZE(rgb_formats)(sizeof((rgb_formats)) / sizeof((rgb_formats)[0])); ++i) {
6104 if (num_formats >= max_formats)
6105 break;
6106
6107 formats[num_formats++] = rgb_formats[i];
6108 }
6109
6110 if (plane_cap && plane_cap->pixel_format_support.nv12)
6111 formats[num_formats++] = DRM_FORMAT_NV12((__u32)('N') | ((__u32)('V') << 8) | ((__u32)('1') <<
16) | ((__u32)('2') << 24))
;
6112 if (plane_cap && plane_cap->pixel_format_support.p010)
6113 formats[num_formats++] = DRM_FORMAT_P010((__u32)('P') | ((__u32)('0') << 8) | ((__u32)('1') <<
16) | ((__u32)('0') << 24))
;
6114 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6115 formats[num_formats++] = DRM_FORMAT_XRGB16161616F((__u32)('X') | ((__u32)('R') << 8) | ((__u32)('4') <<
16) | ((__u32)('H') << 24))
;
6116 formats[num_formats++] = DRM_FORMAT_ARGB16161616F((__u32)('A') | ((__u32)('R') << 8) | ((__u32)('4') <<
16) | ((__u32)('H') << 24))
;
6117 formats[num_formats++] = DRM_FORMAT_XBGR16161616F((__u32)('X') | ((__u32)('B') << 8) | ((__u32)('4') <<
16) | ((__u32)('H') << 24))
;
6118 formats[num_formats++] = DRM_FORMAT_ABGR16161616F((__u32)('A') | ((__u32)('B') << 8) | ((__u32)('4') <<
16) | ((__u32)('H') << 24))
;
6119 }
6120 break;
6121
6122 case DRM_PLANE_TYPE_OVERLAY:
6123 for (i = 0; i < ARRAY_SIZE(overlay_formats)(sizeof((overlay_formats)) / sizeof((overlay_formats)[0])); ++i) {
6124 if (num_formats >= max_formats)
6125 break;
6126
6127 formats[num_formats++] = overlay_formats[i];
6128 }
6129 break;
6130
6131 case DRM_PLANE_TYPE_CURSOR:
6132 for (i = 0; i < ARRAY_SIZE(cursor_formats)(sizeof((cursor_formats)) / sizeof((cursor_formats)[0])); ++i) {
6133 if (num_formats >= max_formats)
6134 break;
6135
6136 formats[num_formats++] = cursor_formats[i];
6137 }
6138 break;
6139 }
6140
6141 return num_formats;
6142}
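
The num_formats >= max_formats guard above is applied inside the per-type loops; the capability-gated appends (NV12, P010, the FP16 set) instead rely on the caller's 32-entry buffer having headroom. A hedged sketch of a helper that would make the bound explicit for every append (push_format is hypothetical, not driver API):

    #include <stdint.h>

    #define FOURCC(a, b, c, d) \
            ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
             ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

    static int push_format(uint32_t *formats, int *num, int max, uint32_t fmt)
    {
            if (*num >= max)
                    return -1;              /* buffer full: reject the format */
            formats[(*num)++] = fmt;
            return 0;
    }

    int main(void)
    {
            uint32_t formats[2];
            int num = 0;

            push_format(formats, &num, 2, FOURCC('X', 'R', '2', '4'));
            push_format(formats, &num, 2, FOURCC('A', 'R', '2', '4'));
            /* Third append is rejected instead of overflowing the buffer. */
            return push_format(formats, &num, 2, FOURCC('N', 'V', '1', '2')) == -1 ? 0 : 1;
    }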
6143
6144static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6145 struct drm_plane *plane,
6146 unsigned long possible_crtcs,
6147 const struct dc_plane_cap *plane_cap)
6148{
6149 uint32_t formats[32];
6150 int num_formats;
6151 int res = -EPERM1;
6152 unsigned int supported_rotations;
6153
6154 num_formats = get_plane_formats(plane, plane_cap, formats,
6155 ARRAY_SIZE(formats)(sizeof((formats)) / sizeof((formats)[0])));
6156
6157 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6158 &dm_plane_funcs, formats, num_formats,
6159 NULL((void *)0), plane->type, NULL((void *)0));
6160 if (res)
6161 return res;
6162
6163 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6164 plane_cap && plane_cap->per_pixel_alpha) {
6165 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE)(1UL << (2)) |
6166 BIT(DRM_MODE_BLEND_PREMULTI)(1UL << (0));
6167
6168 drm_plane_create_alpha_property(plane);
6169 drm_plane_create_blend_mode_property(plane, blend_caps);
6170 }
6171
6172 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6173 plane_cap &&
6174 (plane_cap->pixel_format_support.nv12 ||
6175 plane_cap->pixel_format_support.p010)) {
6176 /* This only affects YUV formats. */
6177 drm_plane_create_color_properties(
6178 plane,
6179 BIT(DRM_COLOR_YCBCR_BT601)(1UL << (DRM_COLOR_YCBCR_BT601)) |
6180 BIT(DRM_COLOR_YCBCR_BT709)(1UL << (DRM_COLOR_YCBCR_BT709)) |
6181 BIT(DRM_COLOR_YCBCR_BT2020)(1UL << (DRM_COLOR_YCBCR_BT2020)),
6182 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE)(1UL << (DRM_COLOR_YCBCR_LIMITED_RANGE)) |
6183 BIT(DRM_COLOR_YCBCR_FULL_RANGE)(1UL << (DRM_COLOR_YCBCR_FULL_RANGE)),
6184 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6185 }
6186
6187 supported_rotations =
6188 DRM_MODE_ROTATE_0(1<<0) | DRM_MODE_ROTATE_90(1<<1) |
6189 DRM_MODE_ROTATE_180(1<<2) | DRM_MODE_ROTATE_270(1<<3);
6190
6191 if (dm->adev->asic_type >= CHIP_BONAIRE)
6192 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0(1<<0),
6193 supported_rotations);
6194
6195 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6196
6197 /* Create (reset) the plane state */
6198 if (plane->funcs->reset)
6199 plane->funcs->reset(plane);
6200
6201 return 0;
6202}
6203
6204static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6205 struct drm_plane *plane,
6206 uint32_t crtc_index)
6207{
6208 struct amdgpu_crtc *acrtc = NULL((void *)0);
6209 struct drm_plane *cursor_plane;
6210
6211 int res = -ENOMEM12;
6212
6213 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL(0x0001 | 0x0004));
6214 if (!cursor_plane)
6215 goto fail;
6216
6217 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6218 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL((void *)0));
6219
6220 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL(0x0001 | 0x0004));
6221 if (!acrtc)
6222 goto fail;
6223
6224 res = drm_crtc_init_with_planes(
6225 dm->ddev,
6226 &acrtc->base,
6227 plane,
6228 cursor_plane,
6229 &amdgpu_dm_crtc_funcs, NULL((void *)0));
6230
6231 if (res)
6232 goto fail;
6233
6234 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6235
6236 /* Create (reset) the crtc state */
6237 if (acrtc->base.funcs->reset)
6238 acrtc->base.funcs->reset(&acrtc->base);
6239
6240 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6241 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6242
6243 acrtc->crtc_id = crtc_index;
6244 acrtc->base.enabled = false0;
6245 acrtc->otg_inst = -1;
6246
6247 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6248 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES4096,
6249 true1, MAX_COLOR_LUT_ENTRIES4096);
6250 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES256);
6251
6252 return 0;
6253
6254fail:
6255 kfree(acrtc);
6256 kfree(cursor_plane);
6257 return res;
6258}
6259
6260
6261static int to_drm_connector_type(enum amd_signal_type st)
6262{
6263 switch (st) {
6264 case SIGNAL_TYPE_HDMI_TYPE_A:
6265 return DRM_MODE_CONNECTOR_HDMIA11;
6266 case SIGNAL_TYPE_EDP:
6267 return DRM_MODE_CONNECTOR_eDP14;
6268 case SIGNAL_TYPE_LVDS:
6269 return DRM_MODE_CONNECTOR_LVDS7;
6270 case SIGNAL_TYPE_RGB:
6271 return DRM_MODE_CONNECTOR_VGA1;
6272 case SIGNAL_TYPE_DISPLAY_PORT:
6273 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6274 return DRM_MODE_CONNECTOR_DisplayPort10;
6275 case SIGNAL_TYPE_DVI_DUAL_LINK:
6276 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6277 return DRM_MODE_CONNECTOR_DVID3;
6278 case SIGNAL_TYPE_VIRTUAL:
6279 return DRM_MODE_CONNECTOR_VIRTUAL15;
6280
6281 default:
6282 return DRM_MODE_CONNECTOR_Unknown0;
6283 }
6284}
6285
6286static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6287{
6288 struct drm_encoder *encoder;
6289
6290 /* There is only one encoder per connector */
6291 drm_connector_for_each_possible_encoder(connector, encoder)for ((encoder) = ({ const __typeof( ((__typeof(*(encoder)) *)
0)->head ) *__mptr = ((&((connector)->dev)->mode_config
.encoder_list)->next); (__typeof(*(encoder)) *)( (char *)__mptr
- __builtin_offsetof(__typeof(*(encoder)), head) );}); &
(encoder)->head != (&((connector)->dev)->mode_config
.encoder_list); (encoder) = ({ const __typeof( ((__typeof(*(encoder
)) *)0)->head ) *__mptr = ((encoder)->head.next); (__typeof
(*(encoder)) *)( (char *)__mptr - __builtin_offsetof(__typeof
(*(encoder)), head) );})) if (!(((connector)->possible_encoders
) & drm_encoder_mask(encoder))) {} else
6292 return encoder;
6293
6294 return NULL((void *)0);
6295}
6296
6297static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6298{
6299 struct drm_encoder *encoder;
6300 struct amdgpu_encoder *amdgpu_encoder;
6301
6302 encoder = amdgpu_dm_connector_to_encoder(connector);
6303
6304 if (encoder == NULL((void *)0))
6305 return;
6306
6307 amdgpu_encoder = to_amdgpu_encoder(encoder)({ const __typeof( ((struct amdgpu_encoder *)0)->base ) *__mptr
= (encoder); (struct amdgpu_encoder *)( (char *)__mptr - __builtin_offsetof
(struct amdgpu_encoder, base) );})
;
6308
6309 amdgpu_encoder->native_mode.clock = 0;
6310
6311 if (!list_empty(&connector->probed_modes)) {
6312 struct drm_display_mode *preferred_mode = NULL((void *)0);
6313
6314 list_for_each_entry(preferred_mode,for (preferred_mode = ({ const __typeof( ((__typeof(*preferred_mode
) *)0)->head ) *__mptr = ((&connector->probed_modes
)->next); (__typeof(*preferred_mode) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*preferred_mode), head) );}); &preferred_mode->
head != (&connector->probed_modes); preferred_mode = (
{ const __typeof( ((__typeof(*preferred_mode) *)0)->head )
*__mptr = (preferred_mode->head.next); (__typeof(*preferred_mode
) *)( (char *)__mptr - __builtin_offsetof(__typeof(*preferred_mode
), head) );}))
6315 &connector->probed_modes,for (preferred_mode = ({ const __typeof( ((__typeof(*preferred_mode
) *)0)->head ) *__mptr = ((&connector->probed_modes
)->next); (__typeof(*preferred_mode) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*preferred_mode), head) );}); &preferred_mode->
head != (&connector->probed_modes); preferred_mode = (
{ const __typeof( ((__typeof(*preferred_mode) *)0)->head )
*__mptr = (preferred_mode->head.next); (__typeof(*preferred_mode
) *)( (char *)__mptr - __builtin_offsetof(__typeof(*preferred_mode
), head) );}))
6316 head)for (preferred_mode = ({ const __typeof( ((__typeof(*preferred_mode
) *)0)->head ) *__mptr = ((&connector->probed_modes
)->next); (__typeof(*preferred_mode) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*preferred_mode), head) );}); &preferred_mode->
head != (&connector->probed_modes); preferred_mode = (
{ const __typeof( ((__typeof(*preferred_mode) *)0)->head )
*__mptr = (preferred_mode->head.next); (__typeof(*preferred_mode
) *)( (char *)__mptr - __builtin_offsetof(__typeof(*preferred_mode
), head) );}))
{
6317 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED(1<<3))
6318 amdgpu_encoder->native_mode = *preferred_mode;
6319
6320 break;
6321 }
6322
6323 }
6324}
6325
6326static struct drm_display_mode *
6327amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6328 char *name,
6329 int hdisplay, int vdisplay)
6330{
6331 struct drm_device *dev = encoder->dev;
6332 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder)({ const __typeof( ((struct amdgpu_encoder *)0)->base ) *__mptr
= (encoder); (struct amdgpu_encoder *)( (char *)__mptr - __builtin_offsetof
(struct amdgpu_encoder, base) );})
;
6333 struct drm_display_mode *mode = NULL((void *)0);
6334 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6335
6336 mode = drm_mode_duplicate(dev, native_mode);
6337
6338 if (mode == NULL((void *)0))
6339 return NULL((void *)0);
6340
6341 mode->hdisplay = hdisplay;
6342 mode->vdisplay = vdisplay;
6343 mode->type &= ~DRM_MODE_TYPE_PREFERRED(1<<3);
6344#ifdef __linux__
6345 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN32);
6346#else
6347 strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN32);
6348#endif
6349
6350 return mode;
6351
6352}
6353
6354static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6355 struct drm_connector *connector)
6356{
6357 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder)({ const __typeof( ((struct amdgpu_encoder *)0)->base ) *__mptr
= (encoder); (struct amdgpu_encoder *)( (char *)__mptr - __builtin_offsetof
(struct amdgpu_encoder, base) );})
;
6358 struct drm_display_mode *mode = NULL((void *)0);
6359 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6360 struct amdgpu_dm_connector *amdgpu_dm_connector =
6361 to_amdgpu_dm_connector(connector)({ const __typeof( ((struct amdgpu_dm_connector *)0)->base
) *__mptr = (connector); (struct amdgpu_dm_connector *)( (char
*)__mptr - __builtin_offsetof(struct amdgpu_dm_connector, base
) );})
;
6362 int i;
6363 int n;
6364 struct mode_size {
6365 char name[DRM_DISPLAY_MODE_LEN32];
6366 int w;
6367 int h;
6368 } common_modes[] = {
6369 { "640x480", 640, 480},
6370 { "800x600", 800, 600},
6371 { "1024x768", 1024, 768},
6372 { "1280x720", 1280, 720},
6373 { "1280x800", 1280, 800},
6374 {"1280x1024", 1280, 1024},
6375 { "1440x900", 1440, 900},
6376 {"1680x1050", 1680, 1050},
6377 {"1600x1200", 1600, 1200},
6378 {"1920x1080", 1920, 1080},
6379 {"1920x1200", 1920, 1200}
6380 };
6381
6382 n = ARRAY_SIZE(common_modes)(sizeof((common_modes)) / sizeof((common_modes)[0]));
6383
6384 for (i = 0; i < n; i++) {
6385 struct drm_display_mode *curmode = NULL((void *)0);
6386 bool_Bool mode_existed = false0;
6387
6388 if (common_modes[i].w > native_mode->hdisplay ||
6389 common_modes[i].h > native_mode->vdisplay ||
6390 (common_modes[i].w == native_mode->hdisplay &&
6391 common_modes[i].h == native_mode->vdisplay))
6392 continue;
6393
6394 list_for_each_entry(curmode, &connector->probed_modes, head)for (curmode = ({ const __typeof( ((__typeof(*curmode) *)0)->
head ) *__mptr = ((&connector->probed_modes)->next)
; (__typeof(*curmode) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*curmode), head) );}); &curmode->head != (&
connector->probed_modes); curmode = ({ const __typeof( ((__typeof
(*curmode) *)0)->head ) *__mptr = (curmode->head.next);
(__typeof(*curmode) *)( (char *)__mptr - __builtin_offsetof(
__typeof(*curmode), head) );}))
{
6395 if (common_modes[i].w == curmode->hdisplay &&
6396 common_modes[i].h == curmode->vdisplay) {
6397 mode_existed = true1;
6398 break;
6399 }
6400 }
6401
6402 if (mode_existed)
6403 continue;
6404
6405 mode = amdgpu_dm_create_common_mode(encoder,
6406 common_modes[i].name, common_modes[i].w,
6407 common_modes[i].h);
6408 drm_mode_probed_add(connector, mode);
6409 amdgpu_dm_connector->num_modes++;
6410 }
6411}
6412
6413static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6414 struct edid *edid)
6415{
6416 struct amdgpu_dm_connector *amdgpu_dm_connector =
6417 to_amdgpu_dm_connector(connector)({ const __typeof( ((struct amdgpu_dm_connector *)0)->base
) *__mptr = (connector); (struct amdgpu_dm_connector *)( (char
*)__mptr - __builtin_offsetof(struct amdgpu_dm_connector, base
) );})
;
6418
6419 if (edid) {
6420 /* empty probed_modes */
6421 INIT_LIST_HEAD(&connector->probed_modes);
6422 amdgpu_dm_connector->num_modes =
6423 drm_add_edid_modes(connector, edid);
6424
6425 /* Sort the probed modes before calling
6426 * amdgpu_dm_get_native_mode(), since an EDID can have
6427 * more than one preferred mode. Modes later in the
6428 * probed mode list could be preferred at a higher
6429 * resolution. For example, 3840x2160 may be the base
6430 * EDID preferred timing while a 4096x2160 preferred
6431 * resolution follows in a DID extension block.
6432 */
6433 drm_mode_sort(&connector->probed_modes);
6434 amdgpu_dm_get_native_mode(connector);
6435 } else {
6436 amdgpu_dm_connector->num_modes = 0;
6437 }
6438}
6439
6440static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6441{
6442 struct amdgpu_dm_connector *amdgpu_dm_connector =
6443 to_amdgpu_dm_connector(connector)({ const __typeof( ((struct amdgpu_dm_connector *)0)->base
) *__mptr = (connector); (struct amdgpu_dm_connector *)( (char
*)__mptr - __builtin_offsetof(struct amdgpu_dm_connector, base
) );})
;
6444 struct drm_encoder *encoder;
6445 struct edid *edid = amdgpu_dm_connector->edid;
6446
6447 encoder = amdgpu_dm_connector_to_encoder(connector);
6448
6449 if (!edid || !drm_edid_is_valid(edid)) {
6450 amdgpu_dm_connector->num_modes =
6451 drm_add_modes_noedid(connector, 640, 480);
6452 } else {
6453 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6454 amdgpu_dm_connector_add_common_modes(encoder, connector);
6455 }
6456 amdgpu_dm_fbc_init(connector);
6457
6458 return amdgpu_dm_connector->num_modes;
6459}
6460
6461void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6462 struct amdgpu_dm_connector *aconnector,
6463 int connector_type,
6464 struct dc_link *link,
6465 int link_index)
6466{
6467 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6468
6469 /*
6470 * Some of the properties below require access to state, like bpc.
6471 * Allocate some default initial connector state with our reset helper.
6472 */
6473 if (aconnector->base.funcs->reset)
6474 aconnector->base.funcs->reset(&aconnector->base);
6475
6476 aconnector->connector_id = link_index;
6477 aconnector->dc_link = link;
6478 aconnector->base.interlace_allowed = false0;
6479 aconnector->base.doublescan_allowed = false0;
6480 aconnector->base.stereo_allowed = false0;
6481 aconnector->base.dpms = DRM_MODE_DPMS_OFF3;
6482 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6483 aconnector->audio_inst = -1;
6484 rw_init(&aconnector->hpd_lock, "dmhpd")_rw_init_flags(&aconnector->hpd_lock, "dmhpd", 0, ((void
*)0))
;
6485
6486 /*
6487 * Configure HPD hot-plug support. connector->polled defaults to 0,
6488 * which means HPD hot plug is not supported.
6489 */
6490 switch (connector_type) {
6491 case DRM_MODE_CONNECTOR_HDMIA11:
6492 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD(1 << 0);
6493 aconnector->base.ycbcr_420_allowed =
6494 link->link_enc->features.hdmi_ycbcr420_supported ? true1 : false0;
6495 break;
6496 case DRM_MODE_CONNECTOR_DisplayPort10:
6497 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD(1 << 0);
6498 aconnector->base.ycbcr_420_allowed =
6499 link->link_enc->features.dp_ycbcr420_supported ? true1 : false0;
6500 break;
6501 case DRM_MODE_CONNECTOR_DVID3:
6502 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD(1 << 0);
6503 break;
6504 default:
6505 break;
6506 }
6507
6508 drm_object_attach_property(&aconnector->base.base,
6509 dm->ddev->mode_config.scaling_mode_property,
6510 DRM_MODE_SCALE_NONE0);
6511
6512 drm_object_attach_property(&aconnector->base.base,
6513 adev->mode_info.underscan_property,
6514 UNDERSCAN_OFF);
6515 drm_object_attach_property(&aconnector->base.base,
6516 adev->mode_info.underscan_hborder_property,
6517 0);
6518 drm_object_attach_property(&aconnector->base.base,
6519 adev->mode_info.underscan_vborder_property,
6520 0);
6521
6522 if (!aconnector->mst_port)
6523 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6524
6525 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6526 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP14) ? 16 : 8;
6527 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6528
6529 if (connector_type == DRM_MODE_CONNECTOR_eDP14 &&
6530 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6531 drm_object_attach_property(&aconnector->base.base,
6532 adev->mode_info.abm_level_property, 0);
6533 }
6534
6535 if (connector_type == DRM_MODE_CONNECTOR_HDMIA11 ||
6536 connector_type == DRM_MODE_CONNECTOR_DisplayPort10 ||
6537 connector_type == DRM_MODE_CONNECTOR_eDP14) {
6538 drm_object_attach_property(
6539 &aconnector->base.base,
6540 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6541
6542 if (!aconnector->mst_port)
6543 drm_connector_attach_vrr_capable_property(&aconnector->base);
6544
6545#ifdef CONFIG_DRM_AMD_DC_HDCP
6546 if (adev->dm.hdcp_workqueue)
6547 drm_connector_attach_content_protection_property(&aconnector->base, true1);
6548#endif
6549 }
6550}
6551
6552static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6553 struct i2c_msg *msgs, int num)
6554{
6555 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6556 struct ddc_service *ddc_service = i2c->ddc_service;
6557 struct i2c_command cmd;
6558 int i;
6559 int result = -EIO5;
6560
6561 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL(0x0001 | 0x0004));
6562
6563 if (!cmd.payloads)
6564 return result;
6565
6566 cmd.number_of_payloads = num;
6567 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6568 cmd.speed = 100;
6569
6570 for (i = 0; i < num; i++) {
6571 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD0x0001);
6572 cmd.payloads[i].address = msgs[i].addr;
6573 cmd.payloads[i].length = msgs[i].len;
6574 cmd.payloads[i].data = msgs[i].buf;
6575 }
6576
6577 if (dc_submit_i2c(
6578 ddc_service->ctx->dc,
6579 ddc_service->ddc_pin->hw_info.ddc_channel,
6580 &cmd))
6581 result = num;
6582
6583 kfree(cmd.payloads);
6584 return result;
6585}
6586
6587static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6588{
6589 return I2C_FUNC_I2C0 | I2C_FUNC_SMBUS_EMUL0;
6590}
6591
6592static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6593 .master_xfer = amdgpu_dm_i2c_xfer,
6594 .functionality = amdgpu_dm_i2c_func,
6595};
6596
6597static struct amdgpu_i2c_adapter *
6598create_i2c(struct ddc_service *ddc_service,
6599 int link_index,
6600 int *res)
6601{
6602 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6603 struct amdgpu_i2c_adapter *i2c;
6604
6605 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL(0x0001 | 0x0004));
6606 if (!i2c)
6607 return NULL((void *)0);
6608#ifdef notyet
6609 i2c->base.owner = THIS_MODULE((void *)0);
6610 i2c->base.class = I2C_CLASS_DDC;
6611 i2c->base.dev.parent = &adev->pdev->dev;
6612#endif
6613 i2c->base.algo = &amdgpu_dm_i2c_algo;
6614 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6615 i2c_set_adapdata(&i2c->base, i2c);
6616 i2c->ddc_service = ddc_service;
6617 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6618
6619 return i2c;
6620}
6621
6622
6623/*
6624 * Note: this function assumes that dc_link_detect() was called for the
6625 * dc_link which will be represented by this aconnector.
6626 */
6627static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6628 struct amdgpu_dm_connector *aconnector,
6629 uint32_t link_index,
6630 struct amdgpu_encoder *aencoder)
6631{
6632 int res = 0;
6633 int connector_type;
6634 struct dc *dc = dm->dc;
6635 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6636 struct amdgpu_i2c_adapter *i2c;
6637
6638 link->priv = aconnector;
6639
6640 DRM_DEBUG_DRIVER("%s()\n", __func__)__drm_dbg(DRM_UT_DRIVER, "%s()\n", __func__);
6641
6642 i2c = create_i2c(link->ddc, link->link_index, &res);
6643 if (!i2c) {
6644 DRM_ERROR("Failed to create i2c adapter data\n")__drm_err("Failed to create i2c adapter data\n");
6645 return -ENOMEM12;
6646 }
6647
6648 aconnector->i2c = i2c;
6649 res = i2c_add_adapter(&i2c->base)0;
6650
6651 if (res) {
6652 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index)__drm_err("Failed to register hw i2c %d\n", link->link_index
)
;
6653 goto out_free;
6654 }
6655
6656 connector_type = to_drm_connector_type(link->connector_signal);
6657
6658 res = drm_connector_init_with_ddc(
6659 dm->ddev,
6660 &aconnector->base,
6661 &amdgpu_dm_connector_funcs,
6662 connector_type,
6663 &i2c->base);
6664
6665 if (res) {
6666 DRM_ERROR("connector_init failed\n")__drm_err("connector_init failed\n");
6667 aconnector->connector_id = -1;
6668 goto out_free;
6669 }
6670
6671 drm_connector_helper_add(
6672 &aconnector->base,
6673 &amdgpu_dm_connector_helper_funcs);
6674
6675 amdgpu_dm_connector_init_helper(
6676 dm,
6677 aconnector,
6678 connector_type,
6679 link,
6680 link_index);
6681
6682 drm_connector_attach_encoder(
6683 &aconnector->base, &aencoder->base);
6684
6685 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort10
6686 || connector_type == DRM_MODE_CONNECTOR_eDP14)
6687 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6688
6689out_free:
6690 if (res) {
6691 kfree(i2c);
6692 aconnector->i2c = NULL((void *)0);
6693 }
6694 return res;
6695}
6696
6697int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6698{
6699 switch (adev->mode_info.num_crtc) {
6700 case 1:
6701 return 0x1;
6702 case 2:
6703 return 0x3;
6704 case 3:
6705 return 0x7;
6706 case 4:
6707 return 0xf;
6708 case 5:
6709 return 0x1f;
6710 case 6:
6711 default:
6712 return 0x3f;
6713 }
6714}
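
The switch above is a saturated "n low bits set" mask: (1 << num_crtc) - 1 for one to five CRTCs, and 0x3f for six or any unexpected count. An equivalent standalone reduction (illustrative only):

    #include <assert.h>

    static int crtc_mask(int num_crtc)
    {
            if (num_crtc >= 1 && num_crtc <= 5)
                    return (1 << num_crtc) - 1;
            return 0x3f;    /* case 6 and the default case */
    }

    int main(void)
    {
            assert(crtc_mask(1) == 0x1);
            assert(crtc_mask(4) == 0xf);
            assert(crtc_mask(6) == 0x3f);
            assert(crtc_mask(9) == 0x3f);   /* falls to the default */
            return 0;
    }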
6715
6716static int amdgpu_dm_encoder_init(struct drm_device *dev,
6717 struct amdgpu_encoder *aencoder,
6718 uint32_t link_index)
6719{
6720 struct amdgpu_device *adev = drm_to_adev(dev);
6721
6722 int res = drm_encoder_init(dev,
6723 &aencoder->base,
6724 &amdgpu_dm_encoder_funcs,
6725 DRM_MODE_ENCODER_TMDS2,
6726 NULL((void *)0));
6727
6728 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6729
6730 if (!res)
6731 aencoder->encoder_id = link_index;
6732 else
6733 aencoder->encoder_id = -1;
6734
6735 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6736
6737 return res;
6738}
6739
6740static void manage_dm_interrupts(struct amdgpu_device *adev,
6741 struct amdgpu_crtc *acrtc,
6742 bool_Bool enable)
6743{
6744 /*
6745 * We have no guarantee that the frontend index maps to the same
6746 * backend index - some even map to more than one.
6747 *
6748 * TODO: Use a different interrupt or check DC itself for the mapping.
6749 */
6750 int irq_type =
6751 amdgpu_display_crtc_idx_to_irq_type(
6752 adev,
6753 acrtc->crtc_id);
6754
6755 if (enable) {
6756 drm_crtc_vblank_on(&acrtc->base);
6757 amdgpu_irq_get(
6758 adev,
6759 &adev->pageflip_irq,
6760 irq_type);
6761 } else {
6762
6763 amdgpu_irq_put(
6764 adev,
6765 &adev->pageflip_irq,
6766 irq_type);
6767 drm_crtc_vblank_off(&acrtc->base);
6768 }
6769}
6770
6771static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6772 struct amdgpu_crtc *acrtc)
6773{
6774 int irq_type =
6775 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6776
6777 /**
6778 * This reads the current IRQ state and forcibly reapplies
6779 * the setting to hardware.
6780 */
6781 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6782}
6783
6784static bool_Bool
6785is_scaling_state_different(const struct dm_connector_state *dm_state,
6786 const struct dm_connector_state *old_dm_state)
6787{
6788 if (dm_state->scaling != old_dm_state->scaling)
	28: Assuming 'dm_state->scaling' is equal to 'old_dm_state->scaling'
	29: Taking false branch
6789 return true1;
6790 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
	30: Assuming field 'underscan_enable' is true, which participates in a condition later
6791 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6792 return true1;
6793 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
	30.1: Field 'underscan_enable' is true
	31: Assuming field 'underscan_enable' is false, which participates in a condition later
	32: Taking true branch
6794 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
	33: Assuming field 'underscan_hborder' is not equal to 0
	34: Assuming field 'underscan_vborder' is not equal to 0
	35: Taking true branch
6795 return true1;
	36: Returning the value 1, which participates in a condition later
6796 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6797 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6798 return true1;
6799 return false0;
6800}
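
Steps 28-36 above are the analyzer establishing that this helper can report a scaling change on the diagnosed path: scaling values equal, old underscan off, new underscan on with both borders nonzero. A standalone reduction of that predicate reproduces the flagged path (the struct and function names here are illustrative, not the driver's):

    #include <assert.h>
    #include <stdbool.h>

    struct cs {
            int scaling;
            bool underscan_enable;
            int underscan_hborder, underscan_vborder;
    };

    static bool scaling_differs(const struct cs *cur, const struct cs *old)
    {
            if (cur->scaling != old->scaling)
                    return true;
            if (!cur->underscan_enable && old->underscan_enable)
                    return old->underscan_hborder != 0 &&
                           old->underscan_vborder != 0;
            if (cur->underscan_enable && !old->underscan_enable)
                    return cur->underscan_hborder != 0 &&
                           cur->underscan_vborder != 0;
            return cur->underscan_hborder != old->underscan_hborder ||
                   cur->underscan_vborder != old->underscan_vborder;
    }

    int main(void)
    {
            struct cs old = { 0, false, 0, 0 };
            struct cs cur = { 0, true, 16, 16 };

            /* Matches the analyzer's assumptions in steps 28-36. */
            assert(scaling_differs(&cur, &old));
            return 0;
    }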
6801
6802#ifdef CONFIG_DRM_AMD_DC_HDCP
6803static bool_Bool is_content_protection_different(struct drm_connector_state *state,
6804 const struct drm_connector_state *old_state,
6805 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6806{
6807 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector)({ const __typeof( ((struct amdgpu_dm_connector *)0)->base
) *__mptr = (connector); (struct amdgpu_dm_connector *)( (char
*)__mptr - __builtin_offsetof(struct amdgpu_dm_connector, base
) );})
;
6808
6809 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6810 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED0) {
6811 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED1;
6812 return true1;
6813 }
6814
6815 /* CP is being re-enabled, ignore this */
6816 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED2 &&
6817 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED1) {
6818 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED2;
6819 return false0;
6820 }
6821
6822 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6823 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED0 &&
6824 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED2)
6825 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED1;
6826
6827 /* Check that something is connected/enabled; otherwise we would start
6828 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
6829 */
6830 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED1 && connector->dpms == DRM_MODE_DPMS_ON0 &&
6831 aconnector->dc_sink != NULL((void *)0))
6832 return true1;
6833
6834 if (old_state->content_protection == state->content_protection)
6835 return false0;
6836
6837 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED0)
6838 return true1;
6839
6840 return false0;
6841}
6842
6843#endif
6844static void remove_stream(struct amdgpu_device *adev,
6845 struct amdgpu_crtc *acrtc,
6846 struct dc_stream_state *stream)
6847{
6848 /* this is the update mode case */
6849
6850 acrtc->otg_inst = -1;
6851 acrtc->enabled = false0;
6852}
6853
6854static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6855 struct dc_cursor_position *position)
6856{
6857 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc)({ const __typeof( ((struct amdgpu_crtc *)0)->base ) *__mptr
= (crtc); (struct amdgpu_crtc *)( (char *)__mptr - __builtin_offsetof
(struct amdgpu_crtc, base) );})
;
6858 int x, y;
6859 int xorigin = 0, yorigin = 0;
6860
6861 if (!crtc || !plane->state->fb)
6862 return 0;
6863
6864 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6865 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6866 DRM_ERROR("%s: bad cursor width or height %d x %d\n",__drm_err("%s: bad cursor width or height %d x %d\n", __func__
, plane->state->crtc_w, plane->state->crtc_h)
6867 __func__,__drm_err("%s: bad cursor width or height %d x %d\n", __func__
, plane->state->crtc_w, plane->state->crtc_h)
6868 plane->state->crtc_w,__drm_err("%s: bad cursor width or height %d x %d\n", __func__
, plane->state->crtc_w, plane->state->crtc_h)
6869 plane->state->crtc_h)__drm_err("%s: bad cursor width or height %d x %d\n", __func__
, plane->state->crtc_w, plane->state->crtc_h)
;
6870 return -EINVAL22;
6871 }
6872
6873 x = plane->state->crtc_x;
6874 y = plane->state->crtc_y;
6875
6876 if (x <= -amdgpu_crtc->max_cursor_width ||
6877 y <= -amdgpu_crtc->max_cursor_height)
6878 return 0;
6879
6880 if (x < 0) {
6881 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1)(((-x)<(amdgpu_crtc->max_cursor_width - 1))?(-x):(amdgpu_crtc
->max_cursor_width - 1))
;
6882 x = 0;
6883 }
6884 if (y < 0) {
6885 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1)(((-y)<(amdgpu_crtc->max_cursor_height - 1))?(-y):(amdgpu_crtc
->max_cursor_height - 1))
;
6886 y = 0;
6887 }
6888 position->enable = true1;
6889 position->translate_by_source = true1;
6890 position->x = x;
6891 position->y = y;
6892 position->x_hotspot = xorigin;
6893 position->y_hotspot = yorigin;
6894
6895 return 0;
6896}
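
The negative-position handling above converts a top/left overhang into a hotspot offset so the visible part of the cursor still lines up. A worked reduction (a 128x128 max cursor is assumed for the example):

    #include <assert.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static void clamp_cursor(int *x, int *hot, int max)
    {
            if (*x < 0) {
                    *hot = MIN(-*x, max - 1);   /* shift origin into the plane */
                    *x = 0;
            }
    }

    int main(void)
    {
            int x = -10, hot = 0;

            clamp_cursor(&x, &hot, 128);
            assert(x == 0 && hot == 10);    /* 10 px of cursor hang off-screen */
            return 0;
    }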
6897
6898static void handle_cursor_update(struct drm_plane *plane,
6899 struct drm_plane_state *old_plane_state)
6900{
6901 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6902 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb)({ const __typeof( ((struct amdgpu_framebuffer *)0)->base )
*__mptr = (plane->state->fb); (struct amdgpu_framebuffer
*)( (char *)__mptr - __builtin_offsetof(struct amdgpu_framebuffer
, base) );})
;
6903 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6904 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state)({ const __typeof( ((struct dm_crtc_state *)0)->base ) *__mptr
= (crtc->state); (struct dm_crtc_state *)( (char *)__mptr
- __builtin_offsetof(struct dm_crtc_state, base) );})
: NULL((void *)0);
6905 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc)({ const __typeof( ((struct amdgpu_crtc *)0)->base ) *__mptr
= (crtc); (struct amdgpu_crtc *)( (char *)__mptr - __builtin_offsetof
(struct amdgpu_crtc, base) );})
;
6906 uint64_t address = afb ? afb->address : 0;
6907 struct dc_cursor_position position = {0};
6908 struct dc_cursor_attributes attributes;
6909 int ret;
6910
6911 if (!plane->state->fb && !old_plane_state->fb)
6912 return;
6913
6914 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",__drm_dbg(DRM_UT_DRIVER, "%s: crtc_id=%d with size %d to %d\n"
, __func__, amdgpu_crtc->crtc_id, plane->state->crtc_w
, plane->state->crtc_h)
6915 __func__,__drm_dbg(DRM_UT_DRIVER, "%s: crtc_id=%d with size %d to %d\n"
, __func__, amdgpu_crtc->crtc_id, plane->state->crtc_w
, plane->state->crtc_h)
6916 amdgpu_crtc->crtc_id,__drm_dbg(DRM_UT_DRIVER, "%s: crtc_id=%d with size %d to %d\n"
, __func__, amdgpu_crtc->crtc_id, plane->state->crtc_w
, plane->state->crtc_h)
6917 plane->state->crtc_w,__drm_dbg(DRM_UT_DRIVER, "%s: crtc_id=%d with size %d to %d\n"
, __func__, amdgpu_crtc->crtc_id, plane->state->crtc_w
, plane->state->crtc_h)
6918 plane->state->crtc_h)__drm_dbg(DRM_UT_DRIVER, "%s: crtc_id=%d with size %d to %d\n"
, __func__, amdgpu_crtc->crtc_id, plane->state->crtc_w
, plane->state->crtc_h)
;
6919
6920 ret = get_cursor_position(plane, crtc, &position);
6921 if (ret)
6922 return;
6923
6924 if (!position.enable) {
6925 /* turn off cursor */
6926 if (crtc_state && crtc_state->stream) {
6927 mutex_lock(&adev->dm.dc_lock)rw_enter_write(&adev->dm.dc_lock);
6928 dc_stream_set_cursor_position(crtc_state->stream,
6929 &position);
6930 mutex_unlock(&adev->dm.dc_lock)rw_exit_write(&adev->dm.dc_lock);
6931 }
6932 return;
6933 }
6934
6935 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6936 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6937
6938 memset(&attributes, 0, sizeof(attributes))__builtin_memset((&attributes), (0), (sizeof(attributes))
)
;
6939 attributes.address.high_part = upper_32_bits(address)((u32)(((address) >> 16) >> 16));
6940 attributes.address.low_part = lower_32_bits(address)((u32)(address));
6941 attributes.width = plane->state->crtc_w;
6942 attributes.height = plane->state->crtc_h;
6943 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6944 attributes.rotation_angle = 0;
6945 attributes.attribute_flags.value = 0;
6946
6947 attributes.pitch = attributes.width;
6948
6949 if (crtc_state->stream) {
6950 mutex_lock(&adev->dm.dc_lock)rw_enter_write(&adev->dm.dc_lock);
6951 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6952 &attributes))
6953 DRM_ERROR("DC failed to set cursor attributes\n")__drm_err("DC failed to set cursor attributes\n");
6954
6955 if (!dc_stream_set_cursor_position(crtc_state->stream,
6956 &position))
6957 DRM_ERROR("DC failed to set cursor position\n")__drm_err("DC failed to set cursor position\n");
6958 mutex_unlock(&adev->dm.dc_lock)rw_exit_write(&adev->dm.dc_lock);
6959 }
6960}
6961
6962static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6963{
6964
6965 assert_spin_locked(&acrtc->base.dev->event_lock)do { if (((&acrtc->base.dev->event_lock)->mtx_owner
!= ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" :
"=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})) && !(panicstr || db_active)) panic("mutex %p not held in %s"
, (&acrtc->base.dev->event_lock), __func__); } while
(0)
;
6966 WARN_ON(acrtc->event)({ int __ret = !!(acrtc->event); if (__ret) printf("WARNING %s failed at %s:%d\n"
, "acrtc->event", "/usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c"
, 6966); __builtin_expect(!!(__ret), 0); })
;
6967
6968 acrtc->event = acrtc->base.state->event;
6969
6970 /* Set the flip status */
6971 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6972
6973 /* Mark this event as consumed */
6974 acrtc->base.state->event = NULL((void *)0);
6975
6976 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",__drm_dbg(DRM_UT_DRIVER, "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n"
, acrtc->crtc_id)
6977 acrtc->crtc_id)__drm_dbg(DRM_UT_DRIVER, "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n"
, acrtc->crtc_id)
;
6978}
6979
6980static void update_freesync_state_on_stream(
6981 struct amdgpu_display_manager *dm,
6982 struct dm_crtc_state *new_crtc_state,
6983 struct dc_stream_state *new_stream,
6984 struct dc_plane_state *surface,
6985 u32 flip_timestamp_in_us)
6986{
6987 struct mod_vrr_params vrr_params;
6988 struct dc_info_packet vrr_infopacket = {0};
6989 struct amdgpu_device *adev = dm->adev;
6990 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc)({ const __typeof( ((struct amdgpu_crtc *)0)->base ) *__mptr
= (new_crtc_state->base.crtc); (struct amdgpu_crtc *)( (char
*)__mptr - __builtin_offsetof(struct amdgpu_crtc, base) );})
;
6991 unsigned long flags;
6992
6993 if (!new_stream)
6994 return;
6995
6996 /*
6997 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6998 * For now it's sufficient to just guard against these conditions.
6999 */
7000
7001 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7002 return;
7003
7004 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags)do { flags = 0; mtx_enter(&adev_to_drm(adev)->event_lock
); } while (0)
;
7005 vrr_params = acrtc->dm_irq_params.vrr_params;
7006
7007 if (surface) {
7008 mod_freesync_handle_preflip(
7009 dm->freesync_module,
7010 surface,
7011 new_stream,
7012 flip_timestamp_in_us,
7013 &vrr_params);
7014
7015 if (adev->family < AMDGPU_FAMILY_AI141 &&
7016 amdgpu_dm_vrr_active(new_crtc_state)) {
7017 mod_freesync_handle_v_update(dm->freesync_module,
7018 new_stream, &vrr_params);
7019
7020 /* Need to call this before the frame ends. */
7021 dc_stream_adjust_vmin_vmax(dm->dc,
7022 new_crtc_state->stream,
7023 &vrr_params.adjust);
7024 }
7025 }
7026
7027 mod_freesync_build_vrr_infopacket(
7028 dm->freesync_module,
7029 new_stream,
7030 &vrr_params,
7031 PACKET_TYPE_VRR,
7032 TRANSFER_FUNC_UNKNOWN,
7033 &vrr_infopacket);
7034
7035 new_crtc_state->freesync_timing_changed |=
7036 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,__builtin_memcmp((&acrtc->dm_irq_params.vrr_params.adjust
), (&vrr_params.adjust), (sizeof(vrr_params.adjust)))
7037 &vrr_params.adjust,__builtin_memcmp((&acrtc->dm_irq_params.vrr_params.adjust
), (&vrr_params.adjust), (sizeof(vrr_params.adjust)))
7038 sizeof(vrr_params.adjust))__builtin_memcmp((&acrtc->dm_irq_params.vrr_params.adjust
), (&vrr_params.adjust), (sizeof(vrr_params.adjust)))
!= 0);
7039
7040 new_crtc_state->freesync_vrr_info_changed |=
7041 (memcmp(&new_crtc_state->vrr_infopacket,__builtin_memcmp((&new_crtc_state->vrr_infopacket), (&
vrr_infopacket), (sizeof(vrr_infopacket)))
7042 &vrr_infopacket,__builtin_memcmp((&new_crtc_state->vrr_infopacket), (&
vrr_infopacket), (sizeof(vrr_infopacket)))
7043 sizeof(vrr_infopacket))__builtin_memcmp((&new_crtc_state->vrr_infopacket), (&
vrr_infopacket), (sizeof(vrr_infopacket)))
!= 0);
7044
7045 acrtc->dm_irq_params.vrr_params = vrr_params;
7046 new_crtc_state->vrr_infopacket = vrr_infopacket;
7047
7048 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7049 new_stream->vrr_infopacket = vrr_infopacket;
7050
7051 if (new_crtc_state->freesync_vrr_info_changed)
7052 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",__drm_dbg(DRM_UT_KMS, "VRR packet update: crtc=%u enabled=%d state=%d"
, new_crtc_state->base.crtc->base.id, (int)new_crtc_state
->base.vrr_enabled, (int)vrr_params.state)
7053 new_crtc_state->base.crtc->base.id,__drm_dbg(DRM_UT_KMS, "VRR packet update: crtc=%u enabled=%d state=%d"
, new_crtc_state->base.crtc->base.id, (int)new_crtc_state
->base.vrr_enabled, (int)vrr_params.state)
7054 (int)new_crtc_state->base.vrr_enabled,__drm_dbg(DRM_UT_KMS, "VRR packet update: crtc=%u enabled=%d state=%d"
, new_crtc_state->base.crtc->base.id, (int)new_crtc_state
->base.vrr_enabled, (int)vrr_params.state)
7055 (int)vrr_params.state)__drm_dbg(DRM_UT_KMS, "VRR packet update: crtc=%u enabled=%d state=%d"
, new_crtc_state->base.crtc->base.id, (int)new_crtc_state
->base.vrr_enabled, (int)vrr_params.state)
;
7056
7057 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags)do { (void)(flags); mtx_leave(&adev_to_drm(adev)->event_lock
); } while (0)
;
7058}
7059
7060static void update_stream_irq_parameters(
7061 struct amdgpu_display_manager *dm,
7062 struct dm_crtc_state *new_crtc_state)
7063{
7064 struct dc_stream_state *new_stream = new_crtc_state->stream;
7065 struct mod_vrr_params vrr_params;
7066 struct mod_freesync_config config = new_crtc_state->freesync_config;
7067 struct amdgpu_device *adev = dm->adev;
7068 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc)({ const __typeof( ((struct amdgpu_crtc *)0)->base ) *__mptr
= (new_crtc_state->base.crtc); (struct amdgpu_crtc *)( (char
*)__mptr - __builtin_offsetof(struct amdgpu_crtc, base) );})
;
7069 unsigned long flags;
7070
7071 if (!new_stream)
7072 return;
7073
7074 /*
7075 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7076 * For now it's sufficient to just guard against these conditions.
7077 */
7078 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7079 return;
7080
7081 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags)do { flags = 0; mtx_enter(&adev_to_drm(adev)->event_lock
); } while (0)
;
7082 vrr_params = acrtc->dm_irq_params.vrr_params;
7083
7084 if (new_crtc_state->vrr_supported &&
7085 config.min_refresh_in_uhz &&
7086 config.max_refresh_in_uhz) {
7087 config.state = new_crtc_state->base.vrr_enabled ?
7088 VRR_STATE_ACTIVE_VARIABLE :
7089 VRR_STATE_INACTIVE;
7090 } else {
7091 config.state = VRR_STATE_UNSUPPORTED;
7092 }
7093
7094 mod_freesync_build_vrr_params(dm->freesync_module,
7095 new_stream,
7096 &config, &vrr_params);
7097
7098 new_crtc_state->freesync_timing_changed |=
7099 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,__builtin_memcmp((&acrtc->dm_irq_params.vrr_params.adjust
), (&vrr_params.adjust), (sizeof(vrr_params.adjust)))
7100 &vrr_params.adjust, sizeof(vrr_params.adjust))__builtin_memcmp((&acrtc->dm_irq_params.vrr_params.adjust
), (&vrr_params.adjust), (sizeof(vrr_params.adjust)))
!= 0);
7101
7102 new_crtc_state->freesync_config = config;
7103 /* Copy state for access from DM IRQ handler */
7104 acrtc->dm_irq_params.freesync_config = config;
7105 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7106 acrtc->dm_irq_params.vrr_params = vrr_params;
7107 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags)do { (void)(flags); mtx_leave(&adev_to_drm(adev)->event_lock
); } while (0)
;
7108}
7109
7110static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7111 struct dm_crtc_state *new_state)
7112{
7113 bool_Bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7114 bool_Bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7115
7116 if (!old_vrr_active && new_vrr_active) {
7117 /* Transition VRR inactive -> active:
7118 * While VRR is active, we must not disable vblank irq, as a
7119 * reenable after disable would compute bogus vblank/pflip
7120 * timestamps if it likely happened inside display front-porch.
7121 *
7122 * We also need vupdate irq for the actual core vblank handling
7123 * at end of vblank.
7124 */
7125 dm_set_vupdate_irq(new_state->base.crtc, true1);
7126 drm_crtc_vblank_get(new_state->base.crtc);
7127 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",__drm_dbg(DRM_UT_DRIVER, "%s: crtc=%u VRR off->on: Get vblank ref\n"
, __func__, new_state->base.crtc->base.id)
7128 __func__, new_state->base.crtc->base.id)__drm_dbg(DRM_UT_DRIVER, "%s: crtc=%u VRR off->on: Get vblank ref\n"
, __func__, new_state->base.crtc->base.id)
;
7129 } else if (old_vrr_active && !new_vrr_active) {
7130 /* Transition VRR active -> inactive:
7131 * Allow vblank irq disable again for fixed refresh rate.
7132 */
7133 dm_set_vupdate_irq(new_state->base.crtc, false0);
7134 drm_crtc_vblank_put(new_state->base.crtc);
7135 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",__drm_dbg(DRM_UT_DRIVER, "%s: crtc=%u VRR on->off: Drop vblank ref\n"
, __func__, new_state->base.crtc->base.id)
7136 __func__, new_state->base.crtc->base.id)__drm_dbg(DRM_UT_DRIVER, "%s: crtc=%u VRR on->off: Drop vblank ref\n"
, __func__, new_state->base.crtc->base.id)
;
7137 }
7138}
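
The transition handler above keeps exactly one extra vblank reference (plus the vupdate irq) for as long as VRR is active, taking it on the off->on edge and dropping it on on->off. A state-machine reduction of that balancing, illustrative only:

    #include <assert.h>
    #include <stdbool.h>

    static int vblank_refs;

    static void vrr_transition(bool old_active, bool new_active)
    {
            if (!old_active && new_active)
                    vblank_refs++;          /* off -> on: take the reference */
            else if (old_active && !new_active)
                    vblank_refs--;          /* on -> off: drop it again */
    }

    int main(void)
    {
            vrr_transition(false, true);
            vrr_transition(true, true);     /* no change while active */
            vrr_transition(true, false);
            assert(vblank_refs == 0);       /* balanced across the cycle */
            return 0;
    }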
7139
7140 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7141 {
7142 	struct drm_plane *plane;
7143 	struct drm_plane_state *old_plane_state, *new_plane_state;
7144 	int i;
7145
7146 	/*
7147 	 * TODO: Make this per-stream so we don't issue redundant updates for
7148 	 * commits with multiple streams.
7149 	 */
7150 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7151 				       new_plane_state, i)
7152 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7153 			handle_cursor_update(plane, old_plane_state);
7154 }
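
Editor's note: one way the TODO above could be approached (purely a sketch;
the extra crtc parameter is an editor assumption, not existing code) is to
let callers restrict the walk to the CRTC they are committing:

	static void example_commit_cursors_for_crtc(struct drm_atomic_state *state,
						    struct drm_crtc *pcrtc)
	{
		struct drm_plane *plane;
		struct drm_plane_state *old_plane_state, *new_plane_state;
		int i;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state,
					       new_plane_state, i)
			if (plane->type == DRM_PLANE_TYPE_CURSOR &&
			    new_plane_state->crtc == pcrtc)
				handle_cursor_update(plane, old_plane_state);
	}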
7155
7156 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7157 				    struct dc_state *dc_state,
7158 				    struct drm_device *dev,
7159 				    struct amdgpu_display_manager *dm,
7160 				    struct drm_crtc *pcrtc,
7161 				    bool wait_for_vblank)
7162 {
7163 	uint32_t i;
7164 	uint64_t timestamp_ns;
7165 	struct drm_plane *plane;
7166 	struct drm_plane_state *old_plane_state, *new_plane_state;
7167 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7168 	struct drm_crtc_state *new_pcrtc_state =
7169 			drm_atomic_get_new_crtc_state(state, pcrtc);
7170 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7171 	struct dm_crtc_state *dm_old_crtc_state =
7172 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7173 	int planes_count = 0, vpos, hpos;
7174 	long r;
7175 	unsigned long flags;
7176 	struct amdgpu_bo *abo;
7177 	uint32_t target_vblank, last_flip_vblank;
7178 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7179 	bool pflip_present = false;
7180 	struct {
7181 		struct dc_surface_update surface_updates[MAX_SURFACES];
7182 		struct dc_plane_info plane_infos[MAX_SURFACES];
7183 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7184 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7185 		struct dc_stream_update stream_update;
7186 	} *bundle;
7187
7188 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7189
7190 	if (!bundle) {
7191 		dm_error("Failed to allocate update bundle\n");
7192 		goto cleanup;
7193 	}
7194
7195 	/*
7196 	 * Disable the cursor first if we're disabling all the planes.
7197 	 * It'll remain on the screen after the planes are re-enabled
7198 	 * if we don't.
7199 	 */
7200 	if (acrtc_state->active_planes == 0)
7201 		amdgpu_dm_commit_cursors(state);
7202
7203 	/* update planes when needed */
7204 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7205 		struct drm_crtc *crtc = new_plane_state->crtc;
7206 		struct drm_crtc_state *new_crtc_state;
7207 		struct drm_framebuffer *fb = new_plane_state->fb;
7208 		bool plane_needs_flip;
7209 		struct dc_plane_state *dc_plane;
7210 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7211
7212 		/* Cursor plane is handled after stream updates */
7213 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7214 			continue;
7215
7216 		if (!fb || !crtc || pcrtc != crtc)
7217 			continue;
7218
7219 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7220 		if (!new_crtc_state->active)
7221 			continue;
7222
7223 		dc_plane = dm_new_plane_state->dc_state;
7224
7225 		bundle->surface_updates[planes_count].surface = dc_plane;
7226 		if (new_pcrtc_state->color_mgmt_changed) {
7227 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7228 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7229 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7230 		}
7231
7232 		fill_dc_scaling_info(new_plane_state,
7233 				     &bundle->scaling_infos[planes_count]);
7234
7235 		bundle->surface_updates[planes_count].scaling_info =
7236 			&bundle->scaling_infos[planes_count];
7237
7238 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7239
7240 		pflip_present = pflip_present || plane_needs_flip;
7241
7242 		if (!plane_needs_flip) {
7243 			planes_count += 1;
7244 			continue;
7245 		}
7246
7247 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7248
7249 		/*
7250 		 * Wait for all fences on this FB. Do limited wait to avoid
7251 		 * deadlock during GPU reset when this fence will not signal
7252 		 * but we hold reservation lock for the BO.
7253 		 */
7254 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7255 					      false,
7256 					      msecs_to_jiffies(5000));
7257 		if (unlikely(r <= 0))
7258 			DRM_ERROR("Waiting for fences timed out!");
7259
7260 		fill_dc_plane_info_and_addr(
7261 			dm->adev, new_plane_state,
7262 			dm_new_plane_state->tiling_flags,
7263 			&bundle->plane_infos[planes_count],
7264 			&bundle->flip_addrs[planes_count].address,
7265 			dm_new_plane_state->tmz_surface, false);
7266
7267 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7268 				 new_plane_state->plane->index,
7269 				 bundle->plane_infos[planes_count].dcc.enable);
7270
7271 		bundle->surface_updates[planes_count].plane_info =
7272 			&bundle->plane_infos[planes_count];
7273
7274 		/*
7275 		 * Only allow immediate flips for fast updates that don't
7276 		 * change FB pitch, DCC state, rotation or mirroring.
7277 		 */
7278 		bundle->flip_addrs[planes_count].flip_immediate =
7279 			crtc->state->async_flip &&
7280 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7281
7282 		timestamp_ns = ktime_get_ns();
7283 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7284 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7285 		bundle->surface_updates[planes_count].surface = dc_plane;
7286
7287 		if (!bundle->surface_updates[planes_count].surface) {
7288 			DRM_ERROR("No surface for CRTC: id=%d\n",
7289 				  acrtc_attach->crtc_id);
7290 			continue;
7291 		}
7292
7293 		if (plane == pcrtc->primary)
7294 			update_freesync_state_on_stream(
7295 				dm,
7296 				acrtc_state,
7297 				acrtc_state->stream,
7298 				dc_plane,
7299 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7300
7301 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7302 				 __func__,
7303 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7304 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7305
7306 		planes_count += 1;
7307
7308 	}
7309
7310 	if (pflip_present) {
7311 		if (!vrr_active) {
7312 			/* Use old throttling in non-vrr fixed refresh rate mode
7313 			 * to keep flip scheduling based on target vblank counts
7314 			 * working in a backwards compatible way, e.g., for
7315 			 * clients using the GLX_OML_sync_control extension or
7316 			 * DRI3/Present extension with defined target_msc.
7317 			 */
7318 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7319 		}
7320 		else {
7321 			/* For variable refresh rate mode only:
7322 			 * Get vblank of last completed flip to avoid > 1 vrr
7323 			 * flips per video frame by use of throttling, but allow
7324 			 * flip programming anywhere in the possibly large
7325 			 * variable vrr vblank interval for fine-grained flip
7326 			 * timing control and more opportunity to avoid stutter
7327 			 * on late submission of flips.
7328 			 */
7329 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7330 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7331 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7332 		}
7333
7334 		target_vblank = last_flip_vblank + wait_for_vblank;
7335
7336 		/*
7337 		 * Wait until we're out of the vertical blank period before the one
7338 		 * targeted by the flip
7339 		 */
7340 		while ((acrtc_attach->enabled &&
7341 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7342 							    0, &vpos, &hpos, NULL,
7343 							    NULL, &pcrtc->hwmode)
7344 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7345 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7346 			(int)(target_vblank -
7347 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7348 			usleep_range(1000, 1100);
7349 		}
7350
7351 		/**
7352 		 * Prepare the flip event for the pageflip interrupt to handle.
7353 		 *
7354 		 * This only works in the case where we've already turned on the
7355 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7356 		 * from 0 -> n planes we have to skip a hardware generated event
7357 		 * and rely on sending it from software.
7358 		 */
7359 		if (acrtc_attach->base.state->event &&
7360 		    acrtc_state->active_planes > 0) {
7361 			drm_crtc_vblank_get(pcrtc);
7362
7363 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7364
7365 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7366 			prepare_flip_isr(acrtc_attach);
7367
7368 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7369 		}
7370
7371 		if (acrtc_state->stream) {
7372 			if (acrtc_state->freesync_vrr_info_changed)
7373 				bundle->stream_update.vrr_infopacket =
7374 					&acrtc_state->stream->vrr_infopacket;
7375 		}
7376 	}
7377
7378 	/* Update the planes if changed or disable if we don't have any. */
7379 	if ((planes_count || acrtc_state->active_planes == 0) &&
7380 	    acrtc_state->stream) {
7381 		bundle->stream_update.stream = acrtc_state->stream;
7382 		if (new_pcrtc_state->mode_changed) {
7383 			bundle->stream_update.src = acrtc_state->stream->src;
7384 			bundle->stream_update.dst = acrtc_state->stream->dst;
7385 		}
7386
7387 		if (new_pcrtc_state->color_mgmt_changed) {
7388 			/*
7389 			 * TODO: This isn't fully correct since we've actually
7390 			 * already modified the stream in place.
7391 			 */
7392 			bundle->stream_update.gamut_remap =
7393 				&acrtc_state->stream->gamut_remap_matrix;
7394 			bundle->stream_update.output_csc_transform =
7395 				&acrtc_state->stream->csc_color_matrix;
7396 			bundle->stream_update.out_transfer_func =
7397 				acrtc_state->stream->out_transfer_func;
7398 		}
7399
7400 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7401 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7402 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7403
7404 		/*
7405 		 * If FreeSync state on the stream has changed then we need to
7406 		 * re-adjust the min/max bounds now that DC doesn't handle this
7407 		 * as part of commit.
7408 		 */
7409 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7410 		    amdgpu_dm_vrr_active(acrtc_state)) {
7411 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7412 			dc_stream_adjust_vmin_vmax(
7413 				dm->dc, acrtc_state->stream,
7414 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7415 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7416 		}
7417 		mutex_lock(&dm->dc_lock);
7418 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7419 		    acrtc_state->stream->link->psr_settings.psr_allow_active)
7420 			amdgpu_dm_psr_disable(acrtc_state->stream);
7421
7422 		dc_commit_updates_for_stream(dm->dc,
7423 					     bundle->surface_updates,
7424 					     planes_count,
7425 					     acrtc_state->stream,
7426 					     &bundle->stream_update,
7427 					     dc_state);
7428
7429 		/**
7430 		 * Enable or disable the interrupts on the backend.
7431 		 *
7432 		 * Most pipes are put into power gating when unused.
7433 		 *
7434 		 * When power gating is enabled on a pipe we lose the
7435 		 * interrupt enablement state when power gating is disabled.
7436 		 *
7437 		 * So we need to update the IRQ control state in hardware
7438 		 * whenever the pipe turns on (since it could be previously
7439 		 * power gated) or off (since some pipes can't be power gated
7440 		 * on some ASICs).
7441 		 */
7442 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7443 			dm_update_pflip_irq_state(drm_to_adev(dev),
7444 						  acrtc_attach);
7445
7446 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7447 		    acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7448 		    !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7449 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7450 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7451 			 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7452 			 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7453 			amdgpu_dm_psr_enable(acrtc_state->stream);
7454 		}
7455
7456 		mutex_unlock(&dm->dc_lock);
7457 	}
7458
7459 	/*
7460 	 * Update cursor state *after* programming all the planes.
7461 	 * This avoids redundant programming in the case where we're going
7462 	 * to be disabling a single plane - those pipes are being disabled.
7463 	 */
7464 	if (acrtc_state->active_planes)
7465 		amdgpu_dm_commit_cursors(state);
7466
7467 cleanup:
7468 	kfree(bundle);
7469 }
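
Editor's note: the flip throttling above reduces to a simple rule — pick
target_vblank one frame past the last completed flip (or the current counter
in fixed-rate mode), then poll until scanout has left the vertical blank
before the target. A condensed sketch (the helpers in_vblank() and
vblank_count() are assumptions for illustration, not driver functions):

	while (crtc_enabled && in_vblank(crtc) &&
	       (int)(target_vblank - vblank_count(crtc)) > 0)
		usleep_range(1000, 1100);	/* ~1 ms poll interval */

The signed cast matters: vblank counters wrap, and the subtract-then-compare
idiom keeps the ordering test correct across the wraparound.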
7470
7471 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7472 				   struct drm_atomic_state *state)
7473 {
7474 	struct amdgpu_device *adev = drm_to_adev(dev);
7475 	struct amdgpu_dm_connector *aconnector;
7476 	struct drm_connector *connector;
7477 	struct drm_connector_state *old_con_state, *new_con_state;
7478 	struct drm_crtc_state *new_crtc_state;
7479 	struct dm_crtc_state *new_dm_crtc_state;
7480 	const struct dc_stream_status *status;
7481 	int i, inst;
7482
7483 	/* Notify device removals. */
7484 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7485 		if (old_con_state->crtc != new_con_state->crtc) {
7486 			/* CRTC changes require notification. */
7487 			goto notify;
7488 		}
7489
7490 		if (!new_con_state->crtc)
7491 			continue;
7492
7493 		new_crtc_state = drm_atomic_get_new_crtc_state(
7494 			state, new_con_state->crtc);
7495
7496 		if (!new_crtc_state)
7497 			continue;
7498
7499 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7500 			continue;
7501
7502 	notify:
7503 		aconnector = to_amdgpu_dm_connector(connector);
7504
7505 		mutex_lock(&adev->dm.audio_lock);
7506 		inst = aconnector->audio_inst;
7507 		aconnector->audio_inst = -1;
7508 		mutex_unlock(&adev->dm.audio_lock);
7509
7510 		amdgpu_dm_audio_eld_notify(adev, inst);
7511 	}
7512
7513 	/* Notify audio device additions. */
7514 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7515 		if (!new_con_state->crtc)
7516 			continue;
7517
7518 		new_crtc_state = drm_atomic_get_new_crtc_state(
7519 			state, new_con_state->crtc);
7520
7521 		if (!new_crtc_state)
7522 			continue;
7523
7524 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7525 			continue;
7526
7527 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7528 		if (!new_dm_crtc_state->stream)
7529 			continue;
7530
7531 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7532 		if (!status)
7533 			continue;
7534
7535 		aconnector = to_amdgpu_dm_connector(connector);
7536
7537 		mutex_lock(&adev->dm.audio_lock);
7538 		inst = status->audio_inst;
7539 		aconnector->audio_inst = inst;
7540 		mutex_unlock(&adev->dm.audio_lock);
7541
7542 		amdgpu_dm_audio_eld_notify(adev, inst);
7543 	}
7544 }
7545
7546 /*
7547  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7548  * @crtc_state: the DRM CRTC state
7549  * @stream_state: the DC stream state.
7550  *
7551  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7552  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7553  */
7554 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7555 						struct dc_stream_state *stream_state)
7556 {
7557 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7558 }
7559
7560 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7561 				   struct drm_atomic_state *state,
7562 				   bool nonblock)
7563 {
7564 	/*
7565 	 * Add a check here for SoCs that support a hardware cursor plane, to
7566 	 * unset legacy_cursor_update.
7567 	 */
7568
7569 	return drm_atomic_helper_commit(dev, state, nonblock);
7570
7571 	/* TODO: Handle EINTR, reenable IRQ */
7572 }
7573
7574 /**
7575  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7576  * @state: The atomic state to commit
7577  *
7578  * This will tell DC to commit the constructed DC state from atomic_check,
7579  * programming the hardware. Any failure here implies a hardware failure, since
7580  * atomic check should have filtered anything non-kosher.
7581  */
7582 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7583 {
7584 	struct drm_device *dev = state->dev;
7585 	struct amdgpu_device *adev = drm_to_adev(dev);
7586 	struct amdgpu_display_manager *dm = &adev->dm;
7587 	struct dm_atomic_state *dm_state;
7588 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7589 	uint32_t i, j;
7590 	struct drm_crtc *crtc;
7591 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7592 	unsigned long flags;
7593 	bool wait_for_vblank = true;
7594 	struct drm_connector *connector;
7595 	struct drm_connector_state *old_con_state, *new_con_state;
7596 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7597 	int crtc_disable_count = 0;
7598 	bool mode_set_reset_required = false;
7599
7600 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7601
7602 	dm_state = dm_atomic_get_new_state(state);
7603 	if (dm_state && dm_state->context) {
		[1] Assuming 'dm_state' is null
7604 		dc_state = dm_state->context;
7605 	} else {
7606 		/* No state changes, retain current state. */
7607 		dc_state_temp = dc_create_state(dm->dc);
7608 		ASSERT(dc_state_temp);
		[2] Assuming 'dc_state_temp' is non-null
		[3] Taking false branch
		[4] Loop condition is false. Exiting loop
7609 		dc_state = dc_state_temp;
7610 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7611 	}
7612
7613 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7614 				       new_crtc_state, i) {
		[5] Assuming 'i' is >= field 'num_crtc'
		[6] Loop condition is false. Execution continues on line 7627
7615 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7616
7617 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7618
7619 		if (old_crtc_state->active &&
7620 		    (!new_crtc_state->active ||
7621 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7622 			manage_dm_interrupts(adev, acrtc, false);
7623 			dc_stream_release(dm_old_crtc_state->stream);
7624 		}
7625 	}
7626
7627 	drm_atomic_helper_calc_timestamping_constants(state);
7628
7629 	/* update changed items */
7630 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		[7] Assuming 'i' is >= field 'num_crtc'
		[8] Loop condition is false. Execution continues on line 7701
7631 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7632
7633 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7634 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7635
7636 		DRM_DEBUG_DRIVER(
7637 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7638 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7639 			"connectors_changed:%d\n",
7640 			acrtc->crtc_id,
7641 			new_crtc_state->enable,
7642 			new_crtc_state->active,
7643 			new_crtc_state->planes_changed,
7644 			new_crtc_state->mode_changed,
7645 			new_crtc_state->active_changed,
7646 			new_crtc_state->connectors_changed);
7647
7648 		/* Copy all transient state flags into dc state */
7649 		if (dm_new_crtc_state->stream) {
7650 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7651 							    dm_new_crtc_state->stream);
7652 		}
7653
7654 		/* handles headless hotplug case, updating new_state and
7655 		 * aconnector as needed
7656 		 */
7657
7658 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7659
7660 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7661
7662 			if (!dm_new_crtc_state->stream) {
7663 				/*
7664 				 * this could happen because of issues with
7665 				 * userspace notifications delivery.
7666 				 * In this case userspace tries to set mode on
7667 				 * display which is disconnected in fact.
7668 				 * dc_sink is NULL in this case on aconnector.
7669 				 * We expect reset mode will come soon.
7670 				 *
7671 				 * This can also happen when unplug is done
7672 				 * during resume sequence ended
7673 				 *
7674 				 * In this case, we want to pretend we still
7675 				 * have a sink to keep the pipe running so that
7676 				 * hw state is consistent with the sw state
7677 				 */
7678 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7679 						 __func__, acrtc->base.base.id);
7680 				continue;
7681 			}
7682
7683 			if (dm_old_crtc_state->stream)
7684 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7685
7686 			pm_runtime_get_noresume(dev->dev);
7687
7688 			acrtc->enabled = true;
7689 			acrtc->hw_mode = new_crtc_state->mode;
7690 			crtc->hwmode = new_crtc_state->mode;
7691 			mode_set_reset_required = true;
7692 		} else if (modereset_required(new_crtc_state)) {
7693 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7694 			/* i.e. reset mode */
7695 			if (dm_old_crtc_state->stream)
7696 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7697 			mode_set_reset_required = true;
7698 		}
7699 	} /* for_each_crtc_in_state() */
7700
7701 	if (dc_state) {
		[8.1] 'dc_state' is non-null
		[9] Taking true branch
7702 		/* if there was a mode set or reset, disable eDP PSR */
7703 		if (mode_set_reset_required)
		[9.1] 'mode_set_reset_required' is false
		[10] Taking false branch
7704 			amdgpu_dm_psr_disable_all(dm);
7705
7706 		dm_enable_per_frame_crtc_master_sync(dc_state);
7707 		mutex_lock(&dm->dc_lock);
7708 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
		[11] Assuming the condition is false
		[12] Taking false branch
7709 		mutex_unlock(&dm->dc_lock);
7710 	}
7711
7712 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		[13] Loop condition is false. Execution continues on line 7760
7713 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7714
7715 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7716
7717 		if (dm_new_crtc_state->stream != NULL) {
7718 			const struct dc_stream_status *status =
7719 				dc_stream_get_status(dm_new_crtc_state->stream);
7720
7721 			if (!status)
7722 				status = dc_stream_get_status_from_state(dc_state,
7723 									 dm_new_crtc_state->stream);
7724 			if (!status)
7725 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7726 			else
7727 				acrtc->otg_inst = status->primary_otg_inst;
7728 		}
7729 	}
7730 #ifdef CONFIG_DRM_AMD_DC_HDCP
7731 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7732 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7733 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7734 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7735
7736 		new_crtc_state = NULL;
7737
7738 		if (acrtc)
7739 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7740
7741 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7742
7743 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7744 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7745 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7746 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7747 			continue;
7748 		}
7749
7750 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7751 			hdcp_update_display(
7752 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7753 				new_con_state->hdcp_content_type,
7754 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7755 											       : false);
7756 	}
7757 #endif
7758
7759 	/* Handle connector state changes */
7760 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		[14] Assuming 'i' is < field 'num_connector'
		[15] Loop condition is true. Entering loop body
		[16] Assuming field 'ptr' is non-null
		[17] Taking false branch
7761 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7762 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7763 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7764 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7765 		struct dc_stream_update stream_update;
7766 		struct dc_info_packet hdr_packet;
7767 		struct dc_stream_status *status = NULL;
7768 		bool abm_changed, hdr_changed, scaling_changed;
7769
7770 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7771 		memset(&stream_update, 0, sizeof(stream_update));
7772
7773 		if (acrtc) {
		[18] Assuming 'acrtc' is non-null
		[19] Taking true branch
7774 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7775 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7776 		}
7777
7778 		/* Skip any modesets/resets */
7779 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
		[19.1] 'acrtc' is non-null
		[20] Calling 'drm_atomic_crtc_needs_modeset'
		[24] Returning from 'drm_atomic_crtc_needs_modeset'
		[25] Assuming the condition is false
		[26] Taking false branch
7780 			continue;
7781
7782 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7783 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7784
7785 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7786 							     dm_old_con_state);
		[27] Calling 'is_scaling_state_different'
		[37] Returning from 'is_scaling_state_different'
7787
7788 		abm_changed = dm_new_crtc_state->abm_level !=
7789 			      dm_old_crtc_state->abm_level;
		[38] Assuming 'dm_new_crtc_state->abm_level' is equal to 'dm_old_crtc_state->abm_level'
7790
7791 		hdr_changed =
7792 			is_hdr_metadata_different(old_con_state, new_con_state);
7793
7794 		if (!scaling_changed && !abm_changed && !hdr_changed)
		[38.1] 'scaling_changed' is true
7795 			continue;
7796
7797 		stream_update.stream = dm_new_crtc_state->stream;
7798 		if (scaling_changed) {
		[38.2] 'scaling_changed' is true
		[39] Taking true branch
7799 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7800 					dm_new_con_state, dm_new_crtc_state->stream);
7801
7802 			stream_update.src = dm_new_crtc_state->stream->src;
7803 			stream_update.dst = dm_new_crtc_state->stream->dst;
7804 		}
7805
7806 		if (abm_changed) {
		[39.1] 'abm_changed' is false
		[40] Taking false branch
7807 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7808
7809 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7810 		}
7811
7812 		if (hdr_changed) {
		[40.1] 'hdr_changed' is false
		[41] Taking false branch
7813 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7814 			stream_update.hdr_static_metadata = &hdr_packet;
7815 		}
7816
7817 		status = dc_stream_get_status(dm_new_crtc_state->stream);
		[42] Value assigned to 'status'
7818 		WARN_ON(!status);
		[43] Assuming 'status' is null
		[44] Taking true branch
7819 		WARN_ON(!status->plane_count);
		[45] Access to field 'plane_count' results in a dereference of a null pointer (loaded from variable 'status')
7820
7821 		/*
7822 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7823 		 * Here we create an empty update on each plane.
7824 		 * To fix this, DC should permit updating only stream properties.
7825 		 */
7826 		for (j = 0; j < status->plane_count; j++)
7827 			dummy_updates[j].surface = status->plane_states[0];
7828
7829
7830 		mutex_lock(&dm->dc_lock);
7831 		dc_commit_updates_for_stream(dm->dc,
7832 					     dummy_updates,
7833 					     status->plane_count,
7834 					     dm_new_crtc_state->stream,
7835 					     &stream_update,
7836 					     dc_state);
7837 		mutex_unlock(&dm->dc_lock);
7838 	}
7839
7840 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7841 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7842 				      new_crtc_state, i) {
7843 		if (old_crtc_state->active && !new_crtc_state->active)
7844 			crtc_disable_count++;
7845
7846 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7847 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7848
7849 		/* For freesync config update on crtc state and params for irq */
7850 		update_stream_irq_parameters(dm, dm_new_crtc_state);
7851
7852 		/* Handle vrr on->off / off->on transitions */
7853 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7854 						dm_new_crtc_state);
7855 	}
7856
7857 	/**
7858 	 * Enable interrupts for CRTCs that are newly enabled or went through
7859 	 * a modeset. It was intentionally deferred until after the front end
7860 	 * state was modified to wait until the OTG was on and so the IRQ
7861 	 * handlers didn't access stale or invalid state.
7862 	 */
7863 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7864 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7865
7866 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7867
7868 		if (new_crtc_state->active &&
7869 		    (!old_crtc_state->active ||
7870 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7871 			dc_stream_retain(dm_new_crtc_state->stream);
7872 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
7873 			manage_dm_interrupts(adev, acrtc, true);
7874
7875 #ifdef CONFIG_DEBUG_FS
7876 			/**
7877 			 * Frontend may have changed so reapply the CRC capture
7878 			 * settings for the stream.
7879 			 */
7880 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7881
7882 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7883 				amdgpu_dm_crtc_configure_crc_source(
7884 					crtc, dm_new_crtc_state,
7885 					dm_new_crtc_state->crc_src);
7886 			}
7887 #endif
7888 		}
7889 	}
7890
7891 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7892 		if (new_crtc_state->async_flip)
7893 			wait_for_vblank = false;
7894
7895 	/* update planes when needed per crtc */
7896 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7897 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7898
7899 		if (dm_new_crtc_state->stream)
7900 			amdgpu_dm_commit_planes(state, dc_state, dev,
7901 						dm, crtc, wait_for_vblank);
7902 	}
7903
7904 	/* Update audio instances for each connector. */
7905 	amdgpu_dm_commit_audio(dev, state);
7906
7907 	/*
7908 	 * send vblank event on all events not handled in flip and
7909 	 * mark consumed event for drm_atomic_helper_commit_hw_done
7910 	 */
7911 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7912 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7913
7914 		if (new_crtc_state->event)
7915 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7916
7917 		new_crtc_state->event = NULL;
7918 	}
7919 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7920
7921 	/* Signal HW programming completion */
7922 	drm_atomic_helper_commit_hw_done(state);
7923
7924 	if (wait_for_vblank)
7925 		drm_atomic_helper_wait_for_flip_done(dev, state);
7926
7927 	drm_atomic_helper_cleanup_planes(dev, state);
7928
7929 	/*
7930 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7931 	 * so we can put the GPU into runtime suspend if we're not driving any
7932 	 * displays anymore
7933 	 */
7934 	for (i = 0; i < crtc_disable_count; i++)
7935 		pm_runtime_put_autosuspend(dev->dev);
7936 	pm_runtime_mark_last_busy(dev->dev);
7937
7938 	if (dc_state_temp)
7939 		dc_release_state(dc_state_temp);
7940 }
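
Editor's note: the defect this report flags lives in the connector loop
above — step [43] assumes dc_stream_get_status() returned NULL, yet line 7819
dereferences 'status' unconditionally. A minimal guard that would remove the
null dereference (one possible fix sketched by the editor, not necessarily
the upstream patch) is to bail out of the iteration when the status is
missing; WARN_ON() evaluates to its condition, so it can gate the continue:

	status = dc_stream_get_status(dm_new_crtc_state->stream);
	if (WARN_ON(!status))
		continue;	/* no status: skip the stream update */
	WARN_ON(!status->plane_count);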
7941
7942
7943 static int dm_force_atomic_commit(struct drm_connector *connector)
7944 {
7945 	int ret = 0;
7946 	struct drm_device *ddev = connector->dev;
7947 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7948 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7949 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7950 	struct drm_connector_state *conn_state;
7951 	struct drm_crtc_state *crtc_state;
7952 	struct drm_plane_state *plane_state;
7953
7954 	if (!state)
7955 		return -ENOMEM;
7956
7957 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7958
7959 	/* Construct an atomic state to restore previous display setting */
7960
7961 	/*
7962 	 * Attach connectors to drm_atomic_state
7963 	 */
7964 	conn_state = drm_atomic_get_connector_state(state, connector);
7965
7966 	ret = PTR_ERR_OR_ZERO(conn_state);
7967 	if (ret)
7968 		goto out;
7969
7970 	/* Attach crtc to drm_atomic_state */
7971 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7972
7973 	ret = PTR_ERR_OR_ZERO(crtc_state);
7974 	if (ret)
7975 		goto out;
7976
7977 	/* force a restore */
7978 	crtc_state->mode_changed = true;
7979
7980 	/* Attach plane to drm_atomic_state */
7981 	plane_state = drm_atomic_get_plane_state(state, plane);
7982
7983 	ret = PTR_ERR_OR_ZERO(plane_state);
7984 	if (ret)
7985 		goto out;
7986
7987 	/* Call commit internally with the state we just constructed */
7988 	ret = drm_atomic_commit(state);
7989
7990 out:
7991 	drm_atomic_state_put(state);
7992 	if (ret)
7993 		DRM_ERROR("Restoring old state failed with %i\n", ret);
7994
7995 	return ret;
7996 }
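
Editor's note: PTR_ERR_OR_ZERO(), used three times above, folds the usual
IS_ERR()/PTR_ERR() pair into a single step. For comparison, a sketch of the
equivalent open-coded form for the connector case:

	conn_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(conn_state)) {
		ret = PTR_ERR(conn_state);	/* negative errno */
		goto out;
	}
	ret = 0;				/* valid pointer */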
7997
7998 /*
7999  * This function handles all cases when set mode does not come upon hotplug.
8000  * This includes when a display is unplugged then plugged back into the
8001  * same port and when running without usermode desktop manager support
8002  */
8003 void dm_restore_drm_connector_state(struct drm_device *dev,
8004 				    struct drm_connector *connector)
8005 {
8006 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8007 	struct amdgpu_crtc *disconnected_acrtc;
8008 	struct dm_crtc_state *acrtc_state;
8009
8010 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8011 		return;
8012
8013 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8014 	if (!disconnected_acrtc)
8015 		return;
8016
8017 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8018 	if (!acrtc_state->stream)
8019 		return;
8020
8021 	/*
8022 	 * If the previous sink is not released and different from the current,
8023 	 * we deduce we are in a state where we cannot rely on a usermode call
8024 	 * to turn on the display, so we do it here
8025 	 */
8026 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8027 		dm_force_atomic_commit(&aconnector->base);
8028 }
8029
8030 /*
8031  * Grabs all modesetting locks to serialize against any blocking commits,
8032  * waits for completion of all non blocking commits.
8033  */
8034 static int do_aquire_global_lock(struct drm_device *dev,
8035 				 struct drm_atomic_state *state)
8036 {
8037 	struct drm_crtc *crtc;
8038 	struct drm_crtc_commit *commit;
8039 	long ret;
8040
8041 	/*
8042 	 * Adding all modeset locks to acquire_ctx ensures that when the
8043 	 * framework releases it, the extra locks we are taking here will
8044 	 * get released too.
8045 	 */
8046 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8047 	if (ret)
8048 		return ret;
8049
8050 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8051 		spin_lock(&crtc->commit_lock);
8052 		commit = list_first_entry_or_null(&crtc->commit_list,
8053 				struct drm_crtc_commit, commit_entry);
8054 		if (commit)
8055 			drm_crtc_commit_get(commit);
8056 		spin_unlock(&crtc->commit_lock);
8057
8058 		if (!commit)
8059 			continue;
8060
8061 		/*
8062 		 * Make sure all pending HW programming completed and
8063 		 * page flips done
8064 		 */
8065 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8066
8067 		if (ret > 0)
8068 			ret = wait_for_completion_interruptible_timeout(
8069 					&commit->flip_done, 10*HZ);
8070
8071 		if (ret == 0)
8072 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8073 				  "timed out\n", crtc->base.id, crtc->name);
8074
8075 		drm_crtc_commit_put(commit);
8076 	}
8077
8078 	return ret < 0 ? ret : 0;
8079 }
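
Editor's note: the loop above relies on the standard completion-wait
contract: a positive return is the remaining timeout on success, zero means
the 10-second timeout expired (only logged above), and a negative value means
the wait was interrupted by a signal, which the final 'ret < 0 ? ret : 0'
propagates. A sketch of a stricter variant that fails hard on timeout
(editor's illustration, not the driver's behavior):

	long ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
	if (ret < 0)
		return ret;		/* interrupted by a signal */
	if (ret == 0)
		return -ETIMEDOUT;	/* 10 s elapsed */
	/* ret > 0: completed, 'ret' jiffies of the timeout remained */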
8080
8081 static void get_freesync_config_for_crtc(
8082 	struct dm_crtc_state *new_crtc_state,
8083 	struct dm_connector_state *new_con_state)
8084 {
8085 	struct mod_freesync_config config = {0};
8086 	struct amdgpu_dm_connector *aconnector =
8087 			to_amdgpu_dm_connector(new_con_state->base.connector);
8088 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8089 	int vrefresh = drm_mode_vrefresh(mode);
8090
8091 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8092 					vrefresh >= aconnector->min_vfreq &&
8093 					vrefresh <= aconnector->max_vfreq;
8094
8095 	if (new_crtc_state->vrr_supported) {
8096 		new_crtc_state->stream->ignore_msa_timing_param = true;
8097 		config.state = new_crtc_state->base.vrr_enabled ?
8098 			VRR_STATE_ACTIVE_VARIABLE :
8099 			VRR_STATE_INACTIVE;
8100 		config.min_refresh_in_uhz =
8101 				aconnector->min_vfreq * 1000000;
8102 		config.max_refresh_in_uhz =
8103 				aconnector->max_vfreq * 1000000;
8104 		config.vsif_supported = true;
8105 		config.btr = true;
8106 	}
8107
8108 	new_crtc_state->freesync_config = config;
8109 }
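
Editor's note: a worked example of the range check and unit conversion above
(the 48-144 Hz panel is illustrative, not taken from this report): a sink
advertising min_vfreq = 48 and max_vfreq = 144 gives

	config.min_refresh_in_uhz = 48  * 1000000;	/*  48000000 uHz */
	config.max_refresh_in_uhz = 144 * 1000000;	/* 144000000 uHz */

and a 120 Hz mode passes 48 <= 120 <= 144, so vrr_supported is set.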
8110
8111 static void reset_freesync_config_for_crtc(
8112 	struct dm_crtc_state *new_crtc_state)
8113 {
8114 	new_crtc_state->vrr_supported = false;
8115
8116 	memset(&new_crtc_state->vrr_infopacket, 0,
8117 	       sizeof(new_crtc_state->vrr_infopacket));
8118 }
8119
8120static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8121 struct drm_atomic_state *state,
8122 struct drm_crtc *crtc,
8123 struct drm_crtc_state *old_crtc_state,
8124 struct drm_crtc_state *new_crtc_state,
8125 bool_Bool enable,
8126 bool_Bool *lock_and_validation_needed)
8127{
8128 struct dm_atomic_state *dm_state = NULL((void *)0);
8129 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8130 struct dc_stream_state *new_stream;
8131 int ret = 0;
8132
8133 /*
8134 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8135 * update changed items
8136 */
8137 struct amdgpu_crtc *acrtc = NULL((void *)0);
8138 struct amdgpu_dm_connector *aconnector = NULL((void *)0);
8139 struct drm_connector_state *drm_new_conn_state = NULL((void *)0), *drm_old_conn_state = NULL((void *)0);
8140 struct dm_connector_state *dm_new_conn_state = NULL((void *)0), *dm_old_conn_state = NULL((void *)0);
8141
8142 new_stream = NULL((void *)0);
8143
8144 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8145 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8146 acrtc = to_amdgpu_crtc(crtc);
8147 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8148
8149 /* TODO This hack should go away */
8150 if (aconnector && enable) {
8151 /* Make sure fake sink is created in plug-in scenario */
8152 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8153 &aconnector->base);
8154 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8155 &aconnector->base);
8156
8157 if (IS_ERR(drm_new_conn_state)) {
8158 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8159 goto fail;
8160 }
8161
8162 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8163 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8164
8165 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8166 goto skip_modeset;
8167
8168 new_stream = create_validate_stream_for_sink(aconnector,
8169 &new_crtc_state->mode,
8170 dm_new_conn_state,
8171 dm_old_crtc_state->stream);
8172
8173 /*
8174 * We can have no stream on ACTION_SET if a display
8175 * was disconnected during S3; in this case it is not an
8176 * error. The OS will be updated after detection and
8177 * will do the right thing on the next atomic commit.
8178 */
8179
8180 if (!new_stream) {
8181 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",__drm_dbg(DRM_UT_DRIVER, "%s: Failed to create new stream for crtc %d\n"
, __func__, acrtc->base.base.id)
8182 __func__, acrtc->base.base.id)__drm_dbg(DRM_UT_DRIVER, "%s: Failed to create new stream for crtc %d\n"
, __func__, acrtc->base.base.id)
;
8183 ret = -ENOMEM12;
8184 goto fail;
8185 }
8186
8187 /*
8188 * TODO: Check VSDB bits to decide whether this should
8189 * be enabled or not.
8190 */
8191 new_stream->triggered_crtc_reset.enabled =
8192 dm->force_timing_sync;
8193
8194 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8195
8196 ret = fill_hdr_info_packet(drm_new_conn_state,
8197 &new_stream->hdr_static_metadata);
8198 if (ret)
8199 goto fail;
8200
8201 /*
8202 * If we already removed the old stream from the context
8203 * (and set the new stream to NULL) then we can't reuse
8204 * the old stream even if the stream and scaling are unchanged.
8205 * We'll hit the BUG_ON and black screen.
8206 *
8207 * TODO: Refactor this function to allow this check to work
8208 * in all conditions.
8209 */
8210 if (dm_new_crtc_state->stream &&
8211 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8212 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8213 new_crtc_state->mode_changed = false;
8214 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8215 new_crtc_state->mode_changed);
8216 }
8217 }
8218
8219 /* mode_changed flag may get updated above, need to check again */
8220 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8221 goto skip_modeset;
8222
8223 DRM_DEBUG_DRIVER(
8224 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8225 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8226 "connectors_changed:%d\n",
8227 acrtc->crtc_id,
8228 new_crtc_state->enable,
8229 new_crtc_state->active,
8230 new_crtc_state->planes_changed,
8231 new_crtc_state->mode_changed,
8232 new_crtc_state->active_changed,
8233 new_crtc_state->connectors_changed);
8234
8235 /* Remove stream for any changed/disabled CRTC */
8236 if (!enable) {
8237
8238 if (!dm_old_crtc_state->stream)
8239 goto skip_modeset;
8240
8241 ret = dm_atomic_get_state(state, &dm_state);
8242 if (ret)
8243 goto fail;
8244
8245 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",__drm_dbg(DRM_UT_DRIVER, "Disabling DRM crtc: %d\n", crtc->
base.id)
8246 crtc->base.id)__drm_dbg(DRM_UT_DRIVER, "Disabling DRM crtc: %d\n", crtc->
base.id)
;
8247
8248 /* i.e. reset mode */
8249 if (dc_remove_stream_from_ctx(
8250 dm->dc,
8251 dm_state->context,
8252 dm_old_crtc_state->stream) != DC_OK) {
8253 ret = -EINVAL;
8254 goto fail;
8255 }
8256
8257 dc_stream_release(dm_old_crtc_state->stream);
8258 dm_new_crtc_state->stream = NULL;
8259
8260 reset_freesync_config_for_crtc(dm_new_crtc_state);
8261
8262 *lock_and_validation_needed = true;
8263
8264 } else { /* Add stream for any updated/enabled CRTC */
8265 /*
8266 * Quick fix to prevent a NULL pointer on new_stream when MST
8267 * connectors added in chained mode are not found in the
8268 * existing crtc_state.
8269 */
8270 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8271 goto skip_modeset;
8272
8273 if (modereset_required(new_crtc_state))
8274 goto skip_modeset;
8275
8276 if (modeset_required(new_crtc_state, new_stream,
8277 dm_old_crtc_state->stream)) {
8278
8279 WARN_ON(dm_new_crtc_state->stream);
8280
8281 ret = dm_atomic_get_state(state, &dm_state);
8282 if (ret)
8283 goto fail;
8284
8285 dm_new_crtc_state->stream = new_stream;
8286
8287 dc_stream_retain(new_stream);
8288
8289 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",__drm_dbg(DRM_UT_DRIVER, "Enabling DRM crtc: %d\n", crtc->
base.id)
8290 crtc->base.id)__drm_dbg(DRM_UT_DRIVER, "Enabling DRM crtc: %d\n", crtc->
base.id)
;
8291
8292 if (dc_add_stream_to_ctx(
8293 dm->dc,
8294 dm_state->context,
8295 dm_new_crtc_state->stream) != DC_OK) {
8296 ret = -EINVAL;
8297 goto fail;
8298 }
8299
8300 *lock_and_validation_needed = true;
8301 }
8302 }
8303
8304skip_modeset:
8305 /* Release extra reference */
8306 if (new_stream)
8307 dc_stream_release(new_stream);
8308
8309 /*
8310 * We want to do dc stream updates that do not require a
8311 * full modeset below.
8312 */
8313 if (!(enable && aconnector && new_crtc_state->active))
8314 return 0;
8315 /*
8316 * Given above conditions, the dc state cannot be NULL because:
8317 * 1. We're in the process of enabling CRTCs (just been added
8318 * to the dc context, or already is on the context)
8319 * 2. Has a valid connector attached, and
8320 * 3. Is currently active and enabled.
8321 * => The dc stream state currently exists.
8322 */
8323 BUG_ON(dm_new_crtc_state->stream == NULL);
8324
8325 /* Scaling or underscan settings */
8326 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
8327 drm_atomic_crtc_needs_modeset(new_crtc_state))
8328 update_stream_scaling_settings(
8329 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8330
8331 /* ABM settings */
8332 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8333
8334 /*
8335 * Color management settings. We also update color properties
8336 * when a modeset is needed, to ensure it gets reprogrammed.
8337 */
8338 if (dm_new_crtc_state->base.color_mgmt_changed ||
8339 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8340 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8341 if (ret)
8342 goto fail;
8343 }
8344
8345 /* Update Freesync settings. */
8346 get_freesync_config_for_crtc(dm_new_crtc_state,
8347 dm_new_conn_state);
8348
8349 return ret;
8350
8351fail:
8352 if (new_stream)
8353 dc_stream_release(new_stream);
8354 return ret;
8355}
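For orientation: amdgpu_dm_atomic_check() below drives this function in two passes over every CRTC in the state, first with enable == false to tear streams down, then with enable == true to build them. A condensed sketch of that calling pattern (error handling elided; this mirrors the loops further down in this file):

/* Pass 1: remove streams for disabled/changed CRTCs. */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
	ret = dm_update_crtc_state(&adev->dm, state, crtc, old_crtc_state,
				   new_crtc_state, false,
				   &lock_and_validation_needed);

/* Pass 2: add streams for enabled/updated CRTCs. */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
	ret = dm_update_crtc_state(&adev->dm, state, crtc, old_crtc_state,
				   new_crtc_state, true,
				   &lock_and_validation_needed);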
8356
8357static bool should_reset_plane(struct drm_atomic_state *state,
8358 struct drm_plane *plane,
8359 struct drm_plane_state *old_plane_state,
8360 struct drm_plane_state *new_plane_state)
8361{
8362 struct drm_plane *other;
8363 struct drm_plane_state *old_other_state, *new_other_state;
8364 struct drm_crtc_state *new_crtc_state;
8365 int i;
8366
8367 /*
8368 * TODO: Remove this hack once the checks below are sufficient
8369 * enough to determine when we need to reset all the planes on
8370 * the stream.
8371 */
8372 if (state->allow_modeset)
8373 return true;
8374
8375 /* Exit early if we know that we're adding or removing the plane. */
8376 if (old_plane_state->crtc != new_plane_state->crtc)
8377 return true;
8378
8379 /* old crtc == new_crtc == NULL, plane not in context. */
8380 if (!new_plane_state->crtc)
8381 return false;
8382
8383 new_crtc_state =
8384 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8385
8386 if (!new_crtc_state)
8387 return true;
8388
8389 /* CRTC Degamma changes currently require us to recreate planes. */
8390 if (new_crtc_state->color_mgmt_changed)
8391 return true;
8392
8393 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8394 return true;
8395
8396 /*
8397 * If there are any new primary or overlay planes being added or
8398 * removed then the z-order can potentially change. To ensure
8399 * correct z-order and pipe acquisition the current DC architecture
8400 * requires us to remove and recreate all existing planes.
8401 *
8402 * TODO: Come up with a more elegant solution for this.
8403 */
8404 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8405 struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8406
8407 if (other->type == DRM_PLANE_TYPE_CURSOR)
8408 continue;
8409
8410 if (old_other_state->crtc != new_plane_state->crtc &&
8411 new_other_state->crtc != new_plane_state->crtc)
8412 continue;
8413
8414 if (old_other_state->crtc != new_other_state->crtc)
8415 return true;
8416
8417 /* Src/dst size and scaling updates. */
8418 if (old_other_state->src_w != new_other_state->src_w ||
8419 old_other_state->src_h != new_other_state->src_h ||
8420 old_other_state->crtc_w != new_other_state->crtc_w ||
8421 old_other_state->crtc_h != new_other_state->crtc_h)
8422 return true;
8423
8424 /* Rotation / mirroring updates. */
8425 if (old_other_state->rotation != new_other_state->rotation)
8426 return true;
8427
8428 /* Blending updates. */
8429 if (old_other_state->pixel_blend_mode !=
8430 new_other_state->pixel_blend_mode)
8431 return true;
8432
8433 /* Alpha updates. */
8434 if (old_other_state->alpha != new_other_state->alpha)
8435 return true;
8436
8437 /* Colorspace changes. */
8438 if (old_other_state->color_range != new_other_state->color_range ||
8439 old_other_state->color_encoding != new_other_state->color_encoding)
8440 return true;
8441
8442 /* Framebuffer checks fall at the end. */
8443 if (!old_other_state->fb || !new_other_state->fb)
8444 continue;
8445
8446 /* Pixel format changes can require bandwidth updates. */
8447 if (old_other_state->fb->format != new_other_state->fb->format)
8448 return true;
8449
8450 old_dm_plane_state = to_dm_plane_state(old_other_state);
8451 new_dm_plane_state = to_dm_plane_state(new_other_state);
8452
8453 /* Tiling and DCC changes also require bandwidth updates. */
8454 if (old_dm_plane_state->tiling_flags !=
8455 new_dm_plane_state->tiling_flags)
8456 return true;
8457 }
8458
8459 return false;
8460}
8461
8462static int dm_update_plane_state(struct dc *dc,
8463 struct drm_atomic_state *state,
8464 struct drm_plane *plane,
8465 struct drm_plane_state *old_plane_state,
8466 struct drm_plane_state *new_plane_state,
8467 bool enable,
8468 bool *lock_and_validation_needed)
8469{
8470
8471 struct dm_atomic_state *dm_state = NULL;
8472 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8473 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8474 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8475 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8476 struct amdgpu_crtc *new_acrtc;
8477 bool needs_reset;
8478 int ret = 0;
8479
8480
8481 new_plane_crtc = new_plane_state->crtc;
8482 old_plane_crtc = old_plane_state->crtc;
8483 dm_new_plane_state = to_dm_plane_state(new_plane_state);
8484 dm_old_plane_state = to_dm_plane_state(old_plane_state);
8485
8486 /* TODO Implement better atomic check for cursor plane */
8487 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8488 if (!enable || !new_plane_crtc ||
8489 drm_atomic_plane_disabling(plane->state, new_plane_state))
8490 return 0;
8491
8492 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8493
8494 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8495 (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8496 DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",__drm_dbg(DRM_UT_ATOMIC, "Bad cursor size %d x %d\n", new_plane_state
->crtc_w, new_plane_state->crtc_h)
8497 new_plane_state->crtc_w, new_plane_state->crtc_h)__drm_dbg(DRM_UT_ATOMIC, "Bad cursor size %d x %d\n", new_plane_state
->crtc_w, new_plane_state->crtc_h)
;
8498 return -EINVAL22;
8499 }
8500
8501 return 0;
8502 }
8503
8504 needs_reset = should_reset_plane(state, plane, old_plane_state,
8505 new_plane_state);
8506
8507 /* Remove any changed/removed planes */
8508 if (!enable) {
8509 if (!needs_reset)
8510 return 0;
8511
8512 if (!old_plane_crtc)
8513 return 0;
8514
8515 old_crtc_state = drm_atomic_get_old_crtc_state(
8516 state, old_plane_crtc);
8517 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8518
8519 if (!dm_old_crtc_state->stream)
8520 return 0;
8521
8522 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",__drm_dbg(DRM_UT_ATOMIC, "Disabling DRM plane: %d on DRM crtc %d\n"
, plane->base.id, old_plane_crtc->base.id)
8523 plane->base.id, old_plane_crtc->base.id)__drm_dbg(DRM_UT_ATOMIC, "Disabling DRM plane: %d on DRM crtc %d\n"
, plane->base.id, old_plane_crtc->base.id)
;
8524
8525 ret = dm_atomic_get_state(state, &dm_state);
8526 if (ret)
8527 return ret;
8528
8529 if (!dc_remove_plane_from_context(
8530 dc,
8531 dm_old_crtc_state->stream,
8532 dm_old_plane_state->dc_state,
8533 dm_state->context)) {
8534
8535 return -EINVAL;
8536 }
8537
8538
8539 dc_plane_state_release(dm_old_plane_state->dc_state);
8540 dm_new_plane_state->dc_state = NULL;
8541
8542 *lock_and_validation_needed = true;
8543
8544 } else { /* Add new planes */
8545 struct dc_plane_state *dc_new_plane_state;
8546
8547 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8548 return 0;
8549
8550 if (!new_plane_crtc)
8551 return 0;
8552
8553 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8554 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8555
8556 if (!dm_new_crtc_state->stream)
8557 return 0;
8558
8559 if (!needs_reset)
8560 return 0;
8561
8562 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8563 if (ret)
8564 return ret;
8565
8566 WARN_ON(dm_new_plane_state->dc_state);
8567
8568 dc_new_plane_state = dc_create_plane_state(dc);
8569 if (!dc_new_plane_state)
8570 return -ENOMEM;
8571
8572 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",__drm_dbg(DRM_UT_DRIVER, "Enabling DRM plane: %d on DRM crtc %d\n"
, plane->base.id, new_plane_crtc->base.id)
8573 plane->base.id, new_plane_crtc->base.id)__drm_dbg(DRM_UT_DRIVER, "Enabling DRM plane: %d on DRM crtc %d\n"
, plane->base.id, new_plane_crtc->base.id)
;
8574
8575 ret = fill_dc_plane_attributes(
8576 drm_to_adev(new_plane_crtc->dev),
8577 dc_new_plane_state,
8578 new_plane_state,
8579 new_crtc_state);
8580 if (ret) {
8581 dc_plane_state_release(dc_new_plane_state);
8582 return ret;
8583 }
8584
8585 ret = dm_atomic_get_state(state, &dm_state);
8586 if (ret) {
8587 dc_plane_state_release(dc_new_plane_state);
8588 return ret;
8589 }
8590
8591 /*
8592 * Any atomic check errors that occur after this will
8593 * not need a release. The plane state will be attached
8594 * to the stream, and therefore part of the atomic
8595 * state. It'll be released when the atomic state is
8596 * cleaned.
8597 */
8598 if (!dc_add_plane_to_context(
8599 dc,
8600 dm_new_crtc_state->stream,
8601 dc_new_plane_state,
8602 dm_state->context)) {
8603
8604 dc_plane_state_release(dc_new_plane_state);
8605 return -EINVAL;
8606 }
8607
8608 dm_new_plane_state->dc_state = dc_new_plane_state;
8609
8610 /* Tell DC to do a full surface update every time there
8611 * is a plane change. Inefficient, but works for now.
8612 */
8613 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8614
8615 *lock_and_validation_needed = true;
8616 }
8617
8618
8619 return ret;
8620}
8621
8622#if defined(CONFIG_DRM_AMD_DC_DCN)
8623static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8624{
8625 struct drm_connector *connector;
8626 struct drm_connector_state *conn_state;
8627 struct amdgpu_dm_connector *aconnector = NULL;
8628 int i;
8629 for_each_new_connector_in_state(state, connector, conn_state, i) {
8630 if (conn_state->crtc != crtc)
8631 continue;
8632
8633 aconnector = to_amdgpu_dm_connector(connector);
8634 if (!aconnector->port || !aconnector->mst_port)
8635 aconnector = NULL;
8636 else
8637 break;
8638 }
8639
8640 if (!aconnector)
8641 return 0;
8642
8643 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8644}
8645#endif
8646
8647static int validate_overlay(struct drm_atomic_state *state)
8648{
8649 int i;
8650 struct drm_plane *plane;
8651 struct drm_plane_state *old_plane_state, *new_plane_state;
8652 struct drm_plane_state *primary_state, *overlay_state = NULL;
8653
8654 /* Check if primary plane is contained inside overlay */
8655 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8656 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
8657 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8658 return 0;
8659
8660 overlay_state = new_plane_state;
8661 continue;
8662 }
8663 }
8664
8665 /* check if we're making changes to the overlay plane */
8666 if (!overlay_state)
8667 return 0;
8668
8669 /* check if overlay plane is enabled */
8670 if (!overlay_state->crtc)
8671 return 0;
8672
8673 /* find the primary plane for the CRTC that the overlay is enabled on */
8674 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
8675 if (IS_ERR(primary_state))
8676 return PTR_ERR(primary_state);
8677
8678 /* check if primary plane is enabled */
8679 if (!primary_state->crtc)
8680 return 0;
8681
8682 /* Perform the bounds check to ensure the overlay plane covers the primary */
8683 if (primary_state->crtc_x < overlay_state->crtc_x ||
8684 primary_state->crtc_y < overlay_state->crtc_y ||
8685 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
8686 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
8687 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n")__drm_dbg(DRM_UT_ATOMIC, "Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n"
)
;
8688 return -EINVAL22;
8689 }
8690
8691 return 0;
8692}
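The bounds check above is plain rectangle containment: the primary plane must lie entirely within the overlay. A self-contained sketch of the same predicate (illustrative only, not driver code):

#include <stdbool.h>

struct rect { int x, y, w, h; };

/* True if "inner" lies entirely within "outer". */
static bool rect_contains(const struct rect *outer, const struct rect *inner)
{
	return inner->x >= outer->x &&
	       inner->y >= outer->y &&
	       inner->x + inner->w <= outer->x + outer->w &&
	       inner->y + inner->h <= outer->y + outer->h;
}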
8693
8694/**
8695 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8696 * @dev: The DRM device
8697 * @state: The atomic state to commit
8698 *
8699 * Validate that the given atomic state is programmable by DC into hardware.
8700 * This involves constructing a &struct dc_state reflecting the new hardware
8701 * state we wish to commit, then querying DC to see if it is programmable. It's
8702 * important not to modify the existing DC state. Otherwise, atomic_check
8703 * may unexpectedly commit hardware changes.
8704 *
8705 * When validating the DC state, it's important that the right locks are
8706 * acquired. For full updates case which removes/adds/updates streams on one
8707 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8708 * that any such full update commit will wait for completion of any outstanding
8709 * flip using DRMs synchronization events.
8710 *
8711 * Note that DM adds the affected connectors for all CRTCs in state, when that
8712 * might not seem necessary. This is because DC stream creation requires the
8713 * DC sink, which is tied to the DRM connector state. Cleaning this up should
8714 * be possible but non-trivial - a possible TODO item.
8715 *
8716 * Return: 0 on success, or a negative error code if validation failed.
8717 */
8718static int amdgpu_dm_atomic_check(struct drm_device *dev,
8719 struct drm_atomic_state *state)
8720{
8721 struct amdgpu_device *adev = drm_to_adev(dev);
8722 struct dm_atomic_state *dm_state = NULL;
8723 struct dc *dc = adev->dm.dc;
8724 struct drm_connector *connector;
8725 struct drm_connector_state *old_con_state, *new_con_state;
8726 struct drm_crtc *crtc;
8727 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8728 struct drm_plane *plane;
8729 struct drm_plane_state *old_plane_state, *new_plane_state;
8730 enum dc_status status;
8731 int ret, i;
8732 bool lock_and_validation_needed = false;
8733
8734 amdgpu_check_debugfs_connector_property_change(adev, state);
8735
8736 ret = drm_atomic_helper_check_modeset(dev, state);
8737 if (ret)
8738 goto fail;
8739
8740 /* Check connector changes */
8741 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8742 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8743 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8744
8745 /* Skip connectors that are disabled or part of modeset already. */
8746 if (!old_con_state->crtc && !new_con_state->crtc)
8747 continue;
8748
8749 if (!new_con_state->crtc)
8750 continue;
8751
8752 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8753 if (IS_ERR(new_crtc_state)) {
8754 ret = PTR_ERR(new_crtc_state);
8755 goto fail;
8756 }
8757
8758 if (dm_old_con_state->abm_level !=
8759 dm_new_con_state->abm_level)
8760 new_crtc_state->connectors_changed = true;
8761 }
8762
8763#if defined(CONFIG_DRM_AMD_DC_DCN)
8764 if (dc_resource_is_dsc_encoding_supported(dc)) {
8765 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8766 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8767 ret = add_affected_mst_dsc_crtcs(state, crtc);
8768 if (ret)
8769 goto fail;
8770 }
8771 }
8772 }
8773#endif
8774 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8775 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8776 !new_crtc_state->color_mgmt_changed &&
8777 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8778 continue;
8779
8780 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
8781 if (ret)
8782 goto fail;
8783
8784 if (!new_crtc_state->enable)
8785 continue;
8786
8787 ret = drm_atomic_add_affected_connectors(state, crtc);
8788 if (ret)
8789 return ret;
8790
8791 ret = drm_atomic_add_affected_planes(state, crtc);
8792 if (ret)
8793 goto fail;
8794 }
8795
8796 /*
8797 * Add all primary and overlay planes on the CRTC to the state
8798 * whenever a plane is enabled to maintain correct z-ordering
8799 * and to enable fast surface updates.
8800 */
8801 drm_for_each_crtc(crtc, dev) {
8802 bool modified = false;
8803
8804 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8805 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8806 continue;
8807
8808 if (new_plane_state->crtc == crtc ||
8809 old_plane_state->crtc == crtc) {
8810 modified = true;
8811 break;
8812 }
8813 }
8814
8815 if (!modified)
8816 continue;
8817
8818 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8819 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8820 continue;
8821
8822 new_plane_state =
8823 drm_atomic_get_plane_state(state, plane);
8824
8825 if (IS_ERR(new_plane_state)) {
8826 ret = PTR_ERR(new_plane_state);
8827 goto fail;
8828 }
8829 }
8830 }
8831
8832 /* Prepass for updating tiling flags on new planes. */
8833 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8834 struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8835 struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8836
8837 ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8838 &new_dm_plane_state->tmz_surface);
8839 if (ret)
8840 goto fail;
8841 }
8842
8843 /* Remove existing planes if they are modified */
8844 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8845 ret = dm_update_plane_state(dc, state, plane,
8846 old_plane_state,
8847 new_plane_state,
8848 false,
8849 &lock_and_validation_needed);
8850 if (ret)
8851 goto fail;
8852 }
8853
8854 /* Disable all crtcs which require disable */
8855 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8856 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8857 old_crtc_state,
8858 new_crtc_state,
8859 false,
8860 &lock_and_validation_needed);
8861 if (ret)
8862 goto fail;
8863 }
8864
8865 /* Enable all crtcs which require enable */
8866 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8867 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8868 old_crtc_state,
8869 new_crtc_state,
8870 true,
8871 &lock_and_validation_needed);
8872 if (ret)
8873 goto fail;
8874 }
8875
8876 ret = validate_overlay(state);
8877 if (ret)
8878 goto fail;
8879
8880 /* Add new/modified planes */
8881 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8882 ret = dm_update_plane_state(dc, state, plane,
8883 old_plane_state,
8884 new_plane_state,
8885 true,
8886 &lock_and_validation_needed);
8887 if (ret)
8888 goto fail;
8889 }
8890
8891 /* Run this here since we want to validate the streams we created */
8892 ret = drm_atomic_helper_check_planes(dev, state);
8893 if (ret)
8894 goto fail;
8895
8896 if (state->legacy_cursor_update) {
8897 /*
8898 * This is a fast cursor update coming from the plane update
8899 * helper, check if it can be done asynchronously for better
8900 * performance.
8901 */
8902 state->async_update =
8903 !drm_atomic_helper_async_check(dev, state);
8904
8905 /*
8906 * Skip the remaining global validation if this is an async
8907 * update. Cursor updates can be done without affecting
8908 * state or bandwidth calcs and this avoids the performance
8909 * penalty of locking the private state object and
8910 * allocating a new dc_state.
8911 */
8912 if (state->async_update)
8913 return 0;
8914 }
8915
8916 /* Check scaling and underscan changes */
8917 /* TODO Removed scaling changes validation due to inability to commit
8918 * new stream into context w/o causing a full reset. Need to
8919 * decide how to handle.
8920 */
8921 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8922 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8923 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8924 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8925
8926 /* Skip any modesets/resets */
8927 if (!acrtc || drm_atomic_crtc_needs_modeset(
8928 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8929 continue;
8930
8931 /* Skip anything that is not a scaling or underscan change */
8932 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8933 continue;
8934
8935 lock_and_validation_needed = true;
8936 }
8937
8938 /**
8939 * Streams and planes are reset when there are changes that affect
8940 * bandwidth. Anything that affects bandwidth needs to go through
8941 * DC global validation to ensure that the configuration can be applied
8942 * to hardware.
8943 *
8944 * We have to currently stall out here in atomic_check for outstanding
8945 * commits to finish in this case because our IRQ handlers reference
8946 * DRM state directly - we can end up disabling interrupts too early
8947 * if we don't.
8948 *
8949 * TODO: Remove this stall and drop DM state private objects.
8950 */
8951 if (lock_and_validation_needed) {
8952 ret = dm_atomic_get_state(state, &dm_state);
8953 if (ret)
8954 goto fail;
8955
8956 ret = do_aquire_global_lock(dev, state);
8957 if (ret)
8958 goto fail;
8959
8960#if defined(CONFIG_DRM_AMD_DC_DCN)
8961 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8962 goto fail;
8963
8964 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8965 if (ret)
8966 goto fail;
8967#endif
8968
8969 /*
8970 * Perform validation of MST topology in the state:
8971 * We need to perform MST atomic check before calling
8972 * dc_validate_global_state(), or we risk getting stuck in
8973 * an infinite loop and eventually hanging.
8974 */
8975 ret = drm_dp_mst_atomic_check(state);
8976 if (ret)
8977 goto fail;
8978 status = dc_validate_global_state(dc, dm_state->context, false);
8979 if (status != DC_OK) {
8980 drm_dbg_atomic(dev,
8981 "DC global validation failure: %s (%d)",
8982 dc_status_to_str(status), status);
8983 ret = -EINVAL;
8984 goto fail;
8985 }
8986 } else {
8987 /*
8988 * The commit is a fast update. Fast updates shouldn't change
8989 * the DC context, affect global validation, and can have their
8990 * commit work done in parallel with other commits not touching
8991 * the same resource. If we have a new DC context as part of
8992 * the DM atomic state from validation we need to free it and
8993 * retain the existing one instead.
8994 *
8995 * Furthermore, since the DM atomic state only contains the DC
8996 * context and can safely be annulled, we can free the state
8997 * and clear the associated private object now to free
8998 * some memory and avoid a possible use-after-free later.
8999 */
9000
9001 for (i = 0; i < state->num_private_objs; i++) {
9002 struct drm_private_obj *obj = state->private_objs[i].ptr;
9003
9004 if (obj->funcs == adev->dm.atomic_obj.funcs) {
9005 int j = state->num_private_objs-1;
9006
9007 dm_atomic_destroy_state(obj,
9008 state->private_objs[i].state);
9009
9010 /* If i is not at the end of the array then the
9011 * last element needs to be moved to where i was
9012 * before the array can safely be truncated.
9013 */
9014 if (i != j)
9015 state->private_objs[i] =
9016 state->private_objs[j];
9017
9018 state->private_objs[j].ptr = NULL;
9019 state->private_objs[j].state = NULL;
9020 state->private_objs[j].old_state = NULL;
9021 state->private_objs[j].new_state = NULL;
9022
9023 state->num_private_objs = j;
9024 break;
9025 }
9026 }
9027 }
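/*
 * Aside (not part of the original source): the removal above is the classic
 * swap-with-last trick for unordered arrays: O(1) removal at the cost of
 * reordering. In generic form, with a hypothetical element type "struct slot"
 * standing in for the anonymous struct used by state->private_objs:
 *
 *	static void swap_remove(struct slot *arr, int *n, int i)
 *	{
 *		int j = *n - 1;
 *
 *		if (i != j)
 *			arr[i] = arr[j];	// move the tail into the hole
 *		memset(&arr[j], 0, sizeof(arr[j]));
 *		*n = j;
 *	}
 */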
9028
9029 /* Store the overall update type for use later in atomic check. */
9030 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9031 struct dm_crtc_state *dm_new_crtc_state =
9032 to_dm_crtc_state(new_crtc_state);
9033
9034 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9035 UPDATE_TYPE_FULL :
9036 UPDATE_TYPE_FAST;
9037 }
9038
9039 /* Must be success */
9040 WARN_ON(ret);
9041 return ret;
9042
9043fail:
9044 if (ret == -EDEADLK)
9045 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9046 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9047 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9048 else
9049 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9050
9051 return ret;
9052}
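For context, atomic_check callbacks like the one above are installed through &struct drm_mode_config_funcs. A hedged sketch of the wiring, trimmed down for illustration (the actual initializer in this driver carries additional hooks such as .fb_create, so treat this as an assumption about shape rather than the exact table):

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};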
9053
9054static bool is_dp_capable_without_timing_msa(struct dc *dc,
9055 struct amdgpu_dm_connector *amdgpu_dm_connector)
9056{
9057 uint8_t dpcd_data;
9058 bool capable = false;
9059
9060 if (amdgpu_dm_connector->dc_link &&
9061 dm_helpers_dp_read_dpcd(
9062 NULL,
9063 amdgpu_dm_connector->dc_link,
9064 DP_DOWN_STREAM_PORT_COUNT,
9065 &dpcd_data,
9066 sizeof(dpcd_data))) {
9067 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
9068 }
9069
9070 return capable;
9071}
9072void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9073 struct edid *edid)
9074{
9075 int i;
9076 bool edid_check_required;
9077 struct detailed_timing *timing;
9078 struct detailed_non_pixel *data;
9079 struct detailed_data_monitor_range *range;
9080 struct amdgpu_dm_connector *amdgpu_dm_connector =
9081 to_amdgpu_dm_connector(connector);
9082 struct dm_connector_state *dm_con_state = NULL;
9083
9084 struct drm_device *dev = connector->dev;
9085 struct amdgpu_device *adev = drm_to_adev(dev);
9086 bool freesync_capable = false;
9087
9088 if (!connector->state) {
9089 DRM_ERROR("%s - Connector has no state", __func__)__drm_err("%s - Connector has no state", __func__);
9090 goto update;
9091 }
9092
9093 if (!edid) {
9094 dm_con_state = to_dm_connector_state(connector->state);
9095
9096 amdgpu_dm_connector->min_vfreq = 0;
9097 amdgpu_dm_connector->max_vfreq = 0;
9098 amdgpu_dm_connector->pixel_clock_mhz = 0;
9099
9100 goto update;
9101 }
9102
9103 dm_con_state = to_dm_connector_state(connector->state);
9104
9105 edid_check_required = false;
9106 if (!amdgpu_dm_connector->dc_sink) {
9107 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n")__drm_err("dc_sink NULL, could not add free_sync module.\n");
9108 goto update;
9109 }
9110 if (!adev->dm.freesync_module)
9111 goto update;
9112 /*
9113 * If edid is non-NULL, restrict freesync only to DP and eDP.
9114 */
9115 if (edid) {
9116 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9117 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9118 edid_check_required = is_dp_capable_without_timing_msa(
9119 adev->dm.dc,
9120 amdgpu_dm_connector);
9121 }
9122 }
9123 if (edid_check_required == true && (edid->version > 1 ||
9124 (edid->version == 1 && edid->revision > 1))) {
9125 for (i = 0; i < 4; i++) {
9126
9127 timing = &edid->detailed_timings[i];
9128 data = &timing->data.other_data;
9129 range = &data->data.range;
9130 /*
9131 * Check if monitor has continuous frequency mode
9132 */
9133 if (data->type != EDID_DETAIL_MONITOR_RANGE)
9134 continue;
9135 /*
9136 * Check for flag range limits only. If flag == 1 then
9137 * no additional timing information provided.
9138 * Default GTF, GTF Secondary curve and CVT are not
9139 * supported
9140 */
9141 if (range->flags != 1)
9142 continue;
9143
9144 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9145 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9146 amdgpu_dm_connector->pixel_clock_mhz =
9147 range->pixel_clock_mhz * 10;
9148 break;
9149 }
9150
9151 if (amdgpu_dm_connector->max_vfreq -
9152 amdgpu_dm_connector->min_vfreq > 10) {
9153
9154 freesync_capable = true;
9155 }
9156 }
9157
9158update:
9159 if (dm_con_state)
9160 dm_con_state->freesync_capable = freesync_capable;
9161
9162 if (connector->vrr_capable_property)
9163 drm_connector_set_vrr_capable_property(connector,
9164 freesync_capable);
9165}
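The final capability decision above reduces to "the EDID range descriptor spans more than 10 Hz". Worked example: a 40-75 Hz panel gives 75 - 40 = 35 > 10, so it is reported as FreeSync capable, while a fixed 60-60 Hz range is not. As a standalone predicate (illustrative only; the driver computes this inline):

#include <stdbool.h>

static bool freesync_range_usable(int min_vfreq, int max_vfreq)
{
	/* Mirrors the max - min > 10 test in amdgpu_dm_update_freesync_caps(). */
	return (max_vfreq - min_vfreq) > 10;
}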
9166
9167static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9168{
9169 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9170
9171 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9172 return;
9173 if (link->type == dc_connection_none)
9174 return;
9175 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9176 dpcd_data, sizeof(dpcd_data))) {
9177 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9178
9179 if (dpcd_data[0] == 0) {
9180 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9181 link->psr_settings.psr_feature_enabled = false;
9182 } else {
9183 link->psr_settings.psr_version = DC_PSR_VERSION_1;
9184 link->psr_settings.psr_feature_enabled = true;
9185 }
9186
9187 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled)printk("\0016" "[" "drm" "] " "PSR support:%d\n", link->psr_settings
.psr_feature_enabled)
;
9188 }
9189}
9190
9191/*
9192 * amdgpu_dm_link_setup_psr() - configure psr link
9193 * @stream: stream state
9194 *
9195 * Return: true if success
9196 */
9197static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9198{
9199 struct dc_link *link = NULL;
9200 struct psr_config psr_config = {0};
9201 struct psr_context psr_context = {0};
9202 bool ret = false;
9203
9204 if (stream == NULL)
9205 return false;
9206
9207 link = stream->link;
9208
9209 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9210
9211 if (psr_config.psr_version > 0) {
9212 psr_config.psr_exit_link_training_required = 0x1;
9213 psr_config.psr_frame_capture_indication_req = 0;
9214 psr_config.psr_rfb_setup_time = 0x37;
9215 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9216 psr_config.allow_smu_optimizations = 0x0;
9217
9218 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9219
9220 }
9221 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled)__drm_dbg(DRM_UT_DRIVER, "PSR link: %d\n", link->psr_settings
.psr_feature_enabled)
;
9222
9223 return ret;
9224}
9225
9226/*
9227 * amdgpu_dm_psr_enable() - enable psr f/w
9228 * @stream: stream state
9229 *
9230 * Return: true if success
9231 */
9232bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9233{
9234 struct dc_link *link = stream->link;
9235 unsigned int vsync_rate_hz = 0;
9236 struct dc_static_screen_params params = {0};
9237 /* Calculate number of static frames before generating interrupt to
9238 * enter PSR.
9239 */
9240 // Fail-safe default of 2 static frames
9241 unsigned int num_frames_static = 2;
9242
9243 DRM_DEBUG_DRIVER("Enabling psr...\n")__drm_dbg(DRM_UT_DRIVER, "Enabling psr...\n");
9244
9245 vsync_rate_hz = div64_u64(div64_u64((
9246 stream->timing.pix_clk_100hz * 100),
9247 stream->timing.v_total),
9248 stream->timing.h_total);
9249
9250 /* Round up:
9251 * calculate the number of frames such that at least 30 ms of time
9252 * has passed.
9253 */
9254 if (vsync_rate_hz != 0) {
9255 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9256 num_frames_static = (30000 / frame_time_microsec) + 1;
9257 }
9258
9259 params.triggers.cursor_update = true;
9260 params.triggers.overlay_update = true;
9261 params.triggers.surface_update = true;
9262 params.num_frames = num_frames_static;
9263
9264 dc_stream_set_static_screen_params(link->ctx->dc,
9265 &stream, 1,
9266 &params);
9267
9268 return dc_link_set_psr_allow_active(link, true, false);
9269}
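Worked example of the static-frame math above: at 60 Hz, frame_time_microsec = 1000000 / 60 = 16666, so num_frames_static = 30000 / 16666 + 1 = 2, i.e. roughly 33 ms of static screen before PSR entry is requested. As a standalone sketch (illustrative; the driver computes this inline):

static unsigned int psr_static_frames(unsigned int vsync_rate_hz)
{
	/* Caller guarantees vsync_rate_hz != 0. */
	unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

	/* Round up so that at least 30 ms of static time has passed. */
	return (30000 / frame_time_microsec) + 1;
}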
9270
9271/*
9272 * amdgpu_dm_psr_disable() - disable psr f/w
9273 * @stream: stream state
9274 *
9275 * Return: true if success
9276 */
9277static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9278{
9279
9280 DRM_DEBUG_DRIVER("Disabling psr...\n")__drm_dbg(DRM_UT_DRIVER, "Disabling psr...\n");
9281
9282 return dc_link_set_psr_allow_active(stream->link, false, true);
9283}
9284
9285/*
9286 * amdgpu_dm_psr_disable_all() - disable psr f/w
9287 * if psr is enabled on any stream
9288 *
9289 * Return: true if success
9290 */
9291static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9292{
9293 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n")__drm_dbg(DRM_UT_DRIVER, "Disabling psr if psr is enabled on any stream\n"
)
;
9294 return dc_set_psr_allow_active(dm->dc, false0);
9295}
9296
9297void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9298{
9299 struct amdgpu_device *adev = drm_to_adev(dev);
9300 struct dc *dc = adev->dm.dc;
9301 int i;
9302
9303 mutex_lock(&adev->dm.dc_lock);
9304 if (dc->current_state) {
9305 for (i = 0; i < dc->current_state->stream_count; ++i)
9306 dc->current_state->streams[i]
9307 ->triggered_crtc_reset.enabled =
9308 adev->dm.force_timing_sync;
9309
9310 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9311 dc_trigger_sync(dc, dc->current_state);
9312 }
9313 mutex_unlock(&adev->dm.dc_lock);
9314}

/usr/src/sys/dev/pci/drm/include/drm/drm_atomic.h

1/*
2 * Copyright (C) 2014 Red Hat
3 * Copyright (C) 2014 Intel Corp.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robdclark@gmail.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 */
27
28#ifndef DRM_ATOMIC_H_
29#define DRM_ATOMIC_H_
30
31#include <drm/drm_crtc.h>
32#include <drm/drm_util.h>
33
34/**
35 * struct drm_crtc_commit - track modeset commits on a CRTC
36 *
37 * This structure is used to track pending modeset changes and atomic commit on
38 * a per-CRTC basis. Since updating the list should never block, this structure
39 * is reference counted to allow waiters to safely wait on an event to complete,
40 * without holding any locks.
41 *
42 * It has 3 different events in total to allow a fine-grained synchronization
43 * between outstanding updates::
44 *
45 *	atomic commit thread			hardware
46 *
47 *	write new state into hardware	---->	...
48 *	signal hw_done
49 *						switch to new state on next
50 *	...					v/hblank
51 *
52 *	wait for buffers to show up		...
53 *
54 *	...					send completion irq
55 *						irq handler signals flip_done
56 *	cleanup old buffers
57 *
58 *	signal cleanup_done
59 *
60 *	wait for flip_done		<----
61 *	clean up atomic state
62 *
63 * The important bit to know is that &cleanup_done is the terminal event, but the
64 * ordering between &flip_done and &hw_done is entirely up to the specific driver
65 * and modeset state change.
66 *
67 * For an implementation of how to use this look at
68 * drm_atomic_helper_setup_commit() from the atomic helper library.
69 */
70struct drm_crtc_commit {
71 /**
72 * @crtc:
73 *
74 * DRM CRTC for this commit.
75 */
76 struct drm_crtc *crtc;
77
78 /**
79 * @ref:
80 *
81 * Reference count for this structure. Needed to allow blocking on
82 * completions without the risk of the completion disappearing
83 * meanwhile.
84 */
85 struct kref ref;
86
87 /**
88 * @flip_done:
89 *
90 * Will be signaled when the hardware has flipped to the new set of
91 * buffers. Signals at the same time as when the drm event for this
92 * commit is sent to userspace, or when an out-fence is signalled. Note
93 * that for most hardware, in most cases this happens after @hw_done is
94 * signalled.
95 *
96 * Completion of this stage is signalled implicitly by calling
97 * drm_crtc_send_vblank_event() on &drm_crtc_state.event.
98 */
99 struct completion flip_done;
100
101 /**
102 * @hw_done:
103 *
104 * Will be signalled when all hw register changes for this commit have
105 * been written out. Especially when disabling a pipe this can be much
106 * later than @flip_done, since that can signal already when the
107 * screen goes black, whereas to fully shut down a pipe more register
108 * I/O is required.
109 *
110 * Note that this does not need to include separately reference-counted
111 * resources like backing storage buffer pinning, or runtime pm
112 * management.
113 *
114 * Drivers should call drm_atomic_helper_commit_hw_done() to signal
115 * completion of this stage.
116 */
117 struct completion hw_done;
118
119 /**
120 * @cleanup_done:
121 *
122 * Will be signalled after old buffers have been cleaned up by calling
123 * drm_atomic_helper_cleanup_planes(). Since this can only happen after
124 * a vblank wait completed it might be a bit later. This completion is
125 * useful to throttle updates and avoid hardware updates getting ahead
126 * of the buffer cleanup too much.
127 *
128 * Drivers should call drm_atomic_helper_commit_cleanup_done() to signal
129 * completion of this stage.
130 */
131 struct completion cleanup_done;
132
133 /**
134 * @commit_entry:
135 *
136 * Entry on the per-CRTC &drm_crtc.commit_list. Protected by
137 * &drm_crtc.commit_lock.
138 */
139 struct list_head commit_entry;
140
141 /**
142 * @event:
143 *
144 * &drm_pending_vblank_event pointer to clean up private events.
145 */
146 struct drm_pending_vblank_event *event;
147
148 /**
149 * @abort_completion:
150 *
151 * A flag that's set after drm_atomic_helper_setup_commit() takes a
152 * second reference for the completion of &drm_crtc_state.event. It's
153 * used by the free code to remove the second reference if commit fails.
154 */
155	bool abort_completion;
156};
157
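As a sketch of how these completions are meant to be consumed, a waiter pins the commit with a reference before blocking, so the commit cannot be freed while it sleeps. The function below is illustrative, not part of this header:

/*
 * Hypothetical example: block until the hardware has flipped to the
 * new buffers. drm_crtc_commit_get()/drm_crtc_commit_put() are the
 * kref helpers defined later in this header.
 */
static void example_wait_flip_done(struct drm_crtc_commit *commit)
{
	drm_crtc_commit_get(commit);		/* pin across the sleep */
	wait_for_completion(&commit->flip_done);
	drm_crtc_commit_put(commit);		/* may free the commit */
}
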
158struct __drm_planes_state {
159 struct drm_plane *ptr;
160 struct drm_plane_state *state, *old_state, *new_state;
161};
162
163struct __drm_crtcs_state {
164 struct drm_crtc *ptr;
165 struct drm_crtc_state *state, *old_state, *new_state;
166
167 /**
168 * @commit:
169 *
170 * A reference to the CRTC commit object that is kept for use by
171 * drm_atomic_helper_wait_for_flip_done() after
172 * drm_atomic_helper_commit_hw_done() is called. This ensures that a
173 * concurrent commit won't free a commit object that is still in use.
174 */
175 struct drm_crtc_commit *commit;
176
177 s32 __user *out_fence_ptr;
178 u64 last_vblank_count;
179};
180
181struct __drm_connnectors_state {
182 struct drm_connector *ptr;
183 struct drm_connector_state *state, *old_state, *new_state;
184 /**
185 * @out_fence_ptr:
186 *
187 * User-provided pointer which the kernel uses to return a sync_file
188 * file descriptor. Used by writeback connectors to signal completion of
189 * the writeback.
190 */
191 s32 __user *out_fence_ptr;
192};
193
194struct drm_private_obj;
195struct drm_private_state;
196
197/**
198 * struct drm_private_state_funcs - atomic state functions for private objects
199 *
200 * These hooks are used by atomic helpers to create, swap and destroy states of
201 * private objects. The structure itself is used as a vtable to identify the
202 * associated private object type. Each private object type that needs to be
203 * added to the atomic states is expected to have an implementation of these
204 * hooks and pass a pointer to its drm_private_state_funcs struct to
205 * drm_atomic_get_private_obj_state().
206 */
207struct drm_private_state_funcs {
208 /**
209 * @atomic_duplicate_state:
210 *
211 * Duplicate the current state of the private object and return it. It
212 * is an error to call this before obj->state has been initialized.
213 *
214 * RETURNS:
215 *
216 * Duplicated atomic state or NULL when obj->state is not
217 * initialized or allocation failed.
218 */
219 struct drm_private_state *(*atomic_duplicate_state)(struct drm_private_obj *obj);
220
221 /**
222 * @atomic_destroy_state:
223 *
224 * Frees the private object state created with @atomic_duplicate_state.
225 */
226 void (*atomic_destroy_state)(struct drm_private_obj *obj,
227 struct drm_private_state *state);
228};
229
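A minimal sketch of these two hooks, assuming a hypothetical driver state "struct foo_state" that embeds &drm_private_state as its first member; __drm_atomic_helper_private_obj_duplicate_state() comes from the atomic state helpers:

struct foo_state {
	struct drm_private_state base;	/* must stay the first member here */
	int foo_param;
};

static struct drm_private_state *
foo_duplicate_state(struct drm_private_obj *obj)
{
	struct foo_state *state;

	/* Copy the whole containing object; base is its first member. */
	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	/* Reset the base backpointer for the new atomic update. */
	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
	return &state->base;
}

static void foo_destroy_state(struct drm_private_obj *obj,
			      struct drm_private_state *state)
{
	kfree(container_of(state, struct foo_state, base));
}

static const struct drm_private_state_funcs foo_state_funcs = {
	.atomic_duplicate_state = foo_duplicate_state,
	.atomic_destroy_state = foo_destroy_state,
};
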
230/**
231 * struct drm_private_obj - base struct for driver private atomic object
232 *
233 * A driver private object is initialized by calling
234 * drm_atomic_private_obj_init() and cleaned up by calling
235 * drm_atomic_private_obj_fini().
236 *
237 * Currently only tracks the state update functions and the opaque driver
238 * private state itself, but in the future might also track which
239 * &drm_modeset_lock is required to duplicate and update this object's state.
240 *
241 * All private objects must be initialized before the DRM device they are
242 * attached to is registered to the DRM subsystem (call to drm_dev_register())
243 * and should stay around until this DRM device is unregistered (call to
244 * drm_dev_unregister()). In other words, a private object's lifetime is tied
245 * to the DRM device lifetime. This implies that:
246 *
247 * 1/ all calls to drm_atomic_private_obj_init() must be done before calling
248 * drm_dev_register()
249 * 2/ all calls to drm_atomic_private_obj_fini() must be done after calling
250 * drm_dev_unregister()
251 */
252struct drm_private_obj {
253 /**
254 * @head: List entry used to attach a private object to a &drm_device
255 * (queued to &drm_mode_config.privobj_list).
256 */
257 struct list_head head;
258
259 /**
260 * @lock: Modeset lock to protect the state object.
261 */
262 struct drm_modeset_lock lock;
263
264 /**
265 * @state: Current atomic state for this driver private object.
266 */
267 struct drm_private_state *state;
268
269 /**
270 * @funcs:
271 *
272 * Functions to manipulate the state of this driver private object, see
273 * &drm_private_state_funcs.
274 */
275 const struct drm_private_state_funcs *funcs;
276};
277
278/**
279 * drm_for_each_privobj() - private object iterator
280 *
281 * @privobj: pointer to the current private object. Updated after each
282 * iteration
283 * @dev: the DRM device we want to get private objects from
284 *
285 * Allows one to iterate over all private objects attached to @dev
286 */
287#define drm_for_each_privobj(privobj, dev) \
288	list_for_each_entry(privobj, &(dev)->mode_config.privobj_list, head)
289
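A usage sketch, assuming the caller already holds whatever locks are required to walk &drm_mode_config.privobj_list (the function name is illustrative):

static void example_dump_privobjs(struct drm_device *dev)
{
	struct drm_private_obj *privobj;

	drm_for_each_privobj(privobj, dev)
		DRM_DEBUG_ATOMIC("private obj %p, funcs %p\n",
				 privobj, privobj->funcs);
}
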
290/**
291 * struct drm_private_state - base struct for driver private object state
292 * @state: backpointer to global drm_atomic_state
293 *
294 * Currently only contains a backpointer to the overall atomic update, but in
295 * the future also might hold synchronization information similar to e.g.
296 * &drm_crtc.commit.
297 */
298struct drm_private_state {
299 struct drm_atomic_state *state;
300};
301
302struct __drm_private_objs_state {
303 struct drm_private_obj *ptr;
304 struct drm_private_state *state, *old_state, *new_state;
305};
306
307/**
308 * struct drm_atomic_state - the global state object for atomic updates
309 * @ref: count of all references to this state (will not be freed until zero)
310 * @dev: parent DRM device
311 * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
312 * @async_update: hint for asynchronous plane update
313 * @planes: pointer to array of structures with per-plane data
314 * @crtcs: pointer to array of CRTC pointers
315 * @num_connector: size of the @connectors array
316 * @connectors: pointer to array of structures with per-connector data
317 * @num_private_objs: size of the @private_objs array
318 * @private_objs: pointer to array of private object pointers
319 * @acquire_ctx: acquire context for this atomic modeset state update
320 *
321 * States are added to an atomic update by calling drm_atomic_get_crtc_state(),
322 * drm_atomic_get_plane_state(), drm_atomic_get_connector_state(), or for
323 * private state structures, drm_atomic_get_private_obj_state().
324 */
325struct drm_atomic_state {
326 struct kref ref;
327
328 struct drm_device *dev;
329
330 /**
331 * @allow_modeset:
332 *
333 * Allow full modeset. This is used by the ATOMIC IOCTL handler to
334 * implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should
335 * never consult this flag, instead looking at the output of
336 * drm_atomic_crtc_needs_modeset().
337 */
338	bool allow_modeset : 1;
339	bool legacy_cursor_update : 1;
340	bool async_update : 1;
341 /**
342 * @duplicated:
343 *
344 * Indicates whether or not this atomic state was duplicated using
345 * drm_atomic_helper_duplicate_state(). Drivers and atomic helpers
346 * should use this to fixup normal inconsistencies in duplicated
347 * states.
348 */
349	bool duplicated : 1;
350 struct __drm_planes_state *planes;
351 struct __drm_crtcs_state *crtcs;
352 int num_connector;
353 struct __drm_connnectors_state *connectors;
354 int num_private_objs;
355 struct __drm_private_objs_state *private_objs;
356
357 struct drm_modeset_acquire_ctx *acquire_ctx;
358
359 /**
360 * @fake_commit:
361 *
362 * Used for signaling unbound planes/connectors.
363 * When a connector or plane is not bound to any CRTC, it's still important
365 * to preserve linearity to prevent the atomic states from being freed too early.
365 *
366 * This commit (if set) is not bound to any CRTC, but will be completed when
367 * drm_atomic_helper_commit_hw_done() is called.
368 */
369 struct drm_crtc_commit *fake_commit;
370
371 /**
372 * @commit_work:
373 *
374 * Work item which can be used by the driver or helpers to execute the
375 * commit without blocking.
376 */
377 struct work_struct commit_work;
378};
379
380void __drm_crtc_commit_free(struct kref *kref);
381
382/**
383 * drm_crtc_commit_get - acquire a reference to the CRTC commit
384 * @commit: CRTC commit
385 *
386 * Increases the reference of @commit.
387 *
388 * Returns:
389 * The pointer to @commit, with reference increased.
390 */
391static inline struct drm_crtc_commit *drm_crtc_commit_get(struct drm_crtc_commit *commit)
392{
393 kref_get(&commit->ref);
394 return commit;
395}
396
397/**
398 * drm_crtc_commit_put - release a reference to the CRTC commit
399 * @commit: CRTC commit
400 *
401 * This releases a reference to @commit which is freed after removing the
402 * final reference. No locking required and callable from any context.
403 */
404static inline void drm_crtc_commit_put(struct drm_crtc_commit *commit)
405{
406 kref_put(&commit->ref, __drm_crtc_commit_free);
407}
408
409struct drm_atomic_state * __must_check
410drm_atomic_state_alloc(struct drm_device *dev);
411void drm_atomic_state_clear(struct drm_atomic_state *state);
412
413/**
414 * drm_atomic_state_get - acquire a reference to the atomic state
415 * @state: The atomic state
416 *
417 * Returns a new reference to the @state
418 */
419static inline struct drm_atomic_state *
420drm_atomic_state_get(struct drm_atomic_state *state)
421{
422 kref_get(&state->ref);
423 return state;
424}
425
426void __drm_atomic_state_free(struct kref *ref);
427
428/**
429 * drm_atomic_state_put - release a reference to the atomic state
430 * @state: The atomic state
431 *
432 * This releases a reference to @state which is freed after removing the
433 * final reference. No locking required and callable from any context.
434 */
435static inline void drm_atomic_state_put(struct drm_atomic_state *state)
436{
437 kref_put(&state->ref, __drm_atomic_state_free);
438}
439
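A sketch of keeping an atomic state alive across asynchronous work, mirroring what the nonblocking commit helpers do with @commit_work; the two functions below are hypothetical:

static void example_commit_tail_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, struct drm_atomic_state, commit_work);

	/* ... program the hardware from @state ... */

	drm_atomic_state_put(state);	/* release the worker's reference */
}

static void example_queue_commit(struct drm_atomic_state *state)
{
	/* The worker owns one reference until it calls _put(). */
	drm_atomic_state_get(state);
	INIT_WORK(&state->commit_work, example_commit_tail_work);
	queue_work(system_unbound_wq, &state->commit_work);
}
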
440int __must_check
441drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state);
442void drm_atomic_state_default_clear(struct drm_atomic_state *state);
443void drm_atomic_state_default_release(struct drm_atomic_state *state);
444
445struct drm_crtc_state * __must_check
446drm_atomic_get_crtc_state(struct drm_atomic_state *state,
447 struct drm_crtc *crtc);
448struct drm_plane_state * __must_check
449drm_atomic_get_plane_state(struct drm_atomic_state *state,
450 struct drm_plane *plane);
451struct drm_connector_state * __must_check
452drm_atomic_get_connector_state(struct drm_atomic_state *state,
453 struct drm_connector *connector);
454
455void drm_atomic_private_obj_init(struct drm_device *dev,
456 struct drm_private_obj *obj,
457 struct drm_private_state *state,
458 const struct drm_private_state_funcs *funcs);
459void drm_atomic_private_obj_fini(struct drm_private_obj *obj);
460
461struct drm_private_state * __must_check
462drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
463 struct drm_private_obj *obj);
464struct drm_private_state *
465drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state,
466 struct drm_private_obj *obj);
467struct drm_private_state *
468drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state,
469 struct drm_private_obj *obj);
470
471struct drm_connector *
472drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state,
473 struct drm_encoder *encoder);
474struct drm_connector *
475drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
476 struct drm_encoder *encoder);
477
478/**
479 * drm_atomic_get_existing_crtc_state - get CRTC state, if it exists
480 * @state: global atomic state object
481 * @crtc: CRTC to grab
482 *
483 * This function returns the CRTC state for the given CRTC, or NULL
484 * if the CRTC is not part of the global atomic state.
485 *
486 * This function is deprecated, @drm_atomic_get_old_crtc_state or
487 * @drm_atomic_get_new_crtc_state should be used instead.
488 */
489static inline struct drm_crtc_state *
490drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
491 struct drm_crtc *crtc)
492{
493 return state->crtcs[drm_crtc_index(crtc)].state;
494}
495
496/**
497 * drm_atomic_get_old_crtc_state - get old CRTC state, if it exists
498 * @state: global atomic state object
499 * @crtc: CRTC to grab
500 *
501 * This function returns the old CRTC state for the given CRTC, or
502 * NULL if the CRTC is not part of the global atomic state.
503 */
504static inline struct drm_crtc_state *
505drm_atomic_get_old_crtc_state(struct drm_atomic_state *state,
506 struct drm_crtc *crtc)
507{
508 return state->crtcs[drm_crtc_index(crtc)].old_state;
509}
510/**
511 * drm_atomic_get_new_crtc_state - get new CRTC state, if it exists
512 * @state: global atomic state object
513 * @crtc: CRTC to grab
514 *
515 * This function returns the new CRTC state for the given CRTC, or
516 * NULL if the CRTC is not part of the global atomic state.
517 */
518static inline struct drm_crtc_state *
519drm_atomic_get_new_crtc_state(struct drm_atomic_state *state,
520 struct drm_crtc *crtc)
521{
522 return state->crtcs[drm_crtc_index(crtc)].new_state;
523}
524
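A usage sketch: commit code typically reads both old and new state of a CRTC that is part of the update. The logging function below is illustrative and assumes @crtc was already added to @state:

static void example_log_active_change(struct drm_atomic_state *state,
				      struct drm_crtc *crtc)
{
	struct drm_crtc_state *old_s = drm_atomic_get_old_crtc_state(state, crtc);
	struct drm_crtc_state *new_s = drm_atomic_get_new_crtc_state(state, crtc);

	if (old_s && new_s && old_s->active != new_s->active)
		DRM_DEBUG_ATOMIC("[CRTC:%d] active %d -> %d\n",
				 crtc->base.id, old_s->active, new_s->active);
}
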
525/**
526 * drm_atomic_get_existing_plane_state - get plane state, if it exists
527 * @state: global atomic state object
528 * @plane: plane to grab
529 *
530 * This function returns the plane state for the given plane, or NULL
531 * if the plane is not part of the global atomic state.
532 *
533 * This function is deprecated, @drm_atomic_get_old_plane_state or
534 * @drm_atomic_get_new_plane_state should be used instead.
535 */
536static inline struct drm_plane_state *
537drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
538 struct drm_plane *plane)
539{
540 return state->planes[drm_plane_index(plane)].state;
541}
542
543/**
544 * drm_atomic_get_old_plane_state - get plane state, if it exists
545 * @state: global atomic state object
546 * @plane: plane to grab
547 *
548 * This function returns the old plane state for the given plane, or
549 * NULL if the plane is not part of the global atomic state.
550 */
551static inline struct drm_plane_state *
552drm_atomic_get_old_plane_state(struct drm_atomic_state *state,
553 struct drm_plane *plane)
554{
555 return state->planes[drm_plane_index(plane)].old_state;
556}
557
558/**
559 * drm_atomic_get_new_plane_state - get plane state, if it exists
560 * @state: global atomic state object
561 * @plane: plane to grab
562 *
563 * This function returns the new plane state for the given plane, or
564 * NULL if the plane is not part of the global atomic state.
565 */
566static inline struct drm_plane_state *
567drm_atomic_get_new_plane_state(struct drm_atomic_state *state,
568 struct drm_plane *plane)
569{
570 return state->planes[drm_plane_index(plane)].new_state;
571}
572
573/**
574 * drm_atomic_get_existing_connector_state - get connector state, if it exists
575 * @state: global atomic state object
576 * @connector: connector to grab
577 *
578 * This function returns the connector state for the given connector,
579 * or NULL if the connector is not part of the global atomic state.
580 *
581 * This function is deprecated, @drm_atomic_get_old_connector_state or
582 * @drm_atomic_get_new_connector_state should be used instead.
583 */
584static inline struct drm_connector_state *
585drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
586 struct drm_connector *connector)
587{
588 int index = drm_connector_index(connector);
589
590 if (index >= state->num_connector)
591		return NULL;
592
593 return state->connectors[index].state;
594}
595
596/**
597 * drm_atomic_get_old_connector_state - get connector state, if it exists
598 * @state: global atomic state object
599 * @connector: connector to grab
600 *
601 * This function returns the old connector state for the given connector,
602 * or NULL if the connector is not part of the global atomic state.
603 */
604static inline struct drm_connector_state *
605drm_atomic_get_old_connector_state(struct drm_atomic_state *state,
606 struct drm_connector *connector)
607{
608 int index = drm_connector_index(connector);
609
610 if (index >= state->num_connector)
611		return NULL;
612
613 return state->connectors[index].old_state;
614}
615
616/**
617 * drm_atomic_get_new_connector_state - get connector state, if it exists
618 * @state: global atomic state object
619 * @connector: connector to grab
620 *
621 * This function returns the new connector state for the given connector,
622 * or NULL if the connector is not part of the global atomic state.
623 */
624static inline struct drm_connector_state *
625drm_atomic_get_new_connector_state(struct drm_atomic_state *state,
626 struct drm_connector *connector)
627{
628 int index = drm_connector_index(connector);
629
630 if (index >= state->num_connector)
631		return NULL;
632
633 return state->connectors[index].new_state;
634}
635
636/**
637 * __drm_atomic_get_current_plane_state - get current plane state
638 * @state: global atomic state object
639 * @plane: plane to grab
640 *
641 * This function returns the plane state for the given plane, either from
642 * @state, or if the plane isn't part of the atomic state update, from @plane.
643 * This is useful in atomic check callbacks, when drivers need to peek at, but
644 * not change, state of other planes, since it avoids threading an error code
645 * back up the call chain.
646 *
647 * WARNING:
648 *
649 * Note that this function is in general unsafe since it doesn't check for the
650 * required locking to access state structures. Drivers must ensure that it is
651 * safe to access the returned state structure through other means. One common
652 * example is when planes are fixed to a single CRTC, and the driver knows that
653 * the CRTC lock is held already. In that case holding the CRTC lock gives a
654 * read-lock on all planes connected to that CRTC. But if planes can be
655 * reassigned things get more tricky. In that case it's better to use
656 * drm_atomic_get_plane_state and wire up full error handling.
657 *
658 * Returns:
659 *
660 * Read-only pointer to the current plane state.
661 */
662static inline const struct drm_plane_state *
663__drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
664 struct drm_plane *plane)
665{
666 if (state->planes[drm_plane_index(plane)].state)
667 return state->planes[drm_plane_index(plane)].state;
668
669 return plane->state;
670}
671
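A sketch of the "peek" pattern described above, assuming a driver where the plane is fixed to one CRTC whose lock is already held (the function is illustrative):

static bool example_cursor_visible(struct drm_atomic_state *state,
				   struct drm_plane *cursor)
{
	const struct drm_plane_state *plane_state =
		__drm_atomic_get_current_plane_state(state, cursor);

	/* A plane with no framebuffer attached is not visible. */
	return plane_state->fb != NULL;
}
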
672int __must_check
673drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
674 struct drm_encoder *encoder);
675int __must_check
676drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
677 struct drm_crtc *crtc);
678int __must_check
679drm_atomic_add_affected_planes(struct drm_atomic_state *state,
680 struct drm_crtc *crtc);
681
682int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
683int __must_check drm_atomic_commit(struct drm_atomic_state *state);
684int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
685
686void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
687
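A skeleton of a driver-internal atomic update using the entry points above; the actual state changes are elided and the error handling is reduced to the bare -EDEADLK retry loop. This is a sketch, not a complete implementation (see the drm_atomic_helper_* code for real examples):

static int example_atomic_update(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out_fini;
	}
	state->acquire_ctx = &ctx;

retry:
	/* ... drm_atomic_get_*_state() calls and state changes here ... */

	ret = drm_atomic_commit(state);
	if (ret == -EDEADLK) {
		/* Drop the stale states and locks, then redo the update. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);
out_fini:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}
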
688/**
689 * for_each_oldnew_connector_in_state - iterate over all connectors in an atomic update
690 * @__state: &struct drm_atomic_state pointer
691 * @connector: &struct drm_connector iteration cursor
692 * @old_connector_state: &struct drm_connector_state iteration cursor for the
693 * old state
694 * @new_connector_state: &struct drm_connector_state iteration cursor for the
695 * new state
696 * @__i: int iteration cursor, for macro-internal use
697 *
698 * This iterates over all connectors in an atomic update, tracking both old and
699 * new state. This is useful in places where the state delta needs to be
700 * considered, for example in atomic check functions.
701 */
702#define for_each_oldnew_connector_in_state(__state, connector, old_connector_state, new_connector_state, __i) \
703	for ((__i) = 0;							\
704	     (__i) < (__state)->num_connector;				\
705	     (__i)++)							\
706		for_each_if ((__state)->connectors[__i].ptr &&		\
707			     ((connector) = (__state)->connectors[__i].ptr, \
708			     (void)(connector) /* Only to avoid unused-but-set-variable warning */, \
709			     (old_connector_state) = (__state)->connectors[__i].old_state, \
710			     (new_connector_state) = (__state)->connectors[__i].new_state, 1))
711
712/**
713 * for_each_old_connector_in_state - iterate over all connectors in an atomic update
714 * @__state: &struct drm_atomic_state pointer
715 * @connector: &struct drm_connector iteration cursor
716 * @old_connector_state: &struct drm_connector_state iteration cursor for the
717 * old state
718 * @__i: int iteration cursor, for macro-internal use
719 *
720 * This iterates over all connectors in an atomic update, tracking only the old
721 * state. This is useful in disable functions, where we need the old state the
722 * hardware is still in.
723 */
724#define for_each_old_connector_in_state(__state, connector, old_connector_state, __i) \
725	for ((__i) = 0;							\
726	     (__i) < (__state)->num_connector;				\
727	     (__i)++)							\
728		for_each_if ((__state)->connectors[__i].ptr &&		\
729			     ((connector) = (__state)->connectors[__i].ptr, \
730			     (void)(connector) /* Only to avoid unused-but-set-variable warning */, \
731			     (old_connector_state) = (__state)->connectors[__i].old_state, 1))
732
733/**
734 * for_each_new_connector_in_state - iterate over all connectors in an atomic update
735 * @__state: &struct drm_atomic_state pointer
736 * @connector: &struct drm_connector iteration cursor
737 * @new_connector_state: &struct drm_connector_state iteration cursor for the
738 * new state
739 * @__i: int iteration cursor, for macro-internal use
740 *
741 * This iterates over all connectors in an atomic update, tracking only the new
742 * state. This is useful in enable functions, where we need the new state the
743 * hardware should be in when the atomic commit operation has completed.
744 */
745#define for_each_new_connector_in_state(__state, connector, new_connector_state, __i) \
746	for ((__i) = 0;							\
747	     (__i) < (__state)->num_connector;				\
748	     (__i)++)							\
749		for_each_if ((__state)->connectors[__i].ptr &&		\
750			     ((connector) = (__state)->connectors[__i].ptr, \
751			     (void)(connector) /* Only to avoid unused-but-set-variable warning */, \
752			     (new_connector_state) = (__state)->connectors[__i].new_state, \
753			     (void)(new_connector_state) /* Only to avoid unused-but-set-variable warning */, 1))
754
755/**
756 * for_each_oldnew_crtc_in_state - iterate over all CRTCs in an atomic update
757 * @__state: &struct drm_atomic_state pointer
758 * @crtc: &struct drm_crtc iteration cursor
759 * @old_crtc_state: &struct drm_crtc_state iteration cursor for the old state
760 * @new_crtc_state: &struct drm_crtc_state iteration cursor for the new state
761 * @__i: int iteration cursor, for macro-internal use
762 *
763 * This iterates over all CRTCs in an atomic update, tracking both old and
764 * new state. This is useful in places where the state delta needs to be
765 * considered, for example in atomic check functions.
766 */
767#define for_each_oldnew_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
768	for ((__i) = 0;							\
769	     (__i) < (__state)->dev->mode_config.num_crtc;		\
770	     (__i)++)							\
771		for_each_if ((__state)->crtcs[__i].ptr &&		\
772			     ((crtc) = (__state)->crtcs[__i].ptr,	\
773			     (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \
774			     (old_crtc_state) = (__state)->crtcs[__i].old_state, \
775			     (void)(old_crtc_state) /* Only to avoid unused-but-set-variable warning */, \
776			     (new_crtc_state) = (__state)->crtcs[__i].new_state, 1))
777
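A usage sketch: walking the CRTC state delta, as an atomic check function might do (the enable counting below is illustrative):

static void example_count_enables(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i, enabling = 0;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (!old_crtc_state->active && new_crtc_state->active)
			enabling++;
	}

	DRM_DEBUG_ATOMIC("%d CRTC(s) being enabled\n", enabling);
}
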
778/**
779 * for_each_old_crtc_in_state - iterate over all CRTCs in an atomic update
780 * @__state: &struct drm_atomic_state pointer
781 * @crtc: &struct drm_crtc iteration cursor
782 * @old_crtc_state: &struct drm_crtc_state iteration cursor for the old state
783 * @__i: int iteration cursor, for macro-internal use
784 *
785 * This iterates over all CRTCs in an atomic update, tracking only the old
786 * state. This is useful in disable functions, where we need the old state the
787 * hardware is still in.
788 */
789#define for_each_old_crtc_in_state(__state, crtc, old_crtc_state, __i) \
790	for ((__i) = 0;							\
791	     (__i) < (__state)->dev->mode_config.num_crtc;		\
792	     (__i)++)							\
793		for_each_if ((__state)->crtcs[__i].ptr &&		\
794			     ((crtc) = (__state)->crtcs[__i].ptr,	\
795			     (old_crtc_state) = (__state)->crtcs[__i].old_state, 1))
796
797/**
798 * for_each_new_crtc_in_state - iterate over all CRTCs in an atomic update
799 * @__state: &struct drm_atomic_state pointer
800 * @crtc: &struct drm_crtc iteration cursor
801 * @new_crtc_state: &struct drm_crtc_state iteration cursor for the new state
802 * @__i: int iteration cursor, for macro-internal use
803 *
804 * This iterates over all CRTCs in an atomic update, tracking only the new
805 * state. This is useful in enable functions, where we need the new state the
806 * hardware should be in when the atomic commit operation has completed.
807 */
808#define for_each_new_crtc_in_state(__state, crtc, new_crtc_state, __i) \
809	for ((__i) = 0;							\
810	     (__i) < (__state)->dev->mode_config.num_crtc;		\
811	     (__i)++)							\
812		for_each_if ((__state)->crtcs[__i].ptr &&		\
813			     ((crtc) = (__state)->crtcs[__i].ptr,	\
814			     (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \
815			     (new_crtc_state) = (__state)->crtcs[__i].new_state, \
816			     (void)(new_crtc_state) /* Only to avoid unused-but-set-variable warning */, 1))
817
818/**
819 * for_each_oldnew_plane_in_state - iterate over all planes in an atomic update
820 * @__state: &struct drm_atomic_state pointer
821 * @plane: &struct drm_plane iteration cursor
822 * @old_plane_state: &struct drm_plane_state iteration cursor for the old state
823 * @new_plane_state: &struct drm_plane_state iteration cursor for the new state
824 * @__i: int iteration cursor, for macro-internal use
825 *
826 * This iterates over all planes in an atomic update, tracking both old and
827 * new state. This is useful in places where the state delta needs to be
828 * considered, for example in atomic check functions.
829 */
830#define for_each_oldnew_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
831	for ((__i) = 0;							\
832	     (__i) < (__state)->dev->mode_config.num_total_plane;	\
833	     (__i)++)							\
834		for_each_if ((__state)->planes[__i].ptr &&		\
835			     ((plane) = (__state)->planes[__i].ptr,	\
836			     (void)(plane) /* Only to avoid unused-but-set-variable warning */, \
837			     (old_plane_state) = (__state)->planes[__i].old_state, \
838			     (new_plane_state) = (__state)->planes[__i].new_state, 1))
839
840/**
841 * for_each_oldnew_plane_in_state_reverse - iterate over all planes in an atomic
842 * update in reverse order
843 * @__state: &struct drm_atomic_state pointer
844 * @plane: &struct drm_plane iteration cursor
845 * @old_plane_state: &struct drm_plane_state iteration cursor for the old state
846 * @new_plane_state: &struct drm_plane_state iteration cursor for the new state
847 * @__i: int iteration cursor, for macro-internal use
848 *
849 * This iterates over all planes in an atomic update in reverse order,
850 * tracking both old and new state. This is useful in places where the
851 * state delta needs to be considered, for example in atomic check functions.
852 */
853#define for_each_oldnew_plane_in_state_reverse(__state, plane, old_plane_state, new_plane_state, __i) \
854	for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \
855	     (__i) >= 0;						\
856	     (__i)--)							\
857		for_each_if ((__state)->planes[__i].ptr &&		\
858			     ((plane) = (__state)->planes[__i].ptr,	\
859			     (old_plane_state) = (__state)->planes[__i].old_state, \
860			     (new_plane_state) = (__state)->planes[__i].new_state, 1))
861
862/**
863 * for_each_old_plane_in_state - iterate over all planes in an atomic update
864 * @__state: &struct drm_atomic_state pointer
865 * @plane: &struct drm_plane iteration cursor
866 * @old_plane_state: &struct drm_plane_state iteration cursor for the old state
867 * @__i: int iteration cursor, for macro-internal use
868 *
869 * This iterates over all planes in an atomic update, tracking only the old
870 * state. This is useful in disable functions, where we need the old state the
871 * hardware is still in.
872 */
873#define for_each_old_plane_in_state(__state, plane, old_plane_state, __i) \
874	for ((__i) = 0;							\
875	     (__i) < (__state)->dev->mode_config.num_total_plane;	\
876	     (__i)++)							\
877		for_each_if ((__state)->planes[__i].ptr &&		\
878			     ((plane) = (__state)->planes[__i].ptr,	\
879			     (old_plane_state) = (__state)->planes[__i].old_state, 1))
880/**
881 * for_each_new_plane_in_state - iterate over all planes in an atomic update
882 * @__state: &struct drm_atomic_state pointer
883 * @plane: &struct drm_plane iteration cursor
884 * @new_plane_state: &struct drm_plane_state iteration cursor for the new state
885 * @__i: int iteration cursor, for macro-internal use
886 *
887 * This iterates over all planes in an atomic update, tracking only the new
888 * state. This is useful in enable functions, where we need the new state the
889 * hardware should be in when the atomic commit operation has completed.
890 */
891#define for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
892	for ((__i) = 0;							\
893	     (__i) < (__state)->dev->mode_config.num_total_plane;	\
894	     (__i)++)							\
895		for_each_if ((__state)->planes[__i].ptr &&		\
896			     ((plane) = (__state)->planes[__i].ptr,	\
897			     (void)(plane) /* Only to avoid unused-but-set-variable warning */, \
898			     (new_plane_state) = (__state)->planes[__i].new_state, \
899			     (void)(new_plane_state) /* Only to avoid unused-but-set-variable warning */, 1))
900
901/**
902 * for_each_oldnew_private_obj_in_state - iterate over all private objects in an atomic update
903 * @__state: &struct drm_atomic_state pointer
904 * @obj: &struct drm_private_obj iteration cursor
905 * @old_obj_state: &struct drm_private_state iteration cursor for the old state
906 * @new_obj_state: &struct drm_private_state iteration cursor for the new state
907 * @__i: int iteration cursor, for macro-internal use
908 *
909 * This iterates over all private objects in an atomic update, tracking both
910 * old and new state. This is useful in places where the state delta needs
911 * to be considered, for example in atomic check functions.
912 */
913#define for_each_oldnew_private_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \
914 for ((__i) = 0; \
915 (__i) < (__state)->num_private_objs && \
916 ((obj) = (__state)->private_objs[__i].ptr, \
917 (old_obj_state) = (__state)->private_objs[__i].old_state, \
918 (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \
919 (__i)++)
920
921/**
922 * for_each_old_private_obj_in_state - iterate over all private objects in an atomic update
923 * @__state: &struct drm_atomic_state pointer
924 * @obj: &struct drm_private_obj iteration cursor
925 * @old_obj_state: &struct drm_private_state iteration cursor for the old state
926 * @__i: int iteration cursor, for macro-internal use
927 *
928 * This iterates over all private objects in an atomic update, tracking only
929 * the old state. This is useful in disable functions, where we need the old
930 * state the hardware is still in.
931 */
932#define for_each_old_private_obj_in_state(__state, obj, old_obj_state, __i) \
933 for ((__i) = 0; \
934 (__i) < (__state)->num_private_objs && \
935 ((obj) = (__state)->private_objs[__i].ptr, \
936 (old_obj_state) = (__state)->private_objs[__i].old_state, 1); \
937 (__i)++)
938
939/**
940 * for_each_new_private_obj_in_state - iterate over all private objects in an atomic update
941 * @__state: &struct drm_atomic_state pointer
942 * @obj: &struct drm_private_obj iteration cursor
943 * @new_obj_state: &struct drm_private_state iteration cursor for the new state
944 * @__i: int iteration cursor, for macro-internal use
945 *
946 * This iterates over all private objects in an atomic update, tracking only
947 * the new state. This is useful in enable functions, where we need the new state the
948 * hardware should be in when the atomic commit operation has completed.
949 */
950#define for_each_new_private_obj_in_state(__state, obj, new_obj_state, __i) \
951 for ((__i) = 0; \
952 (__i) < (__state)->num_private_objs && \
953 ((obj) = (__state)->private_objs[__i].ptr, \
954 (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \
955 (__i)++)
956
957/**
958 * drm_atomic_crtc_needs_modeset - compute combined modeset need
959 * @state: &drm_crtc_state for the CRTC
960 *
961 * To give drivers flexibility &struct drm_crtc_state has 3 booleans to track
962 * whether the state CRTC changed enough to need a full modeset cycle:
963 * mode_changed, active_changed and connectors_changed. This helper simply
964 * combines these three to compute the overall need for a modeset for @state.
965 *
966 * The atomic helper code sets these booleans, but drivers can and should
967 * change them appropriately to accurately represent whether a modeset is
968 * really needed. In general, drivers should avoid full modesets whenever
969 * possible.
970 *
971 * For example if the CRTC mode has changed, and the hardware is able to enact
972 * the requested mode change without going through a full modeset, the driver
973 * should clear mode_changed in its &drm_mode_config_funcs.atomic_check
974 * implementation.
975 */
976static inline bool
977drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state)
978{
979	return state->mode_changed || state->active_changed ||
	    21: Assuming field 'mode_changed' is false
	    22: Assuming field 'active_changed' is false
	    23: Returning value, which participates in a condition later
980		state->connectors_changed;
981}
982
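A usage sketch of the typical pattern in a driver's atomic check path, where full-modeset work is skipped for fastpath updates (the freestanding function and its signature are illustrative):

static int example_crtc_atomic_check(struct drm_crtc *crtc,
				     struct drm_crtc_state *crtc_state)
{
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;	/* plane-only update, nothing to validate */

	/* ... validate the new mode, clocks, etc. here ... */
	return 0;
}
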
983/**
984 * drm_atomic_crtc_effectively_active - compute whether CRTC is actually active
985 * @state: &drm_crtc_state for the CRTC
986 *
987 * When in self refresh mode, the crtc_state->active value will be false, since
988 * the CRTC is off. However in some cases we're interested in whether the CRTC
989 * is active, or effectively active (ie: it's connected to an active display).
990 * In these cases, use this function instead of just checking active.
991 */
992static inline bool
993drm_atomic_crtc_effectively_active(const struct drm_crtc_state *state)
994{
995 return state->active || state->self_refresh_active;
996}
997
998/**
999 * struct drm_bus_cfg - bus configuration
1000 *
1001 * This structure stores the configuration of a physical bus between two
1002 * components in an output pipeline, usually between two bridges, an encoder
1003 * and a bridge, or a bridge and a connector.
1004 *
1005 * The bus configuration is stored in &drm_bridge_state separately for the
1006 * input and output buses, as seen from the point of view of each bridge. The
1007 * bus configuration of a bridge output is usually identical to the
1008 * configuration of the next bridge's input, but may differ if the signals are
1009 * modified between the two bridges, for instance by an inverter on the board.
1010 * The input and output configurations of a bridge may differ if the bridge
1011 * modifies the signals internally, for instance by performing format
1012 * conversion, or modifying signal polarities.
1013 */
1014struct drm_bus_cfg {
1015 /**
1016 * @format: format used on this bus (one of the MEDIA_BUS_FMT_* format)
1017 *
1018 * This field should not be directly modified by drivers
1019 * (drm_atomic_bridge_chain_select_bus_fmts() takes care of the bus
1020 * format negotiation).
1021 */
1022 u32 format;
1023
1024 /**
1025 * @flags: DRM_BUS_* flags used on this bus
1026 */
1027 u32 flags;
1028};
1029
1030/**
1031 * struct drm_bridge_state - Atomic bridge state object
1032 */
1033struct drm_bridge_state {
1034 /**
1035 * @base: inherit from &drm_private_state
1036 */
1037 struct drm_private_state base;
1038
1039 /**
1040 * @bridge: the bridge this state refers to
1041 */
1042 struct drm_bridge *bridge;
1043
1044 /**
1045 * @input_bus_cfg: input bus configuration
1046 */
1047 struct drm_bus_cfg input_bus_cfg;
1048
1049 /**
1050 * @output_bus_cfg: output bus configuration
1051 */
1052 struct drm_bus_cfg output_bus_cfg;
1053};
1054
1055static inline struct drm_bridge_state *
1056drm_priv_to_bridge_state(struct drm_private_state *priv)
1057{
1058	return container_of(priv, struct drm_bridge_state, base);
1059}
1060
1061struct drm_bridge_state *
1062drm_atomic_get_bridge_state(struct drm_atomic_state *state,
1063 struct drm_bridge *bridge);
1064struct drm_bridge_state *
1065drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
1066 struct drm_bridge *bridge);
1067struct drm_bridge_state *
1068drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
1069 struct drm_bridge *bridge);
1070
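A sketch of how a bridge driver might read its negotiated bus configuration out of an atomic update using the accessors above (the function is illustrative):

static u32 example_input_format(struct drm_atomic_state *state,
				struct drm_bridge *bridge)
{
	struct drm_bridge_state *bridge_state =
		drm_atomic_get_new_bridge_state(state, bridge);

	/* The bridge may not be part of this update. */
	if (!bridge_state)
		return 0;

	return bridge_state->input_bus_cfg.format;
}
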
1071#endif /* DRM_ATOMIC_H_ */