Bug Summary

File: dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c
Warning: line 2435, column 3
Value stored to 'dret' is never read
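
The deadcode.DeadStores checker behind this report fires when the result of
an assignment is never read on any path afterwards. A minimal sketch of the
pattern and the usual fix, assuming a hypothetical read_status() helper
(only the variable name 'dret' comes from the report; the code around line
2435 is not part of this excerpt):

    #include <stdbool.h>

    bool read_status(unsigned char *buf);

    void poll_status(unsigned char *buf)
    {
        bool dret;

        /* flagged: 'dret' is assigned but never read afterwards */
        dret = read_status(buf);

        /* ... buf is used below, dret is not ... */
    }

    void poll_status_fixed(unsigned char *buf)
    {
        /* fix: consume the return value (or drop the dead store) */
        if (!read_status(buf))
            return;

        /* ... */
    }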

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name amdgpu_dm.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26/* The caprices of the preprocessor require that this be declared right here */
27#define CREATE_TRACE_POINTS
28
29#include "dm_services_types.h"
30#include "dc.h"
31#include "dc/inc/core_types.h"
32#include "dal_asic_id.h"
33#include "dmub/dmub_srv.h"
34#include "dc/inc/hw/dmcu.h"
35#include "dc/inc/hw/abm.h"
36#include "dc/dc_dmub_srv.h"
37
38#include "vid.h"
39#include "amdgpu.h"
40#include "amdgpu_display.h"
41#include "amdgpu_ucode.h"
42#include "atom.h"
43#include "amdgpu_dm.h"
44#ifdef CONFIG_DRM_AMD_DC_HDCP
45#include "amdgpu_dm_hdcp.h"
46#include <drm/drm_hdcp.h>
47#endif
48#include "amdgpu_pm.h"
49
50#include "amd_shared.h"
51#include "amdgpu_dm_irq.h"
52#include "dm_helpers.h"
53#include "amdgpu_dm_mst_types.h"
54#if defined(CONFIG_DEBUG_FS)
55#include "amdgpu_dm_debugfs.h"
56#endif
57
58#include "ivsrcid/ivsrcid_vislands30.h"
59
60#include <linux/module.h>
61#include <linux/moduleparam.h>
62#include <linux/version.h>
63#include <linux/types.h>
64#include <linux/pm_runtime.h>
65#include <linux/pci.h>
66#include <linux/firmware.h>
67#include <linux/component.h>
68
69#include <drm/drm_atomic.h>
70#include <drm/drm_atomic_uapi.h>
71#include <drm/drm_atomic_helper.h>
72#include <drm/drm_dp_mst_helper.h>
73#include <drm/drm_fb_helper.h>
74#include <drm/drm_fourcc.h>
75#include <drm/drm_edid.h>
76#include <drm/drm_vblank.h>
77#include <drm/drm_audio_component.h>
78#include <drm/drm_hdcp.h>
79
80#if defined(CONFIG_DRM_AMD_DC_DCN1)
81#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82
83#include "dcn/dcn_1_0_offset.h"
84#include "dcn/dcn_1_0_sh_mask.h"
85#include "soc15_hw_ip.h"
86#include "vega10_ip_offset.h"
87
88#include "soc15_common.h"
89#endif
90
91#include "modules/inc/mod_freesync.h"
92#include "modules/power/power_helpers.h"
93#include "modules/inc/mod_info_packet.h"
94
95#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
98#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102#endif
103#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
104MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
105
106#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
107MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
108
109#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
110MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
111
112/* Number of bytes in PSP header for firmware. */
113#define PSP_HEADER_BYTES 0x100
114
115/* Number of bytes in PSP footer for firmware. */
116#define PSP_FOOTER_BYTES 0x100
117
118/**
119 * DOC: overview
120 *
121 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
122 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
123 * requests into DC requests, and DC responses into DRM responses.
124 *
125 * The root control structure is &struct amdgpu_display_manager.
126 */
127
128/* basic init/fini API */
129static int amdgpu_dm_init(struct amdgpu_device *adev);
130static void amdgpu_dm_fini(struct amdgpu_device *adev);
131
132static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
133{
134 switch (link->dpcd_caps.dongle_type) {
135 case DISPLAY_DONGLE_NONE:
136 return DRM_MODE_SUBCONNECTOR_Native;
137 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
138 return DRM_MODE_SUBCONNECTOR_VGA;
139 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
140 case DISPLAY_DONGLE_DP_DVI_DONGLE:
141 return DRM_MODE_SUBCONNECTOR_DVID;
142 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
143 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
144 return DRM_MODE_SUBCONNECTOR_HDMIA;
145 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
146 default:
147 return DRM_MODE_SUBCONNECTOR_Unknown;
148 }
149}
150
151static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
152{
153 struct dc_link *link = aconnector->dc_link;
154 struct drm_connector *connector = &aconnector->base;
155 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
156
157 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
158 return;
159
160 if (aconnector->dc_sink)
161 subconnector = get_subconnector_type(link);
162
163 drm_object_property_set_value(&connector->base,
164 connector->dev->mode_config.dp_subconnector_property,
165 subconnector);
166}
167
168/*
169 * initializes drm_device display related structures, based on the information
170 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
171 * drm_encoder, drm_mode_config
172 *
173 * Returns 0 on success
174 */
175static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
176/* removes and deallocates the drm structures, created by the above function */
177static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
178
179static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
180 struct drm_plane *plane,
181 unsigned long possible_crtcs,
182 const struct dc_plane_cap *plane_cap);
183static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
184 struct drm_plane *plane,
185 uint32_t link_index);
186static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
187 struct amdgpu_dm_connector *amdgpu_dm_connector,
188 uint32_t link_index,
189 struct amdgpu_encoder *amdgpu_encoder);
190static int amdgpu_dm_encoder_init(struct drm_device *dev,
191 struct amdgpu_encoder *aencoder,
192 uint32_t link_index);
193
194static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
195
196static int amdgpu_dm_atomic_commit(struct drm_device *dev,
197 struct drm_atomic_state *state,
198 bool nonblock);
199
200static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
201
202static int amdgpu_dm_atomic_check(struct drm_device *dev,
203 struct drm_atomic_state *state);
204
205static void handle_cursor_update(struct drm_plane *plane,
206 struct drm_plane_state *old_plane_state);
207
208static void amdgpu_dm_set_psr_caps(struct dc_link *link);
209static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
210static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
211static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
212static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
213
214/*
215 * dm_vblank_get_counter
216 *
217 * @brief
218 * Get counter for number of vertical blanks
219 *
220 * @param
221 * struct amdgpu_device *adev - [in] desired amdgpu device
222 * int disp_idx - [in] which CRTC to get the counter from
223 *
224 * @return
225 * Counter for vertical blanks
226 */
227static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
228{
229 if (crtc >= adev->mode_info.num_crtc)
230 return 0;
231 else {
232 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
233
234 if (acrtc->dm_irq_params.stream == NULL) {
235 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
236 crtc);
237 return 0;
238 }
239
240 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
241 }
242}
243
244static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
245 u32 *vbl, u32 *position)
246{
247 uint32_t v_blank_start, v_blank_end, h_position, v_position;
248
249 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
250 return -EINVAL;
251 else {
252 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
253
254 if (acrtc->dm_irq_params.stream == NULL) {
255 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
256 crtc);
257 return 0;
258 }
259
260 /*
261 * TODO rework base driver to use values directly.
262 * for now parse it back into reg-format
263 */
264 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
265 &v_blank_start,
266 &v_blank_end,
267 &h_position,
268 &v_position);
269
270 *position = v_position | (h_position << 16);
271 *vbl = v_blank_start | (v_blank_end << 16);
272 }
273
274 return 0;
275}
276
277static bool dm_is_idle(void *handle)
278{
279 /* XXX todo */
280 return true;
281}
282
283static int dm_wait_for_idle(void *handle)
284{
285 /* XXX todo */
286 return 0;
287}
288
289static bool dm_check_soft_reset(void *handle)
290{
291 return false;
292}
293
294static int dm_soft_reset(void *handle)
295{
296 /* XXX todo */
297 return 0;
298}
299
300static struct amdgpu_crtc *
301get_crtc_by_otg_inst(struct amdgpu_device *adev,
302 int otg_inst)
303{
304 struct drm_device *dev = adev_to_drm(adev);
305 struct drm_crtc *crtc;
306 struct amdgpu_crtc *amdgpu_crtc;
307
308 if (otg_inst == -1) {
309 WARN_ON(1);
310 return adev->mode_info.crtcs[0];
311 }
312
313 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
314 amdgpu_crtc = to_amdgpu_crtc(crtc);
315
316 if (amdgpu_crtc->otg_inst == otg_inst)
317 return amdgpu_crtc;
318 }
319
320 return NULL;
321}
322
323static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
324{
325 return acrtc->dm_irq_params.freesync_config.state ==
326 VRR_STATE_ACTIVE_VARIABLE ||
327 acrtc->dm_irq_params.freesync_config.state ==
328 VRR_STATE_ACTIVE_FIXED;
329}
330
331static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
332{
333 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
334 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
335}
336
337/**
338 * dm_pflip_high_irq() - Handle pageflip interrupt
339 * @interrupt_params: ignored
340 *
341 * Handles the pageflip interrupt by notifying all interested parties
342 * that the pageflip has been completed.
343 */
344static void dm_pflip_high_irq(void *interrupt_params)
345{
346 struct amdgpu_crtc *amdgpu_crtc;
347 struct common_irq_params *irq_params = interrupt_params;
348 struct amdgpu_device *adev = irq_params->adev;
349 unsigned long flags;
350 struct drm_pending_vblank_event *e;
351 uint32_t vpos, hpos, v_blank_start, v_blank_end;
352 bool vrr_active;
353
354 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
355
356 /* IRQ could occur when in initial stage */
357 /* TODO work and BO cleanup */
358 if (amdgpu_crtc == NULL) {
359 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
360 return;
361 }
362
363 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
364
365 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
366 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
367 amdgpu_crtc->pflip_status,
368 AMDGPU_FLIP_SUBMITTED,
369 amdgpu_crtc->crtc_id,
370 amdgpu_crtc);
371 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
372 return;
373 }
374
375 /* page flip completed. */
376 e = amdgpu_crtc->event;
377 amdgpu_crtc->event = NULL;
378
379 if (!e)
380 WARN_ON(1);
381
382 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
383
384 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
385 if (!vrr_active ||
386 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
387 &v_blank_end, &hpos, &vpos) ||
388 (vpos < v_blank_start)) {
389 /* Update to correct count and vblank timestamp if racing with
390 * vblank irq. This also updates to the correct vblank timestamp
391 * even in VRR mode, as scanout is past the front-porch atm.
392 */
393 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
394
395 /* Wake up userspace by sending the pageflip event with proper
396 * count and timestamp of vblank of flip completion.
397 */
398 if (e) {
399 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
400
401 /* Event sent, so done with vblank for this flip */
402 drm_crtc_vblank_put(&amdgpu_crtc->base);
403 }
404 } else if (e) {
405 /* VRR active and inside front-porch: vblank count and
406 * timestamp for pageflip event will only be up to date after
407 * drm_crtc_handle_vblank() has been executed from late vblank
408 * irq handler after start of back-porch (vline 0). We queue the
409 * pageflip event for send-out by drm_crtc_handle_vblank() with
410 * updated timestamp and count, once it runs after us.
411 *
412 * We need to open-code this instead of using the helper
413 * drm_crtc_arm_vblank_event(), as that helper would
414 * call drm_crtc_accurate_vblank_count(), which we must
415 * not call in VRR mode while we are in front-porch!
416 */
417
418 /* sequence will be replaced by real count during send-out. */
419 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
420 e->pipe = amdgpu_crtc->crtc_id;
421
422 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
423 e = NULL;
424 }
425
426 /* Keep track of vblank of this flip for flip throttling. We use the
427 * cooked hw counter, as that one incremented at start of this vblank
428 * of pageflip completion, so last_flip_vblank is the forbidden count
429 * for queueing new pageflips if vsync + VRR is enabled.
430 */
431 amdgpu_crtc->dm_irq_params.last_flip_vblank =
432 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
433
434 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
435 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
436
437 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
438 amdgpu_crtc->crtc_id, amdgpu_crtc,
439 vrr_active, (int) !e);
440}
441
442static void dm_vupdate_high_irq(void *interrupt_params)
443{
444 struct common_irq_params *irq_params = interrupt_params;
445 struct amdgpu_device *adev = irq_params->adev;
446 struct amdgpu_crtc *acrtc;
447 unsigned long flags;
448 int vrr_active;
449
450 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
451
452 if (acrtc) {
453 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
454
455 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
456 acrtc->crtc_id,
457 vrr_active);
458
459 /* Core vblank handling is done here after end of front-porch in
460 * vrr mode, as vblank timestamping will give valid results
461 * while now done after front-porch. This will also deliver
462 * page-flip completion events that have been queued to us
463 * if a pageflip happened inside front-porch.
464 */
465 if (vrr_active) {
466 drm_crtc_handle_vblank(&acrtc->base);
467
468 /* BTR processing for pre-DCE12 ASICs */
469 if (acrtc->dm_irq_params.stream &&
470 adev->family < AMDGPU_FAMILY_AI) {
471 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
472 mod_freesync_handle_v_update(
473 adev->dm.freesync_module,
474 acrtc->dm_irq_params.stream,
475 &acrtc->dm_irq_params.vrr_params);
476
477 dc_stream_adjust_vmin_vmax(
478 adev->dm.dc,
479 acrtc->dm_irq_params.stream,
480 &acrtc->dm_irq_params.vrr_params.adjust);
481 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
482 }
483 }
484 }
485}
486
487/**
488 * dm_crtc_high_irq() - Handles CRTC interrupt
489 * @interrupt_params: used for determining the CRTC instance
490 *
491 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
492 * event handler.
493 */
494static void dm_crtc_high_irq(void *interrupt_params)
495{
496 struct common_irq_params *irq_params = interrupt_params;
497 struct amdgpu_device *adev = irq_params->adev;
498 struct amdgpu_crtc *acrtc;
499 unsigned long flags;
500 int vrr_active;
501
502 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
503 if (!acrtc)
504 return;
505
506 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
507
508 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
509 vrr_active, acrtc->dm_irq_params.active_planes);
510
511 /**
512 * Core vblank handling at start of front-porch is only possible
513 * in non-vrr mode, as only there vblank timestamping will give
514 * valid results while done in front-porch. Otherwise defer it
515 * to dm_vupdate_high_irq after end of front-porch.
516 */
517 if (!vrr_active)
518 drm_crtc_handle_vblank(&acrtc->base);
519
520 /**
521 * Following stuff must happen at start of vblank, for crc
522 * computation and below-the-range btr support in vrr mode.
523 */
524 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
525
526 /* BTR updates need to happen before VUPDATE on Vega and above. */
527 if (adev->family < AMDGPU_FAMILY_AI)
528 return;
529
530 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
531
532 if (acrtc->dm_irq_params.stream &&
533 acrtc->dm_irq_params.vrr_params.supported &&
534 acrtc->dm_irq_params.freesync_config.state ==
535 VRR_STATE_ACTIVE_VARIABLE) {
536 mod_freesync_handle_v_update(adev->dm.freesync_module,
537 acrtc->dm_irq_params.stream,
538 &acrtc->dm_irq_params.vrr_params);
539
540 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
541 &acrtc->dm_irq_params.vrr_params.adjust);
542 }
543
544 /*
545 * If there aren't any active_planes then DCH HUBP may be clock-gated.
546 * In that case, pageflip completion interrupts won't fire and pageflip
547 * completion events won't get delivered. Prevent this by sending
548 * pending pageflip events from here if a flip is still pending.
549 *
550 * If any planes are enabled, use dm_pflip_high_irq() instead, to
551 * avoid race conditions between flip programming and completion,
552 * which could cause too early flip completion events.
553 */
554 if (adev->family >= AMDGPU_FAMILY_RV &&
555 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
556 acrtc->dm_irq_params.active_planes == 0) {
557 if (acrtc->event) {
558 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
559 acrtc->event = NULL;
560 drm_crtc_vblank_put(&acrtc->base);
561 }
562 acrtc->pflip_status = AMDGPU_FLIP_NONE;
563 }
564
565 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
566}
567
568static int dm_set_clockgating_state(void *handle,
569 enum amd_clockgating_state state)
570{
571 return 0;
572}
573
574static int dm_set_powergating_state(void *handle,
575 enum amd_powergating_state state)
576{
577 return 0;
578}
579
580/* Prototypes of private functions */
581static int dm_early_init(void* handle);
582
583/* Allocate memory for FBC compressed data */
584static void amdgpu_dm_fbc_init(struct drm_connector *connector)
585{
586 struct drm_device *dev = connector->dev;
587 struct amdgpu_device *adev = drm_to_adev(dev);
588 struct dm_compressor_info *compressor = &adev->dm.compressor;
589 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
590 struct drm_display_mode *mode;
591 unsigned long max_size = 0;
592
593 if (adev->dm.dc->fbc_compressor == NULL)
594 return;
595
596 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
597 return;
598
599 if (compressor->bo_ptr)
600 return;
601
602
603 list_for_each_entry(mode, &connector->modes, head) {
604 if (max_size < mode->htotal * mode->vtotal)
605 max_size = mode->htotal * mode->vtotal;
606 }
607
608 if (max_size) {
609 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
610 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
611 &compressor->gpu_addr, &compressor->cpu_addr);
612
613 if (r)
614 DRM_ERROR("DM: Failed to initialize FBC\n");
615 else {
616 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
617 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
618 }
619
620 }
621
622}
623
624static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
625 int pipe, bool *enabled,
626 unsigned char *buf, int max_bytes)
627{
628 struct drm_device *dev = dev_get_drvdata(kdev);
629 struct amdgpu_device *adev = drm_to_adev(dev);
630 struct drm_connector *connector;
631 struct drm_connector_list_iter conn_iter;
632 struct amdgpu_dm_connector *aconnector;
633 int ret = 0;
634
635 *enabled = false;
636
637 mutex_lock(&adev->dm.audio_lock);
638
639 drm_connector_list_iter_begin(dev, &conn_iter);
640 drm_for_each_connector_iter(connector, &conn_iter) {
641 aconnector = to_amdgpu_dm_connector(connector);
642 if (aconnector->audio_inst != port)
643 continue;
644
645 *enabled = true;
646 ret = drm_eld_size(connector->eld);
647 memcpy(buf, connector->eld, min(max_bytes, ret));
648
649 break;
650 }
651 drm_connector_list_iter_end(&conn_iter);
652
653 mutex_unlock(&adev->dm.audio_lock);
654
655 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
656
657 return ret;
658}
659
660static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
661 .get_eld = amdgpu_dm_audio_component_get_eld,
662};
663
664static int amdgpu_dm_audio_component_bind(struct device *kdev,
665 struct device *hda_kdev, void *data)
666{
667 struct drm_device *dev = dev_get_drvdata(kdev);
668 struct amdgpu_device *adev = drm_to_adev(dev);
669 struct drm_audio_component *acomp = data;
670
671 acomp->ops = &amdgpu_dm_audio_component_ops;
672 acomp->dev = kdev;
673 adev->dm.audio_component = acomp;
674
675 return 0;
676}
677
678static void amdgpu_dm_audio_component_unbind(struct device *kdev,
679 struct device *hda_kdev, void *data)
680{
681 struct drm_device *dev = dev_get_drvdata(kdev);
682 struct amdgpu_device *adev = drm_to_adev(dev);
683 struct drm_audio_component *acomp = data;
684
685 acomp->ops = NULL;
686 acomp->dev = NULL;
687 adev->dm.audio_component = NULL;
688}
689
690#ifdef notyet
691static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
692 .bind = amdgpu_dm_audio_component_bind,
693 .unbind = amdgpu_dm_audio_component_unbind,
694};
695#endif
696
697static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
698{
699 int i, ret;
700
701 if (!amdgpu_audio)
702 return 0;
703
704 adev->mode_info.audio.enabled = true;
705
706 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
707
708 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
709 adev->mode_info.audio.pin[i].channels = -1;
710 adev->mode_info.audio.pin[i].rate = -1;
711 adev->mode_info.audio.pin[i].bits_per_sample = -1;
712 adev->mode_info.audio.pin[i].status_bits = 0;
713 adev->mode_info.audio.pin[i].category_code = 0;
714 adev->mode_info.audio.pin[i].connected = false;
715 adev->mode_info.audio.pin[i].id =
716 adev->dm.dc->res_pool->audios[i]->inst;
717 adev->mode_info.audio.pin[i].offset = 0;
718 }
719
720 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
721 if (ret < 0)
722 return ret;
723
724 adev->dm.audio_registered = true;
725
726 return 0;
727}
728
729static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
730{
731 if (!amdgpu_audio)
732 return;
733
734 if (!adev->mode_info.audio.enabled)
735 return;
736
737 if (adev->dm.audio_registered) {
738 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
739 adev->dm.audio_registered = false;
740 }
741
742 /* TODO: Disable audio? */
743
744 adev->mode_info.audio.enabled = false;
745}
746
747static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
748{
749 struct drm_audio_component *acomp = adev->dm.audio_component;
750
751 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
752 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
753
754 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
755 pin, -1);
756 }
757}
758
759static int dm_dmub_hw_init(struct amdgpu_device *adev)
760{
761 const struct dmcub_firmware_header_v1_0 *hdr;
762 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
763 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
764 const struct firmware *dmub_fw = adev->dm.dmub_fw;
765 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
766 struct abm *abm = adev->dm.dc->res_pool->abm;
767 struct dmub_srv_hw_params hw_params;
768 enum dmub_status status;
769 const unsigned char *fw_inst_const, *fw_bss_data;
770 uint32_t i, fw_inst_const_size, fw_bss_data_size;
771 bool has_hw_support;
772
773 if (!dmub_srv)
774 /* DMUB isn't supported on the ASIC. */
775 return 0;
776
777 if (!fb_info) {
778 DRM_ERROR("No framebuffer info for DMUB service.\n");
779 return -EINVAL;
780 }
781
782 if (!dmub_fw) {
783 /* Firmware required for DMUB support. */
784 DRM_ERROR("No firmware provided for DMUB.\n");
785 return -EINVAL;
786 }
787
788 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
789 if (status != DMUB_STATUS_OK) {
790 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
791 return -EINVAL;
792 }
793
794 if (!has_hw_support) {
795 DRM_INFO("DMUB unsupported on ASIC\n");
796 return 0;
797 }
798
799 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
800
801 fw_inst_const = dmub_fw->data +
802 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
803 PSP_HEADER_BYTES;
804
805 fw_bss_data = dmub_fw->data +
806 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
807 le32_to_cpu(hdr->inst_const_bytes);
808
809 /* Copy firmware and bios info into FB memory. */
810 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
811 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
812
813 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
814
815 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
816 * amdgpu_ucode_init_single_fw will load dmub firmware
817 * fw_inst_const part to cw0; otherwise, the firmware back door load
818 * will be done by dm_dmub_hw_init
819 */
820 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
821 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
822 fw_inst_const_size);
823 }
824
825 if (fw_bss_data_size)
826 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
827 fw_bss_data, fw_bss_data_size);
828
829 /* Copy firmware bios info into FB memory. */
830 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
831 adev->bios_size);
832
833 /* Reset regions that need to be reset. */
834 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
835 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
836
837 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
838 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
839
840 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
841 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
842
843 /* Initialize hardware. */
844 memset(&hw_params, 0, sizeof(hw_params));
845 hw_params.fb_base = adev->gmc.fb_start;
846 hw_params.fb_offset = adev->gmc.aper_base;
847
848 /* backdoor load firmware and trigger dmub running */
849 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
850 hw_params.load_inst_const = true;
851
852 if (dmcu)
853 hw_params.psp_version = dmcu->psp_version;
854
855 for (i = 0; i < fb_info->num_fb; ++i)
856 hw_params.fb[i] = &fb_info->fb[i];
857
858 status = dmub_srv_hw_init(dmub_srv, &hw_params);
859 if (status != DMUB_STATUS_OK) {
860 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
861 return -EINVAL;
862 }
863
864 /* Wait for firmware load to finish. */
865 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
866 if (status != DMUB_STATUS_OK)
867 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
868
869 /* Init DMCU and ABM if available. */
870 if (dmcu && abm) {
871 dmcu->funcs->dmcu_init(dmcu);
872 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
873 }
874
875 if (!adev->dm.dc->ctx->dmub_srv)
876 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
877 if (!adev->dm.dc->ctx->dmub_srv) {
878 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
879 return -ENOMEM;
880 }
881
882 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
883 adev->dm.dmcub_fw_version);
884
885 return 0;
886}
887
888static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
889 struct drm_atomic_state *state)
890{
891 struct drm_connector *connector;
892 struct drm_crtc *crtc;
893 struct amdgpu_dm_connector *amdgpu_dm_connector;
894 struct drm_connector_state *conn_state;
895 struct dm_crtc_state *acrtc_state;
896 struct drm_crtc_state *crtc_state;
897 struct dc_stream_state *stream;
898 struct drm_device *dev = adev_to_drm(adev);
899
900 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
901
902 amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
903 conn_state = connector->state;
904
905 if (!(conn_state && conn_state->crtc))
906 continue;
907
908 crtc = conn_state->crtc;
909 acrtc_state = to_dm_crtc_state(crtc->state);
910
911 if (!(acrtc_state && acrtc_state->stream))
912 continue;
913
914 stream = acrtc_state->stream;
915
916 if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
917 amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
918 amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
919 amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
920 conn_state = drm_atomic_get_connector_state(state, connector);
921 crtc_state = drm_atomic_get_crtc_state(state, crtc);
922 crtc_state->mode_changed = true;
923 }
924 }
925}
926
927static int amdgpu_dm_init(struct amdgpu_device *adev)
928{
929 struct dc_init_data init_data;
930#ifdef CONFIG_DRM_AMD_DC_HDCP
931 struct dc_callback_init init_params;
932#endif
933 int r;
934
935 adev->dm.ddev = adev_to_drm(adev);
936 adev->dm.adev = adev;
937
938 /* Zero all the fields */
939 memset(&init_data, 0, sizeof(init_data));
940#ifdef CONFIG_DRM_AMD_DC_HDCP
941 memset(&init_params, 0, sizeof(init_params));
942#endif
943
944 rw_init(&adev->dm.dc_lock, "dmdc");
945 rw_init(&adev->dm.audio_lock, "dmaud");
946
947 if(amdgpu_dm_irq_init(adev)) {
948 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
949 goto error;
950 }
951
952 init_data.asic_id.chip_family = adev->family;
953
954 init_data.asic_id.pci_revision_id = adev->pdev->revision;
955 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
956 init_data.asic_id.chip_id = adev->pdev->device;
957
958 init_data.asic_id.vram_width = adev->gmc.vram_width;
959 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
960 init_data.asic_id.atombios_base_address =
961 adev->mode_info.atom_context->bios;
962
963 init_data.driver = adev;
964
965 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
966
967 if (!adev->dm.cgs_device) {
968 DRM_ERROR("amdgpu: failed to create cgs device.\n");
969 goto error;
970 }
971
972 init_data.cgs_device = adev->dm.cgs_device;
973
974 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
975
976 switch (adev->asic_type) {
977 case CHIP_CARRIZO:
978 case CHIP_STONEY:
979 case CHIP_RAVEN:
980 case CHIP_RENOIR:
981 init_data.flags.gpu_vm_support = true;
982 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
983 init_data.flags.disable_dmcu = true;
984 break;
985 default:
986 break;
987 }
988
989 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
990 init_data.flags.fbc_support = true;
991
992 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
993 init_data.flags.multi_mon_pp_mclk_switch = true;
994
995 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
996 init_data.flags.disable_fractional_pwm = true;
997
998 init_data.flags.power_down_display_on_boot = true;
999
1000 init_data.soc_bounding_box = adev->dm.soc_bounding_box;
1001
1002 /* Display Core create. */
1003 adev->dm.dc = dc_create(&init_data);
1004
1005 if (adev->dm.dc) {
1006 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1007 } else {
1008 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1009 goto error;
1010 }
1011
1012 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1013 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1014 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1015 }
1016
1017 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1018 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1019
1020 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1021 adev->dm.dc->debug.disable_stutter = true;
1022
1023 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1024 adev->dm.dc->debug.disable_dsc = true;
1025
1026 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1027 adev->dm.dc->debug.disable_clock_gate = true;
1028
1029 r = dm_dmub_hw_init(adev);
1030 if (r) {
1031 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1032 goto error;
1033 }
1034
1035 dc_hardware_init(adev->dm.dc);
1036
1037 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1038 if (!adev->dm.freesync_module) {
1039 DRM_ERROR(
1040 "amdgpu: failed to initialize freesync_module.\n");
1041 } else
1042 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1043 adev->dm.freesync_module);
1044
1045 amdgpu_dm_init_color_mod();
1046
1047#ifdef CONFIG_DRM_AMD_DC_HDCP
1048 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1049 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1050
1051 if (!adev->dm.hdcp_workqueue)
1052 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1053 else
1054 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1055
1056 dc_init_callbacks(adev->dm.dc, &init_params);
1057 }
1058#endif
1059 if (amdgpu_dm_initialize_drm_device(adev)) {
1060 DRM_ERROR(
1061 "amdgpu: failed to initialize sw for display support.\n");
1062 goto error;
1063 }
1064
1065 /* create fake encoders for MST */
1066 dm_dp_create_fake_mst_encoders(adev);
1067
1068 /* TODO: Add_display_info? */
1069
1070 /* TODO use dynamic cursor width */
1071 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1072 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1073
1074 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1075 DRM_ERROR(
1076 "amdgpu: failed to initialize sw for display support.\n");
1077 goto error;
1078 }
1079
1080 DRM_DEBUG_DRIVER("KMS initialized.\n");
1081
1082 return 0;
1083error:
1084 amdgpu_dm_fini(adev);
1085
1086 return -EINVAL;
1087}
1088
1089static void amdgpu_dm_fini(struct amdgpu_device *adev)
1090{
1091 int i;
1092
1093 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1094 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1095 }
1096
1097 amdgpu_dm_audio_fini(adev);
1098
1099 amdgpu_dm_destroy_drm_device(&adev->dm);
1100
1101#ifdef CONFIG_DRM_AMD_DC_HDCP
1102 if (adev->dm.hdcp_workqueue) {
1103 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1104 adev->dm.hdcp_workqueue = NULL;
1105 }
1106
1107 if (adev->dm.dc)
1108 dc_deinit_callbacks(adev->dm.dc);
1109#endif
1110 if (adev->dm.dc->ctx->dmub_srv) {
1111 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1112 adev->dm.dc->ctx->dmub_srv = NULL;
1113 }
1114
1115 if (adev->dm.dmub_bo)
1116 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1117 &adev->dm.dmub_bo_gpu_addr,
1118 &adev->dm.dmub_bo_cpu_addr);
1119
1120 /* DC Destroy TODO: Replace destroy DAL */
1121 if (adev->dm.dc)
1122 dc_destroy(&adev->dm.dc);
1123 /*
1124 * TODO: pageflip, vblank interrupt
1125 *
1126 * amdgpu_dm_irq_fini(adev);
1127 */
1128
1129 if (adev->dm.cgs_device) {
1130 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1131 adev->dm.cgs_device = NULL;
1132 }
1133 if (adev->dm.freesync_module) {
1134 mod_freesync_destroy(adev->dm.freesync_module);
1135 adev->dm.freesync_module = NULL;
1136 }
1137
1138 mutex_destroy(&adev->dm.audio_lock);
1139 mutex_destroy(&adev->dm.dc_lock);
1140
1141 return;
1142}
1143
1144static int load_dmcu_fw(struct amdgpu_device *adev)
1145{
1146 const char *fw_name_dmcu = NULL;
1147 int r;
1148 const struct dmcu_firmware_header_v1_0 *hdr;
1149
1150 switch(adev->asic_type) {
1151#if defined(CONFIG_DRM_AMD_DC_SI)
1152 case CHIP_TAHITI:
1153 case CHIP_PITCAIRN:
1154 case CHIP_VERDE:
1155 case CHIP_OLAND:
1156#endif
1157 case CHIP_BONAIRE:
1158 case CHIP_HAWAII:
1159 case CHIP_KAVERI:
1160 case CHIP_KABINI:
1161 case CHIP_MULLINS:
1162 case CHIP_TONGA:
1163 case CHIP_FIJI:
1164 case CHIP_CARRIZO:
1165 case CHIP_STONEY:
1166 case CHIP_POLARIS11:
1167 case CHIP_POLARIS10:
1168 case CHIP_POLARIS12:
1169 case CHIP_VEGAM:
1170 case CHIP_VEGA10:
1171 case CHIP_VEGA12:
1172 case CHIP_VEGA20:
1173 case CHIP_NAVI10:
1174 case CHIP_NAVI14:
1175 case CHIP_RENOIR:
1176#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
1177 case CHIP_SIENNA_CICHLID:
1178 case CHIP_NAVY_FLOUNDER:
1179#endif
1180 return 0;
1181 case CHIP_NAVI12:
1182 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1183 break;
1184 case CHIP_RAVEN:
1185 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1186 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1187 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1188 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1189 else
1190 return 0;
1191 break;
1192 default:
1193 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1194 return -EINVAL;
1195 }
1196
1197 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1198 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1199 return 0;
1200 }
1201
1202 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1203 if (r == -ENOENT) {
1204 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1205 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1206 adev->dm.fw_dmcu = NULL;
1207 return 0;
1208 }
1209 if (r) {
1210 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1211 fw_name_dmcu);
1212 return r;
1213 }
1214
1215 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1216 if (r) {
1217 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1218 fw_name_dmcu);
1219 release_firmware(adev->dm.fw_dmcu);
1220 adev->dm.fw_dmcu = NULL;
1221 return r;
1222 }
1223
1224 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1225 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1226 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1227 adev->firmware.fw_size +=
1228 roundup2(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1229
1230 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1231 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1232 adev->firmware.fw_size +=
1233 roundup2(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1234
1235 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1236
1237 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1238
1239 return 0;
1240}
1241
1242static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1243{
1244 struct amdgpu_device *adev = ctx;
1245
1246 return dm_read_reg(adev->dm.dc->ctx, address);
1247}
1248
1249static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1250 uint32_t value)
1251{
1252 struct amdgpu_device *adev = ctx;
1253
1254 return dm_write_reg(adev->dm.dc->ctx, address, value);
1255}
1256
1257static int dm_dmub_sw_init(struct amdgpu_device *adev)
1258{
1259 struct dmub_srv_create_params create_params;
1260 struct dmub_srv_region_params region_params;
1261 struct dmub_srv_region_info region_info;
1262 struct dmub_srv_fb_params fb_params;
1263 struct dmub_srv_fb_info *fb_info;
1264 struct dmub_srv *dmub_srv;
1265 const struct dmcub_firmware_header_v1_0 *hdr;
1266 const char *fw_name_dmub;
1267 enum dmub_asic dmub_asic;
1268 enum dmub_status status;
1269 int r;
1270
1271 switch (adev->asic_type) {
1272 case CHIP_RENOIR:
1273 dmub_asic = DMUB_ASIC_DCN21;
1274 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1275 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1276 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1277 break;
1278#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
1279 case CHIP_SIENNA_CICHLID:
1280 dmub_asic = DMUB_ASIC_DCN30;
1281 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1282 break;
1283 case CHIP_NAVY_FLOUNDER:
1284 dmub_asic = DMUB_ASIC_DCN30;
1285 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1286 break;
1287#endif
1288
1289 default:
1290 /* ASIC doesn't support DMUB. */
1291 return 0;
1292 }
1293
1294 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1295 if (r) {
1296 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1297 return 0;
1298 }
1299
1300 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1301 if (r) {
1302 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1303 return 0;
1304 }
1305
1306 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1307 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version)((__uint32_t)(hdr->header.ucode_version));
1308
1309 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1310 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1311 AMDGPU_UCODE_ID_DMCUB;
1312 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1313 adev->dm.dmub_fw;
1314 adev->firmware.fw_size +=
1315 roundup2(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE)(((((__uint32_t)(hdr->inst_const_bytes))) + (((1 << 12
)) - 1)) & (~((__typeof(((__uint32_t)(hdr->inst_const_bytes
))))((1 << 12)) - 1)))
;
1316
1317 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",printk("\0016" "[" "drm" "] " "Loading DMUB firmware via PSP: version=0x%08X\n"
, adev->dm.dmcub_fw_version)
1318 adev->dm.dmcub_fw_version)printk("\0016" "[" "drm" "] " "Loading DMUB firmware via PSP: version=0x%08X\n"
, adev->dm.dmcub_fw_version)
;
1319 }
1320
1321
1322 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL(0x0001 | 0x0004));
1323 dmub_srv = adev->dm.dmub_srv;
1324
1325 if (!dmub_srv) {
1326 DRM_ERROR("Failed to allocate DMUB service!\n")__drm_err("Failed to allocate DMUB service!\n");
1327 return -ENOMEM12;
1328 }
1329
1330 memset(&create_params, 0, sizeof(create_params))__builtin_memset((&create_params), (0), (sizeof(create_params
)))
;
1331 create_params.user_ctx = adev;
1332 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1333 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1334 create_params.asic = dmub_asic;
1335
1336 /* Create the DMUB service. */
1337 status = dmub_srv_create(dmub_srv, &create_params);
1338 if (status != DMUB_STATUS_OK) {
1339 DRM_ERROR("Error creating DMUB service: %d\n", status)__drm_err("Error creating DMUB service: %d\n", status);
1340 return -EINVAL22;
1341 }
1342
1343 /* Calculate the size of all the regions for the DMUB service. */
1344 memset(&region_params, 0, sizeof(region_params))__builtin_memset((&region_params), (0), (sizeof(region_params
)))
;
1345
1346 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes)((__uint32_t)(hdr->inst_const_bytes)) -
1347 PSP_HEADER_BYTES0x100 - PSP_FOOTER_BYTES0x100;
1348 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes)((__uint32_t)(hdr->bss_data_bytes));
1349 region_params.vbios_size = adev->bios_size;
1350 region_params.fw_bss_data = region_params.bss_data_size ?
1351 adev->dm.dmub_fw->data +
1352 le32_to_cpu(hdr->header.ucode_array_offset_bytes)((__uint32_t)(hdr->header.ucode_array_offset_bytes)) +
1353 le32_to_cpu(hdr->inst_const_bytes)((__uint32_t)(hdr->inst_const_bytes)) : NULL((void *)0);
1354 region_params.fw_inst_const =
1355 adev->dm.dmub_fw->data +
1356 le32_to_cpu(hdr->header.ucode_array_offset_bytes)((__uint32_t)(hdr->header.ucode_array_offset_bytes)) +
1357 PSP_HEADER_BYTES0x100;
1358
1359 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1360 &region_info);
1361
1362 if (status != DMUB_STATUS_OK) {
1363 DRM_ERROR("Error calculating DMUB region info: %d\n", status)__drm_err("Error calculating DMUB region info: %d\n", status);
1364 return -EINVAL22;
1365 }
1366
1367 /*
1368 * Allocate a framebuffer based on the total size of all the regions.
1369 * TODO: Move this into GART.
1370 */
1371 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE(1 << 12),
1372 AMDGPU_GEM_DOMAIN_VRAM0x4, &adev->dm.dmub_bo,
1373 &adev->dm.dmub_bo_gpu_addr,
1374 &adev->dm.dmub_bo_cpu_addr);
1375 if (r)
1376 return r;
1377
1378 /* Rebase the regions on the framebuffer address. */
1379 memset(&fb_params, 0, sizeof(fb_params))__builtin_memset((&fb_params), (0), (sizeof(fb_params)));
1380 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1381 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1382 fb_params.region_info = &region_info;
1383
1384 adev->dm.dmub_fb_info =
1385 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL(0x0001 | 0x0004));
1386 fb_info = adev->dm.dmub_fb_info;
1387
1388 if (!fb_info) {
1389 DRM_ERROR(__drm_err("Failed to allocate framebuffer info for DMUB service!\n"
)
1390 "Failed to allocate framebuffer info for DMUB service!\n")__drm_err("Failed to allocate framebuffer info for DMUB service!\n"
)
;
1391 return -ENOMEM12;
1392 }
1393
1394 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1395 if (status != DMUB_STATUS_OK) {
1396 DRM_ERROR("Error calculating DMUB FB info: %d\n", status)__drm_err("Error calculating DMUB FB info: %d\n", status);
1397 return -EINVAL22;
1398 }
1399
1400 return 0;
1401}
1402
1403static int dm_sw_init(void *handle)
1404{
1405 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1406 int r;
1407
1408 r = dm_dmub_sw_init(adev);
1409 if (r)
1410 return r;
1411
1412 return load_dmcu_fw(adev);
1413}
1414
1415static int dm_sw_fini(void *handle)
1416{
1417 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1418
1419 kfree(adev->dm.dmub_fb_info);
1420 adev->dm.dmub_fb_info = NULL((void *)0);
1421
1422 if (adev->dm.dmub_srv) {
1423 dmub_srv_destroy(adev->dm.dmub_srv);
1424 adev->dm.dmub_srv = NULL((void *)0);
1425 }
1426
1427 release_firmware(adev->dm.dmub_fw);
1428 adev->dm.dmub_fw = NULL((void *)0);
1429
1430 release_firmware(adev->dm.fw_dmcu);
1431 adev->dm.fw_dmcu = NULL((void *)0);
1432
1433 return 0;
1434}
1435
1436static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1437{
1438 struct amdgpu_dm_connector *aconnector;
1439 struct drm_connector *connector;
1440 struct drm_connector_list_iter iter;
1441 int ret = 0;
1442
1443 drm_connector_list_iter_begin(dev, &iter);
1444 drm_for_each_connector_iter(connector, &iter)while ((connector = drm_connector_list_iter_next(&iter))) {
1445 aconnector = to_amdgpu_dm_connector(connector)({ const __typeof( ((struct amdgpu_dm_connector *)0)->base
) *__mptr = (connector); (struct amdgpu_dm_connector *)( (char
*)__mptr - __builtin_offsetof(struct amdgpu_dm_connector, base
) );})
;
1446 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1447 aconnector->mst_mgr.aux) {
1448 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",__drm_dbg(DRM_UT_DRIVER, "DM_MST: starting TM on aconnector: %p [id: %d]\n"
, aconnector, aconnector->base.base.id)
1449 aconnector,__drm_dbg(DRM_UT_DRIVER, "DM_MST: starting TM on aconnector: %p [id: %d]\n"
, aconnector, aconnector->base.base.id)
1450 aconnector->base.base.id)__drm_dbg(DRM_UT_DRIVER, "DM_MST: starting TM on aconnector: %p [id: %d]\n"
, aconnector, aconnector->base.base.id)
;
1451
1452 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true1);
1453 if (ret < 0) {
1454 DRM_ERROR("DM_MST: Failed to start MST\n")__drm_err("DM_MST: Failed to start MST\n");
1455 aconnector->dc_link->type =
1456 dc_connection_single;
1457 break;
1458 }
1459 }
1460 }
1461 drm_connector_list_iter_end(&iter);
1462
1463 return ret;
1464}
1465
1466static int dm_late_init(void *handle)
1467{
1468 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1469
1470 struct dmcu_iram_parameters params;
1471 unsigned int linear_lut[16];
1472 int i;
1473 struct dmcu *dmcu = NULL((void *)0);
1474 bool_Bool ret = true1;
1475
1476 dmcu = adev->dm.dc->res_pool->dmcu;
1477
1478 for (i = 0; i < 16; i++)
1479 linear_lut[i] = 0xFFFF * i / 15;
1480
1481 params.set = 0;
1482 params.backlight_ramping_start = 0xCCCC;
1483 params.backlight_ramping_reduction = 0xCCCCCCCC;
1484 params.backlight_lut_array_size = 16;
1485 params.backlight_lut_array = linear_lut;
1486
1487 /* Min backlight level after ABM reduction; don't allow below 1%:
1488 * 0xFFFF x 0.01 = 0x28F
1489 */
1490 params.min_abm_backlight = 0x28F;
1491
1492 /* In the case where ABM is implemented on DMCUB,
1493 * the dmcu object will be NULL.
1494 * ABM 2.4 and up are implemented on DMCUB.
1495 */
1496 if (dmcu)
1497 ret = dmcu_load_iram(dmcu, params);
1498 else if (adev->dm.dc->ctx->dmub_srv)
1499 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1500
1501 if (!ret)
1502 return -EINVAL22;
1503
1504 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1505}
1506
1507static void s3_handle_mst(struct drm_device *dev, bool_Bool suspend)
1508{
1509 struct amdgpu_dm_connector *aconnector;
1510 struct drm_connector *connector;
1511 struct drm_connector_list_iter iter;
1512 struct drm_dp_mst_topology_mgr *mgr;
1513 int ret;
1514 bool_Bool need_hotplug = false0;
1515
1516 drm_connector_list_iter_begin(dev, &iter);
1517 drm_for_each_connector_iter(connector, &iter)while ((connector = drm_connector_list_iter_next(&iter))) {
1518 aconnector = to_amdgpu_dm_connector(connector)({ const __typeof( ((struct amdgpu_dm_connector *)0)->base
) *__mptr = (connector); (struct amdgpu_dm_connector *)( (char
*)__mptr - __builtin_offsetof(struct amdgpu_dm_connector, base
) );})
;
1519 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1520 aconnector->mst_port)
1521 continue;
1522
1523 mgr = &aconnector->mst_mgr;
1524
1525 if (suspend) {
1526 drm_dp_mst_topology_mgr_suspend(mgr);
1527 } else {
1528 ret = drm_dp_mst_topology_mgr_resume(mgr, true1);
1529 if (ret < 0) {
1530 drm_dp_mst_topology_mgr_set_mst(mgr, false0);
1531 need_hotplug = true1;
1532 }
1533 }
1534 }
1535 drm_connector_list_iter_end(&iter);
1536
1537 if (need_hotplug)
1538 drm_kms_helper_hotplug_event(dev);
1539}
1540
1541static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1542{
1543 struct smu_context *smu = &adev->smu;
1544 int ret = 0;
1545
1546 if (!is_support_sw_smu(adev))
1547 return 0;
1548
1549 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1550 * on the Windows driver dc implementation.
1551 * For Navi1x, the clock settings of the dcn watermarks are fixed. The settings
1552 * should be passed to smu during boot up and on resume from S3.
1553 * Boot up: dc calculates the dcn watermark clock settings within dc_create,
1554 * dcn20_resource_construct
1555 * then calls the pplib functions below to pass the settings to smu:
1556 * smu_set_watermarks_for_clock_ranges
1557 * smu_set_watermarks_table
1558 * navi10_set_watermarks_table
1559 * smu_write_watermarks_table
1560 *
1561 * For Renoir, the clock settings of the dcn watermarks are also fixed values.
1562 * dc has implemented a different flow for the Windows driver:
1563 * dc_hardware_init / dc_set_power_state
1564 * dcn10_init_hw
1565 * notify_wm_ranges
1566 * set_wm_ranges
1567 * -- Linux
1568 * smu_set_watermarks_for_clock_ranges
1569 * renoir_set_watermarks_table
1570 * smu_write_watermarks_table
1571 *
1572 * For Linux,
1573 * dc_hardware_init -> amdgpu_dm_init
1574 * dc_set_power_state --> dm_resume
1575 *
1576 * Therefore, this function applies to navi10/12/14 but not Renoir.
1578 */
1579 switch(adev->asic_type) {
1580 case CHIP_NAVI10:
1581 case CHIP_NAVI14:
1582 case CHIP_NAVI12:
1583 break;
1584 default:
1585 return 0;
1586 }
1587
1588 ret = smu_write_watermarks_table(smu);
1589 if (ret) {
1590 DRM_ERROR("Failed to update WMTABLE!\n")__drm_err("Failed to update WMTABLE!\n");
1591 return ret;
1592 }
1593
1594 return 0;
1595}
1596
1597/**
1598 * dm_hw_init() - Initialize DC device
1599 * @handle: The base driver device containing the amdgpu_dm device.
1600 *
1601 * Initialize the &struct amdgpu_display_manager device. This involves calling
1602 * the initializers of each DM component, then populating the struct with them.
1603 *
1604 * Although the function implies hardware initialization, both hardware and
1605 * software are initialized here. Splitting them out to their relevant init
1606 * hooks is a future TODO item.
1607 *
1608 * Some notable things that are initialized here:
1609 *
1610 * - Display Core, both software and hardware
1611 * - DC modules that we need (freesync and color management)
1612 * - DRM software states
1613 * - Interrupt sources and handlers
1614 * - Vblank support
1615 * - Debug FS entries, if enabled
1616 */
1617static int dm_hw_init(void *handle)
1618{
1619 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1620 /* Create DAL display manager */
1621 amdgpu_dm_init(adev);
1622 amdgpu_dm_hpd_init(adev);
1623
1624 return 0;
1625}
1626
1627/**
1628 * dm_hw_fini() - Teardown DC device
1629 * @handle: The base driver device containing the amdgpu_dm device.
1630 *
1631 * Teardown components within &struct amdgpu_display_manager that require
1632 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1633 * were loaded. Also flush IRQ workqueues and disable them.
1634 */
1635static int dm_hw_fini(void *handle)
1636{
1637 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1638
1639 amdgpu_dm_hpd_fini(adev);
1640
1641 amdgpu_dm_irq_fini(adev);
1642 amdgpu_dm_fini(adev);
1643 return 0;
1644}
1645
1646
1647static int dm_enable_vblank(struct drm_crtc *crtc);
1648static void dm_disable_vblank(struct drm_crtc *crtc);
1649
1650static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1651 struct dc_state *state, bool_Bool enable)
1652{
1653 enum dc_irq_source irq_source;
1654 struct amdgpu_crtc *acrtc;
1655 int rc = -EBUSY16;
1656 int i = 0;
1657
1658 for (i = 0; i < state->stream_count; i++) {
1659 acrtc = get_crtc_by_otg_inst(
1660 adev, state->stream_status[i].primary_otg_inst);
1661
1662 if (acrtc && state->stream_status[i].plane_count != 0) {
1663 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1664 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY16;
1665 DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",__drm_dbg(DRM_UT_CORE, "crtc %d - vupdate irq %sabling: r=%d\n"
, acrtc->crtc_id, enable ? "en" : "dis", rc)
1666 acrtc->crtc_id, enable ? "en" : "dis", rc)__drm_dbg(DRM_UT_CORE, "crtc %d - vupdate irq %sabling: r=%d\n"
, acrtc->crtc_id, enable ? "en" : "dis", rc)
;
1667 if (rc)
1668 DRM_WARN("Failed to %s pflip interrupts\n",printk("\0014" "[" "drm" "] " "Failed to %s pflip interrupts\n"
, enable ? "enable" : "disable")
1669 enable ? "enable" : "disable")printk("\0014" "[" "drm" "] " "Failed to %s pflip interrupts\n"
, enable ? "enable" : "disable")
;
1670
1671 if (enable) {
1672 rc = dm_enable_vblank(&acrtc->base);
1673 if (rc)
1674 DRM_WARN("Failed to enable vblank interrupts\n")printk("\0014" "[" "drm" "] " "Failed to enable vblank interrupts\n"
)
;
1675 } else {
1676 dm_disable_vblank(&acrtc->base);
1677 }
1678
1679 }
1680 }
1681
1682}
1683
1684static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1685{
1686 struct dc_state *context = NULL((void *)0);
1687 enum dc_status res = DC_ERROR_UNEXPECTED;
1688 int i;
1689 struct dc_stream_state *del_streams[MAX_PIPES6];
1690 int del_streams_count = 0;
1691
1692 memset(del_streams, 0, sizeof(del_streams))__builtin_memset((del_streams), (0), (sizeof(del_streams)));
1693
1694 context = dc_create_state(dc);
1695 if (context == NULL((void *)0))
1696 goto context_alloc_fail;
1697
1698 dc_resource_state_copy_construct_current(dc, context);
1699
1700 /* First remove all streams from the context */
1701 for (i = 0; i < context->stream_count; i++) {
1702 struct dc_stream_state *stream = context->streams[i];
1703
1704 del_streams[del_streams_count++] = stream;
1705 }
1706
1707 /* Remove all planes for removed streams and then remove the streams */
1708 for (i = 0; i < del_streams_count; i++) {
1709 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1710 res = DC_FAIL_DETACH_SURFACES;
1711 goto fail;
1712 }
1713
1714 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1715 if (res != DC_OK)
1716 goto fail;
1717 }
1718
1719
1720 res = dc_validate_global_state(dc, context, false0);
1721
1722 if (res != DC_OK) {
1723 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res)__drm_err("%s:resource validation failed, dc_status:%d\n", __func__
, res)
;
1724 goto fail;
1725 }
1726
1727 res = dc_commit_state(dc, context);
1728
1729fail:
1730 dc_release_state(context);
1731
1732context_alloc_fail:
1733 return res;
1734}
1735
1736static int dm_suspend(void *handle)
1737{
1738 struct amdgpu_device *adev = handle;
1739 struct amdgpu_display_manager *dm = &adev->dm;
1740 int ret = 0;
1741
1742 if (amdgpu_in_reset(adev)) {
1743 mutex_lock(&dm->dc_lock)rw_enter_write(&dm->dc_lock);
1744 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1745
1746 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false0);
1747
1748 amdgpu_dm_commit_zero_streams(dm->dc);
1749
1750 amdgpu_dm_irq_suspend(adev);
1751
1752 return ret;
1753 }
1754
1755 WARN_ON(adev->dm.cached_state)({ int __ret = !!(adev->dm.cached_state); if (__ret) printf
("WARNING %s failed at %s:%d\n", "adev->dm.cached_state", "/usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c"
, 1755); __builtin_expect(!!(__ret), 0); })
;
1756 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1757
1758 s3_handle_mst(adev_to_drm(adev), true1);
1759
1760 amdgpu_dm_irq_suspend(adev);
1761
1762 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1763
1764 return 0;
1765}
1766
1767static struct amdgpu_dm_connector *
1768amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1769 struct drm_crtc *crtc)
1770{
1771 uint32_t i;
1772 struct drm_connector_state *new_con_state;
1773 struct drm_connector *connector;
1774 struct drm_crtc *crtc_from_state;
1775
1776 for_each_new_connector_in_state(state, connector, new_con_state, i)for ((i) = 0; (i) < (state)->num_connector; (i)++) if (
!((state)->connectors[i].ptr && ((connector) = (state
)->connectors[i].ptr, (void)(connector) , (new_con_state) =
(state)->connectors[i].new_state, (void)(new_con_state) ,
1))) {} else
{
1777 crtc_from_state = new_con_state->crtc;
1778
1779 if (crtc_from_state == crtc)
1780 return to_amdgpu_dm_connector(connector)({ const __typeof( ((struct amdgpu_dm_connector *)0)->base
) *__mptr = (connector); (struct amdgpu_dm_connector *)( (char
*)__mptr - __builtin_offsetof(struct amdgpu_dm_connector, base
) );})
;
1781 }
1782
1783 return NULL((void *)0);
1784}
1785
1786static void emulated_link_detect(struct dc_link *link)
1787{
1788 struct dc_sink_init_data sink_init_data = { 0 };
1789 struct display_sink_capability sink_caps = { 0 };
1790 enum dc_edid_status edid_status;
1791 struct dc_context *dc_ctx = link->ctx;
1792 struct dc_sink *sink = NULL((void *)0);
1793 struct dc_sink *prev_sink = NULL((void *)0);
1794
1795 link->type = dc_connection_none;
1796 prev_sink = link->local_sink;
1797
1798 if (prev_sink)
1799 dc_sink_release(prev_sink);
1800
1801 switch (link->connector_signal) {
1802 case SIGNAL_TYPE_HDMI_TYPE_A: {
1803 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1804 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1805 break;
1806 }
1807
1808 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1809 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1810 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1811 break;
1812 }
1813
1814 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1815 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1816 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1817 break;
1818 }
1819
1820 case SIGNAL_TYPE_LVDS: {
1821 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1822 sink_caps.signal = SIGNAL_TYPE_LVDS;
1823 break;
1824 }
1825
1826 case SIGNAL_TYPE_EDP: {
1827 sink_caps.transaction_type =
1828 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1829 sink_caps.signal = SIGNAL_TYPE_EDP;
1830 break;
1831 }
1832
1833 case SIGNAL_TYPE_DISPLAY_PORT: {
1834 sink_caps.transaction_type =
1835 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1836 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1837 break;
1838 }
1839
1840 default:
1841 DC_ERROR("Invalid connector type! signal:%d\n",do { (void)(dc_ctx); __drm_err("Invalid connector type! signal:%d\n"
, link->connector_signal); } while (0)
1842 link->connector_signal)do { (void)(dc_ctx); __drm_err("Invalid connector type! signal:%d\n"
, link->connector_signal); } while (0)
;
1843 return;
1844 }
1845
1846 sink_init_data.link = link;
1847 sink_init_data.sink_signal = sink_caps.signal;
1848
1849 sink = dc_sink_create(&sink_init_data);
1850 if (!sink) {
1851 DC_ERROR("Failed to create sink!\n")do { (void)(dc_ctx); __drm_err("Failed to create sink!\n"); }
while (0)
;
1852 return;
1853 }
1854
1855 /* dc_sink_create returns a new reference */
1856 link->local_sink = sink;
1857
1858 edid_status = dm_helpers_read_local_edid(
1859 link->ctx,
1860 link,
1861 sink);
1862
1863 if (edid_status != EDID_OK)
1864 DC_ERROR("Failed to read EDID")do { (void)(dc_ctx); __drm_err("Failed to read EDID"); } while
(0)
;
1865
1866}
1867
1868static void dm_gpureset_commit_state(struct dc_state *dc_state,
1869 struct amdgpu_display_manager *dm)
1870{
1871 struct {
1872 struct dc_surface_update surface_updates[MAX_SURFACES3];
1873 struct dc_plane_info plane_infos[MAX_SURFACES3];
1874 struct dc_scaling_info scaling_infos[MAX_SURFACES3];
1875 struct dc_flip_addrs flip_addrs[MAX_SURFACES3];
1876 struct dc_stream_update stream_update;
1877 } * bundle;
1878 int k, m;
1879
1880 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL(0x0001 | 0x0004));
1881
1882 if (!bundle) {
1883 dm_error("Failed to allocate update bundle\n")__drm_err("Failed to allocate update bundle\n");
1884 goto cleanup;
1885 }
1886
1887 for (k = 0; k < dc_state->stream_count; k++) {
1888 bundle->stream_update.stream = dc_state->streams[k];
1889
1890 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1891 bundle->surface_updates[m].surface =
1892 dc_state->stream_status->plane_states[m];
1893 bundle->surface_updates[m].surface->force_full_update =
1894 true1;
1895 }
1896 dc_commit_updates_for_stream(
1897 dm->dc, bundle->surface_updates,
1898 dc_state->stream_status->plane_count,
1899 dc_state->streams[k], &bundle->stream_update, dc_state);
1900 }
1901
1902cleanup:
1903 kfree(bundle);
1904
1905 return;
1906}
1907
1908static void dm_set_dpms_off(struct dc_link *link)
1909{
1910 struct dc_stream_state *stream_state;
1911 struct amdgpu_dm_connector *aconnector = link->priv;
1912 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1913 struct dc_stream_update stream_update;
1914 bool_Bool dpms_off = true1;
1915
1916 memset(&stream_update, 0, sizeof(stream_update))__builtin_memset((&stream_update), (0), (sizeof(stream_update
)))
;
1917 stream_update.dpms_off = &dpms_off;
1918
1919 mutex_lock(&adev->dm.dc_lock)rw_enter_write(&adev->dm.dc_lock);
1920 stream_state = dc_stream_find_from_link(link);
1921
1922 if (stream_state == NULL((void *)0)) {
1923 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n")__drm_dbg(DRM_UT_DRIVER, "Error finding stream state associated with link!\n"
)
;
1924 mutex_unlock(&adev->dm.dc_lock)rw_exit_write(&adev->dm.dc_lock);
1925 return;
1926 }
1927
1928 stream_update.stream = stream_state;
1929 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL((void *)0), 0,
1930 stream_state, &stream_update,
1931 stream_state->ctx->dc->current_state);
1932 mutex_unlock(&adev->dm.dc_lock)rw_exit_write(&adev->dm.dc_lock);
1933}
1934
1935static int dm_resume(void *handle)
1936{
1937 struct amdgpu_device *adev = handle;
1938 struct drm_device *ddev = adev_to_drm(adev);
1939 struct amdgpu_display_manager *dm = &adev->dm;
1940 struct amdgpu_dm_connector *aconnector;
1941 struct drm_connector *connector;
1942 struct drm_connector_list_iter iter;
1943 struct drm_crtc *crtc;
1944 struct drm_crtc_state *new_crtc_state;
1945 struct dm_crtc_state *dm_new_crtc_state;
1946 struct drm_plane *plane;
1947 struct drm_plane_state *new_plane_state;
1948 struct dm_plane_state *dm_new_plane_state;
1949 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state)({ const __typeof( ((struct dm_atomic_state *)0)->base ) *
__mptr = (dm->atomic_obj.state); (struct dm_atomic_state *
)( (char *)__mptr - __builtin_offsetof(struct dm_atomic_state
, base) );})
;
1950 enum dc_connection_type new_connection_type = dc_connection_none;
1951 struct dc_state *dc_state;
1952 int i, r, j;
1953
1954 if (amdgpu_in_reset(adev)) {
1955 dc_state = dm->cached_dc_state;
1956
1957 r = dm_dmub_hw_init(adev);
1958 if (r)
1959 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r)__drm_err("DMUB interface failed to initialize: status=%d\n",
r)
;
1960
1961 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1962 dc_resume(dm->dc);
1963
1964 amdgpu_dm_irq_resume_early(adev);
1965
1966 for (i = 0; i < dc_state->stream_count; i++) {
1967 dc_state->streams[i]->mode_changed = true1;
1968 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
1969 dc_state->stream_status[i].plane_states[j]->update_flags.raw
1970 = 0xffffffff;
1971 }
1972 }
1973
1974 WARN_ON(!dc_commit_state(dm->dc, dc_state))({ int __ret = !!(!dc_commit_state(dm->dc, dc_state)); if (
__ret) printf("WARNING %s failed at %s:%d\n", "!dc_commit_state(dm->dc, dc_state)"
, "/usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c"
, 1974); __builtin_expect(!!(__ret), 0); })
;
1975
1976 dm_gpureset_commit_state(dm->cached_dc_state, dm);
1977
1978 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true1);
1979
1980 dc_release_state(dm->cached_dc_state);
1981 dm->cached_dc_state = NULL((void *)0);
1982
1983 amdgpu_dm_irq_resume_late(adev);
1984
1985 mutex_unlock(&dm->dc_lock)rw_exit_write(&dm->dc_lock);
1986
1987 return 0;
1988 }
1989 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1990 dc_release_state(dm_state->context);
1991 dm_state->context = dc_create_state(dm->dc);
1992 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1993 dc_resource_state_construct(dm->dc, dm_state->context);
1994
1995 /* Before powering on DC we need to re-initialize DMUB. */
1996 r = dm_dmub_hw_init(adev);
1997 if (r)
1998 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r)__drm_err("DMUB interface failed to initialize: status=%d\n",
r)
;
1999
2000 /* power on hardware */
2001 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2002
2003 /* program HPD filter */
2004 dc_resume(dm->dc);
2005
2006 /*
2007 * Enable HPD Rx IRQ early; this should be done before set mode, as
2008 * short-pulse interrupts are used for MST
2009 */
2010 amdgpu_dm_irq_resume_early(adev);
2011
2012 /* On resume we need to rewrite the MSTM control bits to enable MST */
2013 s3_handle_mst(ddev, false0);
2014
2015 /* Do detection */
2016 drm_connector_list_iter_begin(ddev, &iter);
2017 drm_for_each_connector_iter(connector, &iter)while ((connector = drm_connector_list_iter_next(&iter))) {
2018 aconnector = to_amdgpu_dm_connector(connector)({ const __typeof( ((struct amdgpu_dm_connector *)0)->base
) *__mptr = (connector); (struct amdgpu_dm_connector *)( (char
*)__mptr - __builtin_offsetof(struct amdgpu_dm_connector, base
) );})
;
2019
2020 /*
2021 * This is the case when traversing through already-created
2022 * MST connectors; they should be skipped.
2023 */
2024 if (aconnector->mst_port)
2025 continue;
2026
2027 mutex_lock(&aconnector->hpd_lock)rw_enter_write(&aconnector->hpd_lock);
2028 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2029 DRM_ERROR("KMS: Failed to detect connector\n")__drm_err("KMS: Failed to detect connector\n");
2030
2031 if (aconnector->base.force && new_connection_type == dc_connection_none)
2032 emulated_link_detect(aconnector->dc_link);
2033 else
2034 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2035
2036 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2037 aconnector->fake_enable = false0;
2038
2039 if (aconnector->dc_sink)
2040 dc_sink_release(aconnector->dc_sink);
2041 aconnector->dc_sink = NULL((void *)0);
2042 amdgpu_dm_update_connector_after_detect(aconnector);
2043 mutex_unlock(&aconnector->hpd_lock)rw_exit_write(&aconnector->hpd_lock);
2044 }
2045 drm_connector_list_iter_end(&iter);
2046
2047 /* Force mode set in atomic commit */
2048 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)for ((i) = 0; (i) < (dm->cached_state)->dev->mode_config
.num_crtc; (i)++) if (!((dm->cached_state)->crtcs[i].ptr
&& ((crtc) = (dm->cached_state)->crtcs[i].ptr,
(void)(crtc) , (new_crtc_state) = (dm->cached_state)->
crtcs[i].new_state, (void)(new_crtc_state) , 1))) {} else
2049 new_crtc_state->active_changed = true1;
2050
2051 /*
2052 * atomic_check is expected to create the dc states. We need to release
2053 * them here, since they were duplicated as part of the suspend
2054 * procedure.
2055 */
2056 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)for ((i) = 0; (i) < (dm->cached_state)->dev->mode_config
.num_crtc; (i)++) if (!((dm->cached_state)->crtcs[i].ptr
&& ((crtc) = (dm->cached_state)->crtcs[i].ptr,
(void)(crtc) , (new_crtc_state) = (dm->cached_state)->
crtcs[i].new_state, (void)(new_crtc_state) , 1))) {} else
{
2057 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state)({ const __typeof( ((struct dm_crtc_state *)0)->base ) *__mptr
= (new_crtc_state); (struct dm_crtc_state *)( (char *)__mptr
- __builtin_offsetof(struct dm_crtc_state, base) );})
;
2058 if (dm_new_crtc_state->stream) {
2059 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1)({ int __ret = !!(kref_read(&dm_new_crtc_state->stream
->refcount) > 1); if (__ret) printf("WARNING %s failed at %s:%d\n"
, "kref_read(&dm_new_crtc_state->stream->refcount) > 1"
, "/usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c"
, 2059); __builtin_expect(!!(__ret), 0); })
;
2060 dc_stream_release(dm_new_crtc_state->stream);
2061 dm_new_crtc_state->stream = NULL((void *)0);
2062 }
2063 }
2064
2065 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i)for ((i) = 0; (i) < (dm->cached_state)->dev->mode_config
.num_total_plane; (i)++) if (!((dm->cached_state)->planes
[i].ptr && ((plane) = (dm->cached_state)->planes
[i].ptr, (void)(plane) , (new_plane_state) = (dm->cached_state
)->planes[i].new_state, (void)(new_plane_state) , 1))) {} else
{
2066 dm_new_plane_state = to_dm_plane_state(new_plane_state)({ const __typeof( ((struct dm_plane_state *)0)->base ) *__mptr
= (new_plane_state); (struct dm_plane_state *)( (char *)__mptr
- __builtin_offsetof(struct dm_plane_state, base) );})
;
2067 if (dm_new_plane_state->dc_state) {
2068 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1)({ int __ret = !!(kref_read(&dm_new_plane_state->dc_state
->refcount) > 1); if (__ret) printf("WARNING %s failed at %s:%d\n"
, "kref_read(&dm_new_plane_state->dc_state->refcount) > 1"
, "/usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c"
, 2068); __builtin_expect(!!(__ret), 0); })
;
2069 dc_plane_state_release(dm_new_plane_state->dc_state);
2070 dm_new_plane_state->dc_state = NULL((void *)0);
2071 }
2072 }
2073
2074 drm_atomic_helper_resume(ddev, dm->cached_state);
2075
2076 dm->cached_state = NULL((void *)0);
2077
2078 amdgpu_dm_irq_resume_late(adev);
2079
2080 amdgpu_dm_smu_write_watermarks_table(adev);
2081
2082 return 0;
2083}
2084
2085/**
2086 * DOC: DM Lifecycle
2087 *
2088 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2089 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2090 * the base driver's device list to be initialized and torn down accordingly.
2091 *
2092 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2093 */
2094
2095static const struct amd_ip_funcs amdgpu_dm_funcs = {
2096 .name = "dm",
2097 .early_init = dm_early_init,
2098 .late_init = dm_late_init,
2099 .sw_init = dm_sw_init,
2100 .sw_fini = dm_sw_fini,
2101 .hw_init = dm_hw_init,
2102 .hw_fini = dm_hw_fini,
2103 .suspend = dm_suspend,
2104 .resume = dm_resume,
2105 .is_idle = dm_is_idle,
2106 .wait_for_idle = dm_wait_for_idle,
2107 .check_soft_reset = dm_check_soft_reset,
2108 .soft_reset = dm_soft_reset,
2109 .set_clockgating_state = dm_set_clockgating_state,
2110 .set_powergating_state = dm_set_powergating_state,
2111};
2112
2113const struct amdgpu_ip_block_version dm_ip_block =
2114{
2115 .type = AMD_IP_BLOCK_TYPE_DCE,
2116 .major = 1,
2117 .minor = 0,
2118 .rev = 0,
2119 .funcs = &amdgpu_dm_funcs,
2120};
2121
2122
2123/**
2124 * DOC: atomic
2125 *
2126 * *WIP*
2127 */
2128
2129static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2130 .fb_create = amdgpu_display_user_framebuffer_create,
2131 .output_poll_changed = drm_fb_helper_output_poll_changed,
2132 .atomic_check = amdgpu_dm_atomic_check,
2133 .atomic_commit = amdgpu_dm_atomic_commit,
2134};
2135
2136static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2137 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2138};
2139
2140static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2141{
2142 u32 max_cll, min_cll, max, min, q, r;
2143 struct amdgpu_dm_backlight_caps *caps;
2144 struct amdgpu_display_manager *dm;
2145 struct drm_connector *conn_base;
2146 struct amdgpu_device *adev;
2147 struct dc_link *link = NULL((void *)0);
2148 static const u8 pre_computed_values[] = {
2149 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2150 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2151
2152 if (!aconnector || !aconnector->dc_link)
2153 return;
2154
2155 link = aconnector->dc_link;
2156 if (link->connector_signal != SIGNAL_TYPE_EDP)
2157 return;
2158
2159 conn_base = &aconnector->base;
2160 adev = drm_to_adev(conn_base->dev);
2161 dm = &adev->dm;
2162 caps = &dm->backlight_caps;
2163 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2164 caps->aux_support = false0;
2165 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2166 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2167
2168 if (caps->ext_caps->bits.oled == 1 /*||
2169 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2170 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2171 caps->aux_support = true1;
2172
2173 if (amdgpu_backlight == 0)
2174 caps->aux_support = false0;
2175 else if (amdgpu_backlight == 1)
2176 caps->aux_support = true1;
2177
2178 /* From the specification (CTA-861-G), to calculate the maximum
2179 * luminance we need to use:
2180 * Luminance = 50*2**(CV/32)
2181 * where CV is a one-byte value.
2182 * Evaluating this expression would require floating-point precision;
2183 * to avoid that complexity, we take advantage of the fact that CV is
2184 * divided by a constant. By Euclid's division algorithm, CV can be
2185 * written as CV = 32*q + r. Substituting CV in the Luminance
2186 * expression gives 50*(2**q)*(2**(r/32)), so we only need to
2187 * pre-compute the values of 50*2**(r/32). To pre-compute them,
2188 * we used the following Ruby line:
2189 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2190 * The results of this expression can be verified against
2191 * pre_computed_values.
2192 */
2193 q = max_cll >> 5;
2194 r = max_cll % 32;
2195 max = (1 << q) * pre_computed_values[r];
2196
2197 // min luminance: maxLum * (CV/255)^2 / 100
2198 q = DIV_ROUND_CLOSEST(min_cll, 255)(((min_cll) + ((255) / 2)) / (255));
2199 min = max * DIV_ROUND_CLOSEST((q * q), 100)((((q * q)) + ((100) / 2)) / (100));
2200
2201 caps->aux_max_input_signal = max;
2202 caps->aux_min_input_signal = min;
2203}
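As a cross-check of the comment in update_connector_ext_caps() above, here is a minimal standalone sketch of the same integer approximation; lum50[] holds round(50 * 2**(r/32)) for r in 0..31, copied from pre_computed_values above, and the function name is illustrative only:

#include <stdint.h>
#include <stdio.h>

static const uint8_t lum50[32] = {
	50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
	71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98
};

/* Luminance = 50 * 2**(cv/32). With cv = 32*q + r this becomes
 * (2**q) * (50 * 2**(r/32)), approximated as (1 << q) * lum50[r]. */
static uint32_t
max_luminance(uint8_t cv)
{
	uint32_t q = cv >> 5;	/* cv / 32 */
	uint32_t r = cv & 31;	/* cv % 32 */

	return (1u << q) * lum50[r];
}

int
main(void)
{
	/* cv = 96: q = 3, r = 0, so 8 * 50 = 400, matching 50 * 2**3 exactly. */
	printf("%u\n", max_luminance(96));
	return 0;
}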
2204
2205void amdgpu_dm_update_connector_after_detect(
2206 struct amdgpu_dm_connector *aconnector)
2207{
2208 struct drm_connector *connector = &aconnector->base;
2209 struct drm_device *dev = connector->dev;
2210 struct dc_sink *sink;
2211
2212 /* MST handled by drm_mst framework */
2213 if (aconnector->mst_mgr.mst_state == true1)
2214 return;
2215
2216 sink = aconnector->dc_link->local_sink;
2217 if (sink)
2218 dc_sink_retain(sink);
2219
2220 /*
2221 * An EDID-managed connector gets its first update only in the mode_valid
2222 * hook; the connector sink is then set to either a fake or a physical sink,
2223 * depending on link status. Skip if this was already done during boot.
2224 */
2225 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2226 && aconnector->dc_em_sink) {
2227
2228 /*
2229 * For headless S3 resume, use the emulated sink (dc_em_sink) to fake a
2230 * stream, because connector->sink is set to NULL on resume.
2231 */
2232 mutex_lock(&dev->mode_config.mutex)rw_enter_write(&dev->mode_config.mutex);
2233
2234 if (sink) {
2235 if (aconnector->dc_sink) {
2236 amdgpu_dm_update_freesync_caps(connector, NULL((void *)0));
2237 /*
2238 * The retain and release below are used to bump up the refcount
2239 * for the sink, because the link no longer points to it after
2240 * disconnect; otherwise, on the next crtc-to-connector reshuffle
2241 * by the UMD we would run into an unwanted dc_sink release.
2242 */
2243 dc_sink_release(aconnector->dc_sink);
2244 }
2245 aconnector->dc_sink = sink;
2246 dc_sink_retain(aconnector->dc_sink);
2247 amdgpu_dm_update_freesync_caps(connector,
2248 aconnector->edid);
2249 } else {
2250 amdgpu_dm_update_freesync_caps(connector, NULL((void *)0));
2251 if (!aconnector->dc_sink) {
2252 aconnector->dc_sink = aconnector->dc_em_sink;
2253 dc_sink_retain(aconnector->dc_sink);
2254 }
2255 }
2256
2257 mutex_unlock(&dev->mode_config.mutex)rw_exit_write(&dev->mode_config.mutex);
2258
2259 if (sink)
2260 dc_sink_release(sink);
2261 return;
2262 }
2263
2264 /*
2265 * TODO: temporary guard until a proper fix is found.
2266 * If this sink is an MST sink, we should not do anything.
2267 */
2268 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2269 dc_sink_release(sink);
2270 return;
2271 }
2272
2273 if (aconnector->dc_sink == sink) {
2274 /*
2275 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2276 * Do nothing!!
2277 */
2278 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",__drm_dbg(DRM_UT_DRIVER, "DCHPD: connector_id=%d: dc_sink didn't change.\n"
, aconnector->connector_id)
2279 aconnector->connector_id)__drm_dbg(DRM_UT_DRIVER, "DCHPD: connector_id=%d: dc_sink didn't change.\n"
, aconnector->connector_id)
;
2280 if (sink)
2281 dc_sink_release(sink);
2282 return;
2283 }
2284
2285 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",__drm_dbg(DRM_UT_DRIVER, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n"
, aconnector->connector_id, aconnector->dc_sink, sink)
2286 aconnector->connector_id, aconnector->dc_sink, sink)__drm_dbg(DRM_UT_DRIVER, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n"
, aconnector->connector_id, aconnector->dc_sink, sink)
;
2287
2288 mutex_lock(&dev->mode_config.mutex)rw_enter_write(&dev->mode_config.mutex);
2289
2290 /*
2291 * 1. Update status of the drm connector
2292 * 2. Send an event and let userspace tell us what to do
2293 */
2294 if (sink) {
2295 /*
2296 * TODO: check if we still need the S3 mode update workaround.
2297 * If yes, put it here.
2298 */
2299 if (aconnector->dc_sink) {
2300 amdgpu_dm_update_freesync_caps(connector, NULL((void *)0));
2301 dc_sink_release(aconnector->dc_sink);
2302 }
2303
2304 aconnector->dc_sink = sink;
2305 dc_sink_retain(aconnector->dc_sink);
2306 if (sink->dc_edid.length == 0) {
2307 aconnector->edid = NULL((void *)0);
2308 if (aconnector->dc_link->aux_mode) {
2309 drm_dp_cec_unset_edid(
2310 &aconnector->dm_dp_aux.aux);
2311 }
2312 } else {
2313 aconnector->edid =
2314 (struct edid *)sink->dc_edid.raw_edid;
2315
2316 drm_connector_update_edid_property(connector,
2317 aconnector->edid);
2318 if (aconnector->dc_link->aux_mode)
2319 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2320 aconnector->edid);
2321 }
2322
2323 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2324 update_connector_ext_caps(aconnector);
2325 } else {
2326 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2327 amdgpu_dm_update_freesync_caps(connector, NULL((void *)0));
2328 drm_connector_update_edid_property(connector, NULL((void *)0));
2329 aconnector->num_modes = 0;
2330 dc_sink_release(aconnector->dc_sink);
2331 aconnector->dc_sink = NULL((void *)0);
2332 aconnector->edid = NULL((void *)0);
2333#ifdef CONFIG_DRM_AMD_DC_HDCP
2334 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
2335 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED2)
2336 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED1;
2337#endif
2338 }
2339
2340 mutex_unlock(&dev->mode_config.mutex)rw_exit_write(&dev->mode_config.mutex);
2341
2342 update_subconnector_property(aconnector);
2343
2344 if (sink)
2345 dc_sink_release(sink);
2346}
2347
2348static void handle_hpd_irq(void *param)
2349{
2350 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2351 struct drm_connector *connector = &aconnector->base;
2352 struct drm_device *dev = connector->dev;
2353 enum dc_connection_type new_connection_type = dc_connection_none;
2354#ifdef CONFIG_DRM_AMD_DC_HDCP
2355 struct amdgpu_device *adev = drm_to_adev(dev);
2356#endif
2357
2358 /*
2359 * In case of failure, or for MST, there is no need to update the connector
2360 * status or notify the OS, since (in the MST case) MST does this in its own context.
2361 */
2362 mutex_lock(&aconnector->hpd_lock)rw_enter_write(&aconnector->hpd_lock);
2363
2364#ifdef CONFIG_DRM_AMD_DC_HDCP
2365 if (adev->dm.hdcp_workqueue)
2366 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2367#endif
2368 if (aconnector->fake_enable)
2369 aconnector->fake_enable = false0;
2370
2371 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2372 DRM_ERROR("KMS: Failed to detect connector\n")__drm_err("KMS: Failed to detect connector\n");
2373
2374 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2375 emulated_link_detect(aconnector->dc_link);
2376
2377
2378 drm_modeset_lock_all(dev);
2379 dm_restore_drm_connector_state(dev, connector);
2380 drm_modeset_unlock_all(dev);
2381
2382 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2383 drm_kms_helper_hotplug_event(dev);
2384
2385 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2386 if (new_connection_type == dc_connection_none &&
2387 aconnector->dc_link->type == dc_connection_none)
2388 dm_set_dpms_off(aconnector->dc_link);
2389
2390 amdgpu_dm_update_connector_after_detect(aconnector);
2391
2392 drm_modeset_lock_all(dev);
2393 dm_restore_drm_connector_state(dev, connector);
2394 drm_modeset_unlock_all(dev);
2395
2396 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2397 drm_kms_helper_hotplug_event(dev);
2398 }
2399 mutex_unlock(&aconnector->hpd_lock)rw_exit_write(&aconnector->hpd_lock);
2400
2401}
2402
2403static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2404{
2405 uint8_t esi[DP_PSR_ERROR_STATUS0x2006 - DP_SINK_COUNT_ESI0x2002] = { 0 };
2406 uint8_t dret;
2407 bool_Bool new_irq_handled = false0;
2408 int dpcd_addr;
2409 int dpcd_bytes_to_read;
2410
2411 const int max_process_count = 30;
2412 int process_count = 0;
2413
2414 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2415
2416 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2417 dpcd_bytes_to_read = DP_LANE0_1_STATUS0x202 - DP_SINK_COUNT0x200;
2418 /* DPCD 0x200 - 0x201 for downstream IRQ */
2419 dpcd_addr = DP_SINK_COUNT0x200;
2420 } else {
2421 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS0x2006 - DP_SINK_COUNT_ESI0x2002;
2422 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2423 dpcd_addr = DP_SINK_COUNT_ESI0x2002;
2424 }
2425
2426 dret = drm_dp_dpcd_read(
2427 &aconnector->dm_dp_aux.aux,
2428 dpcd_addr,
2429 esi,
2430 dpcd_bytes_to_read);
2431
2432 while (dret == dpcd_bytes_to_read &&
2433 process_count < max_process_count) {
2434 uint8_t retry;
2435 dret = 0;
Value stored to 'dret' is never read
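(The store is dead: on each iteration, either new_irq_handled is set and dret is overwritten by the drm_dp_dpcd_read() retry below before the loop condition reads it again, or the else branch breaks out of the loop without ever reading dret, so the zero written here is never observed.)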
2436
2437 process_count++;
2438
2439 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2])__drm_dbg(DRM_UT_DRIVER, "ESI %02x %02x %02x\n", esi[0], esi[
1], esi[2])
;
2440 /* handle HPD short pulse irq */
2441 if (aconnector->mst_mgr.mst_state)
2442 drm_dp_mst_hpd_irq(
2443 &aconnector->mst_mgr,
2444 esi,
2445 &new_irq_handled);
2446
2447 if (new_irq_handled) {
2448 /* ACK at DPCD to notify downstream */
2449 const int ack_dpcd_bytes_to_write =
2450 dpcd_bytes_to_read - 1;
2451
2452 for (retry = 0; retry < 3; retry++) {
2453 uint8_t wret;
2454
2455 wret = drm_dp_dpcd_write(
2456 &aconnector->dm_dp_aux.aux,
2457 dpcd_addr + 1,
2458 &esi[1],
2459 ack_dpcd_bytes_to_write);
2460 if (wret == ack_dpcd_bytes_to_write)
2461 break;
2462 }
2463
2464 /* check if there is a new irq to be handled */
2465 dret = drm_dp_dpcd_read(
2466 &aconnector->dm_dp_aux.aux,
2467 dpcd_addr,
2468 esi,
2469 dpcd_bytes_to_read);
2470
2471 new_irq_handled = false0;
2472 } else {
2473 break;
2474 }
2475 }
2476
2477 if (process_count == max_process_count)
2478 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n")__drm_dbg(DRM_UT_DRIVER, "Loop exceeded max iterations\n");
2479}
2480
2481static void handle_hpd_rx_irq(void *param)
2482{
2483 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2484 struct drm_connector *connector = &aconnector->base;
2485 struct drm_device *dev = connector->dev;
2486 struct dc_link *dc_link = aconnector->dc_link;
2487 bool_Bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2488 enum dc_connection_type new_connection_type = dc_connection_none;
2489#ifdef CONFIG_DRM_AMD_DC_HDCP
2490 union hpd_irq_data hpd_irq_data;
2491 struct amdgpu_device *adev = drm_to_adev(dev);
2492
2493 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data))__builtin_memset((&hpd_irq_data), (0), (sizeof(hpd_irq_data
)))
;
2494#endif
2495
2496 /*
2497 * TODO: Temporarily add a mutex to protect the hpd interrupt from gpio
2498 * conflicts; once an i2c helper is implemented, this mutex should
2499 * be retired.
2500 */
2501 if (dc_link->type != dc_connection_mst_branch)
2502 mutex_lock(&aconnector->hpd_lock)rw_enter_write(&aconnector->hpd_lock);
2503
2504
2505#ifdef CONFIG_DRM_AMD_DC_HDCP
2506 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL((void *)0)) &&
2507#else
2508 if (dc_link_handle_hpd_rx_irq(dc_link, NULL((void *)0), NULL((void *)0)) &&
2509#endif
2510 !is_mst_root_connector) {
2511 /* Downstream Port status changed. */
2512 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2513 DRM_ERROR("KMS: Failed to detect connector\n")__drm_err("KMS: Failed to detect connector\n");
2514
2515 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2516 emulated_link_detect(dc_link);
2517
2518 if (aconnector->fake_enable)
2519 aconnector->fake_enable = false0;
2520
2521 amdgpu_dm_update_connector_after_detect(aconnector);
2522
2523
2524 drm_modeset_lock_all(dev);
2525 dm_restore_drm_connector_state(dev, connector);
2526 drm_modeset_unlock_all(dev);
2527
2528 drm_kms_helper_hotplug_event(dev);
2529 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2530
2531 if (aconnector->fake_enable)
2532 aconnector->fake_enable = false0;
2533
2534 amdgpu_dm_update_connector_after_detect(aconnector);
2535
2536
2537 drm_modeset_lock_all(dev);
2538 dm_restore_drm_connector_state(dev, connector);
2539 drm_modeset_unlock_all(dev);
2540
2541 drm_kms_helper_hotplug_event(dev);
2542 }
2543 }
2544#ifdef CONFIG_DRM_AMD_DC_HDCP
2545 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2546 if (adev->dm.hdcp_workqueue)
2547 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2548 }
2549#endif
2550 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2551 (dc_link->type == dc_connection_mst_branch))
2552 dm_handle_hpd_rx_irq(aconnector);
2553
2554 if (dc_link->type != dc_connection_mst_branch) {
2555 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2556 mutex_unlock(&aconnector->hpd_lock)rw_exit_write(&aconnector->hpd_lock);
2557 }
2558}
2559
2560static void register_hpd_handlers(struct amdgpu_device *adev)
2561{
2562 struct drm_device *dev = adev_to_drm(adev);
2563 struct drm_connector *connector;
2564 struct amdgpu_dm_connector *aconnector;
2565 const struct dc_link *dc_link;
2566 struct dc_interrupt_params int_params = {0};
2567
2568 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2569 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2570
2571 list_for_each_entry(connector,for (connector = ({ const __typeof( ((__typeof(*connector) *)
0)->head ) *__mptr = ((&dev->mode_config.connector_list
)->next); (__typeof(*connector) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*connector), head) );}); &connector->head !=
(&dev->mode_config.connector_list); connector = ({ const
__typeof( ((__typeof(*connector) *)0)->head ) *__mptr = (
connector->head.next); (__typeof(*connector) *)( (char *)__mptr
- __builtin_offsetof(__typeof(*connector), head) );}))
2572 &dev->mode_config.connector_list, head)for (connector = ({ const __typeof( ((__typeof(*connector) *)
0)->head ) *__mptr = ((&dev->mode_config.connector_list
)->next); (__typeof(*connector) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*connector), head) );}); &connector->head !=
(&dev->mode_config.connector_list); connector = ({ const
__typeof( ((__typeof(*connector) *)0)->head ) *__mptr = (
connector->head.next); (__typeof(*connector) *)( (char *)__mptr
- __builtin_offsetof(__typeof(*connector), head) );}))
{
2573
2574 aconnector = to_amdgpu_dm_connector(connector)({ const __typeof( ((struct amdgpu_dm_connector *)0)->base
) *__mptr = (connector); (struct amdgpu_dm_connector *)( (char
*)__mptr - __builtin_offsetof(struct amdgpu_dm_connector, base
) );})
;
2575 dc_link = aconnector->dc_link;
2576
2577 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2578 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2579 int_params.irq_source = dc_link->irq_source_hpd;
2580
2581 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2582 handle_hpd_irq,
2583 (void *) aconnector);
2584 }
2585
2586 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2587
2588 /* Also register for DP short pulse (hpd_rx). */
2589 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2590 int_params.irq_source = dc_link->irq_source_hpd_rx;
2591
2592 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2593 handle_hpd_rx_irq,
2594 (void *) aconnector);
2595 }
2596 }
2597}
2598
2599#if defined(CONFIG_DRM_AMD_DC_SI)
2600/* Register IRQ sources and initialize IRQ callbacks */
2601static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2602{
2603 struct dc *dc = adev->dm.dc;
2604 struct common_irq_params *c_irq_params;
2605 struct dc_interrupt_params int_params = {0};
2606 int r;
2607 int i;
2608 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY0;
2609
2610 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2611 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2612
2613 /*
2614 * Actions of amdgpu_irq_add_id():
2615 * 1. Register a set() function with base driver.
2616 * Base driver will call set() function to enable/disable an
2617 * interrupt in DC hardware.
2618 * 2. Register amdgpu_dm_irq_handler().
2619 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2620 * coming from DC hardware.
2621 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2622 * for acknowledging and handling. */
2623
2624 /* Use VBLANK interrupt */
2625 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2626 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2627 if (r) {
2628 DRM_ERROR("Failed to add crtc irq id!\n")__drm_err("Failed to add crtc irq id!\n");
2629 return r;
2630 }
2631
2632 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2633 int_params.irq_source =
2634 dc_interrupt_to_irq_source(dc, i+1 , 0);
2635
2636 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2637
2638 c_irq_params->adev = adev;
2639 c_irq_params->irq_src = int_params.irq_source;
2640
2641 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2642 dm_crtc_high_irq, c_irq_params);
2643 }
2644
2645 /* Use GRPH_PFLIP interrupt */
2646 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP8;
2647 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP18; i += 2) {
2648 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2649 if (r) {
2650 DRM_ERROR("Failed to add page flip irq id!\n")__drm_err("Failed to add page flip irq id!\n");
2651 return r;
2652 }
2653
2654 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2655 int_params.irq_source =
2656 dc_interrupt_to_irq_source(dc, i, 0);
2657
2658 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2659
2660 c_irq_params->adev = adev;
2661 c_irq_params->irq_src = int_params.irq_source;
2662
2663 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2664 dm_pflip_high_irq, c_irq_params);
2665
2666 }
2667
2668 /* HPD */
2669 r = amdgpu_irq_add_id(adev, client_id,
2670 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A42, &adev->hpd_irq);
2671 if (r) {
2672 DRM_ERROR("Failed to add hpd irq id!\n")__drm_err("Failed to add hpd irq id!\n");
2673 return r;
2674 }
2675
2676 register_hpd_handlers(adev);
2677
2678 return 0;
2679}
2680#endif
2681
2682/* Register IRQ sources and initialize IRQ callbacks */
2683static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2684{
2685 struct dc *dc = adev->dm.dc;
2686 struct common_irq_params *c_irq_params;
2687 struct dc_interrupt_params int_params = {0};
2688 int r;
2689 int i;
2690 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY0;
2691
2692 if (adev->asic_type >= CHIP_VEGA10)
2693 client_id = SOC15_IH_CLIENTID_DCE;
2694
2695 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2696 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2697
2698 /*
2699 * Actions of amdgpu_irq_add_id():
2700 * 1. Register a set() function with base driver.
2701 * Base driver will call set() function to enable/disable an
2702 * interrupt in DC hardware.
2703 * 2. Register amdgpu_dm_irq_handler().
2704 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2705 * coming from DC hardware.
2706 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2707 * for acknowledging and handling. */
2708
2709 /* Use VBLANK interrupt */
2710 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT019; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT024; i++) {
2711 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2712 if (r) {
2713 DRM_ERROR("Failed to add crtc irq id!\n")__drm_err("Failed to add crtc irq id!\n");
2714 return r;
2715 }
2716
2717 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2718 int_params.irq_source =
2719 dc_interrupt_to_irq_source(dc, i, 0);
2720
2721 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2722
2723 c_irq_params->adev = adev;
2724 c_irq_params->irq_src = int_params.irq_source;
2725
2726 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2727 dm_crtc_high_irq, c_irq_params);
2728 }
2729
2730 /* Use VUPDATE interrupt */
2731 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT7; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT17; i += 2) {
2732 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2733 if (r) {
2734 DRM_ERROR("Failed to add vupdate irq id!\n")__drm_err("Failed to add vupdate irq id!\n");
2735 return r;
2736 }
2737
2738 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2739 int_params.irq_source =
2740 dc_interrupt_to_irq_source(dc, i, 0);
2741
2742 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2743
2744 c_irq_params->adev = adev;
2745 c_irq_params->irq_src = int_params.irq_source;
2746
2747 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2748 dm_vupdate_high_irq, c_irq_params);
2749 }
2750
2751 /* Use GRPH_PFLIP interrupt */
2752 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP8;
2753 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP18; i += 2) {
2754 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2755 if (r) {
2756 DRM_ERROR("Failed to add page flip irq id!\n")__drm_err("Failed to add page flip irq id!\n");
2757 return r;
2758 }
2759
2760 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2761 int_params.irq_source =
2762 dc_interrupt_to_irq_source(dc, i, 0);
2763
2764 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2765
2766 c_irq_params->adev = adev;
2767 c_irq_params->irq_src = int_params.irq_source;
2768
2769 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2770 dm_pflip_high_irq, c_irq_params);
2771
2772 }
2773
2774 /* HPD */
2775 r = amdgpu_irq_add_id(adev, client_id,
2776 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A42, &adev->hpd_irq);
2777 if (r) {
2778 DRM_ERROR("Failed to add hpd irq id!\n")__drm_err("Failed to add hpd irq id!\n");
2779 return r;
2780 }
2781
2782 register_hpd_handlers(adev);
2783
2784 return 0;
2785}
2786
2787#if defined(CONFIG_DRM_AMD_DC_DCN1)
2788/* Register IRQ sources and initialize IRQ callbacks */
2789static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2790{
2791 struct dc *dc = adev->dm.dc;
2792 struct common_irq_params *c_irq_params;
2793 struct dc_interrupt_params int_params = {0};
2794 int r;
2795 int i;
2796
2797 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2798 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2799
2800 /*
2801 * Actions of amdgpu_irq_add_id():
2802 * 1. Register a set() function with base driver.
2803 * Base driver will call set() function to enable/disable an
2804 * interrupt in DC hardware.
2805 * 2. Register amdgpu_dm_irq_handler().
2806 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2807 * coming from DC hardware.
2808 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2809 * for acknowledging and handling.
2810 */
2811
2812 /* Use VSTARTUP interrupt */
2813 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2814 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2815 i++) {
2816 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2817
2818 if (r) {
2819 DRM_ERROR("Failed to add crtc irq id!\n")__drm_err("Failed to add crtc irq id!\n");
2820 return r;
2821 }
2822
2823 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2824 int_params.irq_source =
2825 dc_interrupt_to_irq_source(dc, i, 0);
2826
2827 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2828
2829 c_irq_params->adev = adev;
2830 c_irq_params->irq_src = int_params.irq_source;
2831
2832 amdgpu_dm_irq_register_interrupt(
2833 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2834 }
2835
2836 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2837 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2838 * to trigger at end of each vblank, regardless of state of the lock,
2839 * matching DCE behaviour.
2840 */
2841 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2842 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2843 i++) {
2844 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2845
2846 if (r) {
2847 DRM_ERROR("Failed to add vupdate irq id!\n")__drm_err("Failed to add vupdate irq id!\n");
2848 return r;
2849 }
2850
2851 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2852 int_params.irq_source =
2853 dc_interrupt_to_irq_source(dc, i, 0);
2854
2855 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2856
2857 c_irq_params->adev = adev;
2858 c_irq_params->irq_src = int_params.irq_source;
2859
2860 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2861 dm_vupdate_high_irq, c_irq_params);
2862 }
2863
2864 /* Use GRPH_PFLIP interrupt */
2865 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2866 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2867 i++) {
2868 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2869 if (r) {
2870 DRM_ERROR("Failed to add page flip irq id!\n")__drm_err("Failed to add page flip irq id!\n");
2871 return r;
2872 }
2873
2874 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2875 int_params.irq_source =
2876 dc_interrupt_to_irq_source(dc, i, 0);
2877
2878 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2879
2880 c_irq_params->adev = adev;
2881 c_irq_params->irq_src = int_params.irq_source;
2882
2883 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2884 dm_pflip_high_irq, c_irq_params);
2885
2886 }
2887
2888 /* HPD */
2889 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2890 &adev->hpd_irq);
2891 if (r) {
2892 DRM_ERROR("Failed to add hpd irq id!\n")__drm_err("Failed to add hpd irq id!\n");
2893 return r;
2894 }
2895
2896 register_hpd_handlers(adev);
2897
2898 return 0;
2899}
2900#endif
2901
2902/*
2903 * Acquires the lock for the atomic state object and returns
2904 * the new atomic state.
2905 *
2906 * This should only be called during atomic check.
2907 */
2908static int dm_atomic_get_state(struct drm_atomic_state *state,
2909 struct dm_atomic_state **dm_state)
2910{
2911 struct drm_device *dev = state->dev;
2912 struct amdgpu_device *adev = drm_to_adev(dev);
2913 struct amdgpu_display_manager *dm = &adev->dm;
2914 struct drm_private_state *priv_state;
2915
2916 if (*dm_state)
2917 return 0;
2918
2919 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2920 if (IS_ERR(priv_state))
2921 return PTR_ERR(priv_state);
2922
2923 *dm_state = to_dm_atomic_state(priv_state);
2924
2925 return 0;
2926}
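/*
 * The DM context lives in a drm_private_obj rather than in any CRTC or
 * plane state, so atomic check pulls it into the transaction lazily. A
 * typical caller (sketch, names from the signature above):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *	// dm_state->context now holds this commit's private DC state
 *
 * The first call locks the object and duplicates obj->state through
 * dm_atomic_duplicate_state(); later calls return the same copy.
 */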
2927
2928static struct dm_atomic_state *
2929dm_atomic_get_new_state(struct drm_atomic_state *state)
2930{
2931 struct drm_device *dev = state->dev;
2932 struct amdgpu_device *adev = drm_to_adev(dev);
2933 struct amdgpu_display_manager *dm = &adev->dm;
2934 struct drm_private_obj *obj;
2935 struct drm_private_state *new_obj_state;
2936 int i;
2937
2938 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2939 if (obj->funcs == dm->atomic_obj.funcs)
2940 return to_dm_atomic_state(new_obj_state);
2941 }
2942
2943 return NULL;
2944}
2945
2946static struct drm_private_state *
2947dm_atomic_duplicate_state(struct drm_private_obj *obj)
2948{
2949 struct dm_atomic_state *old_state, *new_state;
2950
2951 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2952 if (!new_state)
2953 return NULL;
2954
2955 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2956
2957 old_state = to_dm_atomic_state(obj->state);
2958
2959 if (old_state && old_state->context)
2960 new_state->context = dc_copy_state(old_state->context);
2961
2962 if (!new_state->context) {
2963 kfree(new_state);
2964 return NULL;
2965 }
2966
2967 return &new_state->base;
2968}
2969
2970static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2971 struct drm_private_state *state)
2972{
2973 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2974
2975 if (dm_state && dm_state->context)
2976 dc_release_state(dm_state->context);
2977
2978 kfree(dm_state);
2979}
2980
2981static struct drm_private_state_funcs dm_atomic_state_funcs = {
2982 .atomic_duplicate_state = dm_atomic_duplicate_state,
2983 .atomic_destroy_state = dm_atomic_destroy_state,
2984};
2985
2986static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2987{
2988 struct dm_atomic_state *state;
2989 int r;
2990
2991 adev->mode_info.mode_config_initialized = true;
2992
2993 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2994 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2995
2996 adev_to_drm(adev)->mode_config.max_width = 16384;
2997 adev_to_drm(adev)->mode_config.max_height = 16384;
2998
2999 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3000 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3001 /* indicates support for immediate flip */
3002 adev_to_drm(adev)->mode_config.async_page_flip = true;
3003
3004 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3005
3006 state = kzalloc(sizeof(*state), GFP_KERNEL);
3007 if (!state)
3008 return -ENOMEM;
3009
3010 state->context = dc_create_state(adev->dm.dc);
3011 if (!state->context) {
3012 kfree(state);
3013 return -ENOMEM;
3014 }
3015
3016 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3017
3018 drm_atomic_private_obj_init(adev_to_drm(adev),
3019 &adev->dm.atomic_obj,
3020 &state->base,
3021 &dm_atomic_state_funcs);
3022
3023 r = amdgpu_display_modeset_create_props(adev);
3024 if (r) {
3025 dc_release_state(state->context);
3026 kfree(state);
3027 return r;
3028 }
3029
3030 r = amdgpu_dm_audio_init(adev);
3031 if (r) {
3032 dc_release_state(state->context);
3033 kfree(state);
3034 return r;
3035 }
3036
3037 return 0;
3038}
3039
3040#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3041#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3042#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3043
3044#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3045 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3046
3047static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3048{
3049#if defined(CONFIG_ACPI)
3050 struct amdgpu_dm_backlight_caps caps;
3051
3052 memset(&caps, 0, sizeof(caps));
3053
3054 if (dm->backlight_caps.caps_valid)
3055 return;
3056
3057 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3058 if (caps.caps_valid) {
3059 dm->backlight_caps.caps_valid = true;
3060 if (caps.aux_support)
3061 return;
3062 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3063 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3064 } else {
3065 dm->backlight_caps.min_input_signal =
3066 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3067 dm->backlight_caps.max_input_signal =
3068 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3069 }
3070#else
3071 if (dm->backlight_caps.aux_support)
3072 return;
3073
3074 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3075 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3076#endif
3077}
3078
3079static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3080 unsigned *min, unsigned *max)
3081{
3082 if (!caps)
3083 return 0;
3084
3085 if (caps->aux_support) {
3086 // Firmware limits are in nits, DC API wants millinits.
3087 *max = 1000 * caps->aux_max_input_signal;
3088 *min = 1000 * caps->aux_min_input_signal;
3089 } else {
3090 // Firmware limits are 8-bit, PWM control is 16-bit.
3091 *max = 0x101 * caps->max_input_signal;
3092 *min = 0x101 * caps->min_input_signal;
3093 }
3094 return 1;
3095}
3096
3097static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3098 uint32_t brightness)
3099{
3100 unsigned min, max;
3101
3102 if (!get_brightness_range(caps, &min, &max))
3103 return brightness;
3104
3105 // Rescale 0..255 to min..max
3106 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3107 AMDGPU_MAX_BL_LEVEL);
3108}
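/*
 * Rough worked example with the PWM defaults (min 12, max 255, so
 * min = 3084 and max = 65535 after scaling): a user brightness of 128
 * maps to 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 3084 + 31348 =
 * 34432, i.e. about mid-range of the 16-bit PWM scale.
 */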
3109
3110static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3111 uint32_t brightness)
3112{
3113 unsigned min, max;
3114
3115 if (!get_brightness_range(caps, &min, &max))
3116 return brightness;
3117
3118 if (brightness < min)
3119 return 0;
3120 // Rescale min..max to 0..255
3121 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3122 max - min);
3123}
3124
3125static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3126{
3127 struct amdgpu_display_manager *dm = bl_get_data(bd);
3128 struct amdgpu_dm_backlight_caps caps;
3129 struct dc_link *link = NULL;
3130 u32 brightness;
3131 bool rc;
3132
3133 amdgpu_dm_update_backlight_caps(dm);
3134 caps = dm->backlight_caps;
3135
3136 link = (struct dc_link *)dm->backlight_link;
3137
3138 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3139 // Change brightness based on AUX property
3140 if (caps.aux_support)
3141 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3142 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3143 else
3144 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3145
3146 return rc ? 0 : 1;
3147}
3148
3149static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3150{
3151 struct amdgpu_display_manager *dm = bl_get_data(bd);
3152 struct amdgpu_dm_backlight_caps caps;
3153
3154 amdgpu_dm_update_backlight_caps(dm);
3155 caps = dm->backlight_caps;
3156
3157 if (caps.aux_support) {
3158 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3159 u32 avg, peak;
3160 bool rc;
3161
3162 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3163 if (!rc)
3164 return bd->props.brightness;
3165 return convert_brightness_to_user(&caps, avg);
3166 } else {
3167 int ret = dc_link_get_backlight_level(dm->backlight_link);
3168
3169 if (ret == DC_ERROR_UNEXPECTED)
3170 return bd->props.brightness;
3171 return convert_brightness_to_user(&caps, ret);
3172 }
3173}
3174
3175static const struct backlight_ops amdgpu_dm_backlight_ops = {
3176 .options = BL_CORE_SUSPENDRESUME,
3177 .get_brightness = amdgpu_dm_backlight_get_brightness,
3178 .update_status = amdgpu_dm_backlight_update_status,
3179};
3180
3181static void
3182amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3183{
3184 char bl_name[16];
3185 struct backlight_properties props = { 0 };
3186
3187 amdgpu_dm_update_backlight_caps(dm);
3188
3189 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3190 props.brightness = AMDGPU_MAX_BL_LEVEL;
3191 props.type = BACKLIGHT_RAW;
3192
3193 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3194 adev_to_drm(dm->adev)->primary->index);
3195
3196 dm->backlight_dev = backlight_device_register(bl_name,
3197 adev_to_drm(dm->adev)->dev,
3198 dm,
3199 &amdgpu_dm_backlight_ops,
3200 &props);
3201
3202 if (IS_ERR(dm->backlight_dev))
3203 DRM_ERROR("DM: Backlight registration failed!\n")__drm_err("DM: Backlight registration failed!\n");
3204 else
3205 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name)__drm_dbg(DRM_UT_DRIVER, "DM: Registered Backlight device: %s\n"
, bl_name)
;
3206}
3207
3208#endif
3209
3210static int initialize_plane(struct amdgpu_display_manager *dm,
3211 struct amdgpu_mode_info *mode_info, int plane_id,
3212 enum drm_plane_type plane_type,
3213 const struct dc_plane_cap *plane_cap)
3214{
3215 struct drm_plane *plane;
3216 unsigned long possible_crtcs;
3217 int ret = 0;
3218
3219 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3220 if (!plane) {
3221 DRM_ERROR("KMS: Failed to allocate plane\n");
3222 return -ENOMEM;
3223 }
3224 plane->type = plane_type;
3225
3226 /*
3227 * HACK: IGT tests expect that the primary plane for a CRTC
3228 * can only have one possible CRTC. Only expose support for
3229 * any CRTC if they're not going to be used as a primary plane
3230 * for a CRTC - like overlay or underlay planes.
3231 */
3232 possible_crtcs = 1 << plane_id;
3233 if (plane_id >= dm->dc->caps.max_streams)
3234 possible_crtcs = 0xff;
3235
3236 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3237
3238 if (ret) {
3239 DRM_ERROR("KMS: Failed to initialize plane\n")__drm_err("KMS: Failed to initialize plane\n");
3240 kfree(plane);
3241 return ret;
3242 }
3243
3244 if (mode_info)
3245 mode_info->planes[plane_id] = plane;
3246
3247 return ret;
3248}
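/*
 * possible_crtcs is a CRTC bitmask: e.g. primary plane 2 on a
 * four-stream ASIC gets 1 << 2 = 0x4 and is tied to CRTC 2 only,
 * while an overlay plane (plane_id >= max_streams) gets 0xff and may
 * be assigned to any CRTC.
 */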
3249
3250
3251static void register_backlight_device(struct amdgpu_display_manager *dm,
3252 struct dc_link *link)
3253{
3254#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3255 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3256
3257 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3258 link->type != dc_connection_none) {
3259 /*
3260 * Even if registration failed, we should continue with
3261 * DM initialization because not having a backlight control
3262 * is better than a black screen.
3263 */
3264 amdgpu_dm_register_backlight_device(dm);
3265
3266 if (dm->backlight_dev)
3267 dm->backlight_link = link;
3268 }
3269#endif
3270}
3271
3272
3273/*
3274 * In this architecture, the association
3275 * connector -> encoder -> crtc
3276 * is not really required. The crtc and connector will hold the
3277 * display_index as an abstraction to use with DAL component
3278 *
3279 * Returns 0 on success
3280 */
3281static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3282{
3283 struct amdgpu_display_manager *dm = &adev->dm;
3284 int32_t i;
3285 struct amdgpu_dm_connector *aconnector = NULL;
3286 struct amdgpu_encoder *aencoder = NULL;
3287 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3288 uint32_t link_cnt;
3289 int32_t primary_planes;
3290 enum dc_connection_type new_connection_type = dc_connection_none;
3291 const struct dc_plane_cap *plane;
3292
3293 dm->display_indexes_num = dm->dc->caps.max_streams;
3294 /* Update the actual used number of crtc */
3295 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3296
3297 link_cnt = dm->dc->caps.max_links;
3298 if (amdgpu_dm_mode_config_init(dm->adev)) {
3299 DRM_ERROR("DM: Failed to initialize mode config\n")__drm_err("DM: Failed to initialize mode config\n");
3300 return -EINVAL22;
3301 }
3302
3303 /* There is one primary plane per CRTC */
3304 primary_planes = dm->dc->caps.max_streams;
3305 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3306
3307 /*
3308 * Initialize primary planes, implicit planes for legacy IOCTLS.
3309 * Order is reversed to match iteration order in atomic check.
3310 */
3311 for (i = (primary_planes - 1); i >= 0; i--) {
3312 plane = &dm->dc->caps.planes[i];
3313
3314 if (initialize_plane(dm, mode_info, i,
3315 DRM_PLANE_TYPE_PRIMARY, plane)) {
3316 DRM_ERROR("KMS: Failed to initialize primary plane\n")__drm_err("KMS: Failed to initialize primary plane\n");
3317 goto fail;
3318 }
3319 }
3320
3321 /*
3322 * Initialize overlay planes, index starting after primary planes.
3323 * These planes have a higher DRM index than the primary planes since
3324 * they should be considered as having a higher z-order.
3325 * Order is reversed to match iteration order in atomic check.
3326 *
3327 * Only support DCN for now, and only expose one so we don't encourage
3328 * userspace to use up all the pipes.
3329 */
3330 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3331 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3332
3333 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3334 continue;
3335
3336 if (!plane->blends_with_above || !plane->blends_with_below)
3337 continue;
3338
3339 if (!plane->pixel_format_support.argb8888)
3340 continue;
3341
3342 if (initialize_plane(dm, NULL, primary_planes + i,
3343 DRM_PLANE_TYPE_OVERLAY, plane)) {
3344 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3345 goto fail;
3346 }
3347
3348 /* Only create one overlay plane. */
3349 break;
3350 }
3351
3352 for (i = 0; i < dm->dc->caps.max_streams; i++)
3353 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3354 DRM_ERROR("KMS: Failed to initialize crtc\n")__drm_err("KMS: Failed to initialize crtc\n");
3355 goto fail;
3356 }
3357
3358 /* loops over all connectors on the board */
3359 for (i = 0; i < link_cnt; i++) {
3360 struct dc_link *link = NULL;
3361
3362 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3363 DRM_ERROR(
3364 "KMS: Cannot support more than %d display indexes\n",
3365 AMDGPU_DM_MAX_DISPLAY_INDEX);
3366 continue;
3367 }
3368
3369 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3370 if (!aconnector)
3371 goto fail;
3372
3373 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3374 if (!aencoder)
3375 goto fail;
3376
3377 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3378 DRM_ERROR("KMS: Failed to initialize encoder\n")__drm_err("KMS: Failed to initialize encoder\n");
3379 goto fail;
3380 }
3381
3382 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3383 DRM_ERROR("KMS: Failed to initialize connector\n")__drm_err("KMS: Failed to initialize connector\n");
3384 goto fail;
3385 }
3386
3387 link = dc_get_link_at_index(dm->dc, i);
3388
3389 if (!dc_link_detect_sink(link, &new_connection_type))
3390 DRM_ERROR("KMS: Failed to detect connector\n")__drm_err("KMS: Failed to detect connector\n");
3391
3392 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3393 emulated_link_detect(link);
3394 amdgpu_dm_update_connector_after_detect(aconnector);
3395
3396 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3397 amdgpu_dm_update_connector_after_detect(aconnector);
3398 register_backlight_device(dm, link);
3399 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3400 amdgpu_dm_set_psr_caps(link);
3401 }
3402
3403
3404 }
3405
3406 /* Software is initialized. Now we can register interrupt handlers. */
3407 switch (adev->asic_type) {
3408#if defined(CONFIG_DRM_AMD_DC_SI)
3409 case CHIP_TAHITI:
3410 case CHIP_PITCAIRN:
3411 case CHIP_VERDE:
3412 case CHIP_OLAND:
3413 if (dce60_register_irq_handlers(dm->adev)) {
3414 DRM_ERROR("DM: Failed to initialize IRQ\n")__drm_err("DM: Failed to initialize IRQ\n");
3415 goto fail;
3416 }
3417 break;
3418#endif
3419 case CHIP_BONAIRE:
3420 case CHIP_HAWAII:
3421 case CHIP_KAVERI:
3422 case CHIP_KABINI:
3423 case CHIP_MULLINS:
3424 case CHIP_TONGA:
3425 case CHIP_FIJI:
3426 case CHIP_CARRIZO:
3427 case CHIP_STONEY:
3428 case CHIP_POLARIS11:
3429 case CHIP_POLARIS10:
3430 case CHIP_POLARIS12:
3431 case CHIP_VEGAM:
3432 case CHIP_VEGA10:
3433 case CHIP_VEGA12:
3434 case CHIP_VEGA20:
3435 if (dce110_register_irq_handlers(dm->adev)) {
3436 DRM_ERROR("DM: Failed to initialize IRQ\n")__drm_err("DM: Failed to initialize IRQ\n");
3437 goto fail;
3438 }
3439 break;
3440#if defined(CONFIG_DRM_AMD_DC_DCN)
3441 case CHIP_RAVEN:
3442 case CHIP_NAVI12:
3443 case CHIP_NAVI10:
3444 case CHIP_NAVI14:
3445 case CHIP_RENOIR:
3446#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3447 case CHIP_SIENNA_CICHLID:
3448 case CHIP_NAVY_FLOUNDER:
3449#endif
3450 if (dcn10_register_irq_handlers(dm->adev)) {
3451 DRM_ERROR("DM: Failed to initialize IRQ\n")__drm_err("DM: Failed to initialize IRQ\n");
3452 goto fail;
3453 }
3454 break;
3455#endif
3456 default:
3457 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type)__drm_err("Unsupported ASIC type: 0x%X\n", adev->asic_type
)
;
3458 goto fail;
3459 }
3460
3461 return 0;
3462fail:
3463 kfree(aencoder);
3464 kfree(aconnector);
3465
3466 return -EINVAL;
3467}
3468
3469static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3470{
3471 drm_mode_config_cleanup(dm->ddev);
3472 drm_atomic_private_obj_fini(&dm->atomic_obj);
3473 return;
3474}
3475
3476/******************************************************************************
3477 * amdgpu_display_funcs functions
3478 *****************************************************************************/
3479
3480/*
3481 * dm_bandwidth_update - program display watermarks
3482 *
3483 * @adev: amdgpu_device pointer
3484 *
3485 * Calculate and program the display watermarks and line buffer allocation.
3486 */
3487static void dm_bandwidth_update(struct amdgpu_device *adev)
3488{
3489 /* TODO: implement later */
3490}
3491
3492static const struct amdgpu_display_funcs dm_display_funcs = {
3493 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3494 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3495 .backlight_set_level = NULL, /* never called for DC */
3496 .backlight_get_level = NULL, /* never called for DC */
3497 .hpd_sense = NULL, /* called unconditionally */
3498 .hpd_set_polarity = NULL, /* called unconditionally */
3499 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3500 .page_flip_get_scanoutpos =
3501 dm_crtc_get_scanoutpos, /* called unconditionally */
3502 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3503 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3504};
3505
3506#if defined(CONFIG_DEBUG_KERNEL_DC)
3507
3508static ssize_t s3_debug_store(struct device *device,
3509 struct device_attribute *attr,
3510 const char *buf,
3511 size_t count)
3512{
3513 int ret;
3514 int s3_state;
3515 struct drm_device *drm_dev = dev_get_drvdata(device);
3516 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3517
3518 ret = kstrtoint(buf, 0, &s3_state);
3519
3520 if (ret == 0) {
3521 if (s3_state) {
3522 dm_resume(adev);
3523 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3524 } else
3525 dm_suspend(adev);
3526 }
3527
3528 return ret == 0 ? count : 0;
3529}
3530
3531DEVICE_ATTR_WO(s3_debug);
3532
3533#endif
3534
3535static int dm_early_init(void *handle)
3536{
3537 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3538
3539 switch (adev->asic_type) {
3540#if defined(CONFIG_DRM_AMD_DC_SI)
3541 case CHIP_TAHITI:
3542 case CHIP_PITCAIRN:
3543 case CHIP_VERDE:
3544 adev->mode_info.num_crtc = 6;
3545 adev->mode_info.num_hpd = 6;
3546 adev->mode_info.num_dig = 6;
3547 break;
3548 case CHIP_OLAND:
3549 adev->mode_info.num_crtc = 2;
3550 adev->mode_info.num_hpd = 2;
3551 adev->mode_info.num_dig = 2;
3552 break;
3553#endif
3554 case CHIP_BONAIRE:
3555 case CHIP_HAWAII:
3556 adev->mode_info.num_crtc = 6;
3557 adev->mode_info.num_hpd = 6;
3558 adev->mode_info.num_dig = 6;
3559 break;
3560 case CHIP_KAVERI:
3561 adev->mode_info.num_crtc = 4;
3562 adev->mode_info.num_hpd = 6;
3563 adev->mode_info.num_dig = 7;
3564 break;
3565 case CHIP_KABINI:
3566 case CHIP_MULLINS:
3567 adev->mode_info.num_crtc = 2;
3568 adev->mode_info.num_hpd = 6;
3569 adev->mode_info.num_dig = 6;
3570 break;
3571 case CHIP_FIJI:
3572 case CHIP_TONGA:
3573 adev->mode_info.num_crtc = 6;
3574 adev->mode_info.num_hpd = 6;
3575 adev->mode_info.num_dig = 7;
3576 break;
3577 case CHIP_CARRIZO:
3578 adev->mode_info.num_crtc = 3;
3579 adev->mode_info.num_hpd = 6;
3580 adev->mode_info.num_dig = 9;
3581 break;
3582 case CHIP_STONEY:
3583 adev->mode_info.num_crtc = 2;
3584 adev->mode_info.num_hpd = 6;
3585 adev->mode_info.num_dig = 9;
3586 break;
3587 case CHIP_POLARIS11:
3588 case CHIP_POLARIS12:
3589 adev->mode_info.num_crtc = 5;
3590 adev->mode_info.num_hpd = 5;
3591 adev->mode_info.num_dig = 5;
3592 break;
3593 case CHIP_POLARIS10:
3594 case CHIP_VEGAM:
3595 adev->mode_info.num_crtc = 6;
3596 adev->mode_info.num_hpd = 6;
3597 adev->mode_info.num_dig = 6;
3598 break;
3599 case CHIP_VEGA10:
3600 case CHIP_VEGA12:
3601 case CHIP_VEGA20:
3602 adev->mode_info.num_crtc = 6;
3603 adev->mode_info.num_hpd = 6;
3604 adev->mode_info.num_dig = 6;
3605 break;
3606#if defined(CONFIG_DRM_AMD_DC_DCN)
3607 case CHIP_RAVEN:
3608 adev->mode_info.num_crtc = 4;
3609 adev->mode_info.num_hpd = 4;
3610 adev->mode_info.num_dig = 4;
3611 break;
3612#endif
3613 case CHIP_NAVI10:
3614 case CHIP_NAVI12:
3615#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3616 case CHIP_SIENNA_CICHLID:
3617 case CHIP_NAVY_FLOUNDER:
3618#endif
3619 adev->mode_info.num_crtc = 6;
3620 adev->mode_info.num_hpd = 6;
3621 adev->mode_info.num_dig = 6;
3622 break;
3623 case CHIP_NAVI14:
3624 adev->mode_info.num_crtc = 5;
3625 adev->mode_info.num_hpd = 5;
3626 adev->mode_info.num_dig = 5;
3627 break;
3628 case CHIP_RENOIR:
3629 adev->mode_info.num_crtc = 4;
3630 adev->mode_info.num_hpd = 4;
3631 adev->mode_info.num_dig = 4;
3632 break;
3633 default:
3634 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type)__drm_err("Unsupported ASIC type: 0x%X\n", adev->asic_type
)
;
3635 return -EINVAL22;
3636 }
3637
3638 amdgpu_dm_set_irq_funcs(adev);
3639
3640 if (adev->mode_info.funcs == NULL)
3641 adev->mode_info.funcs = &dm_display_funcs;
3642
3643 /*
3644 * Note: Do NOT change adev->audio_endpt_rreg and
3645 * adev->audio_endpt_wreg because they are initialised in
3646 * amdgpu_device_init()
3647 */
3648#if defined(CONFIG_DEBUG_KERNEL_DC)
3649 device_create_file(
3650 adev_to_drm(adev)->dev,
3651 &dev_attr_s3_debug);
3652#endif
3653
3654 return 0;
3655}
3656
3657static bool modeset_required(struct drm_crtc_state *crtc_state,
3658 struct dc_stream_state *new_stream,
3659 struct dc_stream_state *old_stream)
3660{
3661 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3662}
3663
3664static bool modereset_required(struct drm_crtc_state *crtc_state)
3665{
3666 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3667}
3668
3669static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3670{
3671 drm_encoder_cleanup(encoder);
3672 kfree(encoder);
3673}
3674
3675static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3676 .destroy = amdgpu_dm_encoder_destroy,
3677};
3678
3679
3680static int fill_dc_scaling_info(const struct drm_plane_state *state,
3681 struct dc_scaling_info *scaling_info)
3682{
3683 int scale_w, scale_h;
3684
3685 memset(scaling_info, 0, sizeof(*scaling_info));
3686
3687 /* Source is fixed 16.16 but we ignore mantissa for now... */
3688 scaling_info->src_rect.x = state->src_x >> 16;
3689 scaling_info->src_rect.y = state->src_y >> 16;
3690
3691 /*
3692 * For reasons we don't (yet) fully understand a non-zero
3693 * src_y coordinate into an NV12 buffer can cause a
3694 * system hang. To avoid hangs (and maybe be overly cautious)
3695 * let's reject both non-zero src_x and src_y.
3696 *
3697 * We currently know of only one use-case to reproduce a
3698 * scenario with non-zero src_x and src_y for NV12, which
3699 * is to gesture the YouTube Android app into full screen
3700 * on ChromeOS.
3701 */
3702 if (state->fb &&
3703 state->fb->format->format == DRM_FORMAT_NV12 &&
3704 (scaling_info->src_rect.x != 0 ||
3705 scaling_info->src_rect.y != 0))
3706 return -EINVAL;
3707
3725 scaling_info->src_rect.width = state->src_w >> 16;
3726 if (scaling_info->src_rect.width == 0)
3727 return -EINVAL;
3728
3729 scaling_info->src_rect.height = state->src_h >> 16;
3730 if (scaling_info->src_rect.height == 0)
3731 return -EINVAL;
3732
3733 scaling_info->dst_rect.x = state->crtc_x;
3734 scaling_info->dst_rect.y = state->crtc_y;
3735
3736 if (state->crtc_w == 0)
3737 return -EINVAL;
3738
3739 scaling_info->dst_rect.width = state->crtc_w;
3740
3741 if (state->crtc_h == 0)
3742 return -EINVAL;
3743
3744 scaling_info->dst_rect.height = state->crtc_h;
3745
3746 /* DRM doesn't specify clipping on destination output. */
3747 scaling_info->clip_rect = scaling_info->dst_rect;
3748
3749 /* TODO: Validate scaling per-format with DC plane caps */
3750 scale_w = scaling_info->dst_rect.width * 1000 /
3751 scaling_info->src_rect.width;
3752
3753 if (scale_w < 250 || scale_w > 16000)
3754 return -EINVAL;
3755
3756 scale_h = scaling_info->dst_rect.height * 1000 /
3757 scaling_info->src_rect.height;
3758
3759 if (scale_h < 250 || scale_h > 16000)
3760 return -EINVAL;
3761
3762 /*
3763 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3764 * assume reasonable defaults based on the format.
3765 */
3766
3767 return 0;
3768}
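/*
 * The scale_w/scale_h checks above are in units of dst/src * 1000, so
 * the accepted window is 0.25x to 16x; e.g. scanning a 1920-wide source
 * out at 960 pixels gives scale_w = 960 * 1000 / 1920 = 500, which is
 * within bounds.
 */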
3769
3770static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3771 uint64_t *tiling_flags, bool *tmz_surface)
3772{
3773 struct amdgpu_bo *rbo;
3774 int r;
3775
3776 if (!amdgpu_fb) {
3777 *tiling_flags = 0;
3778 *tmz_surface = false;
3779 return 0;
3780 }
3781
3782 rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3783 r = amdgpu_bo_reserve(rbo, false);
3784
3785 if (unlikely(r)) {
3786 /* Don't show error message when returning -ERESTARTSYS */
3787 if (r != -ERESTARTSYS)
3788 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3789 return r;
3790 }
3791
3792 if (tiling_flags)
3793 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3794
3795 if (tmz_surface)
3796 *tmz_surface = amdgpu_bo_encrypted(rbo);
3797
3798 amdgpu_bo_unreserve(rbo);
3799
3800 return r;
3801}
3802
3803static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3804{
3805 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3806
3807 return offset ? (address + offset * 256) : 0;
3808}
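/*
 * DCC_OFFSET_256B stores the metadata offset in 256-byte units; e.g. a
 * field value of 0x40 places the DCC metadata at address + 0x40 * 256 =
 * address + 16 KiB, while a zero field means the BO carries no DCC
 * metadata at all.
 */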
3809
3810static int
3811fill_plane_dcc_attributes(struct amdgpu_device *adev,
3812 const struct amdgpu_framebuffer *afb,
3813 const enum surface_pixel_format format,
3814 const enum dc_rotation_angle rotation,
3815 const struct plane_size *plane_size,
3816 const union dc_tiling_info *tiling_info,
3817 const uint64_t info,
3818 struct dc_plane_dcc_param *dcc,
3819 struct dc_plane_address *address,
3820 bool force_disable_dcc)
3821{
3822 struct dc *dc = adev->dm.dc;
3823 struct dc_dcc_surface_param input;
3824 struct dc_surface_dcc_cap output;
3825 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3826 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3827 uint64_t dcc_address;
3828
3829 memset(&input, 0, sizeof(input));
3830 memset(&output, 0, sizeof(output));
3831
3832 if (force_disable_dcc)
3833 return 0;
3834
3835 if (!offset)
3836 return 0;
3837
3838 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3839 return 0;
3840
3841 if (!dc->cap_funcs.get_dcc_compression_cap)
3842 return -EINVAL;
3843
3844 input.format = format;
3845 input.surface_size.width = plane_size->surface_size.width;
3846 input.surface_size.height = plane_size->surface_size.height;
3847 input.swizzle_mode = tiling_info->gfx9.swizzle;
3848
3849 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3850 input.scan = SCAN_DIRECTION_HORIZONTAL;
3851 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3852 input.scan = SCAN_DIRECTION_VERTICAL;
3853
3854 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3855 return -EINVAL;
3856
3857 if (!output.capable)
3858 return -EINVAL;
3859
3860 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3861 return -EINVAL;
3862
3863 dcc->enable = 1;
3864 dcc->meta_pitch =
3865 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3866 dcc->independent_64b_blks = i64b;
3867
3868 dcc_address = get_dcc_address(afb->address, info);
3869 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3870 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3871
3872 return 0;
3873}
3874
3875static int
3876fill_plane_buffer_attributes(struct amdgpu_device *adev,
3877 const struct amdgpu_framebuffer *afb,
3878 const enum surface_pixel_format format,
3879 const enum dc_rotation_angle rotation,
3880 const uint64_t tiling_flags,
3881 union dc_tiling_info *tiling_info,
3882 struct plane_size *plane_size,
3883 struct dc_plane_dcc_param *dcc,
3884 struct dc_plane_address *address,
3885 bool tmz_surface,
3886 bool force_disable_dcc)
3887{
3888 const struct drm_framebuffer *fb = &afb->base;
3889 int ret;
3890
3891 memset(tiling_info, 0, sizeof(*tiling_info));
3892 memset(plane_size, 0, sizeof(*plane_size));
3893 memset(dcc, 0, sizeof(*dcc));
3894 memset(address, 0, sizeof(*address));
3895
3896 address->tmz_surface = tmz_surface;
3897
3898 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3899 plane_size->surface_size.x = 0;
3900 plane_size->surface_size.y = 0;
3901 plane_size->surface_size.width = fb->width;
3902 plane_size->surface_size.height = fb->height;
3903 plane_size->surface_pitch =
3904 fb->pitches[0] / fb->format->cpp[0];
3905
3906 address->type = PLN_ADDR_TYPE_GRAPHICS;
3907 address->grph.addr.low_part = lower_32_bits(afb->address);
3908 address->grph.addr.high_part = upper_32_bits(afb->address);
3909 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3910 uint64_t chroma_addr = afb->address + fb->offsets[1];
3911
3912 plane_size->surface_size.x = 0;
3913 plane_size->surface_size.y = 0;
3914 plane_size->surface_size.width = fb->width;
3915 plane_size->surface_size.height = fb->height;
3916 plane_size->surface_pitch =
3917 fb->pitches[0] / fb->format->cpp[0];
3918
3919 plane_size->chroma_size.x = 0;
3920 plane_size->chroma_size.y = 0;
3921 /* TODO: set these based on surface format */
3922 plane_size->chroma_size.width = fb->width / 2;
3923 plane_size->chroma_size.height = fb->height / 2;
3924
3925 plane_size->chroma_pitch =
3926 fb->pitches[1] / fb->format->cpp[1];
3927
3928 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3929 address->video_progressive.luma_addr.low_part =
3930 lower_32_bits(afb->address);
3931 address->video_progressive.luma_addr.high_part =
3932 upper_32_bits(afb->address);
3933 address->video_progressive.chroma_addr.low_part =
3934 lower_32_bits(chroma_addr);
3935 address->video_progressive.chroma_addr.high_part =
3936 upper_32_bits(chroma_addr);
3937 }
3938
3939 /* Fill GFX8 params */
3940 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3941 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3942
3943 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3944 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3945 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3946 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3947 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3948
3949 /* XXX fix me for VI */
3950 tiling_info->gfx8.num_banks = num_banks;
3951 tiling_info->gfx8.array_mode =
3952 DC_ARRAY_2D_TILED_THIN1;
3953 tiling_info->gfx8.tile_split = tile_split;
3954 tiling_info->gfx8.bank_width = bankw;
3955 tiling_info->gfx8.bank_height = bankh;
3956 tiling_info->gfx8.tile_aspect = mtaspect;
3957 tiling_info->gfx8.tile_mode =
3958 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3959 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3960 == DC_ARRAY_1D_TILED_THIN1) {
3961 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3962 }
3963
3964 tiling_info->gfx8.pipe_config =
3965 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3966
3967 if (adev->asic_type == CHIP_VEGA10 ||
3968 adev->asic_type == CHIP_VEGA12 ||
3969 adev->asic_type == CHIP_VEGA20 ||
3970 adev->asic_type == CHIP_NAVI10 ||
3971 adev->asic_type == CHIP_NAVI14 ||
3972 adev->asic_type == CHIP_NAVI12 ||
3973#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3974 adev->asic_type == CHIP_SIENNA_CICHLID ||
3975 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3976#endif
3977 adev->asic_type == CHIP_RENOIR ||
3978 adev->asic_type == CHIP_RAVEN) {
3979 /* Fill GFX9 params */
3980 tiling_info->gfx9.num_pipes =
3981 adev->gfx.config.gb_addr_config_fields.num_pipes;
3982 tiling_info->gfx9.num_banks =
3983 adev->gfx.config.gb_addr_config_fields.num_banks;
3984 tiling_info->gfx9.pipe_interleave =
3985 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3986 tiling_info->gfx9.num_shader_engines =
3987 adev->gfx.config.gb_addr_config_fields.num_se;
3988 tiling_info->gfx9.max_compressed_frags =
3989 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3990 tiling_info->gfx9.num_rb_per_se =
3991 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3992 tiling_info->gfx9.swizzle =
3993 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3994 tiling_info->gfx9.shaderEnable = 1;
3995
3996#ifdef CONFIG_DRM_AMD_DC_DCN3_0
3997 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3998 adev->asic_type == CHIP_NAVY_FLOUNDER)
3999 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4000#endif
4001 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
4002 plane_size, tiling_info,
4003 tiling_flags, dcc, address,
4004 force_disable_dcc);
4005 if (ret)
4006 return ret;
4007 }
4008
4009 return 0;
4010}
4011
4012static void
4013fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4014 bool *per_pixel_alpha, bool *global_alpha,
4015 int *global_alpha_value)
4016{
4017 *per_pixel_alpha = false;
4018 *global_alpha = false;
4019 *global_alpha_value = 0xff;
4020
4021 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4022 return;
4023
4024 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4025 static const uint32_t alpha_formats[] = {
4026 DRM_FORMAT_ARGB8888,
4027 DRM_FORMAT_RGBA8888,
4028 DRM_FORMAT_ABGR8888,
4029 };
4030 uint32_t format = plane_state->fb->format->format;
4031 unsigned int i;
4032
4033 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4034 if (format == alpha_formats[i]) {
4035 *per_pixel_alpha = true;
4036 break;
4037 }
4038 }
4039 }
4040
4041 if (plane_state->alpha < 0xffff) {
4042 *global_alpha = true;
4043 *global_alpha_value = plane_state->alpha >> 8;
4044 }
4045}
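/*
 * The DRM per-plane alpha property is 16-bit (0x0000..0xffff) while DC
 * takes an 8-bit global alpha, hence the >> 8 above: e.g. an alpha of
 * 0x8000 yields global_alpha_value 0x80, roughly 50% opacity.
 */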
4046
4047static int
4048fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4049 const enum surface_pixel_format format,
4050 enum dc_color_space *color_space)
4051{
4052 bool full_range;
4053
4054 *color_space = COLOR_SPACE_SRGB;
4055
4056 /* DRM color properties only affect non-RGB formats. */
4057 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4058 return 0;
4059
4060 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4061
4062 switch (plane_state->color_encoding) {
4063 case DRM_COLOR_YCBCR_BT601:
4064 if (full_range)
4065 *color_space = COLOR_SPACE_YCBCR601;
4066 else
4067 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4068 break;
4069
4070 case DRM_COLOR_YCBCR_BT709:
4071 if (full_range)
4072 *color_space = COLOR_SPACE_YCBCR709;
4073 else
4074 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4075 break;
4076
4077 case DRM_COLOR_YCBCR_BT2020:
4078 if (full_range)
4079 *color_space = COLOR_SPACE_2020_YCBCR;
4080 else
4081 return -EINVAL;
4082 break;
4083
4084 default:
4085 return -EINVAL;
4086 }
4087
4088 return 0;
4089}
4090
4091static int
4092fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4093 const struct drm_plane_state *plane_state,
4094 const uint64_t tiling_flags,
4095 struct dc_plane_info *plane_info,
4096 struct dc_plane_address *address,
4097 bool tmz_surface,
4098 bool force_disable_dcc)
4099{
4100 const struct drm_framebuffer *fb = plane_state->fb;
4101 const struct amdgpu_framebuffer *afb =
4102 to_amdgpu_framebuffer(plane_state->fb);
4103 struct drm_format_name_buf format_name;
4104 int ret;
4105
4106 memset(plane_info, 0, sizeof(*plane_info));
4107
4108 switch (fb->format->format) {
4109 case DRM_FORMAT_C8:
4110 plane_info->format =
4111 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4112 break;
4113 case DRM_FORMAT_RGB565:
4114 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4115 break;
4116 case DRM_FORMAT_XRGB8888:
4117 case DRM_FORMAT_ARGB8888:
4118 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4119 break;
4120 case DRM_FORMAT_XRGB2101010:
4121 case DRM_FORMAT_ARGB2101010:
4122 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4123 break;
4124 case DRM_FORMAT_XBGR2101010:
4125 case DRM_FORMAT_ABGR2101010:
4126 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4127 break;
4128 case DRM_FORMAT_XBGR8888:
4129 case DRM_FORMAT_ABGR8888:
4130 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4131 break;
4132 case DRM_FORMAT_NV21:
4133 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4134 break;
4135 case DRM_FORMAT_NV12:
4136 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4137 break;
4138 case DRM_FORMAT_P010:
4139 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4140 break;
4141 case DRM_FORMAT_XRGB16161616F:
4142 case DRM_FORMAT_ARGB16161616F:
4143 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4144 break;
4145 case DRM_FORMAT_XBGR16161616F:
4146 case DRM_FORMAT_ABGR16161616F:
4147 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4148 break;
4149 default:
4150 DRM_ERROR(
4151 "Unsupported screen format %s\n",
4152 drm_get_format_name(fb->format->format, &format_name));
4153 return -EINVAL;
4154 }
4155
4156 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4157 case DRM_MODE_ROTATE_0:
4158 plane_info->rotation = ROTATION_ANGLE_0;
4159 break;
4160 case DRM_MODE_ROTATE_90:
4161 plane_info->rotation = ROTATION_ANGLE_90;
4162 break;
4163 case DRM_MODE_ROTATE_180:
4164 plane_info->rotation = ROTATION_ANGLE_180;
4165 break;
4166 case DRM_MODE_ROTATE_270:
4167 plane_info->rotation = ROTATION_ANGLE_270;
4168 break;
4169 default:
4170 plane_info->rotation = ROTATION_ANGLE_0;
4171 break;
4172 }
4173
4174 plane_info->visible = true;
4175 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4176
4177 plane_info->layer_index = 0;
4178
4179 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4180 &plane_info->color_space);
4181 if (ret)
4182 return ret;
4183
4184 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4185 plane_info->rotation, tiling_flags,
4186 &plane_info->tiling_info,
4187 &plane_info->plane_size,
4188 &plane_info->dcc, address, tmz_surface,
4189 force_disable_dcc);
4190 if (ret)
4191 return ret;
4192
4193 fill_blending_from_plane_state(
4194 plane_state, &plane_info->per_pixel_alpha,
4195 &plane_info->global_alpha, &plane_info->global_alpha_value);
4196
4197 return 0;
4198}
4199
4200static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4201 struct dc_plane_state *dc_plane_state,
4202 struct drm_plane_state *plane_state,
4203 struct drm_crtc_state *crtc_state)
4204{
4205 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4206 struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4207 struct dc_scaling_info scaling_info;
4208 struct dc_plane_info plane_info;
4209 int ret;
4210 bool force_disable_dcc = false;
4211
4212 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4213 if (ret)
4214 return ret;
4215
4216 dc_plane_state->src_rect = scaling_info.src_rect;
4217 dc_plane_state->dst_rect = scaling_info.dst_rect;
4218 dc_plane_state->clip_rect = scaling_info.clip_rect;
4219 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4220
4221 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4222 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4223 dm_plane_state->tiling_flags,
4224 &plane_info,
4225 &dc_plane_state->address,
4226 dm_plane_state->tmz_surface,
4227 force_disable_dcc);
4228 if (ret)
4229 return ret;
4230
4231 dc_plane_state->format = plane_info.format;
4232 dc_plane_state->color_space = plane_info.color_space;
4234 dc_plane_state->plane_size = plane_info.plane_size;
4235 dc_plane_state->rotation = plane_info.rotation;
4236 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4237 dc_plane_state->stereo_format = plane_info.stereo_format;
4238 dc_plane_state->tiling_info = plane_info.tiling_info;
4239 dc_plane_state->visible = plane_info.visible;
4240 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4241 dc_plane_state->global_alpha = plane_info.global_alpha;
4242 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4243 dc_plane_state->dcc = plane_info.dcc;
4244 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4245
4246 /*
4247 * Always set input transfer function, since plane state is refreshed
4248 * every time.
4249 */
4250 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4251 if (ret)
4252 return ret;
4253
4254 return 0;
4255}
4256
4257static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4258 const struct dm_connector_state *dm_state,
4259 struct dc_stream_state *stream)
4260{
4261 enum amdgpu_rmx_type rmx_type;
4262
4263 struct rect src = { 0 }; /* viewport in composition space */
4264 struct rect dst = { 0 }; /* stream addressable area */
4265
4266 /* no mode. nothing to be done */
4267 if (!mode)
4268 return;
4269
4270 /* Full screen scaling by default */
4271 src.width = mode->hdisplay;
4272 src.height = mode->vdisplay;
4273 dst.width = stream->timing.h_addressable;
4274 dst.height = stream->timing.v_addressable;
4275
4276 if (dm_state) {
4277 rmx_type = dm_state->scaling;
4278 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4279 if (src.width * dst.height <
4280 src.height * dst.width) {
4281 /* height needs less upscaling/more downscaling */
4282 dst.width = src.width *
4283 dst.height / src.height;
4284 } else {
4285 /* width needs less upscaling/more downscaling */
4286 dst.height = src.height *
4287 dst.width / src.width;
4288 }
4289 } else if (rmx_type == RMX_CENTER) {
4290 dst = src;
4291 }
4292
4293 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4294 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4295
4296 if (dm_state->underscan_enable) {
4297 dst.x += dm_state->underscan_hborder / 2;
4298 dst.y += dm_state->underscan_vborder / 2;
4299 dst.width -= dm_state->underscan_hborder;
4300 dst.height -= dm_state->underscan_vborder;
4301 }
4302 }
4303
4304 stream->src = src;
4305 stream->dst = dst;
4306
4307 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",__drm_dbg(DRM_UT_DRIVER, "Destination Rectangle x:%d y:%d width:%d height:%d\n"
, dst.x, dst.y, dst.width, dst.height)
4308 dst.x, dst.y, dst.width, dst.height)__drm_dbg(DRM_UT_DRIVER, "Destination Rectangle x:%d y:%d width:%d height:%d\n"
, dst.x, dst.y, dst.width, dst.height)
;
4309
4310}
4311
4312static enum dc_color_depth
4313convert_color_depth_from_display_info(const struct drm_connector *connector,
4314 bool is_y420, int requested_bpc)
4315{
4316 uint8_t bpc;
4317
4318 if (is_y420) {
4319 bpc = 8;
4320
4321 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4322 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4323 bpc = 16;
4324 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4325 bpc = 12;
4326 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4327 bpc = 10;
4328 } else {
4329 bpc = (uint8_t)connector->display_info.bpc;
4330 /* Assume 8 bpc by default if no bpc is specified. */
4331 bpc = bpc ? bpc : 8;
4332 }
4333
4334 if (requested_bpc > 0) {
4335 /*
4336 * Cap display bpc based on the user requested value.
4337 *
4338 * The value for state->max_bpc may not be correctly updated
4339 * depending on when the connector gets added to the state
4340 * or if this was called outside of atomic check, so it
4341 * can't be used directly.
4342 */
4343 bpc = min_t(u8, bpc, requested_bpc);
4344
4345 /* Round down to the nearest even number. */
4346 bpc = bpc - (bpc & 1);
4347 }
4348
4349 switch (bpc) {
4350 case 0:
4351 /*
4352 * Temporary workaround: DRM doesn't parse color depth for
4353 * EDID revision before 1.4
4354 * TODO: Fix edid parsing
4355 */
4356 return COLOR_DEPTH_888;
4357 case 6:
4358 return COLOR_DEPTH_666;
4359 case 8:
4360 return COLOR_DEPTH_888;
4361 case 10:
4362 return COLOR_DEPTH_101010;
4363 case 12:
4364 return COLOR_DEPTH_121212;
4365 case 14:
4366 return COLOR_DEPTH_141414;
4367 case 16:
4368 return COLOR_DEPTH_161616;
4369 default:
4370 return COLOR_DEPTH_UNDEFINED;
4371 }
4372}
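/*
 * Example of the capping above: a sink advertising 12 bpc combined
 * with a requested_bpc of 10 yields min_t(u8, 12, 10) = 10, i.e.
 * COLOR_DEPTH_101010; an odd request such as 11 first becomes
 * min(12, 11) = 11 and is then rounded down to 10 by "bpc & 1".
 */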
4373
4374static enum dc_aspect_ratio
4375get_aspect_ratio(const struct drm_display_mode *mode_in)
4376{
4377 /* 1-1 mapping, since both enums follow the HDMI spec. */
4378 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4379}
4380
4381static enum dc_color_space
4382get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4383{
4384 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4385
4386 switch (dc_crtc_timing->pixel_encoding) {
4387 case PIXEL_ENCODING_YCBCR422:
4388 case PIXEL_ENCODING_YCBCR444:
4389 case PIXEL_ENCODING_YCBCR420:
4390 {
4391 /*
4392 * According to the HDMI spec, 27030 kHz is the separation point
4393 * between HDTV and SDTV; we use YCbCr709 and YCbCr601
4394 * respectively.
4395 */
4396 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4397 if (dc_crtc_timing->flags.Y_ONLY)
4398 color_space =
4399 COLOR_SPACE_YCBCR709_LIMITED;
4400 else
4401 color_space = COLOR_SPACE_YCBCR709;
4402 } else {
4403 if (dc_crtc_timing->flags.Y_ONLY)
4404 color_space =
4405 COLOR_SPACE_YCBCR601_LIMITED;
4406 else
4407 color_space = COLOR_SPACE_YCBCR601;
4408 }
4409
4410 }
4411 break;
4412 case PIXEL_ENCODING_RGB:
4413 color_space = COLOR_SPACE_SRGB;
4414 break;
4415
4416 default:
4417 WARN_ON(1);
4418 break;
4419 }
4420
4421 return color_space;
4422}
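/*
 * Editor's note: for example, a 74.25 MHz YCbCr stream (pix_clk_100hz ==
 * 742500) is above the 270300 threshold checked above, so it is treated
 * as HDTV and gets the BT.709-based color spaces.
 */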
4423
4424static bool adjust_colour_depth_from_display_info(
4425 struct dc_crtc_timing *timing_out,
4426 const struct drm_display_info *info)
4427{
4428 enum dc_color_depth depth = timing_out->display_color_depth;
4429 int normalized_clk;
4430 do {
4431 normalized_clk = timing_out->pix_clk_100hz / 10;
4432 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4433 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4434 normalized_clk /= 2;
4435 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
4436 switch (depth) {
4437 case COLOR_DEPTH_888:
4438 break;
4439 case COLOR_DEPTH_101010:
4440 normalized_clk = (normalized_clk * 30) / 24;
4441 break;
4442 case COLOR_DEPTH_121212:
4443 normalized_clk = (normalized_clk * 36) / 24;
4444 break;
4445 case COLOR_DEPTH_161616:
4446 normalized_clk = (normalized_clk * 48) / 24;
4447 break;
4448 default:
4449 /* The above depths are the only ones valid for HDMI. */
4450 return false;
4451 }
4452 if (normalized_clk <= info->max_tmds_clock) {
4453 timing_out->display_color_depth = depth;
4454 return true;
4455 }
4456 } while (--depth > COLOR_DEPTH_666);
4457 return false;
4458}
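/*
 * Editor's note (hypothetical sink): a 297000 kHz normalized clock at
 * 12 bpc becomes 297000 * 36 / 24 = 445500 kHz; against a max_tmds_clock
 * of 340000 kHz the loop above falls back to 10 bpc (371250 kHz) and
 * finally fits at 8 bpc (297000 kHz).
 */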
4459
4460static void fill_stream_properties_from_drm_display_mode(
4461 struct dc_stream_state *stream,
4462 const struct drm_display_mode *mode_in,
4463 const struct drm_connector *connector,
4464 const struct drm_connector_state *connector_state,
4465 const struct dc_stream_state *old_stream,
4466 int requested_bpc)
4467{
4468 struct dc_crtc_timing *timing_out = &stream->timing;
4469 const struct drm_display_info *info = &connector->display_info;
4470 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4471 struct hdmi_vendor_infoframe hv_frame;
4472 struct hdmi_avi_infoframe avi_frame;
4473
4474 memset(&hv_frame, 0, sizeof(hv_frame));
4475 memset(&avi_frame, 0, sizeof(avi_frame));
4476
4477 timing_out->h_border_left = 0;
4478 timing_out->h_border_right = 0;
4479 timing_out->v_border_top = 0;
4480 timing_out->v_border_bottom = 0;
4481 /* TODO: un-hardcode */
4482 if (drm_mode_is_420_only(info, mode_in)
4483 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4484 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4485 else if (drm_mode_is_420_also(info, mode_in)
4486 && aconnector->force_yuv420_output)
4487 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4488 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4489 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4490 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4491 else
4492 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4493
4494 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4495 timing_out->display_color_depth = convert_color_depth_from_display_info(
4496 connector,
4497 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4498 requested_bpc);
4499 timing_out->scan_type = SCANNING_TYPE_NODATA;
4500 timing_out->hdmi_vic = 0;
4501
4502 if (old_stream) {
4503 timing_out->vic = old_stream->timing.vic;
4504 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4505 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4506 } else {
4507 timing_out->vic = drm_match_cea_mode(mode_in);
4508 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4509 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4510 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4511 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4512 }
4513
4514 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4515 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4516 timing_out->vic = avi_frame.video_code;
4517 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4518 timing_out->hdmi_vic = hv_frame.vic;
4519 }
4520
4521 timing_out->h_addressable = mode_in->crtc_hdisplay;
4522 timing_out->h_total = mode_in->crtc_htotal;
4523 timing_out->h_sync_width =
4524 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4525 timing_out->h_front_porch =
4526 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4527 timing_out->v_total = mode_in->crtc_vtotal;
4528 timing_out->v_addressable = mode_in->crtc_vdisplay;
4529 timing_out->v_front_porch =
4530 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4531 timing_out->v_sync_width =
4532 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4533 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4534 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4535
4536 stream->output_color_space = get_output_color_space(timing_out);
4537
4538 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4539 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4540 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4541 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4542 drm_mode_is_420_also(info, mode_in) &&
4543 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4544 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4545 adjust_colour_depth_from_display_info(timing_out, info);
4546 }
4547 }
4548}
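/*
 * Editor's note: the HDMI fallback above first tries to keep the chosen
 * encoding at a lower colour depth; only when no depth fits does it
 * switch to YCbCr 4:2:0 (halving the normalized clock) and retry.
 */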
4549
4550static void fill_audio_info(struct audio_info *audio_info,
4551 const struct drm_connector *drm_connector,
4552 const struct dc_sink *dc_sink)
4553{
4554 int i = 0;
4555 int cea_revision = 0;
4556 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4557
4558 audio_info->manufacture_id = edid_caps->manufacturer_id;
4559 audio_info->product_id = edid_caps->product_id;
4560
4561 cea_revision = drm_connector->display_info.cea_rev;
4562
4563#ifdef __linux__
4564 strscpy(audio_info->display_name,
4565 edid_caps->display_name,
4566 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4567#else
4568 strncpy(audio_info->display_name,
4569 edid_caps->display_name,
4570 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
4571#endif
4572
4573 if (cea_revision >= 3) {
4574 audio_info->mode_count = edid_caps->audio_mode_count;
4575
4576 for (i = 0; i < audio_info->mode_count; ++i) {
4577 audio_info->modes[i].format_code =
4578 (enum audio_format_code)
4579 (edid_caps->audio_modes[i].format_code);
4580 audio_info->modes[i].channel_count =
4581 edid_caps->audio_modes[i].channel_count;
4582 audio_info->modes[i].sample_rates.all =
4583 edid_caps->audio_modes[i].sample_rate;
4584 audio_info->modes[i].sample_size =
4585 edid_caps->audio_modes[i].sample_size;
4586 }
4587 }
4588
4589 audio_info->flags.all = edid_caps->speaker_flags;
4590
4591 /* TODO: We only check for the progressive mode, check for interlace mode too */
4592 if (drm_connector->latency_present[0]) {
4593 audio_info->video_latency = drm_connector->video_latency[0];
4594 audio_info->audio_latency = drm_connector->audio_latency[0];
4595 }
4596
4597 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4598
4599}
4600
4601static void
4602copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4603 struct drm_display_mode *dst_mode)
4604{
4605 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4606 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4607 dst_mode->crtc_clock = src_mode->crtc_clock;
4608 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4609 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4610 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4611 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4612 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4613 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4614 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4615 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4616 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4617 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4618 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4619}
4620
4621static void
4622decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4623 const struct drm_display_mode *native_mode,
4624 bool scale_enabled)
4625{
4626 if (scale_enabled) {
4627 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4628 } else if (native_mode->clock == drm_mode->clock &&
4629 native_mode->htotal == drm_mode->htotal &&
4630 native_mode->vtotal == drm_mode->vtotal) {
4631 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4632 } else {
4633 /* neither scaling nor an amdgpu-inserted mode: nothing to patch */
4634 }
4635}
4636
4637static struct dc_sink *
4638create_fake_sink(struct amdgpu_dm_connector *aconnector)
4639{
4640 struct dc_sink_init_data sink_init_data = { 0 };
4641 struct dc_sink *sink = NULL;
4642 sink_init_data.link = aconnector->dc_link;
4643 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4644
4645 sink = dc_sink_create(&sink_init_data);
4646 if (!sink) {
4647 DRM_ERROR("Failed to create sink!\n")__drm_err("Failed to create sink!\n");
4648 return NULL((void *)0);
4649 }
4650 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4651
4652 return sink;
4653}
4654
4655static void set_multisync_trigger_params(
4656 struct dc_stream_state *stream)
4657{
4658 if (stream->triggered_crtc_reset.enabled) {
4659 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4660 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4661 }
4662}
4663
4664static void set_master_stream(struct dc_stream_state *stream_set[],
4665 int stream_count)
4666{
4667 int j, highest_rfr = 0, master_stream = 0;
4668
4669 for (j = 0; j < stream_count; j++) {
4670 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4671 int refresh_rate = 0;
4672
4673 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4674 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4675 if (refresh_rate > highest_rfr) {
4676 highest_rfr = refresh_rate;
4677 master_stream = j;
4678 }
4679 }
4680 }
4681 for (j = 0; j < stream_count; j++) {
4682 if (stream_set[j])
4683 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4684 }
4685}
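/*
 * Editor's note (hypothetical timing): pix_clk_100hz = 1485000
 * (148.5 MHz) with h_total = 2200 and v_total = 1125 gives
 * 1485000 * 100 / (2200 * 1125) = 60, so that 60 Hz stream would be
 * picked as master above over any slower candidate.
 */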
4686
4687static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4688{
4689 int i = 0;
4690
4691 if (context->stream_count < 2)
4692 return;
4693 for (i = 0; i < context->stream_count ; i++) {
4694 if (!context->streams[i])
4695 continue;
4696 /*
4697 * TODO: add a function to read AMD VSDB bits and set
4698 * crtc_sync_master.multi_sync_enabled flag
4699 * For now it's set to false
4700 */
4701 set_multisync_trigger_params(context->streams[i]);
4702 }
4703 set_master_stream(context->streams, context->stream_count);
4704}
4705
4706static struct dc_stream_state *
4707create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4708 const struct drm_display_mode *drm_mode,
4709 const struct dm_connector_state *dm_state,
4710 const struct dc_stream_state *old_stream,
4711 int requested_bpc)
4712{
4713 struct drm_display_mode *preferred_mode = NULL;
4714 struct drm_connector *drm_connector;
4715 const struct drm_connector_state *con_state =
4716 dm_state ? &dm_state->base : NULL;
4717 struct dc_stream_state *stream = NULL;
4718 struct drm_display_mode mode = *drm_mode;
4719 bool native_mode_found = false;
4720 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4721 int mode_refresh;
4722 int preferred_refresh = 0;
4723#if defined(CONFIG_DRM_AMD_DC_DCN)
4724 struct dsc_dec_dpcd_caps dsc_caps;
4725#endif
4726 uint32_t link_bandwidth_kbps;
4727
4728 struct dc_sink *sink = NULL;
4729 if (aconnector == NULL) {
4730 DRM_ERROR("aconnector is NULL!\n");
4731 return stream;
4732 }
4733
4734 drm_connector = &aconnector->base;
4735
4736 if (!aconnector->dc_sink) {
4737 sink = create_fake_sink(aconnector);
4738 if (!sink)
4739 return stream;
4740 } else {
4741 sink = aconnector->dc_sink;
4742 dc_sink_retain(sink);
4743 }
4744
4745 stream = dc_create_stream_for_sink(sink);
4746
4747 if (stream == NULL) {
4748 DRM_ERROR("Failed to create stream for sink!\n");
4749 goto finish;
4750 }
4751
4752 stream->dm_stream_context = aconnector;
4753
4754 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4755 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4756
4757 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4758 /* Search for preferred mode */
4759 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4760 native_mode_found = true;
4761 break;
4762 }
4763 }
4764 if (!native_mode_found)
4765 preferred_mode = list_first_entry_or_null(
4766 &aconnector->base.modes,
4767 struct drm_display_mode,
4768 head);
4769
4770 mode_refresh = drm_mode_vrefresh(&mode);
4771
4772 if (preferred_mode == NULL) {
4773 /*
4774 * This may not be an error, the use case is when we have no
4775 * usermode calls to reset and set mode upon hotplug. In this
4776 * case, we call set mode ourselves to restore the previous mode
4777 * and the modelist may not be filled in in time.
4778 */
4779 DRM_DEBUG_DRIVER("No preferred mode found\n")__drm_dbg(DRM_UT_DRIVER, "No preferred mode found\n");
4780 } else {
4781 decide_crtc_timing_for_drm_display_mode(
4782 &mode, preferred_mode,
4783 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4784 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4785 }
4786
4787 if (!dm_state)
4788 drm_mode_set_crtcinfo(&mode, 0);
4789
4790 /*
4791 * If scaling is enabled and refresh rate didn't change
4792 * we copy the vic and polarities of the old timings
4793 */
4794 if (!scale || mode_refresh != preferred_refresh)
4795 fill_stream_properties_from_drm_display_mode(stream,
4796 &mode, &aconnector->base, con_state, NULL, requested_bpc);
4797 else
4798 fill_stream_properties_from_drm_display_mode(stream,
4799 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4800
4801 stream->timing.flags.DSC = 0;
4802
4803 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4804#if defined(CONFIG_DRM_AMD_DC_DCN)
4805 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4806 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4807 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4808 &dsc_caps);
4809#endif
4810 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4811 dc_link_get_link_cap(aconnector->dc_link));
4812
4813#if defined(CONFIG_DRM_AMD_DC_DCN)
4814 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4815 /* Set DSC policy according to dsc_clock_en */
4816 dc_dsc_policy_set_enable_dsc_when_not_needed(
4817 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4818
4819 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4820 &dsc_caps,
4821 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4822 link_bandwidth_kbps,
4823 &stream->timing,
4824 &stream->timing.dsc_cfg))
4825 stream->timing.flags.DSC = 1;
4826 /* Overwrite the stream flag if DSC is enabled through debugfs */
4827 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4828 stream->timing.flags.DSC = 1;
4829
4830 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4831 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4832
4833 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4834 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4835
4836 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4837 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4838 }
4839#endif
4840 }
4841
4842 update_stream_scaling_settings(&mode, dm_state, stream);
4843
4844 fill_audio_info(
4845 &stream->audio_info,
4846 drm_connector,
4847 sink);
4848
4849 update_stream_signal(stream, sink);
4850
4851 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4852 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4853
4854 if (stream->link->psr_settings.psr_feature_enabled) {
4855 //
4856 // Decide whether the stream supports VSC SDP colorimetry
4857 // before building the VSC info packet.
4858 //
4859 stream->use_vsc_sdp_for_colorimetry = false;
4860 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4861 stream->use_vsc_sdp_for_colorimetry =
4862 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4863 } else {
4864 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4865 stream->use_vsc_sdp_for_colorimetry = true;
4866 }
4867 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4868 }
4869finish:
4870 dc_sink_release(sink);
4871
4872 return stream;
4873}
4874
4875static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4876{
4877 drm_crtc_cleanup(crtc);
4878 kfree(crtc);
4879}
4880
4881static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4882 struct drm_crtc_state *state)
4883{
4884 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4885
4886 /* TODO: Destroy dc_stream objects once the stream object is flattened */
4887 if (cur->stream)
4888 dc_stream_release(cur->stream);
4889
4890
4891 __drm_atomic_helper_crtc_destroy_state(state);
4892
4893
4894 kfree(state);
4895}
4896
4897static void dm_crtc_reset_state(struct drm_crtc *crtc)
4898{
4899 struct dm_crtc_state *state;
4900
4901 if (crtc->state)
4902 dm_crtc_destroy_state(crtc, crtc->state);
4903
4904 state = kzalloc(sizeof(*state), GFP_KERNEL);
4905 if (WARN_ON(!state))
4906 return;
4907
4908 __drm_atomic_helper_crtc_reset(crtc, &state->base);
4909}
4910
4911static struct drm_crtc_state *
4912dm_crtc_duplicate_state(struct drm_crtc *crtc)
4913{
4914 struct dm_crtc_state *state, *cur;
4915
4916 cur = to_dm_crtc_state(crtc->state);
4917
4918 if (WARN_ON(!crtc->state))
4919 return NULL;
4920
4921 state = kzalloc(sizeof(*state), GFP_KERNEL);
4922 if (!state)
4923 return NULL;
4924
4925 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4926
4927 if (cur->stream) {
4928 state->stream = cur->stream;
4929 dc_stream_retain(state->stream);
4930 }
4931
4932 state->active_planes = cur->active_planes;
4933 state->vrr_infopacket = cur->vrr_infopacket;
4934 state->abm_level = cur->abm_level;
4935 state->vrr_supported = cur->vrr_supported;
4936 state->freesync_config = cur->freesync_config;
4937 state->crc_src = cur->crc_src;
4938 state->cm_has_degamma = cur->cm_has_degamma;
4939 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4940
4941 /* TODO: Duplicate the dc_stream once the stream object is flattened */
4942
4943 return &state->base;
4944}
4945
4946static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4947{
4948 enum dc_irq_source irq_source;
4949 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4950 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4951 int rc;
4952
4953 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4954
4955 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4956
4957 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4958 acrtc->crtc_id, enable ? "en" : "dis", rc);
4959 return rc;
4960}
4961
4962static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4963{
4964 enum dc_irq_source irq_source;
4965 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4966 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4967 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4968 int rc = 0;
4969
4970 if (enable) {
4971 /* vblank irq on -> Only need vupdate irq in vrr mode */
4972 if (amdgpu_dm_vrr_active(acrtc_state))
4973 rc = dm_set_vupdate_irq(crtc, true);
4974 } else {
4975 /* vblank irq off -> vupdate irq off */
4976 rc = dm_set_vupdate_irq(crtc, false);
4977 }
4978
4979 if (rc)
4980 return rc;
4981
4982 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4983 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4984}
4985
4986static int dm_enable_vblank(struct drm_crtc *crtc)
4987{
4988 return dm_set_vblank(crtc, true);
4989}
4990
4991static void dm_disable_vblank(struct drm_crtc *crtc)
4992{
4993 dm_set_vblank(crtc, false);
4994}
4995
4996/* Implemented only the options currently available for the driver */
4997static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4998 .reset = dm_crtc_reset_state,
4999 .destroy = amdgpu_dm_crtc_destroy,
5000 .gamma_set = drm_atomic_helper_legacy_gamma_set,
5001 .set_config = drm_atomic_helper_set_config,
5002 .page_flip = drm_atomic_helper_page_flip,
5003 .atomic_duplicate_state = dm_crtc_duplicate_state,
5004 .atomic_destroy_state = dm_crtc_destroy_state,
5005 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5006 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5007 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5008 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5009 .enable_vblank = dm_enable_vblank,
5010 .disable_vblank = dm_disable_vblank,
5011 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5012};
5013
5014static enum drm_connector_status
5015amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5016{
5017 bool connected;
5018 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5019
5020 /*
5021 * Notes:
5022 * 1. This interface is NOT called in context of HPD irq.
5023 * 2. This interface *is called* in context of user-mode ioctl. Which
5024 * makes it a bad place for *any* MST-related activity.
5025 */
5026
5027 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5028 !aconnector->fake_enable)
5029 connected = (aconnector->dc_sink != NULL);
5030 else
5031 connected = (aconnector->base.force == DRM_FORCE_ON);
5032
5033 update_subconnector_property(aconnector);
5034
5035 return (connected ? connector_status_connected :
5036 connector_status_disconnected);
5037}
5038
5039int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5040 struct drm_connector_state *connector_state,
5041 struct drm_property *property,
5042 uint64_t val)
5043{
5044 struct drm_device *dev = connector->dev;
5045 struct amdgpu_device *adev = drm_to_adev(dev);
5046 struct dm_connector_state *dm_old_state =
5047 to_dm_connector_state(connector->state);
5048 struct dm_connector_state *dm_new_state =
5049 to_dm_connector_state(connector_state);
5050
5051 int ret = -EINVAL;
5052
5053 if (property == dev->mode_config.scaling_mode_property) {
5054 enum amdgpu_rmx_type rmx_type;
5055
5056 switch (val) {
5057 case DRM_MODE_SCALE_CENTER:
5058 rmx_type = RMX_CENTER;
5059 break;
5060 case DRM_MODE_SCALE_ASPECT:
5061 rmx_type = RMX_ASPECT;
5062 break;
5063 case DRM_MODE_SCALE_FULLSCREEN:
5064 rmx_type = RMX_FULL;
5065 break;
5066 case DRM_MODE_SCALE_NONE:
5067 default:
5068 rmx_type = RMX_OFF;
5069 break;
5070 }
5071
5072 if (dm_old_state->scaling == rmx_type)
5073 return 0;
5074
5075 dm_new_state->scaling = rmx_type;
5076 ret = 0;
5077 } else if (property == adev->mode_info.underscan_hborder_property) {
5078 dm_new_state->underscan_hborder = val;
5079 ret = 0;
5080 } else if (property == adev->mode_info.underscan_vborder_property) {
5081 dm_new_state->underscan_vborder = val;
5082 ret = 0;
5083 } else if (property == adev->mode_info.underscan_property) {
5084 dm_new_state->underscan_enable = val;
5085 ret = 0;
5086 } else if (property == adev->mode_info.abm_level_property) {
5087 dm_new_state->abm_level = val;
5088 ret = 0;
5089 }
5090
5091 return ret;
5092}
5093
5094int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5095 const struct drm_connector_state *state,
5096 struct drm_property *property,
5097 uint64_t *val)
5098{
5099 struct drm_device *dev = connector->dev;
5100 struct amdgpu_device *adev = drm_to_adev(dev);
5101 struct dm_connector_state *dm_state =
5102 to_dm_connector_state(state);
5103 int ret = -EINVAL;
5104
5105 if (property == dev->mode_config.scaling_mode_property) {
5106 switch (dm_state->scaling) {
5107 case RMX_CENTER:
5108 *val = DRM_MODE_SCALE_CENTER;
5109 break;
5110 case RMX_ASPECT:
5111 *val = DRM_MODE_SCALE_ASPECT;
5112 break;
5113 case RMX_FULL:
5114 *val = DRM_MODE_SCALE_FULLSCREEN;
5115 break;
5116 case RMX_OFF:
5117 default:
5118 *val = DRM_MODE_SCALE_NONE;
5119 break;
5120 }
5121 ret = 0;
5122 } else if (property == adev->mode_info.underscan_hborder_property) {
5123 *val = dm_state->underscan_hborder;
5124 ret = 0;
5125 } else if (property == adev->mode_info.underscan_vborder_property) {
5126 *val = dm_state->underscan_vborder;
5127 ret = 0;
5128 } else if (property == adev->mode_info.underscan_property) {
5129 *val = dm_state->underscan_enable;
5130 ret = 0;
5131 } else if (property == adev->mode_info.abm_level_property) {
5132 *val = dm_state->abm_level;
5133 ret = 0;
5134 }
5135
5136 return ret;
5137}
5138
5139static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5140{
5141 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5142
5143 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5144}
5145
5146static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5147{
5148 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5149 const struct dc_link *link = aconnector->dc_link;
5150 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5151 struct amdgpu_display_manager *dm = &adev->dm;
5152
5153 /*
5154 * Call only if mst_mgr was initialized before, since it's not done
5155 * for all connector types.
5156 */
5157 if (aconnector->mst_mgr.dev)
5158 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5159
5160#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5161 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5162
5163 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5164 link->type != dc_connection_none &&
5165 dm->backlight_dev) {
5166 backlight_device_unregister(dm->backlight_dev);
5167 dm->backlight_dev = NULL;
5168 }
5169#endif
5170
5171 if (aconnector->dc_em_sink)
5172 dc_sink_release(aconnector->dc_em_sink);
5173 aconnector->dc_em_sink = NULL;
5174 if (aconnector->dc_sink)
5175 dc_sink_release(aconnector->dc_sink);
5176 aconnector->dc_sink = NULL;
5177
5178 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5179 drm_connector_unregister(connector);
5180 drm_connector_cleanup(connector);
5181 if (aconnector->i2c) {
5182 i2c_del_adapter(&aconnector->i2c->base);
5183 kfree(aconnector->i2c);
5184 }
5185 kfree(aconnector->dm_dp_aux.aux.name);
5186
5187 kfree(connector);
5188}
5189
5190void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5191{
5192 struct dm_connector_state *state =
5193 to_dm_connector_state(connector->state);
5194
5195 if (connector->state)
5196 __drm_atomic_helper_connector_destroy_state(connector->state);
5197
5198 kfree(state);
5199
5200 state = kzalloc(sizeof(*state), GFP_KERNEL);
5201
5202 if (state) {
5203 state->scaling = RMX_OFF;
5204 state->underscan_enable = false;
5205 state->underscan_hborder = 0;
5206 state->underscan_vborder = 0;
5207 state->base.max_requested_bpc = 8;
5208 state->vcpi_slots = 0;
5209 state->pbn = 0;
5210 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5211 state->abm_level = amdgpu_dm_abm_level;
5212
5213 __drm_atomic_helper_connector_reset(connector, &state->base);
5214 }
5215}
5216
5217struct drm_connector_state *
5218amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5219{
5220 struct dm_connector_state *state =
5221 to_dm_connector_state(connector->state);
5222
5223 struct dm_connector_state *new_state =
5224 kmemdup(state, sizeof(*state), GFP_KERNEL);
5225
5226 if (!new_state)
5227 return NULL;
5228
5229 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5230
5231 new_state->freesync_capable = state->freesync_capable;
5232 new_state->abm_level = state->abm_level;
5233 new_state->scaling = state->scaling;
5234 new_state->underscan_enable = state->underscan_enable;
5235 new_state->underscan_hborder = state->underscan_hborder;
5236 new_state->underscan_vborder = state->underscan_vborder;
5237 new_state->vcpi_slots = state->vcpi_slots;
5238 new_state->pbn = state->pbn;
5239 return &new_state->base;
5240}
5241
5242static int
5243amdgpu_dm_connector_late_register(struct drm_connector *connector)
5244{
5245 struct amdgpu_dm_connector *amdgpu_dm_connector =
5246 to_amdgpu_dm_connector(connector);
5247 int r;
5248
5249 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5250 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5251 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5252 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5253 if (r)
5254 return r;
5255 }
5256
5257#if defined(CONFIG_DEBUG_FS)
5258 connector_debugfs_init(amdgpu_dm_connector);
5259#endif
5260
5261 return 0;
5262}
5263
5264static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5265 .reset = amdgpu_dm_connector_funcs_reset,
5266 .detect = amdgpu_dm_connector_detect,
5267 .fill_modes = drm_helper_probe_single_connector_modes,
5268 .destroy = amdgpu_dm_connector_destroy,
5269 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5270 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5271 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5272 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5273 .late_register = amdgpu_dm_connector_late_register,
5274 .early_unregister = amdgpu_dm_connector_unregister
5275};
5276
5277static int get_modes(struct drm_connector *connector)
5278{
5279 return amdgpu_dm_connector_get_modes(connector);
5280}
5281
5282static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5283{
5284 struct dc_sink_init_data init_params = {
5285 .link = aconnector->dc_link,
5286 .sink_signal = SIGNAL_TYPE_VIRTUAL
5287 };
5288 struct edid *edid;
5289
5290 if (!aconnector->base.edid_blob_ptr) {
5291 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",__drm_err("No EDID firmware found on connector: %s ,forcing to OFF!\n"
, aconnector->base.name)
5292 aconnector->base.name)__drm_err("No EDID firmware found on connector: %s ,forcing to OFF!\n"
, aconnector->base.name)
;
5293
5294 aconnector->base.force = DRM_FORCE_OFF;
5295 aconnector->base.override_edid = false0;
5296 return;
5297 }
5298
5299 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5300
5301 aconnector->edid = edid;
5302
5303 aconnector->dc_em_sink = dc_link_add_remote_sink(
5304 aconnector->dc_link,
5305 (uint8_t *)edid,
5306 (edid->extensions + 1) * EDID_LENGTH,
5307 &init_params);
5308
5309 if (aconnector->base.force == DRM_FORCE_ON) {
5310 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5311 aconnector->dc_link->local_sink :
5312 aconnector->dc_em_sink;
5313 dc_sink_retain(aconnector->dc_sink);
5314 }
5315}
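/*
 * Editor's note: EDID_LENGTH is one 128-byte EDID block, so a base block
 * plus, e.g., one CEA extension passes (1 + 1) * 128 = 256 bytes to
 * dc_link_add_remote_sink() above.
 */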
5316
5317static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5318{
5319 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5320
5321 /*
5322 * In case of headless boot with force on for a DP managed connector,
5323 * those settings have to be != 0 to get an initial modeset.
5324 */
5325 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5326 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5327 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5328 }
5329
5330
5331 aconnector->base.override_edid = true;
5332 create_eml_sink(aconnector);
5333}
5334
5335static struct dc_stream_state *
5336create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5337 const struct drm_display_mode *drm_mode,
5338 const struct dm_connector_state *dm_state,
5339 const struct dc_stream_state *old_stream)
5340{
5341 struct drm_connector *connector = &aconnector->base;
5342 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5343 struct dc_stream_state *stream;
5344 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5345 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5346 enum dc_status dc_result = DC_OK;
5347
5348 do {
5349 stream = create_stream_for_sink(aconnector, drm_mode,
5350 dm_state, old_stream,
5351 requested_bpc);
5352 if (stream == NULL) {
5353 DRM_ERROR("Failed to create stream for sink!\n");
5354 break;
5355 }
5356
5357 dc_result = dc_validate_stream(adev->dm.dc, stream);
5358
5359 if (dc_result != DC_OK) {
5360 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",__drm_dbg(DRM_UT_KMS, "Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n"
, drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->
clock, dc_result, dc_status_to_str(dc_result))
5361 drm_mode->hdisplay,__drm_dbg(DRM_UT_KMS, "Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n"
, drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->
clock, dc_result, dc_status_to_str(dc_result))
5362 drm_mode->vdisplay,__drm_dbg(DRM_UT_KMS, "Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n"
, drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->
clock, dc_result, dc_status_to_str(dc_result))
5363 drm_mode->clock,__drm_dbg(DRM_UT_KMS, "Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n"
, drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->
clock, dc_result, dc_status_to_str(dc_result))
5364 dc_result,__drm_dbg(DRM_UT_KMS, "Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n"
, drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->
clock, dc_result, dc_status_to_str(dc_result))
5365 dc_status_to_str(dc_result))__drm_dbg(DRM_UT_KMS, "Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n"
, drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->
clock, dc_result, dc_status_to_str(dc_result))
;
5366
5367 dc_stream_release(stream);
5368 stream = NULL((void *)0);
5369 requested_bpc -= 2; /* lower bpc to retry validation */
5370 }
5371
5372 } while (stream == NULL((void *)0) && requested_bpc >= 6);
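 /*
 * Editor's note: starting from, e.g., max_requested_bpc == 10, a mode
 * that keeps failing validation is retried at 10, 8 and 6 bpc before
 * the loop above gives up.
 */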
5373
5374 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
5375 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n")__drm_dbg(DRM_UT_KMS, "Retry forcing YCbCr420 encoding\n");
5376
5377 aconnector->force_yuv420_output = true1;
5378 stream = create_validate_stream_for_sink(aconnector, drm_mode,
5379 dm_state, old_stream);
5380 aconnector->force_yuv420_output = false0;
5381 }
5382
5383 return stream;
5384}
5385
5386enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5387 struct drm_display_mode *mode)
5388{
5389 int result = MODE_ERROR;
5390 struct dc_sink *dc_sink;
5391 /* TODO: Unhardcode stream count */
5392 struct dc_stream_state *stream;
5393 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5394
5395 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5396 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5397 return result;
5398
5399 /*
5400 * Only run this the first time mode_valid is called, to initialize
5401 * EDID mgmt
5402 */
5403 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5404 !aconnector->dc_em_sink)
5405 handle_edid_mgmt(aconnector);
5406
5407 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5408
5409 if (dc_sink == NULL) {
5410 DRM_ERROR("dc_sink is NULL!\n");
5411 goto fail;
5412 }
5413
5414 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5415 if (stream) {
5416 dc_stream_release(stream);
5417 result = MODE_OK;
5418 }
5419
5420fail:
5421 /* TODO: error handling */
5422 return result;
5423}
5424
5425static int fill_hdr_info_packet(const struct drm_connector_state *state,
5426 struct dc_info_packet *out)
5427{
5428 struct hdmi_drm_infoframe frame;
5429 unsigned char buf[30]; /* 26 + 4 */
5430 ssize_t len;
5431 int ret, i;
5432
5433 memset(out, 0, sizeof(*out));
5434
5435 if (!state->hdr_output_metadata)
5436 return 0;
5437
5438 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5439 if (ret)
5440 return ret;
5441
5442 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5443 if (len < 0)
5444 return (int)len;
5445
5446 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5447 if (len != 30)
5448 return -EINVAL;
5449
5450 /* Prepare the infopacket for DC. */
5451 switch (state->connector->connector_type) {
5452 case DRM_MODE_CONNECTOR_HDMIA:
5453 out->hb0 = 0x87; /* type */
5454 out->hb1 = 0x01; /* version */
5455 out->hb2 = 0x1A; /* length */
5456 out->sb[0] = buf[3]; /* checksum */
5457 i = 1;
5458 break;
5459
5460 case DRM_MODE_CONNECTOR_DisplayPort:
5461 case DRM_MODE_CONNECTOR_eDP:
5462 out->hb0 = 0x00; /* sdp id, zero */
5463 out->hb1 = 0x87; /* type */
5464 out->hb2 = 0x1D; /* payload len - 1 */
5465 out->hb3 = (0x13 << 2); /* sdp version */
5466 out->sb[0] = 0x01; /* version */
5467 out->sb[1] = 0x1A; /* length */
5468 i = 2;
5469 break;
5470
5471 default:
5472 return -EINVAL;
5473 }
5474
5475 memcpy(&out->sb[i], &buf[4], 26);
5476 out->valid = true;
5477
5478 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5479 sizeof(out->sb), false);
5480
5481 return 0;
5482}
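/*
 * Editor's note on the layout above: HDMI places the 26 static metadata
 * bytes after a checksum in sb[0] (i == 1), while DP/eDP prepends a
 * version/length pair in sb[0..1] under an SDP header (i == 2), which
 * is why both paths copy 26 bytes starting at buf[4].
 */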
5483
5484static bool
5485is_hdr_metadata_different(const struct drm_connector_state *old_state,
5486 const struct drm_connector_state *new_state)
5487{
5488 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5489 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5490
5491 if (old_blob != new_blob) {
5492 if (old_blob && new_blob &&
5493 old_blob->length == new_blob->length)
5494 return memcmp(old_blob->data, new_blob->data,
5495 old_blob->length);
5496
5497 return true;
5498 }
5499
5500 return false;
5501}
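/*
 * Editor's note: when both blobs exist and their lengths match, the raw
 * memcmp() result is returned above, so any differing byte makes the
 * metadata read as "different".
 */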
5502
5503static int
5504amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5505 struct drm_atomic_state *state)
5506{
5507 struct drm_connector_state *new_con_state =
5508 drm_atomic_get_new_connector_state(state, conn);
5509 struct drm_connector_state *old_con_state =
5510 drm_atomic_get_old_connector_state(state, conn);
5511 struct drm_crtc *crtc = new_con_state->crtc;
5512 struct drm_crtc_state *new_crtc_state;
5513 int ret;
5514
5515 if (!crtc)
5516 return 0;
5517
5518 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5519 struct dc_info_packet hdr_infopacket;
5520
5521 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5522 if (ret)
5523 return ret;
5524
5525 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5526 if (IS_ERR(new_crtc_state))
5527 return PTR_ERR(new_crtc_state);
5528
5529 /*
5530 * DC considers the stream backends changed if the
5531 * static metadata changes. Forcing the modeset also
5532 * gives a simple way for userspace to switch from
5533 * 8bpc to 10bpc when setting the metadata to enter
5534 * or exit HDR.
5535 *
5536 * Changing the static metadata after it's been
5537 * set is permissible, however. So only force a
5538 * modeset if we're entering or exiting HDR.
5539 */
5540 new_crtc_state->mode_changed =
5541 !old_con_state->hdr_output_metadata ||
5542 !new_con_state->hdr_output_metadata;
5543 }
5544
5545 return 0;
5546}
5547
5548static const struct drm_connector_helper_funcs
5549amdgpu_dm_connector_helper_funcs = {
5550 /*
5551 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5552 * modes will be filtered by drm_mode_validate_size(), and those modes
5553 * are missing after user start lightdm. So we need to renew modes list.
5554 * in get_modes call back, not just return the modes count
5555 */
5556 .get_modes = get_modes,
5557 .mode_valid = amdgpu_dm_connector_mode_valid,
5558 .atomic_check = amdgpu_dm_connector_atomic_check,
5559};
5560
5561static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5562{
5563}
5564
5565static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5566{
5567 struct drm_atomic_state *state = new_crtc_state->state;
5568 struct drm_plane *plane;
5569 int num_active = 0;
5570
5571 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5572 struct drm_plane_state *new_plane_state;
5573
5574 /* Cursor planes are "fake". */
5575 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5576 continue;
5577
5578 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5579
5580 if (!new_plane_state) {
5581 /*
5582 * The plane is enabled on the CRTC and hasn't changed
5583 * state. This means that it previously passed
5584 * validation and is therefore enabled.
5585 */
5586 num_active += 1;
5587 continue;
5588 }
5589
5590 /* We need a framebuffer to be considered enabled. */
5591 num_active += (new_plane_state->fb != NULL);
5592 }
5593
5594 return num_active;
5595}
5596
5597static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5598 struct drm_crtc_state *new_crtc_state)
5599{
5600 struct dm_crtc_state *dm_new_crtc_state =
5601 to_dm_crtc_state(new_crtc_state);
5602
5603 dm_new_crtc_state->active_planes = 0;
5604
5605 if (!dm_new_crtc_state->stream)
5606 return;
5607
5608 dm_new_crtc_state->active_planes =
5609 count_crtc_active_planes(new_crtc_state);
5610}
5611
5612static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5613 struct drm_crtc_state *state)
5614{
5615 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5616 struct dc *dc = adev->dm.dc;
5617 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5618 int ret = -EINVAL;
5619
5620 dm_update_crtc_active_planes(crtc, state);
5621
5622 if (unlikely(!dm_crtc_state->stream &&
5623 modeset_required(state, NULL, dm_crtc_state->stream))) {
5624 WARN_ON(1);
5625 return ret;
5626 }
5627
5628 /*
5629 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5630 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5631 * planes are disabled, which is not supported by the hardware. And there is legacy
5632 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5633 */
5634 if (state->enable &&
5635 !(state->plane_mask & drm_plane_mask(crtc->primary)))
5636 return -EINVAL;
5637
5638 /* In some use cases, like reset, no stream is attached */
5639 if (!dm_crtc_state->stream)
5640 return 0;
5641
5642 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5643 return 0;
5644
5645 return ret;
5646}
5647
5648static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5649 const struct drm_display_mode *mode,
5650 struct drm_display_mode *adjusted_mode)
5651{
5652 return true;
5653}
5654
5655static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5656 .disable = dm_crtc_helper_disable,
5657 .atomic_check = dm_crtc_helper_atomic_check,
5658 .mode_fixup = dm_crtc_helper_mode_fixup,
5659 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5660};
5661
5662static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5663{
5664
5665}
5666
5667static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5668{
5669 switch (display_color_depth) {
5670 case COLOR_DEPTH_666:
5671 return 6;
5672 case COLOR_DEPTH_888:
5673 return 8;
5674 case COLOR_DEPTH_101010:
5675 return 10;
5676 case COLOR_DEPTH_121212:
5677 return 12;
5678 case COLOR_DEPTH_141414:
5679 return 14;
5680 case COLOR_DEPTH_161616:
5681 return 16;
5682 default:
5683 break;
5684 }
5685 return 0;
5686}
5687
5688static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5689 struct drm_crtc_state *crtc_state,
5690 struct drm_connector_state *conn_state)
5691{
5692 struct drm_atomic_state *state = crtc_state->state;
5693 struct drm_connector *connector = conn_state->connector;
5694 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5695 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5696 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5697 struct drm_dp_mst_topology_mgr *mst_mgr;
5698 struct drm_dp_mst_port *mst_port;
5699 enum dc_color_depth color_depth;
5700 int clock, bpp = 0;
5701 bool is_y420 = false;
5702
5703 if (!aconnector->port || !aconnector->dc_sink)
5704 return 0;
5705
5706 mst_port = aconnector->port;
5707 mst_mgr = &aconnector->mst_port->mst_mgr;
5708
5709 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5710 return 0;
5711
5712 if (!state->duplicated) {
5713 int max_bpc = conn_state->max_requested_bpc;
5714 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5715 aconnector->force_yuv420_output;
5716 color_depth = convert_color_depth_from_display_info(connector,
5717 is_y420,
5718 max_bpc);
5719 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5720 clock = adjusted_mode->clock;
5721 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5722 }
5723 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5724 mst_mgr,
5725 mst_port,
5726 dm_new_connector_state->pbn,
5727 dm_mst_get_pbn_divider(aconnector->dc_link));
5728 if (dm_new_connector_state->vcpi_slots < 0) {
5729 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots)__drm_dbg(DRM_UT_ATOMIC, "failed finding vcpi slots: %d\n", (
int)dm_new_connector_state->vcpi_slots)
;
5730 return dm_new_connector_state->vcpi_slots;
5731 }
5732 return 0;
5733}
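/*
 * Editor's note (hypothetical mode): 8 bpc RGB gives bpp = 8 * 3 = 24,
 * and drm_dp_calc_pbn_mode() above turns the adjusted mode clock plus
 * that bpp into the PBN figure used for the MST VCPI allocation.
 */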
5734
5735const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5736 .disable = dm_encoder_helper_disable,
5737 .atomic_check = dm_encoder_helper_atomic_check
5738};
5739
5740#if defined(CONFIG_DRM_AMD_DC_DCN)
5741static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5742 struct dc_state *dc_state)
5743{
5744 struct dc_stream_state *stream = NULL;
5745 struct drm_connector *connector;
5746 struct drm_connector_state *new_con_state, *old_con_state;
5747 struct amdgpu_dm_connector *aconnector;
5748 struct dm_connector_state *dm_conn_state;
5749 int i, j, clock, bpp;
5750 int vcpi, pbn_div, pbn = 0;
5751
5752 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5753
5754 aconnector = to_amdgpu_dm_connector(connector);
5755
5756 if (!aconnector->port)
5757 continue;
5758
5759 if (!new_con_state || !new_con_state->crtc)
5760 continue;
5761
5762 dm_conn_state = to_dm_connector_state(new_con_state);
5763
5764 for (j = 0; j < dc_state->stream_count; j++) {
5765 stream = dc_state->streams[j];
5766 if (!stream)
5767 continue;
5768
5769 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5770 break;
5771
5772 stream = NULL;
5773 }
5774
5775 if (!stream)
5776 continue;
5777
5778 if (stream->timing.flags.DSC != 1) {
5779 drm_dp_mst_atomic_enable_dsc(state,
5780 aconnector->port,
5781 dm_conn_state->pbn,
5782 0,
5783 false);
5784 continue;
5785 }
5786
5787 pbn_div = dm_mst_get_pbn_divider(stream->link);
5788 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5789 clock = stream->timing.pix_clk_100hz / 10;
5790 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5791 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5792 aconnector->port,
5793 pbn, pbn_div,
5794 true);
5795 if (vcpi < 0)
5796 return vcpi;
5797
5798 dm_conn_state->pbn = pbn;
5799 dm_conn_state->vcpi_slots = vcpi;
5800 }
5801 return 0;
5802}
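/*
 * Editor's note: in the DSC branch above, drm_dp_calc_pbn_mode() is
 * called with its dsc flag set, where bpp is expected in units of
 * 1/16 of a bit per pixel, matching dsc_cfg.bits_per_pixel.
 */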
5803#endif
5804
5805static void dm_drm_plane_reset(struct drm_plane *plane)
5806{
5807 struct dm_plane_state *amdgpu_state = NULL;
5808
5809 if (plane->state)
5810 plane->funcs->atomic_destroy_state(plane, plane->state);
5811
5812 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5813 WARN_ON(amdgpu_state == NULL);
5814
5815 if (amdgpu_state)
5816 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5817}
5818
5819static struct drm_plane_state *
5820dm_drm_plane_duplicate_state(struct drm_plane *plane)
5821{
5822 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5823
5824 old_dm_plane_state = to_dm_plane_state(plane->state);
5825 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5826 if (!dm_plane_state)
5827 return NULL;
5828
5829 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5830
5831 if (old_dm_plane_state->dc_state) {
5832 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5833 dc_plane_state_retain(dm_plane_state->dc_state);
5834 }
5835
5836 /* Framebuffer hasn't been updated yet, so retain old flags. */
5837 dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5838 dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5839
5840 return &dm_plane_state->base;
5841}
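
Note the ownership rule the duplicate path establishes: a duplicated plane state shares the dc_state pointer but takes its own reference, which dm_drm_plane_destroy_state (below) drops again. In sketch form, using the names from these functions:

	/* duplicate: share the DC plane state, refcount +1 */
	dm_plane_state->dc_state = old_dm_plane_state->dc_state;
	dc_plane_state_retain(dm_plane_state->dc_state);

	/* destroy: refcount -1; DC frees the state when it reaches zero */
	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);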
5842
5843static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5844 struct drm_plane_state *state)
5845{
5846 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5847
5848 if (dm_plane_state->dc_state)
5849 dc_plane_state_release(dm_plane_state->dc_state);
5850
5851 drm_atomic_helper_plane_destroy_state(plane, state);
5852}
5853
5854static const struct drm_plane_funcs dm_plane_funcs = {
5855 .update_plane = drm_atomic_helper_update_plane,
5856 .disable_plane = drm_atomic_helper_disable_plane,
5857 .destroy = drm_primary_helper_destroy,
5858 .reset = dm_drm_plane_reset,
5859 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5860 .atomic_destroy_state = dm_drm_plane_destroy_state,
5861};
5862
5863static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5864 struct drm_plane_state *new_state)
5865{
5866 struct amdgpu_framebuffer *afb;
5867 struct drm_gem_object *obj;
5868 struct amdgpu_device *adev;
5869 struct amdgpu_bo *rbo;
5870 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5871 struct list_head list;
5872 struct ttm_validate_buffer tv;
5873 struct ww_acquire_ctx ticket;
5874 uint32_t domain;
5875 int r;
5876
5877 if (!new_state->fb) {
5878 DRM_DEBUG_DRIVER("No FB bound\n");
5879 return 0;
5880 }
5881
5882 afb = to_amdgpu_framebuffer(new_state->fb);
5883 obj = new_state->fb->obj[0];
5884 rbo = gem_to_amdgpu_bo(obj);
5885 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5886 INIT_LIST_HEAD(&list);
5887
5888 tv.bo = &rbo->tbo;
5889 tv.num_shared = 1;
5890 list_add(&tv.head, &list);
5891
5892 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5893 if (r) {
5894 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5895 return r;
5896 }
5897
5898 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5899 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5900 else
5901 domain = AMDGPU_GEM_DOMAIN_VRAM;
5902
5903 r = amdgpu_bo_pin(rbo, domain);
5904 if (unlikely(r != 0)) {
5905 if (r != -ERESTARTSYS)
5906 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5907 ttm_eu_backoff_reservation(&ticket, &list);
5908 return r;
5909 }
5910
5911 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5912 if (unlikely(r != 0)) {
5913 amdgpu_bo_unpin(rbo);
5914 ttm_eu_backoff_reservation(&ticket, &list);
5915 DRM_ERROR("%p bind failed\n", rbo);
5916 return r;
5917 }
5918
5919 ttm_eu_backoff_reservation(&ticket, &list);
5920
5921 afb->address = amdgpu_bo_gpu_offset(rbo);
5922
5923 amdgpu_bo_ref(rbo);
5924
5925 /**
5926 * We don't do surface updates on planes that have been newly created,
5927 * but we also don't have the afb->address during atomic check.
5928 *
5929 * Fill in buffer attributes depending on the address here, but only on
5930 * newly created planes since they're not being used by DC yet and this
5931 * won't modify global state.
5932 */
5933 dm_plane_state_old = to_dm_plane_state(plane->state);
5934 dm_plane_state_new = to_dm_plane_state(new_state);
5935
5936 if (dm_plane_state_new->dc_state &&
5937 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5938 struct dc_plane_state *plane_state =
5939 dm_plane_state_new->dc_state;
5940 bool force_disable_dcc = !plane_state->dcc.enable;
5941
5942 fill_plane_buffer_attributes(
5943 adev, afb, plane_state->format, plane_state->rotation,
5944 dm_plane_state_new->tiling_flags,
5945 &plane_state->tiling_info, &plane_state->plane_size,
5946 &plane_state->dcc, &plane_state->address,
5947 dm_plane_state_new->tmz_surface, force_disable_dcc);
5948 }
5949
5950 return 0;
5951}
5952
5953static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5954 struct drm_plane_state *old_state)
5955{
5956 struct amdgpu_bo *rbo;
5957 int r;
5958
5959 if (!old_state->fb)
5960 return;
5961
5962 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5963 r = amdgpu_bo_reserve(rbo, false);
5964 if (unlikely(r)) {
5965 DRM_ERROR("failed to reserve rbo before unpin\n");
5966 return;
5967 }
5968
5969 amdgpu_bo_unpin(rbo);
5970 amdgpu_bo_unreserve(rbo);
5971 amdgpu_bo_unref(&rbo);
5972}
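
prepare_fb and cleanup_fb are symmetric: prepare_fb reserves, pins, and GART-binds the buffer object, then takes a reference so it stays alive while scanned out, and cleanup_fb undoes all of that once the framebuffer leaves the screen. Condensed to the essential pairing, with the reservation and error handling omitted:

	/* prepare_fb: make the BO scanout-ready and keep it alive */
	amdgpu_bo_pin(rbo, domain);		/* lock into VRAM/GTT */
	amdgpu_ttm_alloc_gart(&rbo->tbo);	/* guarantee a GPU address */
	amdgpu_bo_ref(rbo);

	/* cleanup_fb: the inverse */
	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unref(&rbo);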
5973
5974static int dm_plane_helper_check_state(struct drm_plane_state *state,
5975 struct drm_crtc_state *new_crtc_state)
5976{
5977 int max_downscale = 0;
5978 int max_upscale = INT_MAX;
5979
5980 /* TODO: These should be checked against DC plane caps */
5981 return drm_atomic_helper_check_plane_state(
5982 state, new_crtc_state, max_downscale, max_upscale, true, true);
5983}
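
The two scale limits are 16.16 fixed-point factors, where 1 << 16 (DRM_PLANE_HELPER_NO_SCALING) means 1:1; passing 0 and INT_MAX as above effectively waives the clamp, which is what the TODO refers to. A variant that would reject any scaling, purely as a sketch of the convention rather than what the driver does:

	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, DRM_PLANE_HELPER_NO_SCALING,
		DRM_PLANE_HELPER_NO_SCALING, true, true);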
5984
5985static int dm_plane_atomic_check(struct drm_plane *plane,
5986 struct drm_plane_state *state)
5987{
5988 struct amdgpu_device *adev = drm_to_adev(plane->dev);
5989 struct dc *dc = adev->dm.dc;
5990 struct dm_plane_state *dm_plane_state;
5991 struct dc_scaling_info scaling_info;
5992 struct drm_crtc_state *new_crtc_state;
5993 int ret;
5994
5995 dm_plane_state = to_dm_plane_state(state);
5996
5997 if (!dm_plane_state->dc_state)
5998 return 0;
5999
6000 new_crtc_state =
6001 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6002 if (!new_crtc_state)
6003 return -EINVAL;
6004
6005 ret = dm_plane_helper_check_state(state, new_crtc_state);
6006 if (ret)
6007 return ret;
6008
6009 ret = fill_dc_scaling_info(state, &scaling_info);
6010 if (ret)
6011 return ret;
6012
6013 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6014 return 0;
6015
6016 return -EINVAL;
6017}
6018
6019static int dm_plane_atomic_async_check(struct drm_plane *plane,
6020 struct drm_plane_state *new_plane_state)
6021{
6022 /* Only support async updates on cursor planes. */
6023 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6024 return -EINVAL;
6025
6026 return 0;
6027}
6028
6029static void dm_plane_atomic_async_update(struct drm_plane *plane,
6030 struct drm_plane_state *new_state)
6031{
6032 struct drm_plane_state *old_state =
6033 drm_atomic_get_old_plane_state(new_state->state, plane);
6034
6035 swap(plane->state->fb, new_state->fb);
6036
6037 plane->state->src_x = new_state->src_x;
6038 plane->state->src_y = new_state->src_y;
6039 plane->state->src_w = new_state->src_w;
6040 plane->state->src_h = new_state->src_h;
6041 plane->state->crtc_x = new_state->crtc_x;
6042 plane->state->crtc_y = new_state->crtc_y;
6043 plane->state->crtc_w = new_state->crtc_w;
6044 plane->state->crtc_h = new_state->crtc_h;
6045
6046 handle_cursor_update(plane, old_state);
6047}
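
These two hooks back the legacy cursor ioctls: the DRM atomic helpers run atomic_async_check first and, only when it accepts the plane, bypass the full commit and call atomic_async_update directly. A simplified view of the calling side (not the driver's own code):

	const struct drm_plane_helper_funcs *funcs = plane->helper_private;

	if (funcs->atomic_async_check(plane, new_plane_state) == 0)
		funcs->atomic_async_update(plane, new_plane_state);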
6048
6049static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6050 .prepare_fb = dm_plane_helper_prepare_fb,
6051 .cleanup_fb = dm_plane_helper_cleanup_fb,
6052 .atomic_check = dm_plane_atomic_check,
6053 .atomic_async_check = dm_plane_atomic_async_check,
6054 .atomic_async_update = dm_plane_atomic_async_update
6055};
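
Both vtables are wired up when the plane is created: dm_plane_funcs goes to drm_universal_plane_init() and dm_plane_helper_funcs to drm_plane_helper_add(). A sketch of that registration, with illustrative argument values:

	res = drm_universal_plane_init(dev, plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       NULL, plane->type, NULL);
	if (res == 0)
		drm_plane_helper_add(plane, &dm_plane_helper_funcs);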
6056
6057/*
6058 * TODO: these are currently initialized to rgb formats only.
6059 * For future use cases we should either initialize them dynamically based on
6060 * plane capabilities, or initialize this array to all formats, so internal drm
6061 * check will succeed, and let DC implement proper check
6062 */
6063static const uint32_t rgb_formats[] = {
6064 DRM_FORMAT_XRGB8888,
6065 DRM_FORMAT_ARGB8888,
6066 DRM_FORMAT_RGBA8888,
6067 DRM_FORMAT_XRGB2101010,
6068 DRM_FORMAT_XBGR2101010,
6069 DRM_FORMAT_ARGB2101010,
6070 DRM_FORMAT_ABGR2101010,
6071 DRM_FORMAT_XBGR8888,
6072 DRM_FORMAT_ABGR8888,
6073 DRM_FORMAT_RGB565,
6074};
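
Each DRM_FORMAT_* constant is a little-endian fourcc built from four ASCII bytes, which is exactly the packing the analyzer expands inline. A standalone check of that packing; fourcc_code here mirrors the definition in the DRM uapi headers:

	#include <stdint.h>
	#include <stdio.h>

	#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
					 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

	int main(void)
	{
		/* 'X' 'R' '2' '4' -> 0x34325258 == DRM_FORMAT_XRGB8888 */
		printf("0x%08x\n", (unsigned)fourcc_code('X', 'R', '2', '4'));
		return 0;
	}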
6075
6076static const uint32_t overlay_formats[] = {
6077 DRM_FORMAT_XRGB8888,
6078 DRM_FORMAT_ARGB8888,
6079 DRM_FORMAT_RGBA8888,
6080 DRM_FORMAT_XBGR8888,
6081 DRM_FORMAT_ABGR8888,
6082 DRM_FORMAT_RGB565
6083};
6084
6085static const u32 cursor_formats[] = {
6086 DRM_FORMAT_ARGB8888
6087};
6088
6089static int get_plane_formats(const struct drm_plane *plane,
6090 const struct dc_plane_cap *plane_cap,
6091 uint32_t *formats, int max_formats)
6092{
6093 int i, num_formats = 0;
6094
6095 /*
6096 * TODO: Query support for each group of formats directly from
6097 * DC plane caps. This will require adding more formats to the
6098 * caps list.
6099 */
6100
6101 switch (plane->type) {
6102 case DRM_PLANE_TYPE_PRIMARY:
6103 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6104 if (num_formats >= max_formats)
6105 break;
6106
6107 formats[num_formats++] = rgb_formats[i];
6108 }
6109
6110 if (plane_cap && plane_cap->pixel_format_support.nv12)
6111 formats[num_formats++] = DRM_FORMAT_NV12;
6112 if (plane_cap && plane_cap->pixel_format_support.p010)
6113 formats[num_formats++] = DRM_FORMAT_P010;
6114 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6115 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6116 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6117 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6118 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6119 }
6120 break;
6121
6122 case DRM_PLANE_TYPE_OVERLAY:
6123 for (i = 0; i < ARRAY_SIZE(overlay_form