Bug Summary

File: dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
Warning: line 616, column 2
Value stored to 'ret' is never read
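
The deadcode.DeadStores checker flags a value that is stored to a variable and then overwritten (or goes out of scope) before it is ever read. Here, the result of the drm_dp_dpcd_write() call for SYNAPTICS_RC_LENGTH at line 616 is stored to 'ret', but 'ret' is reassigned by the SYNAPTICS_RC_COMMAND write at line 620 before anything looks at it, so a failed length write goes unnoticed. The stand-alone sketch below only illustrates the pattern and the usual remedy (consume each result before reusing the variable); it is not the driver's code, and fake_dpcd_write() is a made-up stand-in for drm_dp_dpcd_write(), with register numbers borrowed from the listing purely for flavor.

#include <stdio.h>

/* Hypothetical stand-in for drm_dp_dpcd_write(): returns bytes written,
 * or a negative value on error. Not a real DRM API. */
static int fake_dpcd_write(unsigned int reg, unsigned char val)
{
	printf("write reg 0x%03x = 0x%02x\n", reg, val);
	return 1;
}

/* Mirrors the flagged pattern: the first store to 'ret' is dead. */
static int flagged_pattern(void)
{
	int ret;

	ret = fake_dpcd_write(0x4B8, 0x10);	/* value stored to 'ret' ... */
	ret = fake_dpcd_write(0x4B2, 0x81);	/* ... overwritten before any read */
	return ret < 0 ? -1 : 0;
}

/* One way to satisfy the checker: check each result before reusing 'ret'. */
static int checked_pattern(void)
{
	int ret;

	ret = fake_dpcd_write(0x4B8, 0x10);
	if (ret < 0)
		return -1;

	ret = fake_dpcd_write(0x4B2, 0x81);
	return ret < 0 ? -1 : 0;
}

int main(void)
{
	return flagged_pattern() || checked_pattern();
}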

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name amdgpu_dm_helpers.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26#include <linux/string.h>
27#include <linux/acpi.h>
28#include <linux/i2c.h>
29
30#include <drm/drm_atomic.h>
31#include <drm/drm_probe_helper.h>
32#include <drm/amdgpu_drm.h>
33#include <drm/drm_edid.h>
34
35#include "dm_services.h"
36#include "amdgpu.h"
37#include "dc.h"
38#include "amdgpu_dm.h"
39#include "amdgpu_dm_irq.h"
40#include "amdgpu_dm_mst_types.h"
41#include "dpcd_defs.h"
42#include "dc/inc/core_types.h"
43#include "dc_link_dp.h"
44
45#include "dm_helpers.h"
46#include "ddc_service_types.h"
47
48static u32 edid_extract_panel_id(struct edid *edid)
49{
50 return (u32)edid->mfg_id[0] << 24 |
51 (u32)edid->mfg_id[1] << 16 |
52 (u32)EDID_PRODUCT_ID(edid);
53}
54
55static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
56{
57 uint32_t panel_id = edid_extract_panel_id(edid);
58
59 switch (panel_id) {
60 /* Workaround for some monitors which does not work well with FAMS */
61 case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E):
62 case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053):
63 case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC):
64 DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
65 edid_caps->panel_patch.disable_fams = true;
66 break;
67 default:
68 return;
69 }
70}
71
72/* dm_helpers_parse_edid_caps
73 *
74 * Parse edid caps
75 *
76 * @edid: [in] pointer to edid
77 * edid_caps: [in] pointer to edid caps
78 * @return
79 * void
80 * */
81enum dc_edid_status dm_helpers_parse_edid_caps(
82 struct dc_link *link,
83 const struct dc_edid *edid,
84 struct dc_edid_caps *edid_caps)
85{
86 struct amdgpu_dm_connector *aconnector = link->priv;
87 struct drm_connector *connector = &aconnector->base;
88 struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL;
89 struct cea_sad *sads;
90 int sad_count = -1;
91 int sadb_count = -1;
92 int i = 0;
93 uint8_t *sadb = NULL;
94
95 enum dc_edid_status result = EDID_OK;
96
97 if (!edid_caps || !edid)
98 return EDID_BAD_INPUT;
99
100 if (!drm_edid_is_valid(edid_buf))
101 result = EDID_BAD_CHECKSUM;
102
103 edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
104 ((uint16_t) edid_buf->mfg_id[1])<<8;
105 edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
106 ((uint16_t) edid_buf->prod_code[1])<<8;
107 edid_caps->serial_number = edid_buf->serial;
108 edid_caps->manufacture_week = edid_buf->mfg_week;
109 edid_caps->manufacture_year = edid_buf->mfg_year;
110
111 drm_edid_get_monitor_name(edid_buf,
112 edid_caps->display_name,
113 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
114
115 edid_caps->edid_hdmi = connector->display_info.is_hdmi;
116
117 sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
118 if (sad_count <= 0)
119 return result;
120
121 edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
122 for (i = 0; i < edid_caps->audio_mode_count; ++i) {
123 struct cea_sad *sad = &sads[i];
124
125 edid_caps->audio_modes[i].format_code = sad->format;
126 edid_caps->audio_modes[i].channel_count = sad->channels + 1;
127 edid_caps->audio_modes[i].sample_rate = sad->freq;
128 edid_caps->audio_modes[i].sample_size = sad->byte2;
129 }
130
131 sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);
132
133 if (sadb_count < 0) {
134 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count)__drm_err("Couldn't read Speaker Allocation Data Block: %d\n"
, sadb_count)
;
135 sadb_count = 0;
136 }
137
138 if (sadb_count)
139 edid_caps->speaker_flags = sadb[0];
140 else
141 edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
142
143 apply_edid_quirks(edid_buf, edid_caps);
144
145 kfree(sads);
146 kfree(sadb);
147
148 return result;
149}
150
151static void
152fill_dc_mst_payload_table_from_drm(struct dc_link *link,
153 bool_Bool enable,
154 struct drm_dp_mst_atomic_payload *target_payload,
155 struct dc_dp_mst_stream_allocation_table *table)
156{
157 struct dc_dp_mst_stream_allocation_table new_table = { 0 };
158 struct dc_dp_mst_stream_allocation *sa;
159 struct link_mst_stream_allocation_table copy_of_link_table =
160 link->mst_stream_alloc_table;
161
162 int i;
163 int current_hw_table_stream_cnt = copy_of_link_table.stream_count;
164 struct link_mst_stream_allocation *dc_alloc;
165
166 /* TODO: refactor to set link->mst_stream_alloc_table directly if possible.*/
167 if (enable) {
168 dc_alloc =
169 &copy_of_link_table.stream_allocations[current_hw_table_stream_cnt];
170 dc_alloc->vcp_id = target_payload->vcpi;
171 dc_alloc->slot_count = target_payload->time_slots;
172 } else {
173 for (i = 0; i < copy_of_link_table.stream_count; i++) {
174 dc_alloc =
175 &copy_of_link_table.stream_allocations[i];
176
177 if (dc_alloc->vcp_id == target_payload->vcpi) {
178 dc_alloc->vcp_id = 0;
179 dc_alloc->slot_count = 0;
180 break;
181 }
182 }
183 ASSERT(i != copy_of_link_table.stream_count);
184 }
185
186 /* Fill payload info*/
187 for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
188 dc_alloc =
189 &copy_of_link_table.stream_allocations[i];
190 if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) {
191 sa = &new_table.stream_allocations[new_table.stream_count];
192 sa->slot_count = dc_alloc->slot_count;
193 sa->vcp_id = dc_alloc->vcp_id;
194 new_table.stream_count++;
195 }
196 }
197
198 /* Overwrite the old table */
199 *table = new_table;
200}
201
202void dm_helpers_dp_update_branch_info(
203 struct dc_context *ctx,
204 const struct dc_link *link)
205{}
206
207static void dm_helpers_construct_old_payload(
208 struct dc_link *link,
209 int pbn_per_slot,
210 struct drm_dp_mst_atomic_payload *new_payload,
211 struct drm_dp_mst_atomic_payload *old_payload)
212{
213 struct link_mst_stream_allocation_table current_link_table =
214 link->mst_stream_alloc_table;
215 struct link_mst_stream_allocation *dc_alloc;
216 int i;
217
218 *old_payload = *new_payload;
219
220 /* Set correct time_slots/PBN of old payload.
221 * other fields (delete & dsc_enabled) in
222 * struct drm_dp_mst_atomic_payload are don't care fields
223 * while calling drm_dp_remove_payload()
224 */
225 for (i = 0; i < current_link_table.stream_count; i++) {
226 dc_alloc =
227 &current_link_table.stream_allocations[i];
228
229 if (dc_alloc->vcp_id == new_payload->vcpi) {
230 old_payload->time_slots = dc_alloc->slot_count;
231 old_payload->pbn = dc_alloc->slot_count * pbn_per_slot;
232 break;
233 }
234 }
235
236 /* make sure there is an old payload*/
237 ASSERT(i != current_link_table.stream_count);
238
239}
240
241/*
242 * Writes payload allocation table in immediate downstream device.
243 */
244bool_Bool dm_helpers_dp_mst_write_payload_allocation_table(
245 struct dc_context *ctx,
246 const struct dc_stream_state *stream,
247 struct dc_dp_mst_stream_allocation_table *proposed_table,
248 bool_Bool enable)
249{
250 struct amdgpu_dm_connector *aconnector;
251 struct drm_dp_mst_topology_state *mst_state;
252 struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
253 struct drm_dp_mst_topology_mgr *mst_mgr;
254
255 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
256 /* Accessing the connector state is required for vcpi_slots allocation
257 * and directly relies on behaviour in commit check
258 * that blocks before commit guaranteeing that the state
259 * is not gonna be swapped while still in use in commit tail */
260
261 if (!aconnector || !aconnector->mst_port)
262 return false0;
263
264 mst_mgr = &aconnector->mst_port->mst_mgr;
265 mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
266
267 /* It's OK for this to fail */
268 new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
269
270 if (enable) {
271 target_payload = new_payload;
272
273 drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
274 } else {
275 /* construct old payload by VCPI*/
276 dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div,
277 new_payload, &old_payload);
278 target_payload = &old_payload;
279
280 drm_dp_remove_payload(mst_mgr, mst_state, &old_payload, new_payload);
281 }
282
283 /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
284 * AUX message. The sequence is slot 1-63 allocated sequence for each
285 * stream. AMD ASIC stream slot allocation should follow the same
286 * sequence. copy DRM MST allocation to dc */
287 fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
288
289 return true1;
290}
291
292/*
293 * poll pending down reply
294 */
295void dm_helpers_dp_mst_poll_pending_down_reply(
296 struct dc_context *ctx,
297 const struct dc_link *link)
298{}
299
300/*
301 * Clear payload allocation table before enable MST DP link.
302 */
303void dm_helpers_dp_mst_clear_payload_allocation_table(
304 struct dc_context *ctx,
305 const struct dc_link *link)
306{}
307
308/*
309 * Polls for ACT (allocation change trigger) handled and sends
310 * ALLOCATE_PAYLOAD message.
311 */
312enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
313 struct dc_context *ctx,
314 const struct dc_stream_state *stream)
315{
316 struct amdgpu_dm_connector *aconnector;
317 struct drm_dp_mst_topology_mgr *mst_mgr;
318 int ret;
319
320 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
321
322 if (!aconnector || !aconnector->mst_port)
323 return ACT_FAILED;
324
325 mst_mgr = &aconnector->mst_port->mst_mgr;
326
327 if (!mst_mgr->mst_state)
328 return ACT_FAILED;
329
330 ret = drm_dp_check_act_status(mst_mgr);
331
332 if (ret)
333 return ACT_FAILED;
334
335 return ACT_SUCCESS;
336}
337
338bool_Bool dm_helpers_dp_mst_send_payload_allocation(
339 struct dc_context *ctx,
340 const struct dc_stream_state *stream,
341 bool_Bool enable)
342{
343 struct amdgpu_dm_connector *aconnector;
344 struct drm_dp_mst_topology_state *mst_state;
345 struct drm_dp_mst_topology_mgr *mst_mgr;
346 struct drm_dp_mst_atomic_payload *payload;
347 enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
348 enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
349
350 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
351
352 if (!aconnector || !aconnector->mst_port)
353 return false0;
354
355 mst_mgr = &aconnector->mst_port->mst_mgr;
356 mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
357
358 payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
359 if (!enable) {
360 set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
361 clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
362 }
363
364 if (enable && drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload)) {
365 amdgpu_dm_set_mst_status(&aconnector->mst_status,
366 set_flag, false0);
367 } else {
368 amdgpu_dm_set_mst_status(&aconnector->mst_status,
369 set_flag, true1);
370 amdgpu_dm_set_mst_status(&aconnector->mst_status,
371 clr_flag, false0);
372 }
373
374 return true1;
375}
376
377void dm_dtn_log_begin(struct dc_context *ctx,
378 struct dc_log_buffer_ctx *log_ctx)
379{
380 static const char msg[] = "[dtn begin]\n";
381
382 if (!log_ctx) {
383 pr_info("%s", msg);
384 return;
385 }
386
387 dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
388}
389
390__printf(3, 4)
391void dm_dtn_log_append_v(struct dc_context *ctx,
392 struct dc_log_buffer_ctx *log_ctx,
393 const char *msg, ...)
394{
395 va_list args;
396 size_t total;
397 int n;
398
399 if (!log_ctx) {
400 /* No context, redirect to dmesg. */
401 struct va_format vaf;
402
403 vaf.fmt = msg;
404 vaf.va = &args;
405
406 va_start(args, msg);
407 pr_info("%pV", &vaf);
408 va_end(args);
409
410 return;
411 }
412
413 /* Measure the output. */
414 va_start(args, msg);
415 n = vsnprintf(NULL, 0, msg, args);
416 va_end(args);
417
418 if (n <= 0)
419 return;
420
421 /* Reallocate the string buffer as needed. */
422 total = log_ctx->pos + n + 1;
423
424 if (total > log_ctx->size) {
425 char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL);
426
427 if (buf) {
428 memcpy(buf, log_ctx->buf, log_ctx->pos);
429 kfree(log_ctx->buf);
430
431 log_ctx->buf = buf;
432 log_ctx->size = total;
433 }
434 }
435
436 if (!log_ctx->buf)
437 return;
438
439 /* Write the formatted string to the log buffer. */
440 va_start(args, msg);
441 n = vscnprintf(
442 log_ctx->buf + log_ctx->pos,
443 log_ctx->size - log_ctx->pos,
444 msg,
445 args);
446 va_end(args);
447
448 if (n > 0)
449 log_ctx->pos += n;
450}
451
452void dm_dtn_log_end(struct dc_context *ctx,
453 struct dc_log_buffer_ctx *log_ctx)
454{
455 static const char msg[] = "[dtn end]\n";
456
457 if (!log_ctx) {
458 pr_info("%s", msg);
459 return;
460 }
461
462 dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
463}
464
465bool dm_helpers_dp_mst_start_top_mgr(
466 struct dc_context *ctx,
467 const struct dc_link *link,
468 bool boot)
469{
470 struct amdgpu_dm_connector *aconnector = link->priv;
471
472 if (!aconnector) {
473 DRM_ERROR("Failed to find connector for link!");
474 return false;
475 }
476
477 if (boot) {
478 DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n",
479 aconnector, aconnector->base.base.id);
480 return true;
481 }
482
483 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
484 aconnector, aconnector->base.base.id);
485
486 return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
488
489bool dm_helpers_dp_mst_stop_top_mgr(
490 struct dc_context *ctx,
491 struct dc_link *link)
492{
493 struct amdgpu_dm_connector *aconnector = link->priv;
494
495 if (!aconnector) {
496 DRM_ERROR("Failed to find connector for link!");
497 return false;
498 }
499
500 DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
501 aconnector, aconnector->base.base.id);
502
503 if (aconnector->mst_mgr.mst_state == true) {
504 drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
505 link->cur_link_settings.lane_count = 0;
506 }
507
508 return false;
509}
510
511bool_Bool dm_helpers_dp_read_dpcd(
512 struct dc_context *ctx,
513 const struct dc_link *link,
514 uint32_t address,
515 uint8_t *data,
516 uint32_t size)
517{
518
519 struct amdgpu_dm_connector *aconnector = link->priv;
520
521 if (!aconnector) {
522 DC_LOG_DC("Failed to find connector for link!\n")___drm_dbg(((void *)0), DRM_UT_KMS, "Failed to find connector for link!\n"
)
;
523 return false0;
524 }
525
526 return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
527 data, size) > 0;
528}
529
530bool_Bool dm_helpers_dp_write_dpcd(
531 struct dc_context *ctx,
532 const struct dc_link *link,
533 uint32_t address,
534 const uint8_t *data,
535 uint32_t size)
536{
537 struct amdgpu_dm_connector *aconnector = link->priv;
538
539 if (!aconnector) {
540 DRM_ERROR("Failed to find connector for link!")__drm_err("Failed to find connector for link!");
541 return false0;
542 }
543
544 return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
545 address, (uint8_t *)data, size) > 0;
546}
547
548bool_Bool dm_helpers_submit_i2c(
549 struct dc_context *ctx,
550 const struct dc_link *link,
551 struct i2c_command *cmd)
552{
553 struct amdgpu_dm_connector *aconnector = link->priv;
554 struct i2c_msg *msgs;
555 int i = 0;
556 int num = cmd->number_of_payloads;
557 bool_Bool result;
558
559 if (!aconnector) {
560 DRM_ERROR("Failed to find connector for link!")__drm_err("Failed to find connector for link!");
561 return false0;
562 }
563
564 msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);
565
566 if (!msgs)
567 return false;
568
569 for (i = 0; i < num; i++) {
570 msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
571 msgs[i].addr = cmd->payloads[i].address;
572 msgs[i].len = cmd->payloads[i].length;
573 msgs[i].buf = cmd->payloads[i].data;
574 }
575
576 result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;
577
578 kfree(msgs);
579
580 return result;
581}
582
583#if defined(CONFIG_DRM_AMD_DC_DCN)
584static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
585 bool is_write_cmd,
586 unsigned char cmd,
587 unsigned int length,
588 unsigned int offset,
589 unsigned char *data)
590{
591 bool success = false;
592 unsigned char rc_data[16] = {0};
593 unsigned char rc_offset[4] = {0};
594 unsigned char rc_length[2] = {0};
595 unsigned char rc_cmd = 0;
596 unsigned char rc_result = 0xFF;
597 unsigned char i = 0;
598 int ret;
599
600 if (is_write_cmd) {
601 // write rc data
602 memmove(rc_data, data, length);
603 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data));
604 }
605
606 // write rc offset
607 rc_offset[0] = (unsigned char) offset & 0xFF;
608 rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF;
609 rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF;
610 rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF;
611 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset));
612
613 // write rc length
614 rc_length[0] = (unsigned char) length & 0xFF;
615 rc_length[1] = (unsigned char) (length >> 8) & 0xFF;
616 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length));
Value stored to 'ret' is never read
617
618 // write rc cmd
619 rc_cmd = cmd | 0x80;
620 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
621
622 if (ret < 0) {
623 DRM_ERROR(" execute_synaptics_rc_command - write cmd ..., err = %d\n", ret);
624 return false;
625 }
626
627 // poll until active is 0
628 for (i = 0; i < 10; i++) {
629 drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
630 if (rc_cmd == cmd)
631 // active is 0
632 break;
633 drm_msleep(10);
634 }
635
636 // read rc result
637 drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result));
638 success = (rc_result == 0);
639
640 if (success && !is_write_cmd) {
641 // read rc data
642 drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length);
643 }
644
645 DC_LOG_DC(" execute_synaptics_rc_command - success = %d\n", success);
646
647 return success;
648}
649
650static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
651{
652 unsigned char data[16] = {0};
653
654 DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n")___drm_dbg(((void *)0), DRM_UT_KMS, "Start apply_synaptics_fifo_reset_wa\n"
)
;
655
656 // Step 2
657 data[0] = 'P';
658 data[1] = 'R';
659 data[2] = 'I';
660 data[3] = 'U';
661 data[4] = 'S';
662
663 if (!execute_synaptics_rc_command(aux, true1, 0x01, 5, 0, data))
664 return;
665
666 // Step 3 and 4
667 if (!execute_synaptics_rc_command(aux, false0, 0x31, 4, 0x220998, data))
668 return;
669
670 data[0] &= (~(1 << 1)); // set bit 1 to 0
671 if (!execute_synaptics_rc_command(aux, true1, 0x21, 4, 0x220998, data))
672 return;
673
674 if (!execute_synaptics_rc_command(aux, false0, 0x31, 4, 0x220D98, data))
675 return;
676
677 data[0] &= (~(1 << 1)); // set bit 1 to 0
678 if (!execute_synaptics_rc_command(aux, true1, 0x21, 4, 0x220D98, data))
679 return;
680
681 if (!execute_synaptics_rc_command(aux, false0, 0x31, 4, 0x221198, data))
682 return;
683
684 data[0] &= (~(1 << 1)); // set bit 1 to 0
685 if (!execute_synaptics_rc_command(aux, true1, 0x21, 4, 0x221198, data))
686 return;
687
688 // Step 3 and 5
689 if (!execute_synaptics_rc_command(aux, false0, 0x31, 4, 0x220998, data))
690 return;
691
692 data[0] |= (1 << 1); // set bit 1 to 1
693 if (!execute_synaptics_rc_command(aux, true1, 0x21, 4, 0x220998, data))
694 return;
695
696 if (!execute_synaptics_rc_command(aux, false0, 0x31, 4, 0x220D98, data))
697 return;
698
699 data[0] |= (1 << 1); // set bit 1 to 1
700 return;
701
702 if (!execute_synaptics_rc_command(aux, false0, 0x31, 4, 0x221198, data))
703 return;
704
705 data[0] |= (1 << 1); // set bit 1 to 1
706 if (!execute_synaptics_rc_command(aux, true1, 0x21, 4, 0x221198, data))
707 return;
708
709 // Step 6
710 if (!execute_synaptics_rc_command(aux, true1, 0x02, 0, 0, NULL((void *)0)))
711 return;
712
713 DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n")___drm_dbg(((void *)0), DRM_UT_KMS, "Done apply_synaptics_fifo_reset_wa\n"
)
;
714}
715
716static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
717 struct drm_dp_aux *aux,
718 const struct dc_stream_state *stream,
719 bool_Bool enable)
720{
721 uint8_t ret = 0;
722
723 DC_LOG_DC("Configure DSC to non-virtual dpcd synaptics\n")___drm_dbg(((void *)0), DRM_UT_KMS, "Configure DSC to non-virtual dpcd synaptics\n"
)
;
724
725 if (enable) {
726 /* When DSC is enabled on previous boot and reboot with the hub,
727 * there is a chance that Synaptics hub gets stuck during reboot sequence.
728 * Applying a workaround to reset Synaptics SDP fifo before enabling the first stream
729 */
730 if (!stream->link->link_status.link_active &&
731 memcmp(stream->link->dpcd_caps.branch_dev_name,
732 (int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0)
733 apply_synaptics_fifo_reset_wa(aux);
734
735 ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
736 DRM_INFO("Send DSC enable to synaptics\n");
737
738 } else {
739 /* Synaptics hub not support virtual dpcd,
740 * external monitor occur garbage while disable DSC,
741 * Disable DSC only when entire link status turn to false,
742 */
743 if (!stream->link->link_status.link_active) {
744 ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
745 DRM_INFO("Send DSC disable to synaptics\n");
746 }
747 }
748
749 return ret;
750}
751#endif
752
753bool_Bool dm_helpers_dp_write_dsc_enable(
754 struct dc_context *ctx,
755 const struct dc_stream_state *stream,
756 bool_Bool enable)
757{
758 static const uint8_t DSC_DISABLE;
759 static const uint8_t DSC_DECODING = 0x01;
760 static const uint8_t DSC_PASSTHROUGH = 0x02;
761
762 struct amdgpu_dm_connector *aconnector;
763 struct drm_dp_mst_port *port;
764 uint8_t enable_dsc = enable ? DSC_DECODING : DSC_DISABLE;
765 uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE;
766 uint8_t ret = 0;
767
768 if (!stream)
769 return false0;
770
771 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
772 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
773
774 if (!aconnector->dsc_aux)
775 return false0;
776
777#if defined(CONFIG_DRM_AMD_DC_DCN)
778 // apply w/a to synaptics
779 if (needs_dsc_aux_workaround(aconnector->dc_link) &&
780 (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3)
781 return write_dsc_enable_synaptics_non_virtual_dpcd_mst(
782 aconnector->dsc_aux, stream, enable_dsc);
783#endif
784
785 port = aconnector->port;
786
787 if (enable) {
788 if (port->passthrough_aux) {
789 ret = drm_dp_dpcd_write(port->passthrough_aux,
790 DP_DSC_ENABLE,
791 &enable_passthrough, 1);
792 DC_LOG_DC("Sent DSC pass-through enable to virtual dpcd port, ret = %u\n",
793 ret);
794 }
795
796 ret = drm_dp_dpcd_write(aconnector->dsc_aux,
797 DP_DSC_ENABLE, &enable_dsc, 1);
798 DC_LOG_DC("Sent DSC decoding enable to %s port, ret = %u\n",
799 (port->passthrough_aux) ? "remote RX" :
800 "virtual dpcd",
801 ret);
802 } else {
803 ret = drm_dp_dpcd_write(aconnector->dsc_aux,
804 DP_DSC_ENABLE, &enable_dsc, 1);
805 DC_LOG_DC("Sent DSC decoding disable to %s port, ret = %u\n",
806 (port->passthrough_aux) ? "remote RX" :
807 "virtual dpcd",
808 ret);
809
810 if (port->passthrough_aux) {
811 ret = drm_dp_dpcd_write(port->passthrough_aux,
812 DP_DSC_ENABLE,
813 &enable_passthrough, 1);
814 DC_LOG_DC("Sent DSC pass-through disable to virtual dpcd port, ret = %u\n",
815 ret);
816 }
817 }
818 }
819
820 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
821#if defined(CONFIG_DRM_AMD_DC_DCN)
822 if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
823#endif
824 ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
825 DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable");
826#if defined(CONFIG_DRM_AMD_DC_DCN)
827 } else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
828 ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
829 DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable");
830 }
831#endif
832 }
833
834 return ret;
835}
836
837bool_Bool dm_helpers_is_dp_sink_present(struct dc_link *link)
838{
839 bool_Bool dp_sink_present;
840 struct amdgpu_dm_connector *aconnector = link->priv;
841
842 if (!aconnector) {
843 BUG_ON("Failed to find connector for link!")((!("Failed to find connector for link!")) ? (void)0 : __assert
("diagnostic ", "/usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c"
, 843, "!(\"Failed to find connector for link!\")"))
;
844 return true1;
845 }
846
847 mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
848 dp_sink_present = dc_link_is_dp_sink_present(link);
849 mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
850 return dp_sink_present;
851}
852
853enum dc_edid_status dm_helpers_read_local_edid(
854 struct dc_context *ctx,
855 struct dc_link *link,
856 struct dc_sink *sink)
857{
858 struct amdgpu_dm_connector *aconnector = link->priv;
859 struct drm_connector *connector = &aconnector->base;
860 struct i2c_adapter *ddc;
861 int retry = 3;
862 enum dc_edid_status edid_status;
863 struct edid *edid;
864
865 if (link->aux_mode)
866 ddc = &aconnector->dm_dp_aux.aux.ddc;
867 else
868 ddc = &aconnector->i2c->base;
869
870 /* some dongles read edid incorrectly the first time,
871 * do check sum and retry to make sure read correct edid.
872 */
873 do {
874
875 edid = drm_get_edid(&aconnector->base, ddc);
876
877 /* DP Compliance Test 4.2.2.6 */
878 if (link->aux_mode && connector->edid_corrupt)
879 drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);
880
881 if (!edid && connector->edid_corrupt) {
882 connector->edid_corrupt = false0;
883 return EDID_BAD_CHECKSUM;
884 }
885
886 if (!edid)
887 return EDID_NO_RESPONSE;
888
889 sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
890 memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);
891
892 /* We don't need the original edid anymore */
893 kfree(edid);
894
895 edid_status = dm_helpers_parse_edid_caps(
896 link,
897 &sink->dc_edid,
898 &sink->edid_caps);
899
900 } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);
901
902 if (edid_status != EDID_OK)
903 DRM_ERROR("EDID err: %d, on connector: %s",__drm_err("EDID err: %d, on connector: %s", edid_status, aconnector
->base.name)
904 edid_status,__drm_err("EDID err: %d, on connector: %s", edid_status, aconnector
->base.name)
905 aconnector->base.name)__drm_err("EDID err: %d, on connector: %s", edid_status, aconnector
->base.name)
;
906
907 /* DP Compliance Test 4.2.2.3 */
908 if (link->aux_mode)
909 drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]);
910
911 return edid_status;
912}
913int dm_helper_dmub_aux_transfer_sync(
914 struct dc_context *ctx,
915 const struct dc_link *link,
916 struct aux_payload *payload,
917 enum aux_return_code_type *operation_result)
918{
919 return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload,
920 operation_result);
921}
922
923int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
924 const struct dc_link *link,
925 struct set_config_cmd_payload *payload,
926 enum set_config_status *operation_result)
927{
928 return amdgpu_dm_process_dmub_set_config_sync(ctx, link->link_index, payload,
929 operation_result);
930}
931
932void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
933{
934 /* TODO: something */
935}
936
937void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us)
938{
939 // TODO:
940 //amdgpu_device_gpu_recover(dc_context->driver-context, NULL);
941}
942
943void dm_helpers_init_panel_settings(
944 struct dc_context *ctx,
945 struct dc_panel_config *panel_config,
946 struct dc_sink *sink)
947{
948 // Extra Panel Power Sequence
949 panel_config->pps.extra_t3_ms = sink->edid_caps.panel_patch.extra_t3_ms;
950 panel_config->pps.extra_t7_ms = sink->edid_caps.panel_patch.extra_t7_ms;
951 panel_config->pps.extra_delay_backlight_off = sink->edid_caps.panel_patch.extra_delay_backlight_off;
952 panel_config->pps.extra_post_t7_ms = 0;
953 panel_config->pps.extra_pre_t11_ms = 0;
954 panel_config->pps.extra_t12_ms = sink->edid_caps.panel_patch.extra_t12_ms;
955 panel_config->pps.extra_post_OUI_ms = 0;
956 // Feature DSC
957 panel_config->dsc.disable_dsc_edp = false0;
958 panel_config->dsc.force_dsc_edp_policy = 0;
959}
960
961void dm_helpers_override_panel_settings(
962 struct dc_context *ctx,
963 struct dc_panel_config *panel_config)
964{
965 // Feature DSC
966 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
967 panel_config->dsc.disable_dsc_edp = true1;
968 }
969}
970
971void *dm_helpers_allocate_gpu_mem(
972 struct dc_context *ctx,
973 enum dc_gpu_mem_alloc_type type,
974 size_t size,
975 long long *addr)
976{
977 struct amdgpu_device *adev = ctx->driver_context;
978 struct dal_allocation *da;
979 u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
980 AMDGPU_GEM_DOMAIN_GTT0x2 : AMDGPU_GEM_DOMAIN_VRAM0x4;
981 int ret;
982
983 da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
984 if (!da)
985 return NULL;
986
987 ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
988 domain, &da->bo,
989 &da->gpu_addr, &da->cpu_ptr);
990
991 *addr = da->gpu_addr;
992
993 if (ret) {
994 kfree(da);
995 return NULL;
996 }
997
998 /* add da to list in dm */
999 list_add(&da->list, &adev->dm.da_list);
1000
1001 return da->cpu_ptr;
1002}
1003
1004void dm_helpers_free_gpu_mem(
1005 struct dc_context *ctx,
1006 enum dc_gpu_mem_alloc_type type,
1007 void *pvMem)
1008{
1009 struct amdgpu_device *adev = ctx->driver_context;
1010 struct dal_allocation *da;
1011
1012 /* walk the da list in DM */
1013 list_for_each_entry(da, &adev->dm.da_list, list) {
1014 if (pvMem == da->cpu_ptr) {
1015 amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
1016 list_del(&da->list);
1017 kfree(da);
1018 break;
1019 }
1020 }
1021}
1022
1023bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable)
1024{
1025 enum dc_irq_source irq_source;
1026 bool ret;
1027
1028 irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX;
1029
1030 ret = dc_interrupt_set(ctx->dc, irq_source, enable);
1031
1032 DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n",___drm_dbg(((void *)0), DRM_UT_DRIVER, "Dmub trace irq %sabling: r=%d\n"
, enable ? "en" : "dis", ret)
1033 enable ? "en" : "dis", ret)___drm_dbg(((void *)0), DRM_UT_DRIVER, "Dmub trace irq %sabling: r=%d\n"
, enable ? "en" : "dis", ret)
;
1034 return ret;
1035}
1036
1037void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
1038{
1039 /* TODO: virtual DPCD */
1040 struct dc_link *link = stream->link;
1041 union down_spread_ctrl old_downspread;
1042 union down_spread_ctrl new_downspread;
1043
1044 if (link->aux_access_disabled)
1045 return;
1046
1047 if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL0x107,
1048 &old_downspread.raw,
1049 sizeof(old_downspread)))
1050 return;
1051
1052 new_downspread.raw = old_downspread.raw;
1053 new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
1054 (stream->ignore_msa_timing_param) ? 1 : 0;
1055
1056 if (new_downspread.raw != old_downspread.raw)
1057 dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL0x107,
1058 &new_downspread.raw,
1059 sizeof(new_downspread));
1060}
1061
1062bool_Bool dm_helpers_dp_handle_test_pattern_request(
1063 struct dc_context *ctx,
1064 const struct dc_link *link,
1065 union link_test_pattern dpcd_test_pattern,
1066 union test_misc dpcd_test_params)
1067{
1068 enum dp_test_pattern test_pattern;
1069 enum dp_test_pattern_color_space test_pattern_color_space =
1070 DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
1071 enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
1072 enum dc_pixel_encoding requestPixelEncoding = PIXEL_ENCODING_UNDEFINED;
1073 struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
1074 struct pipe_ctx *pipe_ctx = NULL((void *)0);
1075 struct amdgpu_dm_connector *aconnector = link->priv;
1076 int i;
1077
1078 for (i = 0; i < MAX_PIPES; i++) {
1079 if (pipes[i].stream == NULL((void *)0))
1080 continue;
1081
1082 if (pipes[i].stream->link == link && !pipes[i].top_pipe &&
1083 !pipes[i].prev_odm_pipe) {
1084 pipe_ctx = &pipes[i];
1085 break;
1086 }
1087 }
1088
1089 if (pipe_ctx == NULL((void *)0))
1090 return false0;
1091
1092 switch (dpcd_test_pattern.bits.PATTERN) {
1093 case LINK_TEST_PATTERN_COLOR_RAMP:
1094 test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
1095 break;
1096 case LINK_TEST_PATTERN_VERTICAL_BARS:
1097 test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
1098 break; /* black and white */
1099 case LINK_TEST_PATTERN_COLOR_SQUARES:
1100 test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
1101 TEST_DYN_RANGE_VESA ?
1102 DP_TEST_PATTERN_COLOR_SQUARES :
1103 DP_TEST_PATTERN_COLOR_SQUARES_CEA);
1104 break;
1105 default:
1106 test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
1107 break;
1108 }
1109
1110 if (dpcd_test_params.bits.CLR_FORMAT == 0)
1111 test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
1112 else
1113 test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
1114 DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
1115 DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
1116
1117 switch (dpcd_test_params.bits.BPC) {
1118 case 0: // 6 bits
1119 requestColorDepth = COLOR_DEPTH_666;
1120 break;
1121 case 1: // 8 bits
1122 requestColorDepth = COLOR_DEPTH_888;
1123 break;
1124 case 2: // 10 bits
1125 requestColorDepth = COLOR_DEPTH_101010;
1126 break;
1127 case 3: // 12 bits
1128 requestColorDepth = COLOR_DEPTH_121212;
1129 break;
1130 default:
1131 break;
1132 }
1133
1134 switch (dpcd_test_params.bits.CLR_FORMAT) {
1135 case 0:
1136 requestPixelEncoding = PIXEL_ENCODING_RGB;
1137 break;
1138 case 1:
1139 requestPixelEncoding = PIXEL_ENCODING_YCBCR422;
1140 break;
1141 case 2:
1142 requestPixelEncoding = PIXEL_ENCODING_YCBCR444;
1143 break;
1144 default:
1145 requestPixelEncoding = PIXEL_ENCODING_RGB;
1146 break;
1147 }
1148
1149 if ((requestColorDepth != COLOR_DEPTH_UNDEFINED
1150 && pipe_ctx->stream->timing.display_color_depth != requestColorDepth)
1151 || (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED
1152 && pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) {
1153 DC_LOG_DEBUG("%s: original bpc %d pix encoding %d, changing to %d %d\n",___drm_dbg(((void *)0), DRM_UT_KMS, "%s: original bpc %d pix encoding %d, changing to %d %d\n"
, __func__, pipe_ctx->stream->timing.display_color_depth
, pipe_ctx->stream->timing.pixel_encoding, requestColorDepth
, requestPixelEncoding)
1154 __func__,___drm_dbg(((void *)0), DRM_UT_KMS, "%s: original bpc %d pix encoding %d, changing to %d %d\n"
, __func__, pipe_ctx->stream->timing.display_color_depth
, pipe_ctx->stream->timing.pixel_encoding, requestColorDepth
, requestPixelEncoding)
1155 pipe_ctx->stream->timing.display_color_depth,___drm_dbg(((void *)0), DRM_UT_KMS, "%s: original bpc %d pix encoding %d, changing to %d %d\n"
, __func__, pipe_ctx->stream->timing.display_color_depth
, pipe_ctx->stream->timing.pixel_encoding, requestColorDepth
, requestPixelEncoding)
1156 pipe_ctx->stream->timing.pixel_encoding,___drm_dbg(((void *)0), DRM_UT_KMS, "%s: original bpc %d pix encoding %d, changing to %d %d\n"
, __func__, pipe_ctx->stream->timing.display_color_depth
, pipe_ctx->stream->timing.pixel_encoding, requestColorDepth
, requestPixelEncoding)
1157 requestColorDepth,___drm_dbg(((void *)0), DRM_UT_KMS, "%s: original bpc %d pix encoding %d, changing to %d %d\n"
, __func__, pipe_ctx->stream->timing.display_color_depth
, pipe_ctx->stream->timing.pixel_encoding, requestColorDepth
, requestPixelEncoding)
1158 requestPixelEncoding)___drm_dbg(((void *)0), DRM_UT_KMS, "%s: original bpc %d pix encoding %d, changing to %d %d\n"
, __func__, pipe_ctx->stream->timing.display_color_depth
, pipe_ctx->stream->timing.pixel_encoding, requestColorDepth
, requestPixelEncoding)
;
1159 pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
1160 pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding;
1161
1162 dp_update_dsc_config(pipe_ctx);
1163
1164 aconnector->timing_changed = true1;
1165 /* store current timing */
1166 if (aconnector->timing_requested)
1167 *aconnector->timing_requested = pipe_ctx->stream->timing;
1168 else
1169 DC_LOG_ERROR("%s: timing storage failed\n", __func__)__drm_err("%s: timing storage failed\n", __func__);
1170
1171 }
1172
1173 dc_link_dp_set_test_pattern(
1174 (struct dc_link *) link,
1175 test_pattern,
1176 test_pattern_color_space,
1177 NULL((void *)0),
1178 NULL((void *)0),
1179 0);
1180
1181 return false0;
1182}
1183
1184void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
1185{
1186 // TODO
1187}
1188
1189void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool_Bool enable)
1190{
1191 /* TODO: add periodic detection implementation */
1192}