Bug Summary

File: dev/pci/drm/display/drm_dp_mst_topology.c
Warning: line 2556, column 30
The left operand of '>>' is a garbage value

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name drm_dp_mst_topology.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/drm/display/drm_dp_mst_topology.c
1/*
2 * Copyright © 2014 Red Hat
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22
23#include <linux/bitfield.h>
24#include <linux/delay.h>
25#include <linux/errno.h>
26#include <linux/i2c.h>
27#include <linux/init.h>
28#include <linux/kernel.h>
29#include <linux/random.h>
30#include <linux/sched.h>
31#include <linux/seq_file.h>
32#include <linux/iopoll.h>
33
34#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
35#include <linux/stacktrace.h>
36#include <linux/sort.h>
37#include <linux/timekeeping.h>
38#include <linux/math64.h>
39#endif
40
41#include <drm/display/drm_dp_mst_helper.h>
42#include <drm/drm_atomic.h>
43#include <drm/drm_atomic_helper.h>
44#include <drm/drm_drv.h>
45#include <drm/drm_edid.h>
46#include <drm/drm_print.h>
47#include <drm/drm_probe_helper.h>
48
49#include "drm_dp_helper_internal.h"
50#include "drm_dp_mst_topology_internal.h"
51
52/**
53 * DOC: dp mst helper
54 *
55 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
56 * protocol. The helpers contain a topology manager and bandwidth manager.
57 * The helpers encapsulate the sending and receiving of sideband msgs.
58 */
59struct drm_dp_pending_up_req {
60 struct drm_dp_sideband_msg_hdr hdr;
61 struct drm_dp_sideband_msg_req_body msg;
62 struct list_head next;
63};
64
65static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
66 char *buf);
67
68static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
69
70static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
71 int id, u8 start_slot, u8 num_slots);
72
73static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
74 struct drm_dp_mst_port *port,
75 int offset, int size, u8 *bytes);
76static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
77 struct drm_dp_mst_port *port,
78 int offset, int size, u8 *bytes);
79
80static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
81 struct drm_dp_mst_branch *mstb);
82
83static void
84drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
85 struct drm_dp_mst_branch *mstb);
86
87static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
88 struct drm_dp_mst_branch *mstb,
89 struct drm_dp_mst_port *port);
90static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
91 u8 *guid);
92
93static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
94static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
95static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
96
97static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
98 struct drm_dp_mst_branch *branch);
99
100#define DBG_PREFIX "[dp_mst]"
101
102#define DP_STR(x) [DP_ ## x] = #x
103
104static const char *drm_dp_mst_req_type_str(u8 req_type)
105{
106 static const char * const req_type_str[] = {
107 DP_STR(GET_MSG_TRANSACTION_VERSION),
108 DP_STR(LINK_ADDRESS),
109 DP_STR(CONNECTION_STATUS_NOTIFY),
110 DP_STR(ENUM_PATH_RESOURCES),
111 DP_STR(ALLOCATE_PAYLOAD),
112 DP_STR(QUERY_PAYLOAD),
113 DP_STR(RESOURCE_STATUS_NOTIFY),
114 DP_STR(CLEAR_PAYLOAD_ID_TABLE),
115 DP_STR(REMOTE_DPCD_READ),
116 DP_STR(REMOTE_DPCD_WRITE),
117 DP_STR(REMOTE_I2C_READ),
118 DP_STR(REMOTE_I2C_WRITE),
119 DP_STR(POWER_UP_PHY),
120 DP_STR(POWER_DOWN_PHY),
121 DP_STR(SINK_EVENT_NOTIFY),
122 DP_STR(QUERY_STREAM_ENC_STATUS),
123 };
124
125 if (req_type >= ARRAY_SIZE(req_type_str) ||
126 !req_type_str[req_type])
127 return "unknown";
128
129 return req_type_str[req_type];
130}
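
(The DP_STR() pattern above is a designated-initializer string table: each entry pastes the DP_ request-type constant into the array index and stringizes the name. A sketch of what one entry expands to:

    static const char * const req_type_str[] = {
        [DP_LINK_ADDRESS] = "LINK_ADDRESS",  /* from DP_STR(LINK_ADDRESS) */
    };

Request codes without an entry stay NULL, which is why the bounds check above also tests !req_type_str[req_type].)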
131
132#undef DP_STR
133#define DP_STR(x) [DP_NAK_ ## x] = #x
134
135static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
136{
137 static const char * const nak_reason_str[] = {
138 DP_STR(WRITE_FAILURE),
139 DP_STR(INVALID_READ),
140 DP_STR(CRC_FAILURE),
141 DP_STR(BAD_PARAM),
142 DP_STR(DEFER),
143 DP_STR(LINK_FAILURE),
144 DP_STR(NO_RESOURCES),
145 DP_STR(DPCD_FAIL),
146 DP_STR(I2C_NAK),
147 DP_STR(ALLOCATE_FAIL),
148 };
149
150 if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
151 !nak_reason_str[nak_reason])
152 return "unknown";
153
154 return nak_reason_str[nak_reason];
155}
156
157#undef DP_STR
158#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x
159
160static const char *drm_dp_mst_sideband_tx_state_str(int state)
161{
162 static const char * const sideband_reason_str[] = {
163 DP_STR(QUEUED),
164 DP_STR(START_SEND),
165 DP_STR(SENT),
166 DP_STR(RX),
167 DP_STR(TIMEOUT),
168 };
169
170 if (state >= ARRAY_SIZE(sideband_reason_str) ||
171 !sideband_reason_str[state])
172 return "unknown";
173
174 return sideband_reason_str[state];
175}
176
177static int
178drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
179{
180 int i;
181 u8 unpacked_rad[16];
182
183 for (i = 0; i < lct; i++) {
184 if (i % 2)
185 unpacked_rad[i] = rad[i / 2] >> 4;
186 else
187 unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
188 }
189
190 /* TODO: Eventually add something to printk so we can format the rad
191 * like this: 1.2.3
192 */
193 return snprintf(out, len, "%*phC", lct, unpacked_rad);
194}
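
(The RAD is stored nibble-packed, one 4-bit hop per entry; the loop above alternates between the low and high nibble of each byte. A minimal user-space sketch of the same unpacking, with hypothetical input bytes:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirror drm_dp_mst_rad_to_str(): even indices take the low
     * nibble, odd indices the high nibble of each packed byte. */
    int main(void)
    {
        uint8_t rad[8] = { 0x21, 0x03 };  /* hypothetical: hops 1, 2, 3 */
        uint8_t unpacked[16];
        int lct = 3, i;

        for (i = 0; i < lct; i++) {
            if (i % 2)
                unpacked[i] = rad[i / 2] >> 4;
            else
                unpacked[i] = rad[i / 2] & 0xf;
        }
        for (i = 0; i < lct; i++)
            printf("%u%s", unpacked[i], i + 1 < lct ? "." : "\n");
        return 0;  /* prints "1.2.3" */
    }
)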
195
196/* sideband msg handling */
197static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
198{
199 u8 bitmask = 0x80;
200 u8 bitshift = 7;
201 u8 array_index = 0;
202 int number_of_bits = num_nibbles * 4;
203 u8 remainder = 0;
204
205 while (number_of_bits != 0) {
206 number_of_bits--;
207 remainder <<= 1;
208 remainder |= (data[array_index] & bitmask) >> bitshift;
209 bitmask >>= 1;
210 bitshift--;
211 if (bitmask == 0) {
212 bitmask = 0x80;
213 bitshift = 7;
214 array_index++;
215 }
216 if ((remainder & 0x10) == 0x10)
217 remainder ^= 0x13;
218 }
219
220 number_of_bits = 4;
221 while (number_of_bits != 0) {
222 number_of_bits--;
223 remainder <<= 1;
224 if ((remainder & 0x10) != 0)
225 remainder ^= 0x13;
226 }
227
228 return remainder;
229}
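
(The header CRC above is a plain bitwise CRC-4 with feedback value 0x13, i.e. polynomial x^4 + x + 1, run over num_nibbles * 4 message bits and then flushed with four zero bits. A standalone user-space copy for experimenting, fed a hypothetical 5-nibble header:

    #include <stdio.h>
    #include <stdint.h>

    /* User-space copy of drm_dp_msg_header_crc4() (poly x^4 + x + 1). */
    static uint8_t crc4(const uint8_t *data, int num_nibbles)
    {
        int bits = num_nibbles * 4;
        uint8_t mask = 0x80, shift = 7, idx = 0, rem = 0;

        while (bits--) {
            rem = (rem << 1) | ((data[idx] & mask) >> shift);
            mask >>= 1;
            shift--;
            if (!mask) { mask = 0x80; shift = 7; idx++; }
            if (rem & 0x10)
                rem ^= 0x13;
        }
        for (bits = 4; bits--; ) {  /* flush four zero bits */
            rem <<= 1;
            if (rem & 0x10)
                rem ^= 0x13;
        }
        return rem;
    }

    int main(void)
    {
        uint8_t hdr[3] = { 0x10, 0x0c, 0xc0 };  /* hypothetical header bytes */
        printf("crc4 = 0x%x\n", crc4(hdr, 5));  /* 5 nibbles: all but the CRC */
        return 0;
    }
)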
230
231static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
232{
233 u8 bitmask = 0x80;
234 u8 bitshift = 7;
235 u8 array_index = 0;
236 int number_of_bits = number_of_bytes * 8;
237 u16 remainder = 0;
238
239 while (number_of_bits != 0) {
240 number_of_bits--;
241 remainder <<= 1;
242 remainder |= (data[array_index] & bitmask) >> bitshift;
243 bitmask >>= 1;
244 bitshift--;
245 if (bitmask == 0) {
246 bitmask = 0x80;
247 bitshift = 7;
248 array_index++;
249 }
250 if ((remainder & 0x100) == 0x100)
251 remainder ^= 0xd5;
252 }
253
254 number_of_bits = 8;
255 while (number_of_bits != 0) {
256 number_of_bits--;
257 remainder <<= 1;
258 if ((remainder & 0x100) != 0)
259 remainder ^= 0xd5;
260 }
261
262 return remainder & 0xff;
263}
264static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
265{
266 u8 size = 3;
267
268 size += (hdr->lct / 2);
269 return size;
270}
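
(Worked example: a header with lct = 3 carries lct / 2 = 1 RAD byte (integer division), so drm_dp_calc_sb_hdr_size() returns 3 + 1 = 4 bytes; a directly attached branch with lct = 1 needs only the 3 fixed bytes.)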
271
272static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
273 u8 *buf, int *len)
274{
275 int idx = 0;
276 int i;
277 u8 crc4;
278
279 buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
280 for (i = 0; i < (hdr->lct / 2); i++)
281 buf[idx++] = hdr->rad[i];
282 buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
283 (hdr->msg_len & 0x3f);
284 buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
285
286 crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
287 buf[idx - 1] |= (crc4 & 0xf);
288
289 *len = idx;
290}
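
(A sketch of the layout the encoder produces, assuming a minimal lct = 1 header with hypothetical field values:

    struct drm_dp_sideband_msg_hdr hdr = {
        .lct = 1, .lcr = 0,          /* one hop, no RAD bytes */
        .broadcast = 0, .path_msg = 0,
        .msg_len = 12,
        .somt = 1, .eomt = 1, .seqno = 0,
    };
    u8 buf[16];
    int len;

    drm_dp_encode_sideband_msg_hdr(&hdr, buf, &len);
    /* len == 3: buf[0] = 0x10 (lct | lcr), buf[1] = 0x0c (msg_len),
     * buf[2] = 0xc0 | crc4 -- somt | eomt | seqno in the high bits,
     * the CRC over the first five nibbles in the low nibble. */
)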
291
292static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr *mgr,
293 struct drm_dp_sideband_msg_hdr *hdr,
294 u8 *buf, int buflen, u8 *hdrlen)
295{
296 u8 crc4;
297 u8 len;
298 int i;
299 u8 idx;
300
301 if (buf[0] == 0)
15. Assuming the condition is false
16. Taking false branch
302 return false;
303 len = 3;
304 len += ((buf[0] & 0xf0) >> 4) / 2;
305 if (len > buflen)
17. Assuming 'len' is <= 'buflen'
18. Taking false branch
306 return false;
307 crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
308
309 if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
19. Assuming the condition is false
20. Taking false branch
310 drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
311 return false;
312 }
313
314 hdr->lct = (buf[0] & 0xf0) >> 4;
315 hdr->lcr = (buf[0] & 0xf);
316 idx = 1;
317 for (i = 0; i < (hdr->lct / 2); i++)
21. Assuming the condition is false
22. Loop condition is false. Execution continues on line 319
318 hdr->rad[i] = buf[idx++];
319 hdr->broadcast = (buf[idx] >> 7) & 0x1;
320 hdr->path_msg = (buf[idx] >> 6) & 0x1;
321 hdr->msg_len = buf[idx] & 0x3f;
322 idx++;
323 hdr->somt = (buf[idx] >> 7) & 0x1;
324 hdr->eomt = (buf[idx] >> 6) & 0x1;
325 hdr->seqno = (buf[idx] >> 4) & 0x1;
326 idx++;
327 *hdrlen = idx;
328 return true;
329}
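
(The decoder is the inverse of the encoder above; a round-trip sketch, with mgr assumed to be a valid topology manager and buf/len taken from the encoding sketch:

    struct drm_dp_sideband_msg_hdr out;
    u8 hdrlen;

    if (!drm_dp_decode_sideband_msg_hdr(mgr, &out, buf, len, &hdrlen))
        return;  /* zero first byte, short buffer, or CRC mismatch */
    /* out.lct == 1, out.msg_len == 12, hdrlen == 3 */
)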
330
331void
332drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
333 struct drm_dp_sideband_msg_tx *raw)
334{
335 int idx = 0;
336 int i;
337 u8 *buf = raw->msg;
338
339 buf[idx++] = req->req_type & 0x7f;
340
341 switch (req->req_type) {
342 case DP_ENUM_PATH_RESOURCES:
343 case DP_POWER_DOWN_PHY:
344 case DP_POWER_UP_PHY:
345 buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
346 idx++;
347 break;
348 case DP_ALLOCATE_PAYLOAD:
349 buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
350 (req->u.allocate_payload.number_sdp_streams & 0xf);
351 idx++;
352 buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
353 idx++;
354 buf[idx] = (req->u.allocate_payload.pbn >> 8);
355 idx++;
356 buf[idx] = (req->u.allocate_payload.pbn & 0xff);
357 idx++;
358 for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
359 buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
360 (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
361 idx++;
362 }
363 if (req->u.allocate_payload.number_sdp_streams & 1) {
364 i = req->u.allocate_payload.number_sdp_streams - 1;
365 buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
366 idx++;
367 }
368 break;
369 case DP_QUERY_PAYLOAD:
370 buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
371 idx++;
372 buf[idx] = (req->u.query_payload.vcpi & 0x7f);
373 idx++;
374 break;
375 case DP_REMOTE_DPCD_READ:
376 buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
377 buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
378 idx++;
379 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
380 idx++;
381 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
382 idx++;
383 buf[idx] = (req->u.dpcd_read.num_bytes);
384 idx++;
385 break;
386
387 case DP_REMOTE_DPCD_WRITE:
388 buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
389 buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
390 idx++;
391 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
392 idx++;
393 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
394 idx++;
395 buf[idx] = (req->u.dpcd_write.num_bytes);
396 idx++;
397 memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
398 idx += req->u.dpcd_write.num_bytes;
399 break;
400 case DP_REMOTE_I2C_READ:
401 buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
402 buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
403 idx++;
404 for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
405 buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
406 idx++;
407 buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
408 idx++;
409 memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
410 idx += req->u.i2c_read.transactions[i].num_bytes;
411
412 buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
413 buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
414 idx++;
415 }
416 buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
417 idx++;
418 buf[idx] = (req->u.i2c_read.num_bytes_read);
419 idx++;
420 break;
421
422 case DP_REMOTE_I2C_WRITE:
423 buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
424 idx++;
425 buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
426 idx++;
427 buf[idx] = (req->u.i2c_write.num_bytes);
428 idx++;
429 memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
430 idx += req->u.i2c_write.num_bytes;
431 break;
432 case DP_QUERY_STREAM_ENC_STATUS: {
433 const struct drm_dp_query_stream_enc_status *msg;
434
435 msg = &req->u.enc_status;
436 buf[idx] = msg->stream_id;
437 idx++;
438 memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
439 idx += sizeof(msg->client_id);
440 buf[idx] = 0;
441 buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
442 buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
443 buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
444 buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
445 idx++;
446 }
447 break;
448 }
449 raw->cur_len = idx;
450}
451EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
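
(For illustration, a sketch that encodes a REMOTE_DPCD_READ request with hypothetical values; txmsg is supplied by the caller:

    struct drm_dp_sideband_msg_req_body req = {
        .req_type = DP_REMOTE_DPCD_READ,
        .u.dpcd_read = {
            .port_number  = 1,
            .dpcd_address = 0x00500,
            .num_bytes    = 16,
        },
    };

    drm_dp_encode_sideband_req(&req, txmsg);
    /* txmsg->msg: 0x20, 0x10 (port | addr[19:16]), 0x05, 0x00,
     * 0x10 (byte count); txmsg->cur_len == 5 */
)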
452
453/* Decode a sideband request we've encoded, mainly used for debugging */
454int
455drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
456 struct drm_dp_sideband_msg_req_body *req)
457{
458 const u8 *buf = raw->msg;
459 int i, idx = 0;
460
461 req->req_type = buf[idx++] & 0x7f;
462 switch (req->req_type) {
463 case DP_ENUM_PATH_RESOURCES:
464 case DP_POWER_DOWN_PHY:
465 case DP_POWER_UP_PHY:
466 req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
467 break;
468 case DP_ALLOCATE_PAYLOAD:
469 {
470 struct drm_dp_allocate_payload *a =
471 &req->u.allocate_payload;
472
473 a->number_sdp_streams = buf[idx] & 0xf;
474 a->port_number = (buf[idx] >> 4) & 0xf;
475
476 WARN_ON(buf[++idx] & 0x80);
477 a->vcpi = buf[idx] & 0x7f;
478
479 a->pbn = buf[++idx] << 8;
480 a->pbn |= buf[++idx];
481
482 idx++;
483 for (i = 0; i < a->number_sdp_streams; i++) {
484 a->sdp_stream_sink[i] =
485 (buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
486 }
487 }
488 break;
489 case DP_QUERY_PAYLOAD:
490 req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
491 WARN_ON(buf[++idx] & 0x80);
492 req->u.query_payload.vcpi = buf[idx] & 0x7f;
493 break;
494 case DP_REMOTE_DPCD_READ:
495 {
496 struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;
497
498 r->port_number = (buf[idx] >> 4) & 0xf;
499
500 r->dpcd_address = (buf[idx] << 16) & 0xf0000;
501 r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
502 r->dpcd_address |= buf[++idx] & 0xff;
503
504 r->num_bytes = buf[++idx];
505 }
506 break;
507 case DP_REMOTE_DPCD_WRITE:
508 {
509 struct drm_dp_remote_dpcd_write *w =
510 &req->u.dpcd_write;
511
512 w->port_number = (buf[idx] >> 4) & 0xf;
513
514 w->dpcd_address = (buf[idx] << 16) & 0xf0000;
515 w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
516 w->dpcd_address |= buf[++idx] & 0xff;
517
518 w->num_bytes = buf[++idx];
519
520 w->bytes = kmemdup(&buf[++idx], w->num_bytes,
521 GFP_KERNEL);
522 if (!w->bytes)
523 return -ENOMEM;
524 }
525 break;
526 case DP_REMOTE_I2C_READ:
527 {
528 struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
529 struct drm_dp_remote_i2c_read_tx *tx;
530 bool failed = false;
531
532 r->num_transactions = buf[idx] & 0x3;
533 r->port_number = (buf[idx] >> 4) & 0xf;
534 for (i = 0; i < r->num_transactions; i++) {
535 tx = &r->transactions[i];
536
537 tx->i2c_dev_id = buf[++idx] & 0x7f;
538 tx->num_bytes = buf[++idx];
539 tx->bytes = kmemdup(&buf[++idx],
540 tx->num_bytes,
541 GFP_KERNEL);
542 if (!tx->bytes) {
543 failed = true;
544 break;
545 }
546 idx += tx->num_bytes;
547 tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
548 tx->i2c_transaction_delay = buf[idx] & 0xf;
549 }
550
551 if (failed) {
552 for (i = 0; i < r->num_transactions; i++) {
553 tx = &r->transactions[i];
554 kfree(tx->bytes);
555 }
556 return -ENOMEM;
557 }
558
559 r->read_i2c_device_id = buf[++idx] & 0x7f;
560 r->num_bytes_read = buf[++idx];
561 }
562 break;
563 case DP_REMOTE_I2C_WRITE:
564 {
565 struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;
566
567 w->port_number = (buf[idx] >> 4) & 0xf;
568 w->write_i2c_device_id = buf[++idx] & 0x7f;
569 w->num_bytes = buf[++idx];
570 w->bytes = kmemdup(&buf[++idx], w->num_bytes,
571 GFP_KERNEL);
572 if (!w->bytes)
573 return -ENOMEM;
574 }
575 break;
576 case DP_QUERY_STREAM_ENC_STATUS:
577 req->u.enc_status.stream_id = buf[idx++];
578 for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
579 req->u.enc_status.client_id[i] = buf[idx++];
580
581 req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
582 buf[idx]);
583 req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
584 buf[idx]);
585 req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
586 buf[idx]);
587 req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
588 buf[idx]);
589 break;
590 }
591
592 return 0;
593}
594EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
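
(Note that for the write-style requests the decoder kmemdup()s the payload, so the caller owns those copies; a sketch of the cleanup obligation, mirrored by drm_dp_mst_dump_sideband_msg_tx() below:

    struct drm_dp_sideband_msg_req_body req;

    if (drm_dp_decode_sideband_req(txmsg, &req) == 0 &&
        req.req_type == DP_REMOTE_DPCD_WRITE)
        kfree(req.u.dpcd_write.bytes);
)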
595
596void
597drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
598 int indent, struct drm_printer *printer)
599{
600 int i;
601
602#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
603 if (req->req_type == DP_LINK_ADDRESS) {
604 /* No contents to print */
605 P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
606 return;
607 }
608
609 P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
610 indent++;
611
612 switch (req->req_type) {
613 case DP_ENUM_PATH_RESOURCES:
614 case DP_POWER_DOWN_PHY:
615 case DP_POWER_UP_PHY:
616 P("port=%d\n", req->u.port_num.port_number);
617 break;
618 case DP_ALLOCATE_PAYLOAD:
619 P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
620 req->u.allocate_payload.port_number,
621 req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
622 req->u.allocate_payload.number_sdp_streams,
623 req->u.allocate_payload.number_sdp_streams,
624 req->u.allocate_payload.sdp_stream_sink);
625 break;
626 case DP_QUERY_PAYLOAD:
627 P("port=%d vcpi=%d\n",
628 req->u.query_payload.port_number,
629 req->u.query_payload.vcpi);
630 break;
631 case DP_REMOTE_DPCD_READ:
632 P("port=%d dpcd_addr=%05x len=%d\n",
633 req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
634 req->u.dpcd_read.num_bytes);
635 break;
636 case DP_REMOTE_DPCD_WRITE:
637 P("port=%d addr=%05x len=%d: %*ph\n",
638 req->u.dpcd_write.port_number,
639 req->u.dpcd_write.dpcd_address,
640 req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
641 req->u.dpcd_write.bytes);
642 break;
643 case DP_REMOTE_I2C_READ:
644 P("port=%d num_tx=%d id=%d size=%d:\n",
645 req->u.i2c_read.port_number,
646 req->u.i2c_read.num_transactions,
647 req->u.i2c_read.read_i2c_device_id,
648 req->u.i2c_read.num_bytes_read);
649
650 indent++;
651 for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
652 const struct drm_dp_remote_i2c_read_tx *rtx =
653 &req->u.i2c_read.transactions[i];
654
655 P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
656 i, rtx->i2c_dev_id, rtx->num_bytes,
657 rtx->no_stop_bit, rtx->i2c_transaction_delay,
658 rtx->num_bytes, rtx->bytes);
659 }
660 break;
661 case DP_REMOTE_I2C_WRITE:
662 P("port=%d id=%d size=%d: %*ph\n",
663 req->u.i2c_write.port_number,
664 req->u.i2c_write.write_i2c_device_id,
665 req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
666 req->u.i2c_write.bytes);
667 break;
668 case DP_QUERY_STREAM_ENC_STATUS:
669 P("stream_id=%u client_id=%*ph stream_event=%x "
670 "valid_event=%d stream_behavior=%x valid_behavior=%d",
671 req->u.enc_status.stream_id,
672 (int)ARRAY_SIZE(req->u.enc_status.client_id),
673 req->u.enc_status.client_id, req->u.enc_status.stream_event,
674 req->u.enc_status.valid_stream_event,
675 req->u.enc_status.stream_behavior,
676 req->u.enc_status.valid_stream_behavior);
677 break;
678 default:
679 P("???\n");
680 break;
681 }
682#undef P
683}
684EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
685
686static inline void
687drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
688 const struct drm_dp_sideband_msg_tx *txmsg)
689{
690 struct drm_dp_sideband_msg_req_body req;
691 char buf[64];
692 int ret;
693 int i;
694
695 drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
696 sizeof(buf));
697 drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
698 txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
699 drm_dp_mst_sideband_tx_state_str(txmsg->state),
700 txmsg->path_msg, buf);
701
702 ret = drm_dp_decode_sideband_req(txmsg, &req);
703 if (ret) {
704 drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
705 return;
706 }
707 drm_dp_dump_sideband_msg_req_body(&req, 1, p);
708
709 switch (req.req_type) {
710 case DP_REMOTE_DPCD_WRITE:
711 kfree(req.u.dpcd_write.bytes);
712 break;
713 case DP_REMOTE_I2C_READ:
714 for (i = 0; i < req.u.i2c_read.num_transactions; i++)
715 kfree(req.u.i2c_read.transactions[i].bytes);
716 break;
717 case DP_REMOTE_I2C_WRITE:
718 kfree(req.u.i2c_write.bytes);
719 break;
720 }
721}
722
723static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
724{
725 u8 crc4;
726
727 crc4 = drm_dp_msg_data_crc4(msg, len);
728 msg[len] = crc4;
729}
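
(The chunk CRC is the 8-bit data CRC appended in place, so the buffer must have room for len + 1 bytes; a sketch with a hypothetical 12-byte chunk:

    u8 chunk[48];  /* 12 payload bytes + 1 CRC byte fit easily */

    /* ... fill chunk[0..11] ... */
    drm_dp_crc_sideband_chunk_req(chunk, 12);
    /* chunk[12] now holds drm_dp_msg_data_crc4(chunk, 12) */
)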
730
731static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
732 struct drm_dp_sideband_msg_tx *raw)
733{
734 int idx = 0;
735 u8 *buf = raw->msg;
736
737 buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
738
739 raw->cur_len = idx;
740}
741
742static int drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
743 struct drm_dp_sideband_msg_hdr *hdr,
744 u8 hdrlen)
745{
746 /*
747 * ignore out-of-order messages or messages that are part of a
748 * failed transaction
749 */
750 if (!hdr->somt && !msg->have_somt)
751 return false;
752
753 /* get length contained in this portion */
754 msg->curchunk_idx = 0;
755 msg->curchunk_len = hdr->msg_len;
756 msg->curchunk_hdrlen = hdrlen;
757
758 /* we have already gotten an somt - don't bother parsing */
759 if (hdr->somt && msg->have_somt)
760 return false;
761
762 if (hdr->somt) {
763 memcpy(&msg->initial_hdr, hdr,
764 sizeof(struct drm_dp_sideband_msg_hdr));
765 msg->have_somt = true;
766 }
767 if (hdr->eomt)
768 msg->have_eomt = true;
769
770 return true;
771}
772
773/* this adds a chunk of msg to the builder to get the final msg */
774static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
775 u8 *replybuf, u8 replybuflen)
776{
777 u8 crc4;
778
779 memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
780 msg->curchunk_idx += replybuflen;
781
782 if (msg->curchunk_idx >= msg->curchunk_len) {
783 /* do CRC */
784 crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
785 if (crc4 != msg->chunk[msg->curchunk_len - 1])
786 print_hex_dump(KERN_DEBUG, "wrong crc",
787 DUMP_PREFIX_NONE, 16, 1,
788 msg->chunk, msg->curchunk_len, false);
789 /* copy chunk into bigger msg */
790 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
791 msg->curlen += msg->curchunk_len - 1;
792 }
793 return true;
794}
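
(Taken together with drm_dp_sideband_msg_set_header() above, per-chunk reassembly looks roughly like this; rx, hdr, body and hdrlen are hypothetical locals, and the real receive loop lives in drm_dp_get_one_sb_msg() later in the file:

    if (!drm_dp_sideband_msg_set_header(&rx, &hdr, hdrlen))
        return;  /* out-of-order or repeated-SOMT chunk */
    /* body is the chunk after the header: hdr.msg_len bytes,
     * including the trailing data CRC that gets stripped */
    drm_dp_sideband_append_payload(&rx, body, hdr.msg_len);
    if (rx.have_eomt)
        /* rx.msg[0..rx.curlen - 1] holds the full reply */;
)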
795
796static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_mgr *mgr,
797 struct drm_dp_sideband_msg_rx *raw,
798 struct drm_dp_sideband_msg_reply_body *repmsg)
799{
800 int idx = 1;
801 int i;
802
803 memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
804 idx += 16;
805 repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
806 idx++;
807 if (idx > raw->curlen)
808 goto fail_len;
809 for (i = 0; i < repmsg->u.link_addr.nports; i++) {
810 if (raw->msg[idx] & 0x80)
811 repmsg->u.link_addr.ports[i].input_port = 1;
812
813 repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
814 repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
815
816 idx++;
817 if (idx > raw->curlen)
818 goto fail_len;
819 repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
820 repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
821 if (repmsg->u.link_addr.ports[i].input_port == 0)
822 repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
823 idx++;
824 if (idx > raw->curlen)
825 goto fail_len;
826 if (repmsg->u.link_addr.ports[i].input_port == 0) {
827 repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
828 idx++;
829 if (idx > raw->curlen)
830 goto fail_len;
831 memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
832 idx += 16;
833 if (idx > raw->curlen)
834 goto fail_len;
835 repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
836 repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
837 idx++;
838
839 }
840 if (idx > raw->curlen)
841 goto fail_len;
842 }
843
844 return true;
845fail_len:
846 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
847 return false;
848}
849
850static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
851 struct drm_dp_sideband_msg_reply_body *repmsg)
852{
853 int idx = 1;
854
855 repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
856 idx++;
857 if (idx > raw->curlen)
858 goto fail_len;
859 repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
860 idx++;
861 if (idx > raw->curlen)
862 goto fail_len;
863
864 memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
865 return true;
866fail_len:
867 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
868 return false;
869}
870
871static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
872 struct drm_dp_sideband_msg_reply_body *repmsg)
873{
874 int idx = 1;
875
876 repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
877 idx++;
878 if (idx > raw->curlen)
879 goto fail_len;
880 return true;
881fail_len:
882 DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
883 return false;
884}
885
886static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
887 struct drm_dp_sideband_msg_reply_body *repmsg)
888{
889 int idx = 1;
890
891 repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
892 idx++;
893 if (idx > raw->curlen)
894 goto fail_len;
895 repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
896 idx++;
897 /* TODO check */
898 memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
899 return true;
900fail_len:
901 DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
902 return false;
903}
904
905static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
906 struct drm_dp_sideband_msg_reply_body *repmsg)
907{
908 int idx = 1;
909
910 repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
911 repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
912 idx++;
913 if (idx > raw->curlen)
914 goto fail_len;
915 repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
916 idx += 2;
917 if (idx > raw->curlen)
918 goto fail_len;
919 repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
920 idx += 2;
921 if (idx > raw->curlen)
922 goto fail_len;
923 return true;
924fail_len:
925 DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
926 return false;
927}
928
929static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
930 struct drm_dp_sideband_msg_reply_body *repmsg)
931{
932 int idx = 1;
933
934 repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
935 idx++;
936 if (idx > raw->curlen)
937 goto fail_len;
938 repmsg->u.allocate_payload.vcpi = raw->msg[idx];
939 idx++;
940 if (idx > raw->curlen)
941 goto fail_len;
942 repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
943 idx += 2;
944 if (idx > raw->curlen)
945 goto fail_len;
946 return true;
947fail_len:
948 DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
949 return false;
950}
951
952static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
953 struct drm_dp_sideband_msg_reply_body *repmsg)
954{
955 int idx = 1;
956
957 repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
958 idx++;
959 if (idx > raw->curlen)
960 goto fail_len;
961 repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
962 idx += 2;
963 if (idx > raw->curlen)
964 goto fail_len;
965 return true;
966fail_len:
967 DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
968 return false;
969}
970
971static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
972 struct drm_dp_sideband_msg_reply_body *repmsg)
973{
974 int idx = 1;
975
976 repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
977 idx++;
978 if (idx > raw->curlen) {
979 DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",___drm_dbg(((void *)0), DRM_UT_KMS, "power up/down phy parse length fail %d %d\n"
, idx, raw->curlen)
980 idx, raw->curlen)___drm_dbg(((void *)0), DRM_UT_KMS, "power up/down phy parse length fail %d %d\n"
, idx, raw->curlen)
;
981 return false0;
982 }
983 return true;
984}
985
986static bool
987drm_dp_sideband_parse_query_stream_enc_status(
988 struct drm_dp_sideband_msg_rx *raw,
989 struct drm_dp_sideband_msg_reply_body *repmsg)
990{
991 struct drm_dp_query_stream_enc_status_ack_reply *reply;
992
993 reply = &repmsg->u.enc_status;
994
995 reply->stream_id = raw->msg[3];
996
997 reply->reply_signed = raw->msg[2] & BIT(0);
998
999 /*
1000 * NOTE: It's my impression from reading the spec that the below parsing
1001 * is correct. However I noticed while testing with an HDCP 1.4 display
1002 * through an HDCP 2.2 hub that only bit 3 was set. In that case, I
1003 * would expect both bits to be set. So keep the parsing following the
1004 * spec, but beware reality might not match the spec (at least for some
1005 * configurations).
1006 */
1007 reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
1008 reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);
1009
1010 reply->query_capable_device_present = raw->msg[2] & BIT(5);
1011 reply->legacy_device_present = raw->msg[2] & BIT(6);
1012 reply->unauthorizable_device_present = raw->msg[2] & BIT(7);
1013
1014 reply->auth_completed = !!(raw->msg[1] & BIT(3));
1015 reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
1016 reply->repeater_present = !!(raw->msg[1] & BIT(5));
1017 reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;
1018
1019 return true;
1020}
1021
1022static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mgr,
1023 struct drm_dp_sideband_msg_rx *raw,
1024 struct drm_dp_sideband_msg_reply_body *msg)
1025{
1026 memset(msg, 0, sizeof(*msg));
1027 msg->reply_type = (raw->msg[0] & 0x80) >> 7;
1028 msg->req_type = (raw->msg[0] & 0x7f);
1029
1030 if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
1031 memcpy(msg->u.nak.guid, &raw->msg[1], 16);
1032 msg->u.nak.reason = raw->msg[17];
1033 msg->u.nak.nak_data = raw->msg[18];
1034 return false;
1035 }
1036
1037 switch (msg->req_type) {
1038 case DP_LINK_ADDRESS:
1039 return drm_dp_sideband_parse_link_address(mgr, raw, msg);
1040 case DP_QUERY_PAYLOAD:
1041 return drm_dp_sideband_parse_query_payload_ack(raw, msg);
1042 case DP_REMOTE_DPCD_READ:
1043 return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
1044 case DP_REMOTE_DPCD_WRITE:
1045 return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
1046 case DP_REMOTE_I2C_READ:
1047 return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
1048 case DP_REMOTE_I2C_WRITE:
1049 return true; /* since there's nothing to parse */
1050 case DP_ENUM_PATH_RESOURCES:
1051 return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
1052 case DP_ALLOCATE_PAYLOAD:
1053 return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
1054 case DP_POWER_DOWN_PHY:
1055 case DP_POWER_UP_PHY:
1056 return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
1057 case DP_CLEAR_PAYLOAD_ID_TABLE:
1058 return true; /* since there's nothing to parse */
1059 case DP_QUERY_STREAM_ENC_STATUS:
1060 return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
1061 default:
1062 drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n",
1063 msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
1064 return false;
1065 }
1066}
1067
1068static bool
1069drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
1070 struct drm_dp_sideband_msg_rx *raw,
1071 struct drm_dp_sideband_msg_req_body *msg)
1072{
1073 int idx = 1;
1074
1075 msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
1076 idx++;
1077 if (idx > raw->curlen)
1078 goto fail_len;
1079
1080 memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
1081 idx += 16;
1082 if (idx > raw->curlen)
1083 goto fail_len;
1084
1085 msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
1086 msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
1087 msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
1088 msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
1089 msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
1090 idx++;
1091 return true;
1092fail_len:
1093 drm_dbg_kms(mgr->dev, "connection status reply parse length fail %d %d\n",__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "connection status reply parse length fail %d %d\n"
, idx, raw->curlen)
1094 idx, raw->curlen)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "connection status reply parse length fail %d %d\n"
, idx, raw->curlen)
;
1095 return false0;
1096}
1097
1098static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
1099 struct drm_dp_sideband_msg_rx *raw,
1100 struct drm_dp_sideband_msg_req_body *msg)
1101{
1102 int idx = 1;
1103
1104 msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
1105 idx++;
1106 if (idx > raw->curlen)
1107 goto fail_len;
1108
1109 memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
1110 idx += 16;
1111 if (idx > raw->curlen)
1112 goto fail_len;
1113
1114 msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
1115 idx++;
1116 return true;
1117fail_len:
1118 drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen);
1119 return false;
1120}
1121
1122static bool drm_dp_sideband_parse_req(const struct drm_dp_mst_topology_mgr *mgr,
1123 struct drm_dp_sideband_msg_rx *raw,
1124 struct drm_dp_sideband_msg_req_body *msg)
1125{
1126 memset(msg, 0, sizeof(*msg));
1127 msg->req_type = (raw->msg[0] & 0x7f);
1128
1129 switch (msg->req_type) {
1130 case DP_CONNECTION_STATUS_NOTIFY:
1131 return drm_dp_sideband_parse_connection_status_notify(mgr, raw, msg);
1132 case DP_RESOURCE_STATUS_NOTIFY:
1133 return drm_dp_sideband_parse_resource_status_notify(mgr, raw, msg);
1134 default:
1135 drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n",
1136 msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
1137 return false;
1138 }
1139}
1140
1141static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
1142 u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
1143{
1144 struct drm_dp_sideband_msg_req_body req;
1145
1146 req.req_type = DP_REMOTE_DPCD_WRITE;
1147 req.u.dpcd_write.port_number = port_num;
1148 req.u.dpcd_write.dpcd_address = offset;
1149 req.u.dpcd_write.num_bytes = num_bytes;
1150 req.u.dpcd_write.bytes = bytes;
1151 drm_dp_encode_sideband_req(&req, msg);
1152}
1153
1154static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
1155{
1156 struct drm_dp_sideband_msg_req_body req;
1157
1158 req.req_type = DP_LINK_ADDRESS;
1159 drm_dp_encode_sideband_req(&req, msg);
1160}
1161
1162static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
1163{
1164 struct drm_dp_sideband_msg_req_body req;
1165
1166 req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
1167 drm_dp_encode_sideband_req(&req, msg);
1168 msg->path_msg = true;
1169}
1170
1171static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
1172 int port_num)
1173{
1174 struct drm_dp_sideband_msg_req_body req;
1175
1176 req.req_type = DP_ENUM_PATH_RESOURCES;
1177 req.u.port_num.port_number = port_num;
1178 drm_dp_encode_sideband_req(&req, msg);
1179 msg->path_msg = true;
1180 return 0;
1181}
1182
1183static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
1184 int port_num,
1185 u8 vcpi, uint16_t pbn,
1186 u8 number_sdp_streams,
1187 u8 *sdp_stream_sink)
1188{
1189 struct drm_dp_sideband_msg_req_body req;
1190
1191 memset(&req, 0, sizeof(req));
1192 req.req_type = DP_ALLOCATE_PAYLOAD;
1193 req.u.allocate_payload.port_number = port_num;
1194 req.u.allocate_payload.vcpi = vcpi;
1195 req.u.allocate_payload.pbn = pbn;
1196 req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
1197 memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
1198 number_sdp_streams);
1199 drm_dp_encode_sideband_req(&req, msg);
1200 msg->path_msg = true;
1201}
1202
1203static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
1204 int port_num, bool power_up)
1205{
1206 struct drm_dp_sideband_msg_req_body req;
1207
1208 if (power_up)
1209 req.req_type = DP_POWER_UP_PHY;
1210 else
1211 req.req_type = DP_POWER_DOWN_PHY;
1212
1213 req.u.port_num.port_number = port_num;
1214 drm_dp_encode_sideband_req(&req, msg);
1215 msg->path_msg = true;
1216}
1217
1218static int
1219build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
1220 u8 *q_id)
1221{
1222 struct drm_dp_sideband_msg_req_body req;
1223
1224 req.req_type = DP_QUERY_STREAM_ENC_STATUS;
1225 req.u.enc_status.stream_id = stream_id;
1226 memcpy(req.u.enc_status.client_id, q_id,
1227 sizeof(req.u.enc_status.client_id));
1228 req.u.enc_status.stream_event = 0;
1229 req.u.enc_status.valid_stream_event = false;
1230 req.u.enc_status.stream_behavior = 0;
1231 req.u.enc_status.valid_stream_behavior = false;
1232
1233 drm_dp_encode_sideband_req(&req, msg);
1234 return 0;
1235}
1236
1237static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
1238 struct drm_dp_sideband_msg_tx *txmsg)
1239{
1240 unsigned int state;
1241
1242 /*
1243 * All updates to txmsg->state are protected by mgr->qlock, and the two
1244 * cases we check here are terminal states. For those the barriers
1245 * provided by the wake_up/wait_event pair are enough.
1246 */
1247 state = READ_ONCE(txmsg->state);
1248 return (state == DRM_DP_SIDEBAND_TX_RX ||
1249 state == DRM_DP_SIDEBAND_TX_TIMEOUT);
1250}
1251
1252static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
1253 struct drm_dp_sideband_msg_tx *txmsg)
1254{
1255 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1256 unsigned long wait_timeout = msecs_to_jiffies(4000);
1257 unsigned long wait_expires = jiffies + wait_timeout;
1258 int ret;
1259
1260 for (;;) {
1261 /*
1262 * If the driver provides a way for this, change to
1263 * poll-waiting for the MST reply interrupt if we didn't receive
1264 * it for 50 msec. This would cater for cases where the HPD
1265 * pulse signal got lost somewhere, even though the sink raised
1266 * the corresponding MST interrupt correctly. One example is the
1267 * Club 3D CAC-1557 TypeC -> DP adapter which for some reason
1268 * filters out short pulses with a duration less than ~540 usec.
1269 *
1270 * The poll period is 50 msec to avoid missing an interrupt
1271 * after the sink has cleared it (after a 110msec timeout
1272 * since it raised the interrupt).
1273 */
1274 ret = wait_event_timeout(mgr->tx_waitq,
1275 check_txmsg_state(mgr, txmsg),
1276 mgr->cbs->poll_hpd_irq ?
1277 msecs_to_jiffies(50) :
1278 wait_timeout);
1279
1280 if (ret || !mgr->cbs->poll_hpd_irq ||
1281 time_after(jiffies, wait_expires))
1282 break;
1283
1284 mgr->cbs->poll_hpd_irq(mgr);
1285 }
1286
1287 mutex_lock(&mgr->qlock)rw_enter_write(&mgr->qlock);
1288 if (ret > 0) {
1289 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT4) {
1290 ret = -EIO5;
1291 goto out;
1292 }
1293 } else {
1294 drm_dbg_kms(mgr->dev, "timedout msg send %p %d %d\n",__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "timedout msg send %p %d %d\n"
, txmsg, txmsg->state, txmsg->seqno)
1295 txmsg, txmsg->state, txmsg->seqno)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "timedout msg send %p %d %d\n"
, txmsg, txmsg->state, txmsg->seqno)
;
1296
1297 /* dump some state */
1298 ret = -EIO5;
1299
1300 /* remove from q */
1301 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED0 ||
1302 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND1 ||
1303 txmsg->state == DRM_DP_SIDEBAND_TX_SENT2)
1304 list_del(&txmsg->next);
1305 }
1306out:
1307 if (unlikely(ret == -EIO)__builtin_expect(!!(ret == -5), 0) && drm_debug_enabled(DRM_UT_DP)drm_debug_enabled_raw(DRM_UT_DP)) {
1308 struct drm_printer p = drm_debug_printer(DBG_PREFIX"[dp_mst]");
1309
1310 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
1311 }
1312 mutex_unlock(&mgr->qlock)rw_exit_write(&mgr->qlock);
1313
1314 drm_dp_mst_kick_tx(mgr);
1315 return ret;
1316}
1317
1318static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
1319{
1320 struct drm_dp_mst_branch *mstb;
1321
1322 mstb = kzalloc(sizeof(*mstb), GFP_KERNEL(0x0001 | 0x0004));
1323 if (!mstb)
1324 return NULL((void *)0);
1325
1326 mstb->lct = lct;
1327 if (lct > 1)
1328 memcpy(mstb->rad, rad, lct / 2)__builtin_memcpy((mstb->rad), (rad), (lct / 2));
1329 INIT_LIST_HEAD(&mstb->ports);
1330 kref_init(&mstb->topology_kref);
1331 kref_init(&mstb->malloc_kref);
1332 return mstb;
1333}
1334
1335static void drm_dp_free_mst_branch_device(struct kref *kref)
1336{
1337 struct drm_dp_mst_branch *mstb =
1338 container_of(kref, struct drm_dp_mst_branch, malloc_kref)({ const __typeof( ((struct drm_dp_mst_branch *)0)->malloc_kref
) *__mptr = (kref); (struct drm_dp_mst_branch *)( (char *)__mptr
- __builtin_offsetof(struct drm_dp_mst_branch, malloc_kref) )
;})
;
1339
1340 if (mstb->port_parent)
1341 drm_dp_mst_put_port_malloc(mstb->port_parent);
1342
1343 kfree(mstb);
1344}
1345
1346/**
1347 * DOC: Branch device and port refcounting
1348 *
1349 * Topology refcount overview
1350 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
1351 *
1352 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
1353 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
1354 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
1355 *
1356 * Topology refcounts are not exposed to drivers, and are handled internally
1357 * by the DP MST helpers. The helpers use them in order to prevent the
1358 * in-memory topology state from being changed in the middle of critical
1359 * operations like changing the internal state of payload allocations. This
1360 * means each branch and port will be considered to be connected to the rest
1361 * of the topology until its topology refcount reaches zero. Additionally,
1362 * for ports this means that their associated &struct drm_connector will stay
1363 * registered with userspace until the port's topology refcount reaches 0.
1364 *
1365 * Malloc refcount overview
1366 * ~~~~~~~~~~~~~~~~~~~~~~~~
1367 *
1368 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
1369 * drm_dp_mst_branch allocated even after all of its topology references have
1370 * been dropped, so that the driver or MST helpers can safely access each
1371 * branch's last known state before it was disconnected from the topology.
1372 * When the malloc refcount of a port or branch reaches 0, the memory
1373 * allocation containing the &struct drm_dp_mst_branch or &struct
1374 * drm_dp_mst_port respectively will be freed.
1375 *
1376 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
1377 * to drivers. As of writing this documentation, there are no drivers that
1378 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
1379 * helpers. Exposing this API to drivers in a race-free manner would take more
1380 * tweaking of the refcounting scheme; however, patches are welcome provided
1381 * there is a legitimate driver usecase for this.
1382 *
1383 * Refcount relationships in a topology
1384 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1385 *
1386 * Let's take a look at why the relationship between topology and malloc
1387 * refcounts is designed the way it is.
1388 *
1389 * .. kernel-figure:: dp-mst/topology-figure-1.dot
1390 *
1391 * An example of topology and malloc refs in a DP MST topology with two
1392 * active payloads. Topology refcount increments are indicated by solid
1393 * lines, and malloc refcount increments are indicated by dashed lines.
1394 * Each starts from the branch which incremented the refcount, and ends at
1395 * the branch to which the refcount belongs, i.e. the arrow points the
1396 * same way as the C pointers used to reference a structure.
1397 *
1398 * As you can see in the above figure, every branch increments the topology
1399 * refcount of its children, and increments the malloc refcount of its
1400 * parent. Additionally, every payload increments the malloc refcount of its
1401 * assigned port by 1.
1402 *
1403 * So, what would happen if MSTB #3 from the above figure was unplugged from
1404 * the system, but the driver hadn't yet removed payload #2 from port #3? The
1405 * topology would start to look like the figure below.
1406 *
1407 * .. kernel-figure:: dp-mst/topology-figure-2.dot
1408 *
1409 * Ports and branch devices which have been released from memory are
1410 * colored grey, and references which have been removed are colored red.
1411 *
1412 * Whenever a port or branch device's topology refcount reaches zero, it will
1413 * decrement the topology refcounts of all its children, the malloc refcount
1414 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
1415 * #4, this means they both have been disconnected from the topology and freed
1416 * from memory. But, because payload #2 is still holding a reference to port
1417 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
1418 * is still accessible from memory. This also means port #3 has not yet
1419 * decremented the malloc refcount of MSTB #3, so its &struct
1420 * drm_dp_mst_branch will also stay allocated in memory until port #3's
1421 * malloc refcount reaches 0.
1422 *
1423 * This relationship is necessary because in order to release payload #2, we
1424 * need to be able to figure out the last relative of port #3 that's still
1425 * connected to the topology. In this case, we would travel up the topology as
1426 * shown below.
1427 *
1428 * .. kernel-figure:: dp-mst/topology-figure-3.dot
1429 *
1430 * And finally, remove payload #2 by communicating with port #2 through
1431 * sideband transactions.
1432 */
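
[To make the scheme above concrete, here is a minimal usage sketch of how the two refcount families are meant to be paired. It is illustrative only: example_access_port() is a hypothetical helper invented for this note, and the topology get/put functions it calls are static helpers internal to this file.]

static void example_access_port(struct drm_dp_mst_port *port)
{
	/* A malloc ref keeps the allocation itself alive. */
	drm_dp_mst_get_port_malloc(port);

	/*
	 * A topology ref must be taken with the "try" variant, since
	 * the port may already have been unplugged; while held, it
	 * guarantees the port is still part of the in-memory topology.
	 */
	if (drm_dp_mst_topology_try_get_port(port)) {
		/* ... safe to inspect topology state here ... */
		drm_dp_mst_topology_put_port(port);
	}

	drm_dp_mst_put_port_malloc(port);
}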
1433
1434/**
1435 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
1436 * device
1437 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
1438 *
1439 * Increments &drm_dp_mst_branch.malloc_kref. When
1440 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
1441 * will be released and @mstb may no longer be used.
1442 *
1443 * See also: drm_dp_mst_put_mstb_malloc()
1444 */
1445static void
1446drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
1447{
1448 kref_get(&mstb->malloc_kref);
1449 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref))__drm_dev_dbg(((void *)0), (mstb->mgr->dev) ? (mstb->
mgr->dev)->dev : ((void *)0), DRM_UT_DRIVER, "mstb %p (%d)\n"
, mstb, kref_read(&mstb->malloc_kref))
;
1450}
1451
1452/**
1453 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
1454 * device
1455 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
1456 *
1457 * Decrements &drm_dp_mst_branch.malloc_kref. When
1458 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
1459 * will be released and @mstb may no longer be used.
1460 *
1461 * See also: drm_dp_mst_get_mstb_malloc()
1462 */
1463static void
1464drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
1465{
1466 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1)__drm_dev_dbg(((void *)0), (mstb->mgr->dev) ? (mstb->
mgr->dev)->dev : ((void *)0), DRM_UT_DRIVER, "mstb %p (%d)\n"
, mstb, kref_read(&mstb->malloc_kref) - 1)
;
1467 kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
1468}
1469
1470static void drm_dp_free_mst_port(struct kref *kref)
1471{
1472 struct drm_dp_mst_port *port =
1473 container_of(kref, struct drm_dp_mst_port, malloc_kref)({ const __typeof( ((struct drm_dp_mst_port *)0)->malloc_kref
) *__mptr = (kref); (struct drm_dp_mst_port *)( (char *)__mptr
- __builtin_offsetof(struct drm_dp_mst_port, malloc_kref) );
})
;
1474
1475 drm_dp_mst_put_mstb_malloc(port->parent);
1476 kfree(port);
1477}
1478
1479/**
1480 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
1481 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
1482 *
1483 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1484 * reaches 0, the memory allocation for @port will be released and @port may
1485 * no longer be used.
1486 *
1487 * Because @port could potentially be freed at any time by the DP MST helpers
1488 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
1489 * function, drivers that wish to make use of &struct drm_dp_mst_port should
1490 * ensure that they grab at least one main malloc reference to their MST ports
1491 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
1492 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
1493 *
1494 * See also: drm_dp_mst_put_port_malloc()
1495 */
1496void
1497drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
1498{
1499 kref_get(&port->malloc_kref);
1500 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref))__drm_dev_dbg(((void *)0), (port->mgr->dev) ? (port->
mgr->dev)->dev : ((void *)0), DRM_UT_DRIVER, "port %p (%d)\n"
, port, kref_read(&port->malloc_kref))
;
1501}
1502EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
1503
1504/**
1505 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
1506 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
1507 *
1508 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1509 * reaches 0, the memory allocation for @port will be released and @port may
1510 * no longer be used.
1511 *
1512 * See also: drm_dp_mst_get_port_malloc()
1513 */
1514void
1515drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
1516{
1517 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1)__drm_dev_dbg(((void *)0), (port->mgr->dev) ? (port->
mgr->dev)->dev : ((void *)0), DRM_UT_DRIVER, "port %p (%d)\n"
, port, kref_read(&port->malloc_kref) - 1)
;
1518 kref_put(&port->malloc_kref, drm_dp_free_mst_port);
1519}
1520EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
1521
1522#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)0
1523
1524#define STACK_DEPTH 8
1525
1526static noinline__attribute__((__noinline__)) void
1527__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
1528 struct drm_dp_mst_topology_ref_history *history,
1529 enum drm_dp_mst_topology_ref_type type)
1530{
1531 struct drm_dp_mst_topology_ref_entry *entry = NULL((void *)0);
1532 depot_stack_handle_t backtrace;
1533 ulong stack_entries[STACK_DEPTH];
1534 uint n;
1535 int i;
1536
1537 n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries)(sizeof((stack_entries)) / sizeof((stack_entries)[0])), 1);
1538 backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL(0x0001 | 0x0004));
1539 if (!backtrace)
1540 return;
1541
1542 /* Try to find an existing entry for this backtrace */
1543 for (i = 0; i < history->len; i++) {
1544 if (history->entries[i].backtrace == backtrace) {
1545 entry = &history->entries[i];
1546 break;
1547 }
1548 }
1549
1550 /* Otherwise add one */
1551 if (!entry) {
1552 struct drm_dp_mst_topology_ref_entry *new;
1553 int new_len = history->len + 1;
1554
1555 new = krealloc(history->entries, sizeof(*new) * new_len,
1556 GFP_KERNEL(0x0001 | 0x0004));
1557 if (!new)
1558 return;
1559
1560 entry = &new[history->len];
1561 history->len = new_len;
1562 history->entries = new;
1563
1564 entry->backtrace = backtrace;
1565 entry->type = type;
1566 entry->count = 0;
1567 }
1568 entry->count++;
1569 entry->ts_nsec = ktime_get_ns();
1570}
1571
1572static int
1573topology_ref_history_cmp(const void *a, const void *b)
1574{
1575 const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
1576
1577 if (entry_a->ts_nsec > entry_b->ts_nsec)
1578 return 1;
1579 else if (entry_a->ts_nsec < entry_b->ts_nsec)
1580 return -1;
1581 else
1582 return 0;
1583}
1584
1585static inline const char *
1586topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
1587{
1588 if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
1589 return "get";
1590 else
1591 return "put";
1592}
1593
1594static void
1595__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
1596 void *ptr, const char *type_str)
1597{
1598 struct drm_printer p = drm_debug_printer(DBG_PREFIX"[dp_mst]");
1599 char *buf = kzalloc(PAGE_SIZE(1 << 12), GFP_KERNEL(0x0001 | 0x0004));
1600 int i;
1601
1602 if (!buf)
1603 return;
1604
1605 if (!history->len)
1606 goto out;
1607
1608 /* First, sort the list so that it goes from oldest to newest
1609 * reference entry
1610 */
1611 sort(history->entries, history->len, sizeof(*history->entries),
1612 topology_ref_history_cmp, NULL((void *)0));
1613
1614 drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
1615 type_str, ptr);
1616
1617 for (i = 0; i < history->len; i++) {
1618 const struct drm_dp_mst_topology_ref_entry *entry =
1619 &history->entries[i];
1620 u64 ts_nsec = entry->ts_nsec;
1621 u32 rem_nsec = do_div(ts_nsec, 1000000000)({ uint32_t __base = (1000000000); uint32_t __rem = ((uint64_t
)(ts_nsec)) % __base; (ts_nsec) = ((uint64_t)(ts_nsec)) / __base
; __rem; })
;
1622
1623 stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE(1 << 12), 4);
1624
1625 drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s",
1626 entry->count,
1627 topology_ref_type_to_str(entry->type),
1628 ts_nsec, rem_nsec / 1000, buf);
1629 }
1630
1631 /* Now free the history, since this is the only time we expose it */
1632 kfree(history->entries);
1633out:
1634 kfree(buf);
1635}
1636
1637static __always_inlineinline __attribute__((__always_inline__)) void
1638drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
1639{
1640 __dump_topology_ref_history(&mstb->topology_ref_history, mstb,
1641 "MSTB");
1642}
1643
1644static __always_inlineinline __attribute__((__always_inline__)) void
1645drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
1646{
1647 __dump_topology_ref_history(&port->topology_ref_history, port,
1648 "Port");
1649}
1650
1651static __always_inlineinline __attribute__((__always_inline__)) void
1652save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
1653 enum drm_dp_mst_topology_ref_type type)
1654{
1655 __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
1656}
1657
1658static __always_inlineinline __attribute__((__always_inline__)) void
1659save_port_topology_ref(struct drm_dp_mst_port *port,
1660 enum drm_dp_mst_topology_ref_type type)
1661{
1662 __topology_ref_save(port->mgr, &port->topology_ref_history, type);
1663}
1664
1665static inline void
1666topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
1667{
1668 mutex_lock(&mgr->topology_ref_history_lock)rw_enter_write(&mgr->topology_ref_history_lock);
1669}
1670
1671static inline void
1672topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
1673{
1674 mutex_unlock(&mgr->topology_ref_history_lock)rw_exit_write(&mgr->topology_ref_history_lock);
1675}
1676#else
1677static inline void
1678topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
1679static inline void
1680topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
1681static inline void
1682drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
1683static inline void
1684drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
1685#define save_mstb_topology_ref(mstb, type)
1686#define save_port_topology_ref(port, type)
1687#endif
1688
1689struct drm_dp_mst_atomic_payload *
1690drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
1691 struct drm_dp_mst_port *port)
1692{
1693 struct drm_dp_mst_atomic_payload *payload;
1694
1695 list_for_each_entry(payload, &state->payloads, next)for (payload = ({ const __typeof( ((__typeof(*payload) *)0)->
next ) *__mptr = ((&state->payloads)->next); (__typeof
(*payload) *)( (char *)__mptr - __builtin_offsetof(__typeof(*
payload), next) );}); &payload->next != (&state->
payloads); payload = ({ const __typeof( ((__typeof(*payload) *
)0)->next ) *__mptr = (payload->next.next); (__typeof(*
payload) *)( (char *)__mptr - __builtin_offsetof(__typeof(*payload
), next) );}))
1696 if (payload->port == port)
1697 return payload;
1698
1699 return NULL((void *)0);
1700}
1701EXPORT_SYMBOL(drm_atomic_get_mst_payload_state);
1702
1703static void drm_dp_destroy_mst_branch_device(struct kref *kref)
1704{
1705 struct drm_dp_mst_branch *mstb =
1706 container_of(kref, struct drm_dp_mst_branch, topology_kref)({ const __typeof( ((struct drm_dp_mst_branch *)0)->topology_kref
) *__mptr = (kref); (struct drm_dp_mst_branch *)( (char *)__mptr
- __builtin_offsetof(struct drm_dp_mst_branch, topology_kref
) );})
;
1707 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1708
1709 drm_dp_mst_dump_mstb_topology_history(mstb);
1710
1711 INIT_LIST_HEAD(&mstb->destroy_next);
1712
1713 /*
1714 * This can get called under mgr->mutex, so we need to perform the
1715 * actual destruction of the mstb in another worker
1716 */
1717 mutex_lock(&mgr->delayed_destroy_lock)rw_enter_write(&mgr->delayed_destroy_lock);
1718 list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
1719 mutex_unlock(&mgr->delayed_destroy_lock)rw_exit_write(&mgr->delayed_destroy_lock);
1720 queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
1721}
1722
1723/**
1724 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
1725 * branch device unless it's zero
1726 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
1727 *
1728 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
1729 * removed from the topology (i.e. &drm_dp_mst_branch.topology_kref has
1730 * reached 0). Holding a topology reference implies that a malloc reference
1731 * will be held to @mstb as long as the user holds the topology reference.
1732 *
1733 * Care should be taken to ensure that the user has at least one malloc
1734 * reference to @mstb. If you already have a topology reference to @mstb, you
1735 * should use drm_dp_mst_topology_get_mstb() instead.
1736 *
1737 * See also:
1738 * drm_dp_mst_topology_get_mstb()
1739 * drm_dp_mst_topology_put_mstb()
1740 *
1741 * Returns:
1742 * * 1: A topology reference was grabbed successfully
1743 * * 0: @mstb is no longer in the topology, no reference was grabbed
1744 */
1745static int __must_check
1746drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
1747{
1748 int ret;
1749
1750 topology_ref_history_lock(mstb->mgr);
1751 ret = kref_get_unless_zero(&mstb->topology_kref);
1752 if (ret) {
1753 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref))__drm_dev_dbg(((void *)0), (mstb->mgr->dev) ? (mstb->
mgr->dev)->dev : ((void *)0), DRM_UT_DRIVER, "mstb %p (%d)\n"
, mstb, kref_read(&mstb->topology_kref))
;
1754 save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1755 }
1756
1757 topology_ref_history_unlock(mstb->mgr);
1758
1759 return ret;
1760}
1761
1762/**
1763 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
1764 * branch device
1765 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
1766 *
1767 * Increments &drm_dp_mst_branch.topology_kref without checking whether or
1768 * not it's already reached 0. This is only valid to use in scenarios where
1769 * you are already guaranteed to have at least one active topology reference
1770 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
1771 *
1772 * See also:
1773 * drm_dp_mst_topology_try_get_mstb()
1774 * drm_dp_mst_topology_put_mstb()
1775 */
1776static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
1777{
1778 topology_ref_history_lock(mstb->mgr);
1779
1780 save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1781 WARN_ON(kref_read(&mstb->topology_kref) == 0)({ int __ret = !!(kref_read(&mstb->topology_kref) == 0
); if (__ret) printf("WARNING %s failed at %s:%d\n", "kref_read(&mstb->topology_kref) == 0"
, "/usr/src/sys/dev/pci/drm/display/drm_dp_mst_topology.c", 1781
); __builtin_expect(!!(__ret), 0); })
;
1782 kref_get(&mstb->topology_kref);
1783 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref))__drm_dev_dbg(((void *)0), (mstb->mgr->dev) ? (mstb->
mgr->dev)->dev : ((void *)0), DRM_UT_DRIVER, "mstb %p (%d)\n"
, mstb, kref_read(&mstb->topology_kref))
;
1784
1785 topology_ref_history_unlock(mstb->mgr);
1786}
1787
1788/**
1789 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
1790 * device
1791 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
1792 *
1793 * Releases a topology reference from @mstb by decrementing
1794 * &drm_dp_mst_branch.topology_kref.
1795 *
1796 * See also:
1797 * drm_dp_mst_topology_try_get_mstb()
1798 * drm_dp_mst_topology_get_mstb()
1799 */
1800static void
1801drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
1802{
1803 topology_ref_history_lock(mstb->mgr);
1804
1805 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1)__drm_dev_dbg(((void *)0), (mstb->mgr->dev) ? (mstb->
mgr->dev)->dev : ((void *)0), DRM_UT_DRIVER, "mstb %p (%d)\n"
, mstb, kref_read(&mstb->topology_kref) - 1)
;
1806 save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
1807
1808 topology_ref_history_unlock(mstb->mgr);
1809 kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
1810}
1811
1812static void drm_dp_destroy_port(struct kref *kref)
1813{
1814 struct drm_dp_mst_port *port =
1815 container_of(kref, struct drm_dp_mst_port, topology_kref)({ const __typeof( ((struct drm_dp_mst_port *)0)->topology_kref
) *__mptr = (kref); (struct drm_dp_mst_port *)( (char *)__mptr
- __builtin_offsetof(struct drm_dp_mst_port, topology_kref) )
;})
;
1816 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1817
1818 drm_dp_mst_dump_port_topology_history(port);
1819
1820 /* There's nothing that needs locking to destroy an input port yet */
1821 if (port->input) {
1822 drm_dp_mst_put_port_malloc(port);
1823 return;
1824 }
1825
1826 kfree(port->cached_edid);
1827
1828 /*
1829 * we can't destroy the connector here, as we might be holding the
1830 * mode_config.mutex from an EDID retrieval
1831 */
1832 mutex_lock(&mgr->delayed_destroy_lock)rw_enter_write(&mgr->delayed_destroy_lock);
1833 list_add(&port->next, &mgr->destroy_port_list);
1834 mutex_unlock(&mgr->delayed_destroy_lock)rw_exit_write(&mgr->delayed_destroy_lock);
1835 queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
1836}
1837
1838/**
1839 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
1840 * port unless it's zero
1841 * @port: &struct drm_dp_mst_port to increment the topology refcount of
1842 *
1843 * Attempts to grab a topology reference to @port, if it hasn't yet been
1844 * removed from the topology (i.e. &drm_dp_mst_port.topology_kref has reached
1845 * 0). Holding a topology reference implies that a malloc reference will be
1846 * held to @port as long as the user holds the topology reference.
1847 *
1848 * Care should be taken to ensure that the user has at least one malloc
1849 * reference to @port. If you already have a topology reference to @port, you
1850 * should use drm_dp_mst_topology_get_port() instead.
1851 *
1852 * See also:
1853 * drm_dp_mst_topology_get_port()
1854 * drm_dp_mst_topology_put_port()
1855 *
1856 * Returns:
1857 * * 1: A topology reference was grabbed successfully
1858 * * 0: @port is no longer in the topology, no reference was grabbed
1859 */
1860static int __must_check
1861drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
1862{
1863 int ret;
1864
1865 topology_ref_history_lock(port->mgr);
1866 ret = kref_get_unless_zero(&port->topology_kref);
1867 if (ret) {
1868 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref))__drm_dev_dbg(((void *)0), (port->mgr->dev) ? (port->
mgr->dev)->dev : ((void *)0), DRM_UT_DRIVER, "port %p (%d)\n"
, port, kref_read(&port->topology_kref))
;
1869 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1870 }
1871
1872 topology_ref_history_unlock(port->mgr);
1873 return ret;
1874}
1875
1876/**
1877 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
1878 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
1879 *
1880 * Increments &drm_dp_mst_port.topology_kref without checking whether or
1881 * not it's already reached 0. This is only valid to use in scenarios where
1882 * you are already guaranteed to have at least one active topology reference
1883 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
1884 *
1885 * See also:
1886 * drm_dp_mst_topology_try_get_port()
1887 * drm_dp_mst_topology_put_port()
1888 */
1889static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
1890{
1891 topology_ref_history_lock(port->mgr);
1892
1893 WARN_ON(kref_read(&port->topology_kref) == 0)({ int __ret = !!(kref_read(&port->topology_kref) == 0
); if (__ret) printf("WARNING %s failed at %s:%d\n", "kref_read(&port->topology_kref) == 0"
, "/usr/src/sys/dev/pci/drm/display/drm_dp_mst_topology.c", 1893
); __builtin_expect(!!(__ret), 0); })
;
1894 kref_get(&port->topology_kref);
1895 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref))__drm_dev_dbg(((void *)0), (port->mgr->dev) ? (port->
mgr->dev)->dev : ((void *)0), DRM_UT_DRIVER, "port %p (%d)\n"
, port, kref_read(&port->topology_kref))
;
1896 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1897
1898 topology_ref_history_unlock(port->mgr);
1899}
1900
1901/**
1902 * drm_dp_mst_topology_put_port() - release a topology reference to a port
1903 * @port: The &struct drm_dp_mst_port to release the topology reference from
1904 *
1905 * Releases a topology reference from @port by decrementing
1906 * &drm_dp_mst_port.topology_kref.
1907 *
1908 * See also:
1909 * drm_dp_mst_topology_try_get_port()
1910 * drm_dp_mst_topology_get_port()
1911 */
1912static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
1913{
1914 topology_ref_history_lock(port->mgr);
1915
1916 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1)__drm_dev_dbg(((void *)0), (port->mgr->dev) ? (port->
mgr->dev)->dev : ((void *)0), DRM_UT_DRIVER, "port %p (%d)\n"
, port, kref_read(&port->topology_kref) - 1)
;
1917 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
1918
1919 topology_ref_history_unlock(port->mgr);
1920 kref_put(&port->topology_kref, drm_dp_destroy_port);
1921}
1922
1923static struct drm_dp_mst_branch *
1924drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
1925 struct drm_dp_mst_branch *to_find)
1926{
1927 struct drm_dp_mst_port *port;
1928 struct drm_dp_mst_branch *rmstb;
1929
1930 if (to_find == mstb)
1931 return mstb;
1932
1933 list_for_each_entry(port, &mstb->ports, next)for (port = ({ const __typeof( ((__typeof(*port) *)0)->next
) *__mptr = ((&mstb->ports)->next); (__typeof(*port
) *)( (char *)__mptr - __builtin_offsetof(__typeof(*port), next
) );}); &port->next != (&mstb->ports); port = (
{ const __typeof( ((__typeof(*port) *)0)->next ) *__mptr =
(port->next.next); (__typeof(*port) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*port), next) );}))
{
1934 if (port->mstb) {
1935 rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1936 port->mstb, to_find);
1937 if (rmstb)
1938 return rmstb;
1939 }
1940 }
1941 return NULL((void *)0);
1942}
1943
1944static struct drm_dp_mst_branch *
1945drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
1946 struct drm_dp_mst_branch *mstb)
1947{
1948 struct drm_dp_mst_branch *rmstb = NULL((void *)0);
1949
1950 mutex_lock(&mgr->lock)rw_enter_write(&mgr->lock);
1951 if (mgr->mst_primary) {
1952 rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1953 mgr->mst_primary, mstb);
1954
1955 if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
1956 rmstb = NULL((void *)0);
1957 }
1958 mutex_unlock(&mgr->lock)rw_exit_write(&mgr->lock);
1959 return rmstb;
1960}
1961
1962static struct drm_dp_mst_port *
1963drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
1964 struct drm_dp_mst_port *to_find)
1965{
1966 struct drm_dp_mst_port *port, *mport;
1967
1968 list_for_each_entry(port, &mstb->ports, next)for (port = ({ const __typeof( ((__typeof(*port) *)0)->next
) *__mptr = ((&mstb->ports)->next); (__typeof(*port
) *)( (char *)__mptr - __builtin_offsetof(__typeof(*port), next
) );}); &port->next != (&mstb->ports); port = (
{ const __typeof( ((__typeof(*port) *)0)->next ) *__mptr =
(port->next.next); (__typeof(*port) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*port), next) );}))
{
1969 if (port == to_find)
1970 return port;
1971
1972 if (port->mstb) {
1973 mport = drm_dp_mst_topology_get_port_validated_locked(
1974 port->mstb, to_find);
1975 if (mport)
1976 return mport;
1977 }
1978 }
1979 return NULL((void *)0);
1980}
1981
1982static struct drm_dp_mst_port *
1983drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
1984 struct drm_dp_mst_port *port)
1985{
1986 struct drm_dp_mst_port *rport = NULL((void *)0);
1987
1988 mutex_lock(&mgr->lock)rw_enter_write(&mgr->lock);
1989 if (mgr->mst_primary) {
1990 rport = drm_dp_mst_topology_get_port_validated_locked(
1991 mgr->mst_primary, port);
1992
1993 if (rport && !drm_dp_mst_topology_try_get_port(rport))
1994 rport = NULL((void *)0);
1995 }
1996 mutex_unlock(&mgr->lock)rw_exit_write(&mgr->lock);
1997 return rport;
1998}
1999
2000static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
2001{
2002 struct drm_dp_mst_port *port;
2003 int ret;
2004
2005 list_for_each_entry(port, &mstb->ports, next)for (port = ({ const __typeof( ((__typeof(*port) *)0)->next
) *__mptr = ((&mstb->ports)->next); (__typeof(*port
) *)( (char *)__mptr - __builtin_offsetof(__typeof(*port), next
) );}); &port->next != (&mstb->ports); port = (
{ const __typeof( ((__typeof(*port) *)0)->next ) *__mptr =
(port->next.next); (__typeof(*port) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*port), next) );}))
{
2006 if (port->port_num == port_num) {
2007 ret = drm_dp_mst_topology_try_get_port(port);
2008 return ret ? port : NULL((void *)0);
2009 }
2010 }
2011
2012 return NULL((void *)0);
2013}
2014
2015/*
2016 * calculate a new RAD for this MST branch device
2017 * if parent has an LCT of 2 then it has 1 nibble of RAD,
2018 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
2019 */
2020static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
2021 u8 *rad)
2022{
2023 int parent_lct = port->parent->lct;
2024 int shift = 4;
2025 int idx = (parent_lct - 1) / 2;
2026
2027 if (parent_lct > 1) {
2028 memcpy(rad, port->parent->rad, idx + 1)__builtin_memcpy((rad), (port->parent->rad), (idx + 1));
2029 shift = (parent_lct % 2) ? 4 : 0;
2030 } else
2031 rad[0] = 0;
2032
2033 rad[idx] |= port->port_num << shift;
2034 return parent_lct + 1;
2035}
2036
2037static bool_Bool drm_dp_mst_is_end_device(u8 pdt, bool_Bool mcs)
2038{
2039 switch (pdt) {
2040 case DP_PEER_DEVICE_DP_LEGACY_CONV0x4:
2041 case DP_PEER_DEVICE_SST_SINK0x3:
2042 return true1;
2043 case DP_PEER_DEVICE_MST_BRANCHING0x2:
2044 /* For sst branch device */
2045 if (!mcs)
2046 return true1;
2047
2048 return false0;
2049 }
2050 return true1;
2051}
2052
2053static int
2054drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
2055 bool_Bool new_mcs)
2056{
2057 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2058 struct drm_dp_mst_branch *mstb;
2059 u8 rad[8], lct;
2060 int ret = 0;
2061
2062 if (port->pdt == new_pdt && port->mcs == new_mcs)
2063 return 0;
2064
2065 /* Teardown the old pdt, if there is one */
2066 if (port->pdt != DP_PEER_DEVICE_NONE0x0) {
2067 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2068 /*
2069 * If the new PDT would also have an i2c bus,
2070 * don't bother with reregistering it
2071 */
2072 if (new_pdt != DP_PEER_DEVICE_NONE0x0 &&
2073 drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
2074 port->pdt = new_pdt;
2075 port->mcs = new_mcs;
2076 return 0;
2077 }
2078
2079 /* remove i2c over sideband */
2080 drm_dp_mst_unregister_i2c_bus(port);
2081 } else {
2082 mutex_lock(&mgr->lock)rw_enter_write(&mgr->lock);
2083 drm_dp_mst_topology_put_mstb(port->mstb);
2084 port->mstb = NULL((void *)0);
2085 mutex_unlock(&mgr->lock)rw_exit_write(&mgr->lock);
2086 }
2087 }
2088
2089 port->pdt = new_pdt;
2090 port->mcs = new_mcs;
2091
2092 if (port->pdt != DP_PEER_DEVICE_NONE0x0) {
2093 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2094 /* add i2c over sideband */
2095 ret = drm_dp_mst_register_i2c_bus(port);
2096 } else {
2097 lct = drm_dp_calculate_rad(port, rad);
2098 mstb = drm_dp_add_mst_branch_device(lct, rad);
2099 if (!mstb) {
2100 ret = -ENOMEM12;
2101 drm_err(mgr->dev, "Failed to create MSTB for port %p", port)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to create MSTB for port %p"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , port)
;
2102 goto out;
2103 }
2104
2105 mutex_lock(&mgr->lock)rw_enter_write(&mgr->lock);
2106 port->mstb = mstb;
2107 mstb->mgr = port->mgr;
2108 mstb->port_parent = port;
2109
2110 /*
2111 * Make sure this port's memory allocation stays
2112 * around until its child MSTB releases it
2113 */
2114 drm_dp_mst_get_port_malloc(port);
2115 mutex_unlock(&mgr->lock)rw_exit_write(&mgr->lock);
2116
2117 /* And make sure we send a link address for this */
2118 ret = 1;
2119 }
2120 }
2121
2122out:
2123 if (ret < 0)
2124 port->pdt = DP_PEER_DEVICE_NONE0x0;
2125 return ret;
2126}
2127
2128/**
2129 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
2130 * @aux: Fake sideband AUX CH
2131 * @offset: address of the (first) register to read
2132 * @buffer: buffer to store the register values
2133 * @size: number of bytes in @buffer
2134 *
2135 * Performs the same functionality for remote devices via
2136 * sideband messaging as drm_dp_dpcd_read() does for local
2137 * devices via actual AUX CH.
2138 *
2139 * Return: Number of bytes read, or negative error code on failure.
2140 */
2141ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
2142 unsigned int offset, void *buffer, size_t size)
2143{
2144 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,({ const __typeof( ((struct drm_dp_mst_port *)0)->aux ) *__mptr
= (aux); (struct drm_dp_mst_port *)( (char *)__mptr - __builtin_offsetof
(struct drm_dp_mst_port, aux) );})
2145 aux)({ const __typeof( ((struct drm_dp_mst_port *)0)->aux ) *__mptr
= (aux); (struct drm_dp_mst_port *)( (char *)__mptr - __builtin_offsetof
(struct drm_dp_mst_port, aux) );})
;
2146
2147 return drm_dp_send_dpcd_read(port->mgr, port,
2148 offset, size, buffer);
2149}
2150
2151/**
2152 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
2153 * @aux: Fake sideband AUX CH
2154 * @offset: address of the (first) register to write
2155 * @buffer: buffer containing the values to write
2156 * @size: number of bytes in @buffer
2157 *
2158 * Performs the same functionality for remote devices via
2159 * sideband messaging as drm_dp_dpcd_write() does for local
2160 * devices via actual AUX CH.
2161 *
2162 * Return: number of bytes written on success, negative error code on failure.
2163 */
2164ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
2165 unsigned int offset, void *buffer, size_t size)
2166{
2167 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,({ const __typeof( ((struct drm_dp_mst_port *)0)->aux ) *__mptr
= (aux); (struct drm_dp_mst_port *)( (char *)__mptr - __builtin_offsetof
(struct drm_dp_mst_port, aux) );})
2168 aux)({ const __typeof( ((struct drm_dp_mst_port *)0)->aux ) *__mptr
= (aux); (struct drm_dp_mst_port *)( (char *)__mptr - __builtin_offsetof
(struct drm_dp_mst_port, aux) );})
;
2169
2170 return drm_dp_send_dpcd_write(port->mgr, port,
2171 offset, size, buffer);
2172}
2173
2174static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
2175{
2176 int ret = 0;
2177
2178 memcpy(mstb->guid, guid, 16)__builtin_memcpy((mstb->guid), (guid), (16));
2179
2180 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
2181 if (mstb->port_parent) {
2182 ret = drm_dp_send_dpcd_write(mstb->mgr,
2183 mstb->port_parent,
2184 DP_GUID0x030, 16, mstb->guid);
2185 } else {
2186 ret = drm_dp_dpcd_write(mstb->mgr->aux,
2187 DP_GUID0x030, mstb->guid, 16);
2188 }
2189 }
2190
2191 if (ret < 16 && ret > 0)
2192 return -EPROTO95;
2193
2194 return ret == 16 ? 0 : ret;
2195}
2196
2197static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
2198 int pnum,
2199 char *proppath,
2200 size_t proppath_size)
2201{
2202 int i;
2203 char temp[8];
2204
2205 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
2206 for (i = 0; i < (mstb->lct - 1); i++) {
2207 int shift = (i % 2) ? 0 : 4;
2208 int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
2209
2210 snprintf(temp, sizeof(temp), "-%d", port_num);
2211 strlcat(proppath, temp, proppath_size);
2212 }
2213 snprintf(temp, sizeof(temp), "-%d", pnum);
2214 strlcat(proppath, temp, proppath_size);
2215}
2216
2217/**
2218 * drm_dp_mst_connector_late_register() - Late MST connector registration
2219 * @connector: The MST connector
2220 * @port: The MST port for this connector
2221 *
2222 * Helper to register the remote aux device for this MST port. Drivers should
2223 * call this from their mst connector's late_register hook to enable MST aux
2224 * devices.
2225 *
2226 * Return: 0 on success, negative error code on failure.
2227 */
2228int drm_dp_mst_connector_late_register(struct drm_connector *connector,
2229 struct drm_dp_mst_port *port)
2230{
2231#ifdef __linux__
2232 drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n",__drm_dev_dbg(((void *)0), (port->mgr->dev) ? (port->
mgr->dev)->dev : ((void *)0), DRM_UT_KMS, "registering %s remote bus for %s\n"
, port->aux.name, connector->kdev->kobj.name)
2233 port->aux.name, connector->kdev->kobj.name)__drm_dev_dbg(((void *)0), (port->mgr->dev) ? (port->
mgr->dev)->dev : ((void *)0), DRM_UT_KMS, "registering %s remote bus for %s\n"
, port->aux.name, connector->kdev->kobj.name)
;
2234#else
2235 drm_dbg_kms(port->mgr->dev, "registering %s remote bus\n",__drm_dev_dbg(((void *)0), (port->mgr->dev) ? (port->
mgr->dev)->dev : ((void *)0), DRM_UT_KMS, "registering %s remote bus\n"
, port->aux.name)
2236 port->aux.name)__drm_dev_dbg(((void *)0), (port->mgr->dev) ? (port->
mgr->dev)->dev : ((void *)0), DRM_UT_KMS, "registering %s remote bus\n"
, port->aux.name)
;
2237#endif
2238
2239 port->aux.dev = connector->kdev;
2240 return drm_dp_aux_register_devnode(&port->aux);
2241}
2242EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
2243
2244/**
2245 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2246 * @connector: The MST connector
2247 * @port: The MST port for this connector
2248 *
2249 * Helper to unregister the remote aux device for this MST port, registered by
2250 * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
2251 * connector's early_unregister hook.
2252 */
2253void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2254 struct drm_dp_mst_port *port)
2255{
2256#ifdef __linux__
2257 drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n",__drm_dev_dbg(((void *)0), (port->mgr->dev) ? (port->
mgr->dev)->dev : ((void *)0), DRM_UT_KMS, "unregistering %s remote bus for %s\n"
, port->aux.name, connector->kdev->kobj.name)
2258 port->aux.name, connector->kdev->kobj.name)__drm_dev_dbg(((void *)0), (port->mgr->dev) ? (port->
mgr->dev)->dev : ((void *)0), DRM_UT_KMS, "unregistering %s remote bus for %s\n"
, port->aux.name, connector->kdev->kobj.name)
;
2259#else
2260 drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus\n",__drm_dev_dbg(((void *)0), (port->mgr->dev) ? (port->
mgr->dev)->dev : ((void *)0), DRM_UT_KMS, "unregistering %s remote bus\n"
, port->aux.name)
2261 port->aux.name)__drm_dev_dbg(((void *)0), (port->mgr->dev) ? (port->
mgr->dev)->dev : ((void *)0), DRM_UT_KMS, "unregistering %s remote bus\n"
, port->aux.name)
;
2262#endif
2263 drm_dp_aux_unregister_devnode(&port->aux);
2264}
2265EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
2266
2267static void
2268drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2269 struct drm_dp_mst_port *port)
2270{
2271 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2272 char proppath[255];
2273 int ret;
2274
2275 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2276 port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2277 if (!port->connector) {
2278 ret = -ENOMEM12;
2279 goto error;
2280 }
2281
2282 if (port->pdt != DP_PEER_DEVICE_NONE0x0 &&
2283 drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
2284 port->port_num >= DP_MST_LOGICAL_PORT_08)
2285 port->cached_edid = drm_get_edid(port->connector,
2286 &port->aux.ddc);
2287
2288 drm_connector_register(port->connector);
2289 return;
2290
2291error:
2292 drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to create connector for port %p: %d\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , port, ret
)
;
2293}
2294
2295/*
2296 * Drop a topology reference, and unlink the port from the in-memory topology
2297 * layout
2298 */
2299static void
2300drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2301 struct drm_dp_mst_port *port)
2302{
2303 mutex_lock(&mgr->lock)rw_enter_write(&mgr->lock);
2304 port->parent->num_ports--;
2305 list_del(&port->next);
2306 mutex_unlock(&mgr->lock)rw_exit_write(&mgr->lock);
2307 drm_dp_mst_topology_put_port(port);
2308}
2309
2310static struct drm_dp_mst_port *
2311drm_dp_mst_add_port(struct drm_device *dev,
2312 struct drm_dp_mst_topology_mgr *mgr,
2313 struct drm_dp_mst_branch *mstb, u8 port_number)
2314{
2315 struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL(0x0001 | 0x0004));
2316
2317 if (!port)
2318 return NULL((void *)0);
2319
2320 kref_init(&port->topology_kref);
2321 kref_init(&port->malloc_kref);
2322 port->parent = mstb;
2323 port->port_num = port_number;
2324 port->mgr = mgr;
2325 port->aux.name = "DPMST";
2326 port->aux.dev = dev->dev;
2327 port->aux.is_remote = true1;
2328
2329 /* initialize the MST downstream port's AUX crc work queue */
2330 port->aux.drm_dev = dev;
2331 drm_dp_remote_aux_init(&port->aux);
2332
2333 /*
2334 * Make sure the memory allocation for our parent branch stays
2335 * around until our own memory allocation is released
2336 */
2337 drm_dp_mst_get_mstb_malloc(mstb);
2338
2339 return port;
2340}
2341
2342static int
2343drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2344 struct drm_device *dev,
2345 struct drm_dp_link_addr_reply_port *port_msg)
2346{
2347 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2348 struct drm_dp_mst_port *port;
2349 int old_ddps = 0, ret;
2350 u8 new_pdt = DP_PEER_DEVICE_NONE0x0;
2351 bool_Bool new_mcs = 0;
2352 bool_Bool created = false0, send_link_addr = false0, changed = false0;
2353
2354 port = drm_dp_get_port(mstb, port_msg->port_number);
2355 if (!port) {
2356 port = drm_dp_mst_add_port(dev, mgr, mstb,
2357 port_msg->port_number);
2358 if (!port)
2359 return -ENOMEM12;
2360 created = true1;
2361 changed = true1;
2362 } else if (!port->input && port_msg->input_port && port->connector) {
2363 /* Since port->connector can't be changed here, we create a
2364 * new port if input_port changes from 0 to 1
2365 */
2366 drm_dp_mst_topology_unlink_port(mgr, port);
2367 drm_dp_mst_topology_put_port(port);
2368 port = drm_dp_mst_add_port(dev, mgr, mstb,
2369 port_msg->port_number);
2370 if (!port)
2371 return -ENOMEM12;
2372 changed = true1;
2373 created = true1;
2374 } else if (port->input && !port_msg->input_port) {
2375 changed = true1;
2376 } else if (port->connector) {
2377 /* We're updating a port that's exposed to userspace, so do it
2378 * under lock
2379 */
2380 drm_modeset_lock(&mgr->base.lock, NULL((void *)0));
2381
2382 old_ddps = port->ddps;
2383 changed = port->ddps != port_msg->ddps ||
2384 (port->ddps &&
2385 (port->ldps != port_msg->legacy_device_plug_status ||
2386 port->dpcd_rev != port_msg->dpcd_revision ||
2387 port->mcs != port_msg->mcs ||
2388 port->pdt != port_msg->peer_device_type ||
2389 port->num_sdp_stream_sinks !=
2390 port_msg->num_sdp_stream_sinks));
2391 }
2392
2393 port->input = port_msg->input_port;
2394 if (!port->input)
2395 new_pdt = port_msg->peer_device_type;
2396 new_mcs = port_msg->mcs;
2397 port->ddps = port_msg->ddps;
2398 port->ldps = port_msg->legacy_device_plug_status;
2399 port->dpcd_rev = port_msg->dpcd_revision;
2400 port->num_sdp_streams = port_msg->num_sdp_streams;
2401 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
2402
2403 /* manage mstb port lists with mgr lock - take a reference
2404 for this list */
2405 if (created) {
2406 mutex_lock(&mgr->lock)rw_enter_write(&mgr->lock);
2407 drm_dp_mst_topology_get_port(port);
2408 list_add(&port->next, &mstb->ports);
2409 mstb->num_ports++;
2410 mutex_unlock(&mgr->lock)rw_exit_write(&mgr->lock);
2411 }
2412
2413 /*
2414 * Reprobe PBN caps on both hotplug, and when re-probing the link
2415 * for our parent mstb
2416 */
2417 if (old_ddps != port->ddps || !created) {
2418 if (port->ddps && !port->input) {
2419 ret = drm_dp_send_enum_path_resources(mgr, mstb,
2420 port);
2421 if (ret == 1)
2422 changed = true1;
2423 } else {
2424 port->full_pbn = 0;
2425 }
2426 }
2427
2428 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2429 if (ret == 1) {
2430 send_link_addr = true1;
2431 } else if (ret < 0) {
2432 drm_err(dev, "Failed to change PDT on port %p: %d\n", port, ret)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to change PDT on port %p: %d\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , port, ret
)
;
2433 goto fail;
2434 }
2435
2436 /*
2437 * If this port wasn't just created, then we're reprobing because
2438 * we're coming out of suspend. In this case, always resend the link
2439 * address if there's an MSTB on this port
2440 */
2441 if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING0x2 &&
2442 port->mcs)
2443 send_link_addr = true1;
2444
2445 if (port->connector)
2446 drm_modeset_unlock(&mgr->base.lock);
2447 else if (!port->input)
2448 drm_dp_mst_port_add_connector(mstb, port);
2449
2450 if (send_link_addr && port->mstb) {
2451 ret = drm_dp_send_link_address(mgr, port->mstb);
2452 if (ret == 1) /* MSTB below us changed */
2453 changed = true1;
2454 else if (ret < 0)
2455 goto fail_put;
2456 }
2457
2458 /* put reference to this port */
2459 drm_dp_mst_topology_put_port(port);
2460 return changed;
2461
2462fail:
2463 drm_dp_mst_topology_unlink_port(mgr, port);
2464 if (port->connector)
2465 drm_modeset_unlock(&mgr->base.lock);
2466fail_put:
2467 drm_dp_mst_topology_put_port(port);
2468 return ret;
2469}
2470
2471static int
2472drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2473 struct drm_dp_connection_status_notify *conn_stat)
2474{
2475 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2476 struct drm_dp_mst_port *port;
2477 int old_ddps, ret;
2478 u8 new_pdt;
2479 bool_Bool new_mcs;
2480 bool_Bool dowork = false0, create_connector = false0;
2481
2482 port = drm_dp_get_port(mstb, conn_stat->port_number);
2483 if (!port)
2484 return 0;
2485
2486 if (port->connector) {
2487 if (!port->input && conn_stat->input_port) {
2488 /*
2489 * We can't remove a connector from an already exposed
2490 * port, so just throw the port out and make sure we
2491 * reprobe the link address of its parent MSTB
2492 */
2493 drm_dp_mst_topology_unlink_port(mgr, port);
2494 mstb->link_address_sent = false0;
2495 dowork = true1;
2496 goto out;
2497 }
2498
2499 /* Locking is only needed if the port's exposed to userspace */
2500 drm_modeset_lock(&mgr->base.lock, NULL((void *)0));
2501 } else if (port->input && !conn_stat->input_port) {
2502 create_connector = true1;
2503 /* Reprobe link address so we get num_sdp_streams */
2504 mstb->link_address_sent = false0;
2505 dowork = true1;
2506 }
2507
2508 old_ddps = port->ddps;
2509 port->input = conn_stat->input_port;
2510 port->ldps = conn_stat->legacy_device_plug_status;
2511 port->ddps = conn_stat->displayport_device_plug_status;
2512
2513 if (old_ddps != port->ddps) {
2514 if (port->ddps && !port->input)
2515 drm_dp_send_enum_path_resources(mgr, mstb, port);
2516 else
2517 port->full_pbn = 0;
2518 }
2519
2520 new_pdt = port->input ? DP_PEER_DEVICE_NONE0x0 : conn_stat->peer_device_type;
2521 new_mcs = conn_stat->message_capability_status;
2522 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2523 if (ret == 1) {
2524 dowork = true1;
2525 } else if (ret < 0) {
2526 drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to change PDT for port %p: %d\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , port, ret
)
;
2527 dowork = false0;
2528 }
2529
2530 if (port->connector)
2531 drm_modeset_unlock(&mgr->base.lock);
2532 else if (create_connector)
2533 drm_dp_mst_port_add_connector(mstb, port);
2534
2535out:
2536 drm_dp_mst_topology_put_port(port);
2537 return dowork;
2538}
2539
2540static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2541 u8 lct, u8 *rad)
2542{
2543 struct drm_dp_mst_branch *mstb;
2544 struct drm_dp_mst_port *port;
2545 int i, ret;
2546 /* find the port by iterating down */
2547
2548 mutex_lock(&mgr->lock)rw_enter_write(&mgr->lock);
2549 mstb = mgr->mst_primary;
2550
2551 if (!mstb)
27
Assuming 'mstb' is non-null
28
Taking false branch
2552 goto out;
2553
2554 for (i = 0; i < lct - 1; i++) {
29
The value 0 is assigned to 'i'
30
Assuming the condition is true
2555 int shift = (i % 2) ? 0 : 4;
31
'?' condition is false
2556 int port_num = (rad[i / 2] >> shift) & 0xf;
32
The left operand of '>>' is a garbage value
2557
2558 list_for_each_entry(port, &mstb->ports, next)for (port = ({ const __typeof( ((__typeof(*port) *)0)->next
) *__mptr = ((&mstb->ports)->next); (__typeof(*port
) *)( (char *)__mptr - __builtin_offsetof(__typeof(*port), next
) );}); &port->next != (&mstb->ports); port = (
{ const __typeof( ((__typeof(*port) *)0)->next ) *__mptr =
(port->next.next); (__typeof(*port) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*port), next) );}))
{
2559 if (port->port_num == port_num) {
2560 mstb = port->mstb;
2561 if (!mstb) {
2562 drm_err(mgr->dev,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "failed to lookup MSTB with lct %d, rad %02x\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , lct, rad
[0])
2563 "failed to lookup MSTB with lct %d, rad %02x\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "failed to lookup MSTB with lct %d, rad %02x\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , lct, rad
[0])
2564 lct, rad[0])printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "failed to lookup MSTB with lct %d, rad %02x\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , lct, rad
[0])
;
2565 goto out;
2566 }
2567
2568 break;
2569 }
2570 }
2571 }
2572 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2573 if (!ret)
2574 mstb = NULL((void *)0);
2575out:
2576 mutex_unlock(&mgr->lock)rw_exit_write(&mgr->lock);
2577 return mstb;
2578}
2579
2580static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2581 struct drm_dp_mst_branch *mstb,
2582 const uint8_t *guid)
2583{
2584 struct drm_dp_mst_branch *found_mstb;
2585 struct drm_dp_mst_port *port;
2586
2587 if (!mstb)
2588 return NULL((void *)0);
2589
2590 if (memcmp(mstb->guid, guid, 16)__builtin_memcmp((mstb->guid), (guid), (16)) == 0)
2591 return mstb;
2592
2593
2594 list_for_each_entry(port, &mstb->ports, next)for (port = ({ const __typeof( ((__typeof(*port) *)0)->next
) *__mptr = ((&mstb->ports)->next); (__typeof(*port
) *)( (char *)__mptr - __builtin_offsetof(__typeof(*port), next
) );}); &port->next != (&mstb->ports); port = (
{ const __typeof( ((__typeof(*port) *)0)->next ) *__mptr =
(port->next.next); (__typeof(*port) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*port), next) );}))
{
2595 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2596
2597 if (found_mstb)
2598 return found_mstb;
2599 }
2600
2601 return NULL((void *)0);
2602}
2603
2604static struct drm_dp_mst_branch *
2605drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2606 const uint8_t *guid)
2607{
2608 struct drm_dp_mst_branch *mstb;
2609 int ret;
2610
2611 /* find the port by iterating down */
2612 mutex_lock(&mgr->lock);
2613
2614 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2615 if (mstb) {
2616 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2617 if (!ret)
2618 mstb = NULL;
2619 }
2620
2621 mutex_unlock(&mgr->lock);
2622 return mstb;
2623}
2624
2625static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2626 struct drm_dp_mst_branch *mstb)
2627{
2628 struct drm_dp_mst_port *port;
2629 int ret;
2630 bool changed = false;
2631
2632 if (!mstb->link_address_sent) {
2633 ret = drm_dp_send_link_address(mgr, mstb);
2634 if (ret == 1)
2635 changed = true;
2636 else if (ret < 0)
2637 return ret;
2638 }
2639
2640 list_for_each_entry(port, &mstb->ports, next) {
2641 if (port->input || !port->ddps || !port->mstb)
2642 continue;
2643
2644 ret = drm_dp_check_and_send_link_address(mgr, port->mstb);
2645 if (ret == 1)
2646 changed = true;
2647 else if (ret < 0)
2648 return ret;
2649 }
2650
2651 return changed;
2652}
2653
2654static void drm_dp_mst_link_probe_work(struct work_struct *work)
2655{
2656 struct drm_dp_mst_topology_mgr *mgr =
2657     container_of(work, struct drm_dp_mst_topology_mgr, work);
2658 struct drm_device *dev = mgr->dev;
2659 struct drm_dp_mst_branch *mstb;
2660 int ret;
2661 bool clear_payload_id_table;
2662
2663 mutex_lock(&mgr->probe_lock);
2664
2665 mutex_lock(&mgr->lock);
2666 clear_payload_id_table = !mgr->payload_id_table_cleared;
2667 mgr->payload_id_table_cleared = true;
2668
2669 mstb = mgr->mst_primary;
2670 if (mstb) {
2671 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2672 if (!ret)
2673 mstb = NULL;
2674 }
2675 mutex_unlock(&mgr->lock);
2676 if (!mstb) {
2677 mutex_unlock(&mgr->probe_lock);
2678 return;
2679 }
2680
2681 /*
2682 * Certain branch devices seem to incorrectly report an available_pbn
2683 * of 0 on downstream sinks, even after clearing the
2684 * DP_PAYLOAD_ALLOCATE_* registers in
2685 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
2686 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
2687 * things work again.
2688 */
2689 if (clear_payload_id_table) {
2690 drm_dbg_kms(dev, "Clearing payload ID table\n");
2691 drm_dp_send_clear_payload_id_table(mgr, mstb);
2692 }
2693
2694 ret = drm_dp_check_and_send_link_address(mgr, mstb);
2695 drm_dp_mst_topology_put_mstb(mstb);
2696
2697 mutex_unlock(&mgr->probe_lock);
2698 if (ret > 0)
2699 drm_kms_helper_hotplug_event(dev);
2700}
2701
2702static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2703 u8 *guid)
2704{
2705 u64 salt;
2706
2707 if (memchr_inv(guid, 0, 16))
2708 return true;
2709
2710 salt = get_jiffies_64();
2711
2712 memcpy(&guid[0], &salt, sizeof(u64));
2713 memcpy(&guid[8], &salt, sizeof(u64));
2714
2715 return false;
2716}
2717
2718static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
2719 u8 port_num, u32 offset, u8 num_bytes)
2720{
2721 struct drm_dp_sideband_msg_req_body req;
2722
2723 req.req_type = DP_REMOTE_DPCD_READ;
2724 req.u.dpcd_read.port_number = port_num;
2725 req.u.dpcd_read.dpcd_address = offset;
2726 req.u.dpcd_read.num_bytes = num_bytes;
2727 drm_dp_encode_sideband_req(&req, msg);
2728}
2729
2730static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2731     bool up, u8 *msg, int len)
2732{
2733 int ret;
2734 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2735 int tosend, total, offset;
2736 int retries = 0;
2737
2738retry:
2739 total = len;
2740 offset = 0;
2741 do {
2742 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2743
2744 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2745 &msg[offset],
2746 tosend);
2747 if (ret != tosend) {
2748 if (ret == -EIO && retries < 5) {
2749 retries++;
2750 goto retry;
2751 }
2752 drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "failed to dpcd write %d %d\n"
, tosend, ret)
;
2753
2754 return -EIO5;
2755 }
2756 offset += tosend;
2757 total -= tosend;
2758 } while (total > 0);
2759 return 0;
2760}
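For reference, the loop above never issues a single DPCD transfer larger than 16 bytes. A small worked illustration (values assumed for the example; MIN stands in for the min3() above): with mgr->max_dpcd_transaction_bytes == 16 and a 20-byte message, the writes land at regbase + 0 (16 bytes) and regbase + 16 (4 bytes):

    /* Illustration of the chunking performed by the do/while loop above. */
    int total = 20, offset = 0, max_xfer = 16;

    while (total > 0) {
        int tosend = MIN(max_xfer, MIN(16, total));  /* min3() */
        /* 1st pass: tosend == 16, write at regbase + 0  */
        /* 2nd pass: tosend == 4,  write at regbase + 16 */
        offset += tosend;
        total -= tosend;
    }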
2761
2762static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2763 struct drm_dp_sideband_msg_tx *txmsg)
2764{
2765 struct drm_dp_mst_branch *mstb = txmsg->dst;
2766 u8 req_type;
2767
2768 req_type = txmsg->msg[0] & 0x7f;
2769 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2770     req_type == DP_RESOURCE_STATUS_NOTIFY ||
2771     req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
2772 hdr->broadcast = 1;
2773 else
2774 hdr->broadcast = 0;
2775 hdr->path_msg = txmsg->path_msg;
2776 if (hdr->broadcast) {
2777 hdr->lct = 1;
2778 hdr->lcr = 6;
2779 } else {
2780 hdr->lct = mstb->lct;
2781 hdr->lcr = mstb->lct - 1;
2782 }
2783
2784 memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
2785
2786 return 0;
2787}
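The RAD handling above mirrors the decode loop at lines 2554-2556: the relative address stores one 4-bit port number per hop, packed two per byte with the first hop in the high nibble, which is why hdr->lct / 2 bytes are copied. A small sketch of the packing (assumed layout, matching the shift = (i % 2) ? 0 : 4 decode; these helpers are illustrative, not part of this file):

    /* Pack/unpack one 4-bit port number per hop, high nibble first. */
    static void rad_pack(u8 *rad, int hop, u8 port_num)
    {
        int shift = (hop % 2) ? 0 : 4;

        rad[hop / 2] &= ~(0xf << shift);
        rad[hop / 2] |= (port_num & 0xf) << shift;
    }

    static u8 rad_unpack(const u8 *rad, int hop)
    {
        int shift = (hop % 2) ? 0 : 4;

        return (rad[hop / 2] >> shift) & 0xf;
    }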
2788/*
2789 * process a single block of the next message in the sideband queue
2790 */
2791static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2792 struct drm_dp_sideband_msg_tx *txmsg,
2793     bool up)
2794{
2795 u8 chunk[48];
2796 struct drm_dp_sideband_msg_hdr hdr;
2797 int len, space, idx, tosend;
2798 int ret;
2799
2800 if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
2801 return 0;
2802
2803 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2804
2805 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
2806 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2807
2808 /* make hdr from dst mst */
2809 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2810 if (ret < 0)
2811 return ret;
2812
2813 /* amount left to send in this message */
2814 len = txmsg->cur_len - txmsg->cur_offset;
2815
2816 /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
2817 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2818
2819 tosend = min(len, space);
2820 if (len == txmsg->cur_len)
2821 hdr.somt = 1;
2822 if (space >= len)
2823 hdr.eomt = 1;
2824
2825
2826 hdr.msg_len = tosend + 1;
2827 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2828 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2829 /* add crc at end */
2830 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2831 idx += tosend + 1;
2832
2833 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2834 if (ret) {
2835 if (drm_debug_enabled(DRM_UT_DP)) {
2836 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2837
2838 drm_printf(&p, "sideband msg failed to send\n");
2839 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2840 }
2841 return ret;
2842 }
2843
2844 txmsg->cur_offset += tosend;
2845 if (txmsg->cur_offset == txmsg->cur_len) {
2846 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2847 return 1;
2848 }
2849 return 0;
2850}
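The space computation at line 2817 bounds how much message body fits in one chunk. A worked example, assuming drm_dp_calc_sb_hdr_size() returns 3 + lct / 2 bytes as in the Linux implementation:

    /* For a directly attached branch (lct == 1, no RAD bytes):
     *   space = 48 - 1 (CRC) - 3 (header) = 44 body bytes per chunk,
     * so a 100-byte message goes out as chunks of 44, 44 and 12 bytes,
     * with somt set on the first chunk and eomt on the last. */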
2851
2852static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2853{
2854 struct drm_dp_sideband_msg_tx *txmsg;
2855 int ret;
2856
2857 WARN_ON(!mutex_is_locked(&mgr->qlock));
2858
2859 /* construct a chunk from the first msg in the tx_msg queue */
2860 if (list_empty(&mgr->tx_msg_downq))
2861 return;
2862
2863 txmsg = list_first_entry(&mgr->tx_msg_downq,
2864     struct drm_dp_sideband_msg_tx, next);
2865 ret = process_single_tx_qlock(mgr, txmsg, false);
2866 if (ret < 0) {
2867 drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "failed to send msg in q %d\n"
, ret)
;
2868 list_del(&txmsg->next);
2869 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT4;
2870 wake_up_all(&mgr->tx_waitq)wake_up(&mgr->tx_waitq);
2871 }
2872}
2873
2874static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2875 struct drm_dp_sideband_msg_tx *txmsg)
2876{
2877 mutex_lock(&mgr->qlock);
2878 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2879
2880 if (drm_debug_enabled(DRM_UT_DP)) {
2881 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2882
2883 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2884 }
2885
2886 if (list_is_singular(&mgr->tx_msg_downq))
2887 process_single_down_tx_qlock(mgr);
2888 mutex_unlock(&mgr->qlock);
2889}
2890
2891static void
2892drm_dp_dump_link_address(const struct drm_dp_mst_topology_mgr *mgr,
2893 struct drm_dp_link_address_ack_reply *reply)
2894{
2895 struct drm_dp_link_addr_reply_port *port_reply;
2896 int i;
2897
2898 for (i = 0; i < reply->nports; i++) {
2899 port_reply = &reply->ports[i];
2900 drm_dbg_kms(mgr->dev,
2901     "port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2902     i,
2903     port_reply->input_port,
2904     port_reply->peer_device_type,
2905     port_reply->port_number,
2906     port_reply->dpcd_revision,
2907     port_reply->mcs,
2908     port_reply->ddps,
2909     port_reply->legacy_device_plug_status,
2910     port_reply->num_sdp_streams,
2911     port_reply->num_sdp_stream_sinks);
2912 }
2913}
2914
2915static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2916 struct drm_dp_mst_branch *mstb)
2917{
2918 struct drm_dp_sideband_msg_tx *txmsg;
2919 struct drm_dp_link_address_ack_reply *reply;
2920 struct drm_dp_mst_port *port, *tmp;
2921 int i, ret, port_mask = 0;
2922 bool changed = false;
2923
2924 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2925 if (!txmsg)
2926 return -ENOMEM;
2927
2928 txmsg->dst = mstb;
2929 build_link_address(txmsg);
2930
2931 mstb->link_address_sent = true;
2932 drm_dp_queue_down_tx(mgr, txmsg);
2933
2934 /* FIXME: Actually do some real error handling here */
2935 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2936 if (ret <= 0) {
2937 drm_err(mgr->dev, "Sending link address failed with %d\n", ret)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Sending link address failed with %d\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , ret)
;
2938 goto out;
2939 }
2940 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2941 drm_err(mgr->dev, "link address NAK received\n");
2942 ret = -EIO;
2943 goto out;
2944 }
2945
2946 reply = &txmsg->reply.u.link_addr;
2947 drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "link address reply: %d\n", reply
->nports)
;
2948 drm_dp_dump_link_address(mgr, reply);
2949
2950 ret = drm_dp_check_mstb_guid(mstb, reply->guid);
2951 if (ret) {
2952 char buf[64];
2953
2954 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
2955 drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "GUID check on %s failed: %d\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , buf, ret
)
;
2956 goto out;
2957 }
2958
2959 for (i = 0; i < reply->nports; i++) {
2960 port_mask |= BIT(reply->ports[i].port_number);
2961 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2962 &reply->ports[i]);
2963 if (ret == 1)
2964 changed = true;
2965 else if (ret < 0)
2966 goto out;
2967 }
2968
2969 /* Prune any ports that are currently a part of mstb in our in-memory
2970 * topology, but were not seen in this link address. Usually this
2971 * means that they were removed while the topology was out of sync,
2972 * e.g. during suspend/resume
2973 */
2974 mutex_lock(&mgr->lock);
2975 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
2976 if (port_mask & BIT(port->port_num))
2977 continue;
2978
2979 drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n",__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "port %d was not in link address, removing\n"
, port->port_num)
2980 port->port_num)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "port %d was not in link address, removing\n"
, port->port_num)
;
2981 list_del(&port->next);
2982 drm_dp_mst_topology_put_port(port);
2983 changed = true;
2984 }
2985 mutex_unlock(&mgr->lock);
2986
2987out:
2988 if (ret <= 0)
2989 mstb->link_address_sent = false;
2990 kfree(txmsg);
2991 return ret < 0 ? ret : changed;
2992}
2993
2994static void
2995drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
2996 struct drm_dp_mst_branch *mstb)
2997{
2998 struct drm_dp_sideband_msg_tx *txmsg;
2999 int ret;
3000
3001 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3002 if (!txmsg)
3003 return;
3004
3005 txmsg->dst = mstb;
3006 build_clear_payload_id_table(txmsg);
3007
3008 drm_dp_queue_down_tx(mgr, txmsg);
3009
3010 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3011 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3012 drm_dbg_kms(mgr->dev, "clear payload table id nak received\n");
3013
3014 kfree(txmsg);
3015}
3016
3017static int
3018drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
3019 struct drm_dp_mst_branch *mstb,
3020 struct drm_dp_mst_port *port)
3021{
3022 struct drm_dp_enum_path_resources_ack_reply *path_res;
3023 struct drm_dp_sideband_msg_tx *txmsg;
3024 int ret;
3025
3026 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3027 if (!txmsg)
3028 return -ENOMEM;
3029
3030 txmsg->dst = mstb;
3031 build_enum_path_resources(txmsg, port->port_num);
3032
3033 drm_dp_queue_down_tx(mgr, txmsg);
3034
3035 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3036 if (ret > 0) {
3037 ret = 0;
3038 path_res = &txmsg->reply.u.path_resources;
3039
3040 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3041 drm_dbg_kms(mgr->dev, "enum path resources nak received\n");
3042 } else {
3043 if (port->port_num != path_res->port_number)
3044 DRM_ERROR("got incorrect port in response\n");
3045
3046 drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n",__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "enum path resources %d: %d %d\n"
, path_res->port_number, path_res->full_payload_bw_number
, path_res->avail_payload_bw_number)
3047 path_res->port_number,__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "enum path resources %d: %d %d\n"
, path_res->port_number, path_res->full_payload_bw_number
, path_res->avail_payload_bw_number)
3048 path_res->full_payload_bw_number,__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "enum path resources %d: %d %d\n"
, path_res->port_number, path_res->full_payload_bw_number
, path_res->avail_payload_bw_number)
3049 path_res->avail_payload_bw_number)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "enum path resources %d: %d %d\n"
, path_res->port_number, path_res->full_payload_bw_number
, path_res->avail_payload_bw_number)
;
3050
3051 /*
3052 * If something changed, make sure we send a
3053 * hotplug
3054 */
3055 if (port->full_pbn != path_res->full_payload_bw_number ||
3056 port->fec_capable != path_res->fec_capable)
3057 ret = 1;
3058
3059 port->full_pbn = path_res->full_payload_bw_number;
3060 port->fec_capable = path_res->fec_capable;
3061 }
3062 }
3063
3064 kfree(txmsg);
3065 return ret;
3066}
3067
3068static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
3069{
3070 if (!mstb->port_parent)
3071 return NULL;
3072
3073 if (mstb->port_parent->mstb != mstb)
3074 return mstb->port_parent;
3075
3076 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
3077}
3078
3079/*
3080 * Searches upwards in the topology starting from mstb to try to find the
3081 * closest available parent of mstb that's still connected to the rest of the
3082 * topology. This can be used in order to perform operations like releasing
3083 * payloads, where the branch device which owned the payload may no longer be
3084 * around and thus would require that the payload on the last living relative
3085 * be freed instead.
3086 */
3087static struct drm_dp_mst_branch *
3088drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
3089 struct drm_dp_mst_branch *mstb,
3090 int *port_num)
3091{
3092 struct drm_dp_mst_branch *rmstb = NULL;
3093 struct drm_dp_mst_port *found_port;
3094
3095 mutex_lock(&mgr->lock);
3096 if (!mgr->mst_primary)
3097 goto out;
3098
3099 do {
3100 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3101 if (!found_port)
3102 break;
3103
3104 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
3105 rmstb = found_port->parent;
3106 *port_num = found_port->port_num;
3107 } else {
3108 /* Search again, starting from this parent */
3109 mstb = found_port->parent;
3110 }
3111 } while (!rmstb);
3112out:
3113 mutex_unlock(&mgr->lock);
3114 return rmstb;
3115}
3116
3117static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3118 struct drm_dp_mst_port *port,
3119 int id,
3120 int pbn)
3121{
3122 struct drm_dp_sideband_msg_tx *txmsg;
3123 struct drm_dp_mst_branch *mstb;
3124 int ret, port_num;
3125 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
3126 int i;
3127
3128 port_num = port->port_num;
3129 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3130 if (!mstb) {
3131 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3132 port->parent,
3133 &port_num);
3134
3135 if (!mstb)
3136 return -EINVAL;
3137 }
3138
3139 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3140 if (!txmsg) {
3141 ret = -ENOMEM;
3142 goto fail_put;
3143 }
3144
3145 for (i = 0; i < port->num_sdp_streams; i++)
3146 sinks[i] = i;
3147
3148 txmsg->dst = mstb;
3149 build_allocate_payload(txmsg, port_num,
3150 id,
3151 pbn, port->num_sdp_streams, sinks);
3152
3153 drm_dp_queue_down_tx(mgr, txmsg);
3154
3155 /*
3156 * FIXME: there is a small chance that between getting the last
3157 * connected mstb and sending the payload message, the last connected
3158 * mstb could also be removed from the topology. In the future, this
3159 * needs to be fixed by restarting the
3160 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
3161 * timeout if the topology is still connected to the system.
3162 */
3163 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3164 if (ret > 0) {
3165 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3166 ret = -EINVAL;
3167 else
3168 ret = 0;
3169 }
3170 kfree(txmsg);
3171fail_put:
3172 drm_dp_mst_topology_put_mstb(mstb);
3173 return ret;
3174}
3175
3176int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
3177     struct drm_dp_mst_port *port, bool power_up)
3178{
3179 struct drm_dp_sideband_msg_tx *txmsg;
3180 int ret;
3181
3182 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3183 if (!port)
3184 return -EINVAL;
3185
3186 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3187 if (!txmsg) {
3188 drm_dp_mst_topology_put_port(port);
3189 return -ENOMEM;
3190 }
3191
3192 txmsg->dst = port->parent;
3193 build_power_updown_phy(txmsg, port->port_num, power_up);
3194 drm_dp_queue_down_tx(mgr, txmsg);
3195
3196 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
3197 if (ret > 0) {
3198 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3199 ret = -EINVAL;
3200 else
3201 ret = 0;
3202 }
3203 kfree(txmsg);
3204 drm_dp_mst_topology_put_port(port);
3205
3206 return ret;
3207}
3208EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
3209
3210int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
3211 struct drm_dp_mst_port *port,
3212 struct drm_dp_query_stream_enc_status_ack_reply *status)
3213{
3214 struct drm_dp_mst_topology_state *state;
3215 struct drm_dp_mst_atomic_payload *payload;
3216 struct drm_dp_sideband_msg_tx *txmsg;
3217 u8 nonce[7];
3218 int ret;
3219
3220 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3221 if (!txmsg)
3222 return -ENOMEM;
3223
3224 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3225 if (!port) {
3226 ret = -EINVAL;
3227 goto out_get_port;
3228 }
3229
3230 get_random_bytes(nonce, sizeof(nonce));
3231
3232 drm_modeset_lock(&mgr->base.lock, NULL);
3233 state = to_drm_dp_mst_topology_state(mgr->base.state);
3234 payload = drm_atomic_get_mst_payload_state(state, port);
3235
3236 /*
3237 * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
3238 * transaction at the MST Branch device directly connected to the
3239 * Source"
3240 */
3241 txmsg->dst = mgr->mst_primary;
3242
3243 build_query_stream_enc_status(txmsg, payload->vcpi, nonce);
3244
3245 drm_dp_queue_down_tx(mgr, txmsg);
3246
3247 ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
3248 if (ret < 0) {
3249 goto out;
3250 } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3251 drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
3252 ret = -ENXIO;
3253 goto out;
3254 }
3255
3256 ret = 0;
3257 memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
3258
3259out:
3260 drm_modeset_unlock(&mgr->base.lock);
3261 drm_dp_mst_topology_put_port(port);
3262out_get_port:
3263 kfree(txmsg);
3264 return ret;
3265}
3266EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
3267
3268static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3269 struct drm_dp_mst_atomic_payload *payload)
3270{
3271 return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot,
3272 payload->time_slots);
3273}
3274
3275static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3276 struct drm_dp_mst_atomic_payload *payload)
3277{
3278 int ret;
3279 struct drm_dp_mst_port *port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
3280
3281 if (!port)
3282 return -EIO;
3283
3284 ret = drm_dp_payload_send_msg(mgr, port, payload->vcpi, payload->pbn);
3285 drm_dp_mst_topology_put_port(port);
3286 return ret;
3287}
3288
3289static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3290 struct drm_dp_mst_topology_state *mst_state,
3291 struct drm_dp_mst_atomic_payload *payload)
3292{
3293 drm_dbg_kms(mgr->dev, "\n");
3294
3295 /* it's okay for these to fail */
3296 drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0);
3297 drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0);
3298
3299 return 0;
3300}
3301
3302/**
3303 * drm_dp_add_payload_part1() - Execute payload update part 1
3304 * @mgr: Manager to use.
3305 * @mst_state: The MST atomic state
3306 * @payload: The payload to write
3307 *
3308 * Determines the starting time slot for the given payload, and programs the VCPI for this payload
3309 * into hardware. After calling this, the driver should generate ACT and payload packets.
3310 *
3311 * Returns: 0 on success, error code on failure. In the event that this fails,
3312 * @payload.vc_start_slot will also be set to -1.
3313 */
3314int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
3315 struct drm_dp_mst_topology_state *mst_state,
3316 struct drm_dp_mst_atomic_payload *payload)
3317{
3318 struct drm_dp_mst_port *port;
3319 int ret;
3320
3321 port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
3322 if (!port) {
3323 drm_dbg_kms(mgr->dev,
3324     "VCPI %d for port %p not in topology, not creating a payload\n",
3325     payload->vcpi, payload->port);
3326 payload->vc_start_slot = -1;
3327 return 0;
3328 }
3329
3330 if (mgr->payload_count == 0)
3331 mgr->next_start_slot = mst_state->start_slot;
3332
3333 payload->vc_start_slot = mgr->next_start_slot;
3334
3335 ret = drm_dp_create_payload_step1(mgr, payload);
3336 drm_dp_mst_topology_put_port(port);
3337 if (ret < 0) {
3338 drm_warn(mgr->dev, "Failed to create MST payload for port %p: %d\n",
3339     payload->port, ret);
3340 payload->vc_start_slot = -1;
3341 return ret;
3342 }
3343
3344 mgr->payload_count++;
3345 mgr->next_start_slot += payload->time_slots;
3346
3347 return 0;
3348}
3349EXPORT_SYMBOL(drm_dp_add_payload_part1);
3350
3351/**
3352 * drm_dp_remove_payload() - Remove an MST payload
3353 * @mgr: Manager to use.
3354 * @mst_state: The MST atomic state
3355 * @old_payload: The payload with its old state
3356 * @new_payload: The payload to write
3357 *
3358 * Removes a payload from an MST topology if it was successfully assigned a start slot. Also updates
3359 * the starting time slots of all other payloads which would have been shifted towards the start of
3360 * the VC table as a result. After calling this, the driver should generate ACT and payload packets.
3361 */
3362void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
3363 struct drm_dp_mst_topology_state *mst_state,
3364 const struct drm_dp_mst_atomic_payload *old_payload,
3365 struct drm_dp_mst_atomic_payload *new_payload)
3366{
3367 struct drm_dp_mst_atomic_payload *pos;
3368 bool send_remove = false;
3369
3370 /* We failed to make the payload, so nothing to do */
3371 if (new_payload->vc_start_slot == -1)
3372 return;
3373
3374 mutex_lock(&mgr->lock);
3375 send_remove = drm_dp_mst_port_downstream_of_branch(new_payload->port, mgr->mst_primary);
3376 mutex_unlock(&mgr->lock);
3377
3378 if (send_remove)
3379 drm_dp_destroy_payload_step1(mgr, mst_state, new_payload);
3380 else
3381 drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "Payload for VCPI %d not in topology, not sending remove\n"
, new_payload->vcpi)
3382 new_payload->vcpi)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "Payload for VCPI %d not in topology, not sending remove\n"
, new_payload->vcpi)
;
3383
3384 list_for_each_entry(pos, &mst_state->payloads, next) {
3385 if (pos != new_payload && pos->vc_start_slot > new_payload->vc_start_slot)
3386 pos->vc_start_slot -= old_payload->time_slots;
3387 }
3388 new_payload->vc_start_slot = -1;
3389
3390 mgr->payload_count--;
3391 mgr->next_start_slot -= old_payload->time_slots;
3392
3393 if (new_payload->delete)
3394 drm_dp_mst_put_port_malloc(new_payload->port);
3395}
3396EXPORT_SYMBOL(drm_dp_remove_payload);
3397
3398/**
3399 * drm_dp_add_payload_part2() - Execute payload update part 2
3400 * @mgr: Manager to use.
3401 * @state: The global atomic state
3402 * @payload: The payload to update
3403 *
3404 * If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this
3405 * function will send the sideband messages to finish allocating this payload.
3406 *
3407 * Returns: 0 on success, negative error code on failure.
3408 */
3409int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
3410 struct drm_atomic_state *state,
3411 struct drm_dp_mst_atomic_payload *payload)
3412{
3413 int ret = 0;
3414
3415 /* Skip failed payloads */
3416 if (payload->vc_start_slot == -1) {
3417 drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "Part 1 of payload creation for %s failed, skipping part 2\n"
, payload->port->connector->name)
3418 payload->port->connector->name)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "Part 1 of payload creation for %s failed, skipping part 2\n"
, payload->port->connector->name)
;
3419 return -EIO5;
3420 }
3421
3422 ret = drm_dp_create_payload_step2(mgr, payload);
3423 if (ret < 0) {
3424 if (!payload->delete)
3425 drm_err(mgr->dev, "Step 2 of creating MST payload for %p failed: %d\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Step 2 of creating MST payload for %p failed: %d\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , payload
->port, ret)
3426 payload->port, ret)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Step 2 of creating MST payload for %p failed: %d\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , payload
->port, ret)
;
3427 else
3428 drm_dbg_kms(mgr->dev, "Step 2 of removing MST payload for %p failed: %d\n",__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "Step 2 of removing MST payload for %p failed: %d\n"
, payload->port, ret)
3429 payload->port, ret)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "Step 2 of removing MST payload for %p failed: %d\n"
, payload->port, ret)
;
3430 }
3431
3432 return ret;
3433}
3434EXPORT_SYMBOL(drm_dp_add_payload_part2);
3435
3436static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3437 struct drm_dp_mst_port *port,
3438 int offset, int size, u8 *bytes)
3439{
3440 int ret = 0;
3441 struct drm_dp_sideband_msg_tx *txmsg;
3442 struct drm_dp_mst_branch *mstb;
3443
3444 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3445 if (!mstb)
3446 return -EINVAL;
3447
3448 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3449 if (!txmsg) {
3450 ret = -ENOMEM;
3451 goto fail_put;
3452 }
3453
3454 build_dpcd_read(txmsg, port->port_num, offset, size);
3455 txmsg->dst = port->parent;
3456
3457 drm_dp_queue_down_tx(mgr, txmsg);
3458
3459 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3460 if (ret < 0)
3461 goto fail_free;
3462
3463 if (txmsg->reply.reply_type == 1) {
3464 drm_dbg_kms(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n"
, mstb, port->port_num, offset, size)
3465 mstb, port->port_num, offset, size)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n"
, mstb, port->port_num, offset, size)
;
3466 ret = -EIO5;
3467 goto fail_free;
3468 }
3469
3470 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3471 ret = -EPROTO;
3472 goto fail_free;
3473 }
3474
3475 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
3476     size);
3477 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3478
3479fail_free:
3480 kfree(txmsg);
3481fail_put:
3482 drm_dp_mst_topology_put_mstb(mstb);
3483
3484 return ret;
3485}
3486
3487static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3488 struct drm_dp_mst_port *port,
3489 int offset, int size, u8 *bytes)
3490{
3491 int ret;
3492 struct drm_dp_sideband_msg_tx *txmsg;
3493 struct drm_dp_mst_branch *mstb;
3494
3495 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3496 if (!mstb)
3497 return -EINVAL;
3498
3499 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3500 if (!txmsg) {
3501 ret = -ENOMEM;
3502 goto fail_put;
3503 }
3504
3505 build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3506 txmsg->dst = mstb;
3507
3508 drm_dp_queue_down_tx(mgr, txmsg);
3509
3510 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3511 if (ret > 0) {
3512 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3513 ret = -EIO;
3514 else
3515 ret = size;
3516 }
3517
3518 kfree(txmsg);
3519fail_put:
3520 drm_dp_mst_topology_put_mstb(mstb);
3521 return ret;
3522}
3523
3524static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
3525{
3526 struct drm_dp_sideband_msg_reply_body reply;
3527
3528 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
3529 reply.req_type = req_type;
3530 drm_dp_encode_sideband_reply(&reply, msg);
3531 return 0;
3532}
3533
3534static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3535 struct drm_dp_mst_branch *mstb,
3536     int req_type, bool broadcast)
3537{
3538 struct drm_dp_sideband_msg_tx *txmsg;
3539
3540 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3541 if (!txmsg)
3542 return -ENOMEM;
3543
3544 txmsg->dst = mstb;
3545 drm_dp_encode_up_ack_reply(txmsg, req_type);
3546
3547 mutex_lock(&mgr->qlock);
3548 /* construct a chunk from the first msg in the tx_msg queue */
3549 process_single_tx_qlock(mgr, txmsg, true);
3550 mutex_unlock(&mgr->qlock);
3551
3552 kfree(txmsg);
3553 return 0;
3554}
3555
3556/**
3557 * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
3558 * @mgr: The &drm_dp_mst_topology_mgr to use
3559 * @link_rate: link rate in 10kbits/s units
3560 * @link_lane_count: lane count
3561 *
3562 * Calculate the total bandwidth of a MultiStream Transport link. The returned
3563 * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
3564 * convert the number of PBNs required for a given stream to the number of
3565 * timeslots this stream requires in each MTP.
3566 */
3567int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
3568 int link_rate, int link_lane_count)
3569{
3570 if (link_rate == 0 || link_lane_count == 0)
3571 drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "invalid link rate/lane count: (%d / %d)\n"
, link_rate, link_lane_count)
3572 link_rate, link_lane_count)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "invalid link rate/lane count: (%d / %d)\n"
, link_rate, link_lane_count)
;
3573
3574 /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
3575 return link_rate * link_lane_count / 54000;
3576}
3577EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
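A worked example of the conversion: an HBR2 x4 link runs at 5.4 Gb/s per lane, i.e. link_rate == 540000 in the 10 kb/s units documented above:

    int pbn_per_slot = drm_dp_get_vc_payload_bw(mgr, 540000, 4);
    /* 540000 * 4 / 54000 == 40, so a stream needing 280 PBN would
     * occupy DIV_ROUND_UP(280, 40) == 7 time slots per MTP. */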
3578
3579/**
3580 * drm_dp_read_mst_cap() - check whether or not a sink supports MST
3581 * @aux: The DP AUX channel to use
3582 * @dpcd: A cached copy of the DPCD capabilities for this sink
3583 *
3584 * Returns: %true if the sink supports MST, %false otherwise
3585 */
3586bool drm_dp_read_mst_cap(struct drm_dp_aux *aux,
3587     const u8 dpcd[DP_RECEIVER_CAP_SIZE])
3588{
3589 u8 mstm_cap;
3590
3591 if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
3592 return false;
3593
3594 if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
3595 return false;
3596
3597 return mstm_cap & DP_MST_CAP;
3598}
3599EXPORT_SYMBOL(drm_dp_read_mst_cap);
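Typical driver-side usage ties this check to drm_dp_mst_topology_mgr_set_mst() below; a sketch (the local variable is illustrative, not taken from a real driver):

    /* After reading the sink's DPCD receiver caps into dpcd[]: */
    bool is_mst = drm_dp_read_mst_cap(aux, dpcd);

    if (is_mst != mgr->mst_state)
        drm_dp_mst_topology_mgr_set_mst(mgr, is_mst);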
3600
3601/**
3602 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3603 * @mgr: manager to set state for
3604 * @mst_state: true to enable MST on this connector - false to disable.
3605 *
3606 * This is called by the driver when it detects an MST capable device plugged
3607 * into a DP MST capable port, or when a DP MST capable device is unplugged.
3608 */
3609int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3610{
3611 int ret = 0;
3612 struct drm_dp_mst_branch *mstb = NULL;
3613
3614 mutex_lock(&mgr->lock);
3615 if (mst_state == mgr->mst_state)
3616 goto out_unlock;
3617
3618 mgr->mst_state = mst_state;
3619 /* set the device into MST mode */
3620 if (mst_state) {
3621 WARN_ON(mgr->mst_primary);
3622
3623 /* get dpcd info */
3624 ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
3625 if (ret < 0) {
3626 drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "%s: failed to read DPCD, ret %d\n"
, mgr->aux->name, ret)
3627 mgr->aux->name, ret)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "%s: failed to read DPCD, ret %d\n"
, mgr->aux->name, ret)
;
3628 goto out_unlock;
3629 }
3630
3631 /* add initial branch device at LCT 1 */
3632 mstb = drm_dp_add_mst_branch_device(1, NULL);
3633 if (mstb == NULL) {
3634 ret = -ENOMEM;
3635 goto out_unlock;
3636 }
3637 mstb->mgr = mgr;
3638
3639 /* give this the main reference */
3640 mgr->mst_primary = mstb;
3641 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3642
3643 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3644     DP_MST_EN |
3645     DP_UP_REQ_EN |
3646     DP_UPSTREAM_IS_SRC);
3647 if (ret < 0)
3648 goto out_unlock;
3649
3650 /* Write reset payload */
3651 drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f);
3652
3653 queue_work(system_long_wq, &mgr->work);
3654
3655 ret = 0;
3656 } else {
3657 /* disable MST on the device */
3658 mstb = mgr->mst_primary;
3659 mgr->mst_primary = NULL;
3660 /* this can fail if the device is gone */
3661 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3662 ret = 0;
3663 mgr->payload_id_table_cleared = false;
3664
3665 memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
3666 memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
3667 }
3668
3669out_unlock:
3670 mutex_unlock(&mgr->lock);
3671 if (mstb)
3672 drm_dp_mst_topology_put_mstb(mstb);
3673 return ret;
3674
3675}
3676EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
3677
3678static void
3679drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
3680{
3681 struct drm_dp_mst_port *port;
3682
3683 /* The link address will need to be re-sent on resume */
3684 mstb->link_address_sent = false;
3685
3686 list_for_each_entry(port, &mstb->ports, next)
3687 if (port->mstb)
3688 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
3689}
3690
3691/**
3692 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3693 * @mgr: manager to suspend
3694 *
3695 * This function tells the MST device that we can't handle UP messages
3696 * anymore. This should stop it from sending any since we are suspended.
3697 */
3698void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3699{
3700 mutex_lock(&mgr->lock);
3701 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3702     DP_MST_EN | DP_UPSTREAM_IS_SRC);
3703 mutex_unlock(&mgr->lock);
3704 flush_work(&mgr->up_req_work);
3705 flush_work(&mgr->work);
3706 flush_work(&mgr->delayed_destroy_work);
3707
3708 mutex_lock(&mgr->lock);
3709 if (mgr->mst_state && mgr->mst_primary)
3710 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3711 mutex_unlock(&mgr->lock);
3712}
3713EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3714
3715/**
3716 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3717 * @mgr: manager to resume
3718 * @sync: whether or not to perform topology reprobing synchronously
3719 *
3720 * This will fetch DPCD and see if the device is still there; if it is,
3721 * it will rewrite the MSTM control bits, and return.
3722 *
3723 * If the device fails this returns -1, and the driver should do
3724 * a full MST reprobe, in case we were undocked.
3725 *
3726 * During system resume (where it is assumed that the driver will be calling
3727 * drm_atomic_helper_resume()) this function should be called beforehand with
3728 * @sync set to true. In contexts like runtime resume where the driver is not
3729 * expected to be calling drm_atomic_helper_resume(), this function should be
3730 * called with @sync set to false in order to avoid deadlocking.
3731 *
3732 * Returns: -1 if the MST topology was removed while we were suspended, 0
3733 * otherwise.
3734 */
3735int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3736     bool sync)
3737{
3738 int ret;
3739 u8 guid[16];
3740
3741 mutex_lock(&mgr->lock);
3742 if (!mgr->mst_primary)
3743 goto out_fail;
3744
3745 if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
3746 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n")__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "dpcd read failed - undocked during suspend?\n"
)
;
3747 goto out_fail;
3748 }
3749
3750 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3751     DP_MST_EN |
3752     DP_UP_REQ_EN |
3753     DP_UPSTREAM_IS_SRC);
3754 if (ret < 0) {
3755 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n")__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "mst write failed - undocked during suspend?\n"
)
;
3756 goto out_fail;
3757 }
3758
3759 /* Some hubs forget their guids after they resume */
3760 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3761 if (ret != 16) {
3762 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n")__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "dpcd read failed - undocked during suspend?\n"
)
;
3763 goto out_fail;
3764 }
3765
3766 ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3767 if (ret) {
3768 drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n")__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "check mstb failed - undocked during suspend?\n"
)
;
3769 goto out_fail;
3770 }
3771
3772 /*
3773 * For the final step of resuming the topology, we need to bring the
3774 * state of our in-memory topology back into sync with reality. So,
3775 * restart the probing process as if we're probing a new hub
3776 */
3777 queue_work(system_long_wq, &mgr->work);
3778 mutex_unlock(&mgr->lock);
3779
3780 if (sync) {
3781 drm_dbg_kms(mgr->dev,
3782     "Waiting for link probe work to finish re-syncing topology...\n");
3783 flush_work(&mgr->work);
3784 }
3785
3786 return 0;
3787
3788out_fail:
3789 mutex_unlock(&mgr->lock);
3790 return -1;
3791}
3792EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
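A sketch of the suspend/resume ordering the documentation above describes, with @sync chosen per context (the mode-config helpers are from the DRM core; the surrounding functions and the mgr variable are illustrative):

    static int example_driver_suspend(struct drm_device *dev)
    {
        drm_dp_mst_topology_mgr_suspend(mgr);  /* stop UP requests first */
        return drm_mode_config_helper_suspend(dev);
    }

    static int example_driver_resume(struct drm_device *dev)
    {
        /* sync == true: drm_atomic_helper_resume() follows right after */
        if (drm_dp_mst_topology_mgr_resume(mgr, true) < 0) {
            /* topology vanished while suspended: do a full MST reprobe */
        }
        return drm_mode_config_helper_resume(dev);
    }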
3793
3794static bool
3795drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
3796 struct drm_dp_mst_branch **mstb)
3797{
3798 int len;
3799 u8 replyblock[32];
3800 int replylen, curreply;
3801 int ret;
3802 u8 hdrlen;
3803 struct drm_dp_sideband_msg_hdr hdr;
3804 struct drm_dp_sideband_msg_rx *msg =
3805     up ? &mgr->up_req_recv : &mgr->down_rep_recv;
    [6.1] 'up' is false
    [7] '?' condition is false
3806 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
3807     DP_SIDEBAND_MSG_DOWN_REP_BASE;
    [7.1] 'up' is false
    [8] '?' condition is false
3808
3809 if (!up)
    [8.1] 'up' is false
    [9] Taking true branch
3810 *mstb = NULL;
3811
3812 len = min(mgr->max_dpcd_transaction_bytes, 16);
    [10] Assuming field 'max_dpcd_transaction_bytes' is >= 16
    [11] '?' condition is false
3813 ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
3814 if (ret != len) {
    [12] Assuming 'ret' is equal to 'len'
    [13] Taking false branch
3815 drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "failed to read DPCD down rep %d %d\n"
, len, ret)
;
3816 return false0;
3817 }
3818
3819 ret = drm_dp_decode_sideband_msg_hdr(mgr, &hdr, replyblock, len, &hdrlen);
    [14] Calling 'drm_dp_decode_sideband_msg_hdr'
    [23] Returning from 'drm_dp_decode_sideband_msg_hdr'
3820 if (ret == false) {
    [23.1] 'ret' is not equal to false
    [24] Taking false branch
3821 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
3822     1, replyblock, len, false);
3823 drm_dbg_kms(mgr->dev, "ERROR: failed header\n");
3824 return false;
3825 }
3826
3827 if (!up) {
    [24.1] 'up' is false
    [25] Taking true branch
3828 /* Caller is responsible for giving back this reference */
3829 *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
    [26] Calling 'drm_dp_get_mst_branch_device'
3830 if (!*mstb) {
3831 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "Got MST reply from unknown device %d\n"
, hdr.lct)
;
3832 return false0;
3833 }
3834 }
3835
3836 if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
3837 drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0])__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "sideband msg set header failed %d\n"
, replyblock[0])
;
3838 return false0;
3839 }
3840
3841 replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
3842 ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
3843 if (!ret) {
3844 drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0])__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "sideband msg build failed %d\n"
, replyblock[0])
;
3845 return false0;
3846 }
3847
3848 replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
3849 curreply = len;
3850 while (replylen > 0) {
3851 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3852 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3853 replyblock, len);
3854 if (ret != len) {
3855 drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
3856 len, ret);
3857 return false;
3858 }
3859
3860 ret = drm_dp_sideband_append_payload(msg, replyblock, len);
3861 if (!ret) {
3862 drm_dbg_kms(mgr->dev, "failed to build sideband msg\n");
3863 return false;
3864 }
3865
3866 curreply += len;
3867 replylen -= len;
3868 }
3869 return true;
3870}
3871
3872static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3873{
3874 struct drm_dp_sideband_msg_tx *txmsg;
3875 struct drm_dp_mst_branch *mstb = NULL;
3876 struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
3877
3878 if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
[6] Calling 'drm_dp_get_one_sb_msg'
3879 goto out_clear_reply;
3880
3881 /* Multi-packet message transmission, don't clear the reply */
3882 if (!msg->have_eomt)
3883 goto out;
3884
3885 /* find the message */
3886 mutex_lock(&mgr->qlock);
3887 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
3888 struct drm_dp_sideband_msg_tx, next);
3889 mutex_unlock(&mgr->qlock);
3890
3891 /* Were we actually expecting a response, and from this mstb? */
3892 if (!txmsg || txmsg->dst != mstb) {
3893 struct drm_dp_sideband_msg_hdr *hdr;
3894
3895 hdr = &msg->initial_hdr;
3896 drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n",
3897 mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]);
3898 goto out_clear_reply;
3899 }
3900
3901 drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply);
3902
3903 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3904 drm_dbg_kms(mgr->dev,
3905 "Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
3906 txmsg->reply.req_type,
3907 drm_dp_mst_req_type_str(txmsg->reply.req_type),
3908 txmsg->reply.u.nak.reason,
3909 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
3910 txmsg->reply.u.nak.nak_data);
3911 }
3912
3913 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
3914 drm_dp_mst_topology_put_mstb(mstb);
3915
3916 mutex_lock(&mgr->qlock);
3917 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
3918 list_del(&txmsg->next);
3919 mutex_unlock(&mgr->qlock);
3920
3921 wake_up_all(&mgr->tx_waitq);
3922
3923 return 0;
3924
3925out_clear_reply:
3926 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
3927out:
3928 if (mstb)
3929 drm_dp_mst_topology_put_mstb(mstb);
3930
3931 return 0;
3932}
3933
3934static inline bool
3935drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
3936 struct drm_dp_pending_up_req *up_req)
3937{
3938 struct drm_dp_mst_branch *mstb = NULL;
3939 struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
3940 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
3941 bool hotplug = false, dowork = false;
3942
3943 if (hdr->broadcast) {
3944 const u8 *guid = NULL;
3945
3946 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
3947 guid = msg->u.conn_stat.guid;
3948 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
3949 guid = msg->u.resource_stat.guid;
3950
3951 if (guid)
3952 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
3953 } else {
3954 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3955 }
3956
3957 if (!mstb) {
3958 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr->lct);
3959 return false;
3960 }
3961
3962 /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
3963 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
3964 dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
3965 hotplug = true;
3966 }
3967
3968 drm_dp_mst_topology_put_mstb(mstb);
3969
3970 if (dowork)
3971 queue_work(system_long_wq, &mgr->work);
3972 return hotplug;
3973}
3974
3975static void drm_dp_mst_up_req_work(struct work_struct *work)
3976{
3977 struct drm_dp_mst_topology_mgr *mgr =
3978 container_of(work, struct drm_dp_mst_topology_mgr,
3979 up_req_work);
3980 struct drm_dp_pending_up_req *up_req;
3981 bool send_hotplug = false;
3982
3983 mutex_lock(&mgr->probe_lock);
3984 while (true) {
3985 mutex_lock(&mgr->up_req_lock);
3986 up_req = list_first_entry_or_null(&mgr->up_req_list,
3987 struct drm_dp_pending_up_req,
3988 next);
3989 if (up_req)
3990 list_del(&up_req->next);
3991 mutex_unlock(&mgr->up_req_lock);
3992
3993 if (!up_req)
3994 break;
3995
3996 send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
3997 kfree(up_req);
3998 }
3999 mutex_unlock(&mgr->probe_lock);
4000
4001 if (send_hotplug)
4002 drm_kms_helper_hotplug_event(mgr->dev);
4003}
4004
4005static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
4006{
4007 struct drm_dp_pending_up_req *up_req;
4008
4009 if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
4010 goto out;
4011
4012 if (!mgr->up_req_recv.have_eomt)
4013 return 0;
4014
4015 up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
4016 if (!up_req)
4017 return -ENOMEM;
4018
4019 INIT_LIST_HEAD(&up_req->next);
4020
4021 drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg);
4022
4023 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
4024 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
4025 drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
4026 up_req->msg.req_type);
4027 kfree(up_req);
4028 goto out;
4029 }
4030
4031 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
4032 false);
4033
4034 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
4035 const struct drm_dp_connection_status_notify *conn_stat =
4036 &up_req->msg.u.conn_stat;
4037
4038 drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
4039 conn_stat->port_number,
4040 conn_stat->legacy_device_plug_status,
4041 conn_stat->displayport_device_plug_status,
4042 conn_stat->message_capability_status,
4043 conn_stat->input_port,
4044 conn_stat->peer_device_type);
4045 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
4046 const struct drm_dp_resource_status_notify *res_stat =
4047 &up_req->msg.u.resource_stat;
4048
4049 drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n",
4050 res_stat->port_number,
4051 res_stat->available_pbn);
4052 }
4053
4054 up_req->hdr = mgr->up_req_recv.initial_hdr;
4055 mutex_lock(&mgr->up_req_lock);
4056 list_add_tail(&up_req->next, &mgr->up_req_list);
4057 mutex_unlock(&mgr->up_req_lock);
4058 queue_work(system_long_wq, &mgr->up_req_work);
4059
4060out:
4061 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
4062 return 0;
4063}
4064
4065/**
4066 * drm_dp_mst_hpd_irq_handle_event() - handle MST events signalled by an HPD IRQ
4067 * @mgr: manager to notify irq for.
4068 * @esi: 4 bytes from SINK_COUNT_ESI
4069 * @ack: 4 bytes used to ack events starting from SINK_COUNT_ESI
4070 * @handled: whether the hpd interrupt was consumed or not
4071 *
4072 * This should be called from the driver when it detects an HPD IRQ,
4073 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
4074 * topology manager will process the sideband messages received
4075 * as indicated in the DEVICE_SERVICE_IRQ_VECTOR_ESI0 and set the
4076 * corresponding flags that the driver has to use to ack the DP receiver later.
4077 *
4078 * Note that the driver shall also call
4079 * drm_dp_mst_hpd_irq_send_new_request() if 'handled' is set
4080 * after calling this function, to try to kick off a new request in
4081 * the queue if the previous message transaction is completed.
4082 *
4083 * See also:
4084 * drm_dp_mst_hpd_irq_send_new_request()
4085 */
4086int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u8 *esi,
4087 u8 *ack, bool *handled)
4088{
4089 int ret = 0;
4090 int sc;
4091 *handled = false;
4092 sc = DP_GET_SINK_COUNT(esi[0]);
4093
4094 if (sc != mgr->sink_count) {
[1] Assuming 'sc' is equal to field 'sink_count'    [2] Taking false branch
4095 mgr->sink_count = sc;
4096 *handled = true;
4097 }
4098
4099 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
[3] Assuming the condition is true    [4] Taking true branch
4100 ret = drm_dp_mst_handle_down_rep(mgr);
[5] Calling 'drm_dp_mst_handle_down_rep'
4101 *handled = true;
4102 ack[1] |= DP_DOWN_REP_MSG_RDY;
4103 }
4104
4105 if (esi[1] & DP_UP_REQ_MSG_RDY) {
4106 ret |= drm_dp_mst_handle_up_req(mgr);
4107 *handled = true;
4108 ack[1] |= DP_UP_REQ_MSG_RDY;
4109 }
4110
4111 return ret;
4112}
4113EXPORT_SYMBOL(drm_dp_mst_hpd_irq_handle_event);
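
For orientation, a minimal sketch of the IRQ path the kernel-doc above describes. The example_ name is illustrative, not from this file; the DPCD macros and aux helpers are the standard drm ones, and real drivers typically loop until the ESI bytes read back clear.

    /* Hypothetical driver-side sketch of the documented sequence. */
    static void example_service_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
    {
    	u8 esi[4] = {}, ack[4] = {};
    	bool handled = false;

    	/* 4 bytes starting at SINK_COUNT_ESI, as the kernel-doc requires */
    	if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 4) != 4)
    		return;

    	drm_dp_mst_hpd_irq_handle_event(mgr, esi, ack, &handled);
    	if (!handled)
    		return;

    	/* write the ack flags back so the sink can deassert the IRQ */
    	drm_dp_dpcd_writeb(mgr->aux, DP_SINK_COUNT_ESI + 1, ack[1]);
    }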
4114
4115/**
4116 * drm_dp_mst_hpd_irq_send_new_request() - kick off a new request after an MST hotplug IRQ
4117 * @mgr: manager to notify irq for.
4118 *
4119 * This should be called from the driver once the MST IRQ event has been handled
4120 * and acked. Note that a new down request should only be sent when the
4121 * previous message transaction is completed. The source is not supposed to
4122 * generate interleaved message transactions.
4123 */
4124void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr)
4125{
4126 struct drm_dp_sideband_msg_tx *txmsg;
4127 bool kick = true;
4128
4129 mutex_lock(&mgr->qlock);
4130 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
4131 struct drm_dp_sideband_msg_tx, next);
4132 /* If the last transaction is not completed yet */
4133 if (!txmsg ||
4134 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
4135 txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
4136 kick = false;
4137 mutex_unlock(&mgr->qlock);
4138
4139 if (kick)
4140 drm_dp_mst_kick_tx(mgr);
4141}
4142EXPORT_SYMBOL(drm_dp_mst_hpd_irq_send_new_request);
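
Continuing the hypothetical handler sketched after drm_dp_mst_hpd_irq_handle_event(): once the events have been acked, the driver kicks the queue.

    	/* ... at the end of example_service_mst_hpd_irq(), after the ack: */
    	drm_dp_mst_hpd_irq_send_new_request(mgr);
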
4143/**
4144 * drm_dp_mst_detect_port() - get connection status for an MST port
4145 * @connector: DRM connector for this port
4146 * @ctx: The acquisition context to use for grabbing locks
4147 * @mgr: manager for this port
4148 * @port: pointer to a port
4149 *
4150 * This returns the current connection state for a port.
4151 */
4152int
4153drm_dp_mst_detect_port(struct drm_connector *connector,
4154 struct drm_modeset_acquire_ctx *ctx,
4155 struct drm_dp_mst_topology_mgr *mgr,
4156 struct drm_dp_mst_port *port)
4157{
4158 int ret;
4159
4160 /* we need to search for the port in the mgr in case it's gone */
4161 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4162 if (!port)
4163 return connector_status_disconnected;
4164
4165 ret = drm_modeset_lock(&mgr->base.lock, ctx);
4166 if (ret)
4167 goto out;
4168
4169 ret = connector_status_disconnected;
4170
4171 if (!port->ddps)
4172 goto out;
4173
4174 switch (port->pdt) {
4175 case DP_PEER_DEVICE_NONE:
4176 break;
4177 case DP_PEER_DEVICE_MST_BRANCHING:
4178 if (!port->mcs)
4179 ret = connector_status_connected;
4180 break;
4181
4182 case DP_PEER_DEVICE_SST_SINK:
4183 ret = connector_status_connected;
4184 /* for logical ports - cache the EDID */
4185 if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid)
4186 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
4187 break;
4188 case DP_PEER_DEVICE_DP_LEGACY_CONV:
4189 if (port->ldps)
4190 ret = connector_status_connected;
4191 break;
4192 }
4193out:
4194 drm_dp_mst_topology_put_port(port);
4195 return ret;
4196}
4197EXPORT_SYMBOL(drm_dp_mst_detect_port);
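
The usual caller is a &drm_connector_helper_funcs.detect_ctx hook. A minimal sketch, assuming a driver-private wrapper around the connector (the example_ type and field names are illustrative only):

    struct example_mst_connector {
    	struct drm_connector base;
    	struct drm_dp_mst_topology_mgr *mgr;
    	struct drm_dp_mst_port *port;
    };

    static int example_detect_ctx(struct drm_connector *connector,
    			      struct drm_modeset_acquire_ctx *ctx, bool force)
    {
    	struct example_mst_connector *c =
    		container_of(connector, struct example_mst_connector, base);

    	return drm_dp_mst_detect_port(connector, ctx, c->mgr, c->port);
    }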
4198
4199/**
4200 * drm_dp_mst_get_edid() - get EDID for an MST port
4201 * @connector: toplevel connector to get EDID for
4202 * @mgr: manager for this port
4203 * @port: unverified pointer to a port.
4204 *
4205 * This returns an EDID for the port connected to a connector.
4206 * It validates that the pointer still exists so the caller doesn't require a
4207 * reference.
4208 */
4209struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4210{
4211 struct edid *edid = NULL;
4212
4213 /* we need to search for the port in the mgr in case it's gone */
4214 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4215 if (!port)
4216 return NULL;
4217
4218 if (port->cached_edid)
4219 edid = drm_edid_duplicate(port->cached_edid);
4220 else {
4221 edid = drm_get_edid(connector, &port->aux.ddc);
4222 }
4223 port->has_audio = drm_detect_monitor_audio(edid);
4224 drm_dp_mst_topology_put_port(port);
4225 return edid;
4226}
4227EXPORT_SYMBOL(drm_dp_mst_get_edid);
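
A matching &drm_connector_helper_funcs.get_modes sketch, reusing the hypothetical example_mst_connector wrapper from the detect example above:

    static int example_get_modes(struct drm_connector *connector)
    {
    	struct example_mst_connector *c =
    		container_of(connector, struct example_mst_connector, base);
    	struct edid *edid;
    	int count;

    	edid = drm_dp_mst_get_edid(connector, c->mgr, c->port);
    	if (!edid)
    		return 0;

    	drm_connector_update_edid_property(connector, edid);
    	count = drm_add_edid_modes(connector, edid);
    	kfree(edid);
    	return count;
    }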
4228
4229/**
4230 * drm_dp_atomic_find_time_slots() - Find and add time slots to the state
4231 * @state: global atomic state
4232 * @mgr: MST topology manager for the port
4233 * @port: port to find time slots for
4234 * @pbn: bandwidth required for the mode in PBN
4235 *
4236 * Allocates time slots to @port, replacing any previous time slot allocations it may
4237 * have had. Any atomic drivers which support MST must call this function in
4238 * their &drm_encoder_helper_funcs.atomic_check() callback unconditionally to
4239 * change the current time slot allocation for the new state, and ensure the MST
4240 * atomic state is added whenever the state of payloads in the topology changes.
4241 *
4242 * Allocations set by this function are not checked against the bandwidth
4243 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
4244 *
4245 * Additionally, it is OK to call this function multiple times on the same
4246 * @port as needed. It is not OK however, to call this function and
4247 * drm_dp_atomic_release_time_slots() in the same atomic check phase.
4248 *
4249 * See also:
4250 * drm_dp_atomic_release_time_slots()
4251 * drm_dp_mst_atomic_check()
4252 *
4253 * Returns:
4254 * Total slots in the atomic state assigned for this port, or a negative error
4255 * code if the port no longer exists
4256 */
4257int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
4258 struct drm_dp_mst_topology_mgr *mgr,
4259 struct drm_dp_mst_port *port, int pbn)
4260{
4261 struct drm_dp_mst_topology_state *topology_state;
4262 struct drm_dp_mst_atomic_payload *payload = NULL;
4263 struct drm_connector_state *conn_state;
4264 int prev_slots = 0, prev_bw = 0, req_slots;
4265
4266 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4267 if (IS_ERR(topology_state))
4268 return PTR_ERR(topology_state);
4269
4270 conn_state = drm_atomic_get_new_connector_state(state, port->connector);
4271 topology_state->pending_crtc_mask |= drm_crtc_mask(conn_state->crtc);
4272
4273 /* Find the current allocation for this port, if any */
4274 payload = drm_atomic_get_mst_payload_state(topology_state, port);
4275 if (payload) {
4276 prev_slots = payload->time_slots;
4277 prev_bw = payload->pbn;
4278
4279 /*
4280 * This should never happen, unless the driver tries
4281 * releasing and allocating the same timeslot allocation,
4282 * which is an error
4283 */
4284 if (drm_WARN_ON(mgr->dev, payload->delete)) {
4285 drm_err(mgr->dev,
4286 "cannot allocate and release time slots on [MST PORT:%p] in the same state\n",
4287 port);
4288 return -EINVAL;
4289 }
4290 }
4291
4292 req_slots = DIV_ROUND_UP(pbn, topology_state->pbn_div);
4293
4294 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n",
4295 port->connector->base.id, port->connector->name,
4296 port, prev_slots, req_slots);
4297 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
4298 port->connector->base.id, port->connector->name,
4299 port, prev_bw, pbn);
4300
4301 /* Add the new allocation to the state, note the VCPI isn't assigned until the end */
4302 if (!payload) {
4303 payload = kzalloc(sizeof(*payload), GFP_KERNEL);
4304 if (!payload)
4305 return -ENOMEM;
4306
4307 drm_dp_mst_get_port_malloc(port);
4308 payload->port = port;
4309 payload->vc_start_slot = -1;
4310 list_add(&payload->next, &topology_state->payloads);
4311 }
4312 payload->time_slots = req_slots;
4313 payload->pbn = pbn;
4314
4315 return req_slots;
4316}
4317EXPORT_SYMBOL(drm_dp_atomic_find_time_slots);
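
A sketch of the &drm_encoder_helper_funcs.atomic_check usage the kernel-doc above describes; the fixed 24 bpp and the example_ wrapper are illustrative assumptions, not this file's code.

    static int example_encoder_atomic_check(struct drm_encoder *encoder,
    					struct drm_crtc_state *crtc_state,
    					struct drm_connector_state *conn_state)
    {
    	struct example_mst_connector *c =
    		container_of(conn_state->connector,
    			     struct example_mst_connector, base);
    	int pbn, slots;

    	/* 24 bpp, no DSC - purely for illustration */
    	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, 24, false);
    	slots = drm_dp_atomic_find_time_slots(crtc_state->state, c->mgr,
    					      c->port, pbn);
    	return slots < 0 ? slots : 0;
    }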
4318
4319/**
4320 * drm_dp_atomic_release_time_slots() - Release allocated time slots
4321 * @state: global atomic state
4322 * @mgr: MST topology manager for the port
4323 * @port: The port to release the time slots from
4324 *
4325 * Releases any time slots that have been allocated to a port in the atomic
4326 * state. Any atomic drivers which support MST must call this function
4327 * unconditionally in their &drm_connector_helper_funcs.atomic_check() callback.
4328 * This helper will check whether time slots would be released by the new state and
4329 * respond accordingly, along with ensuring the MST state is always added to the
4330 * atomic state whenever a new state would modify the state of payloads on the
4331 * topology.
4332 *
4333 * It is OK to call this even if @port has been removed from the system.
4334 * Additionally, it is OK to call this function multiple times on the same
4335 * @port as needed. It is not OK however, to call this function and
4336 * drm_dp_atomic_find_time_slots() on the same @port in a single atomic check
4337 * phase.
4338 *
4339 * See also:
4340 * drm_dp_atomic_find_time_slots()
4341 * drm_dp_mst_atomic_check()
4342 *
4343 * Returns:
4344 * 0 on success, negative error code otherwise
4345 */
4346int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
4347 struct drm_dp_mst_topology_mgr *mgr,
4348 struct drm_dp_mst_port *port)
4349{
4350 struct drm_dp_mst_topology_state *topology_state;
4351 struct drm_dp_mst_atomic_payload *payload;
4352 struct drm_connector_state *old_conn_state, *new_conn_state;
4353 bool update_payload = true;
4354
4355 old_conn_state = drm_atomic_get_old_connector_state(state, port->connector);
4356 if (!old_conn_state->crtc)
4357 return 0;
4358
4359 /* If the CRTC isn't disabled by this state, don't release its payload */
4360 new_conn_state = drm_atomic_get_new_connector_state(state, port->connector);
4361 if (new_conn_state->crtc) {
4362 struct drm_crtc_state *crtc_state =
4363 drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
4364
4365 /* No modeset means no payload changes, so it's safe to not pull in the MST state */
4366 if (!crtc_state || !drm_atomic_crtc_needs_modeset(crtc_state))
4367 return 0;
4368
4369 if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
4370 update_payload = false;
4371 }
4372
4373 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4374 if (IS_ERR(topology_state))
4375 return PTR_ERR(topology_state);
4376
4377 topology_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
4378 if (!update_payload)
4379 return 0;
4380
4381 payload = drm_atomic_get_mst_payload_state(topology_state, port);
4382 if (WARN_ON(!payload)) {
4383 drm_err(mgr->dev, "No payload for [MST PORT:%p] found in mst state %p\n",
4384 port, &topology_state->base);
4385 return -EINVAL;
4386 }
4387
4388 if (new_conn_state->crtc)
4389 return 0;
4390
4391 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
4392 if (!payload->delete) {
4393 payload->pbn = 0;
4394 payload->delete = true;
4395 topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
4396 }
4397
4398 return 0;
4399}
4400EXPORT_SYMBOL(drm_dp_atomic_release_time_slots);
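
And the release side, called unconditionally from the connector's atomic_check as required above (a sketch, same hypothetical wrapper as before):

    static int example_connector_atomic_check(struct drm_connector *connector,
    					  struct drm_atomic_state *state)
    {
    	struct example_mst_connector *c =
    		container_of(connector, struct example_mst_connector, base);

    	return drm_dp_atomic_release_time_slots(state, c->mgr, c->port);
    }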
4401
4402/**
4403 * drm_dp_mst_atomic_setup_commit() - setup_commit hook for MST helpers
4404 * @state: global atomic state
4405 *
4406 * This function saves all of the &drm_crtc_commit structs in an atomic state that touch any CRTCs
4407 * currently assigned to an MST topology. Drivers must call this hook from their
4408 * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
4409 *
4410 * Returns:
4411 * 0 if all CRTC commits were retrieved successfully, negative error code otherwise
4412 */
4413int drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state)
4414{
4415 struct drm_dp_mst_topology_mgr *mgr;
4416 struct drm_dp_mst_topology_state *mst_state;
4417 struct drm_crtc *crtc;
4418 struct drm_crtc_state *crtc_state;
4419 int i, j, commit_idx, num_commit_deps;
4420
4421 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
4422 if (!mst_state->pending_crtc_mask)
4423 continue;
4424
4425 num_commit_deps = hweight32(mst_state->pending_crtc_mask);
4426 mst_state->commit_deps = kmalloc_array(num_commit_deps,
4427 sizeof(*mst_state->commit_deps), GFP_KERNEL);
4428 if (!mst_state->commit_deps)
4429 return -ENOMEM;
4430 mst_state->num_commit_deps = num_commit_deps;
4431
4432 commit_idx = 0;
4433 for_each_new_crtc_in_state(state, crtc, crtc_state, j) {
4434 if (mst_state->pending_crtc_mask & drm_crtc_mask(crtc)) {
4435 mst_state->commit_deps[commit_idx++] =
4436 drm_crtc_commit_get(crtc_state->commit);
4437 }
4438 }
4439 }
4440
4441 return 0;
4442}
4443EXPORT_SYMBOL(drm_dp_mst_atomic_setup_commit);
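
Since the hook signature already matches, wiring it up can be as simple as this sketch:

    static const struct drm_mode_config_helper_funcs example_mode_config_helpers = {
    	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
    };

    /* at init: dev->mode_config.helper_private = &example_mode_config_helpers; */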
4444
4445/**
4446 * drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies,
4447 * prepare new MST state for commit
4448 * @state: global atomic state
4449 *
4450 * Goes through any MST topologies in this atomic state, and waits for any pending commits which
4451 * touched CRTCs that were/are on an MST topology to be programmed to hardware and flipped to before
4452 * returning. This is to prevent multiple non-blocking commits affecting an MST topology from racing
4453 * with each other by forcing them to be executed sequentially in situations where the only resources
4454 * the modeset objects in these commits share are an MST topology.
4455 *
4456 * This function also prepares the new MST state for commit by performing some state preparation
4457 * which can't be done until this point, such as reading back the final VC start slots (which are
4458 * determined at commit-time) from the previous state.
4459 *
4460 * All MST drivers must call this function after calling drm_atomic_helper_wait_for_dependencies(),
4461 * or whatever their equivalent of that is.
4462 */
4463void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state)
4464{
4465 struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
4466 struct drm_dp_mst_topology_mgr *mgr;
4467 struct drm_dp_mst_atomic_payload *old_payload, *new_payload;
4468 int i, j, ret;
4469
4470 for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state, new_mst_state, i) {
4471 for (j = 0; j < old_mst_state->num_commit_deps; j++) {
4472 ret = drm_crtc_commit_wait(old_mst_state->commit_deps[j]);
4473 if (ret < 0)
4474 drm_err(state->dev, "Failed to wait for %s: %d\n",
4475 old_mst_state->commit_deps[j]->crtc->name, ret);
4476 }
4477
4478 /* Now that previous state is committed, it's safe to copy over the start slot
4479 * assignments
4480 */
4481 list_for_each_entry(old_payload, &old_mst_state->payloads, next) {
4482 if (old_payload->delete)
4483 continue;
4484
4485 new_payload = drm_atomic_get_mst_payload_state(new_mst_state,
4486 old_payload->port);
4487 new_payload->vc_start_slot = old_payload->vc_start_slot;
4488 }
4489 }
4490}
4491EXPORT_SYMBOL(drm_dp_mst_atomic_wait_for_dependencies);
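
A sketch of where this lands in a driver's commit tail; the exact ordering of the enable/disable helpers varies by driver, but the MST wait must follow the generic one.

    static void example_commit_tail(struct drm_atomic_state *state)
    {
    	struct drm_device *dev = state->dev;

    	drm_atomic_helper_wait_for_dependencies(state);
    	drm_dp_mst_atomic_wait_for_dependencies(state); /* after the generic wait */

    	drm_atomic_helper_commit_modeset_disables(dev, state);
    	drm_atomic_helper_commit_modeset_enables(dev, state);
    	drm_atomic_helper_commit_planes(dev, state, 0);

    	drm_atomic_helper_commit_hw_done(state);
    	drm_atomic_helper_wait_for_vblanks(dev, state);
    	drm_atomic_helper_cleanup_planes(dev, state);
    }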
4492
4493/**
4494 * drm_dp_mst_root_conn_atomic_check() - Serialize CRTC commits on MST-capable connectors operating
4495 * in SST mode
4496 * @new_conn_state: The new connector state of the &drm_connector
4497 * @mgr: The MST topology manager for the &drm_connector
4498 *
4499 * Since MST uses fake &drm_encoder structs, the generic atomic modesetting code isn't able to
4500 * serialize non-blocking commits happening on the real DP connector of an MST topology switching
4501 * into/away from MST mode - as the CRTC on the real DP connector and the CRTCs on the connector's
4502 * MST topology will never share the same &drm_encoder.
4503 *
4504 * This function takes care of this serialization issue, by checking a root MST connector's atomic
4505 * state to determine if it is about to have a modeset - and then pulling in the MST topology state
4506 * if so, along with adding any relevant CRTCs to &drm_dp_mst_topology_state.pending_crtc_mask.
4507 *
4508 * Drivers implementing MST must call this function from the
4509 * &drm_connector_helper_funcs.atomic_check hook of any physical DP &drm_connector capable of
4510 * driving MST sinks.
4511 *
4512 * Returns:
4513 * 0 on success, negative error code otherwise
4514 */
4515int drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
4516 struct drm_dp_mst_topology_mgr *mgr)
4517{
4518 struct drm_atomic_state *state = new_conn_state->state;
4519 struct drm_connector_state *old_conn_state =
4520 drm_atomic_get_old_connector_state(state, new_conn_state->connector);
4521 struct drm_crtc_state *crtc_state;
4522 struct drm_dp_mst_topology_state *mst_state = NULL;
4523
4524 if (new_conn_state->crtc) {
4525 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
4526 if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
4527 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
4528 if (IS_ERR(mst_state))
4529 return PTR_ERR(mst_state);
4530
4531 mst_state->pending_crtc_mask |= drm_crtc_mask(new_conn_state->crtc);
4532 }
4533 }
4534
4535 if (old_conn_state->crtc) {
4536 crtc_state = drm_atomic_get_new_crtc_state(state, old_conn_state->crtc);
4537 if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
4538 if (!mst_state) {
4539 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
4540 if (IS_ERR(mst_state))
4541 return PTR_ERR(mst_state);
4542 }
4543
4544 mst_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
4545 }
4546 }
4547
4548 return 0;
4549}
4550EXPORT_SYMBOL(drm_dp_mst_root_conn_atomic_check);
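
A sketch of the hook on the physical DP connector; example_connector_to_mst_mgr() stands in for however the driver actually finds its topology manager.

    static int example_root_conn_atomic_check(struct drm_connector *connector,
    					  struct drm_atomic_state *state)
    {
    	struct drm_connector_state *new_state =
    		drm_atomic_get_new_connector_state(state, connector);
    	struct drm_dp_mst_topology_mgr *mgr =
    		example_connector_to_mst_mgr(connector); /* hypothetical lookup */

    	return drm_dp_mst_root_conn_atomic_check(new_state, mgr);
    }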
4551
4552/**
4553 * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format
4554 * @mst_state: mst_state to update
4555 * @link_encoding_cap: the encoding format on the link
4556 */
4557void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)
4558{
4559 if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
4560 mst_state->total_avail_slots = 64;
4561 mst_state->start_slot = 0;
4562 } else {
4563 mst_state->total_avail_slots = 63;
4564 mst_state->start_slot = 1;
4565 }
4566
4567 DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",
4568 (link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b" : "8b/10b",
4569 mst_state);
4570}
4571EXPORT_SYMBOL(drm_dp_mst_update_slots);
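
A sketch of feeding this from the sink's DPCD; reading DP_MAIN_LINK_CHANNEL_CODING and masking with DP_CAP_ANSI_128B132B is one plausible way to derive @link_encoding_cap, not necessarily what any given driver does.

    static void example_update_slots(struct drm_dp_mst_topology_mgr *mgr,
    				 struct drm_dp_mst_topology_state *mst_state)
    {
    	u8 coding_cap = 0;

    	/* the read's return value is ignored only to keep the sketch short */
    	drm_dp_dpcd_readb(mgr->aux, DP_MAIN_LINK_CHANNEL_CODING, &coding_cap);
    	drm_dp_mst_update_slots(mst_state, coding_cap & DP_CAP_ANSI_128B132B);
    }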
4572
4573static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
4574 int id, u8 start_slot, u8 num_slots)
4575{
4576 u8 payload_alloc[3], status;
4577 int ret;
4578 int retries = 0;
4579
4580 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
4581 DP_PAYLOAD_TABLE_UPDATED);
4582
4583 payload_alloc[0] = id;
4584 payload_alloc[1] = start_slot;
4585 payload_alloc[2] = num_slots;
4586
4587 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
4588 if (ret != 3) {
4589 drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret);
4590 goto fail;
4591 }
4592
4593retry:
4594 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4595 if (ret < 0) {
4596 drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret);
4597 goto fail;
4598 }
4599
4600 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
4601 retries++;
4602 if (retries < 20) {
4603 usleep_range(10000, 20000);
4604 goto retry;
4605 }
4606 drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n",
4607 status);
4608 ret = -EINVAL;
4609 goto fail;
4610 }
4611 ret = 0;
4612fail:
4613 return ret;
4614}
4615
4616static int do_get_act_status(struct drm_dp_aux *aux)
4617{
4618 int ret;
4619 u8 status;
4620
4621 ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4622 if (ret < 0)
4623 return ret;
4624
4625 return status;
4626}
4627
4628/**
4629 * drm_dp_check_act_status() - Polls for ACT handled status.
4630 * @mgr: manager to use
4631 *
4632 * Tries waiting for the MST hub to finish updating its payload table by
4633 * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
4634 * take that long).
4635 *
4636 * Returns:
4637 * 0 if the ACT was handled in time, negative error code on failure.
4638 */
4639int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
4640{
4641 /*
4642 * There doesn't seem to be any recommended retry count or timeout in
4643 * the MST specification. Since some hubs have been observed to take
4644 * over 1 second to update their payload allocations under certain
4645 * conditions, we use a rather large timeout value.
4646 */
4647 const int timeout_ms = 3000;
4648 int ret, status;
4649
4650 ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
4651 status & DP_PAYLOAD_ACT_HANDLED || status < 0,
4652 200, timeout_ms * USEC_PER_MSEC);
4653 if (ret < 0 && status >= 0) {
4654 drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n",
4655 timeout_ms, status);
4656 return -EINVAL;
4657 } else if (status < 0) {
4658 /*
4659 * Failure here isn't unexpected - the hub may have
4660 * just been unplugged
4661 */
4662 drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status);
4663 return status;
4664 }
4665
4666 return 0;
4667}
4668EXPORT_SYMBOL(drm_dp_check_act_status);
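
For orientation, a sketch of where this poll sits in the payload-programming sequence, assuming the drm_dp_add_payload_part1()/part2() helpers that accompany this manager; the example_ function and the exact ordering around the stream enable are illustrative.

    static int example_enable_mst_stream(struct drm_dp_mst_topology_mgr *mgr,
    				     struct drm_atomic_state *state,
    				     struct drm_dp_mst_topology_state *mst_state,
    				     struct drm_dp_mst_atomic_payload *payload)
    {
    	int ret;

    	ret = drm_dp_add_payload_part1(mgr, mst_state, payload);
    	if (ret)
    		return ret;
    	/* ... driver enables the stream on its transcoder here ... */
    	ret = drm_dp_check_act_status(mgr);
    	if (ret)
    		return ret;
    	return drm_dp_add_payload_part2(mgr, state, payload);
    }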
4669
4670/**
4671 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
4672 * @clock: dot clock for the mode
4673 * @bpp: bpp for the mode.
4674 * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
4675 *
4676 * This uses the formula in the spec to calculate the PBN value for a mode.
4677 */
4678int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
4679{
4680 /*
4681 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
4682 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
4683 * common multiplier to render an integer PBN for all link rate/lane
4684 * counts combinations
4685 * calculate
4686 * peak_kbps *= (1006/1000)
4687 * peak_kbps *= (64/54)
4688 * peak_kbps *= 8 convert to bytes
4689 *
4690 * If the bpp is in units of 1/16, further divide by 16. Put this
4691 * factor in the numerator rather than the denominator to avoid
4692 * integer overflow
4693 */
4694
4695 if (dsc)
4696 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
4697 8 * 54 * 1000 * 1000);
4698
4699 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
4700 8 * 54 * 1000 * 1000);
4701}
4702EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
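
A worked example of the formula: a 1080p60 mode (148500 kHz dot clock) at 24 bpp without DSC.

    /* 148500 * 24 * 64 * 1006 = 229464576000
     * 8 * 54 * 1000 * 1000   =    432000000
     * 229464576000 / 432000000 = 531.17, rounded up to 532
     */
    int pbn = drm_dp_calc_pbn_mode(148500, 24, false); /* 532 PBN */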
4703
4704/* we want to kick the TX after we've ack the up/down IRQs. */
4705static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
4706{
4707 queue_work(system_long_wq, &mgr->tx_work);
4708}
4709
4710/*
4711 * Helper function for parsing DP device types into convenient strings
4712 * for use with dp_mst_topology
4713 */
4714static const char *pdt_to_string(u8 pdt)
4715{
4716 switch (pdt) {
4717 case DP_PEER_DEVICE_NONE:
4718 return "NONE";
4719 case DP_PEER_DEVICE_SOURCE_OR_SST:
4720 return "SOURCE OR SST";
4721 case DP_PEER_DEVICE_MST_BRANCHING:
4722 return "MST BRANCHING";
4723 case DP_PEER_DEVICE_SST_SINK:
4724 return "SST SINK";
4725 case DP_PEER_DEVICE_DP_LEGACY_CONV:
4726 return "DP LEGACY CONV";
4727 default:
4728 return "ERR";
4729 }
4730}
4731
4732static void drm_dp_mst_dump_mstb(struct seq_file *m,
4733 struct drm_dp_mst_branch *mstb)
4734{
4735 struct drm_dp_mst_port *port;
4736 int tabs = mstb->lct;
4737 char prefix[10];
4738 int i;
4739
4740 for (i = 0; i < tabs; i++)
4741 prefix[i] = '\t';
4742 prefix[i] = '\0';
4743
4744 seq_printf(m, "%smstb - [%p]: num_ports: %d\n", prefix, mstb, mstb->num_ports);
4745 list_for_each_entry(port, &mstb->ports, next) {
4746 seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n",
4747 prefix,
4748 port->port_num,
4749 port,
4750 port->input ? "input" : "output",
4751 pdt_to_string(port->pdt),
4752 port->ddps,
4753 port->ldps,
4754 port->num_sdp_streams,
4755 port->num_sdp_stream_sinks,
4756 port->fec_capable ? "true" : "false",
4757 port->connector);
4758 if (port->mstb)
4759 drm_dp_mst_dump_mstb(m, port->mstb);
4760 }
4761}
4762
4763#define DP_PAYLOAD_TABLE_SIZE 64
4764
4765static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
4766 char *buf)
4767{
4768 int i;
4769
4770 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
4771 if (drm_dp_dpcd_read(mgr->aux,
4772 DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
4773 &buf[i], 16) != 16)
4774 return false;
4775 }
4776 return true;
4777}
4778
4779static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
4780 struct drm_dp_mst_port *port, char *name,
4781 int namelen)
4782{
4783 struct edid *mst_edid;
4784
4785 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
4786 drm_edid_get_monitor_name(mst_edid, name, namelen);
4787 kfree(mst_edid);
4788}
4789
4790/**
4791 * drm_dp_mst_dump_topology(): dump topology to seq file.
4792 * @m: seq_file to dump output to
4793 * @mgr: manager to dump current topology for.
4794 *
4795 * helper to dump MST topology to a seq file for debugfs.
4796 */
4797void drm_dp_mst_dump_topology(struct seq_file *m,
4798 struct drm_dp_mst_topology_mgr *mgr)
4799{
4800 struct drm_dp_mst_topology_state *state;
4801 struct drm_dp_mst_atomic_payload *payload;
4802 int i, ret;
4803
4804 mutex_lock(&mgr->lock);
4805 if (mgr->mst_primary)
4806 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
4807
4808 /* dump VCPIs */
4809 mutex_unlock(&mgr->lock);
4810
4811 ret = drm_modeset_lock_single_interruptible(&mgr->base.lock);
4812 if (ret < 0)
4813 return;
4814
4815 state = to_drm_dp_mst_topology_state(mgr->base.state);
4816 seq_printf(m, "\n*** Atomic state info ***\n");
4817 seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n",
4818 state->payload_mask, mgr->max_payloads, state->start_slot, state->pbn_div);
4819
4820 seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | sink name |\n");
4821 for (i = 0; i < mgr->max_payloads; i++) {
4822 list_for_each_entry(payload, &state->payloads, next) {
4823 char name[14];
4824
4825 if (payload->vcpi != i || payload->delete)
4826 continue;
4827
4828 fetch_monitor_name(mgr, payload->port, name, sizeof(name));
4829 seq_printf(m, " %5d %6d %6d %02d - %02d %5d %5s %19s\n",
4830 i,
4831 payload->port->port_num,
4832 payload->vcpi,
4833 payload->vc_start_slot,
4834 payload->vc_start_slot + payload->time_slots - 1,
4835 payload->pbn,
4836 payload->dsc_enabled ? "Y" : "N",
4837 (*name != 0) ? name : "Unknown");
4838 }
4839 }
4840
4841 seq_printf(m, "\n*** DPCD Info ***\n");
4842 mutex_lock(&mgr->lock);
4843 if (mgr->mst_primary) {
4844 u8 buf[DP_PAYLOAD_TABLE_SIZE];
4845 int ret;
4846
4847 if (drm_dp_read_dpcd_caps(mgr->aux, buf) < 0) {
4848 seq_printf(m, "dpcd read failed\n");
4849 goto out;
4850 }
4851 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
4852
4853 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
4854 if (ret != 2) {
4855 seq_printf(m, "faux/mst read failed\n");
4856 goto out;
4857 }
4858 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
4859
4860 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
4861 if (ret != 1) {
4862 seq_printf(m, "mst ctrl read failed\n");
4863 goto out;
4864 }
4865 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
4866
4867 /* dump the standard OUI branch header */
4868 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
4869 if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
4870 seq_printf(m, "branch oui read failed\n");
4871 goto out;
4872 }
4873 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
4874
4875 for (i = 0x3; i < 0x8 && buf[i]; i++)
4876 seq_printf(m, "%c", buf[i]);
4877 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
4878 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
4879 if (dump_dp_payload_table(mgr, buf))
4880 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
4881 }
4882
4883out:
4884 mutex_unlock(&mgr->lock);
4885 drm_modeset_unlock(&mgr->base.lock);
4886}
4887EXPORT_SYMBOL(drm_dp_mst_dump_topology);
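
A sketch of the debugfs plumbing that typically backs this helper; stashing the manager in the seq_file private pointer is an assumption about how the file was created.

    static int example_mst_topology_show(struct seq_file *m, void *unused)
    {
    	struct drm_dp_mst_topology_mgr *mgr = m->private;

    	drm_dp_mst_dump_topology(m, mgr);
    	return 0;
    }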
4888
4889static void drm_dp_tx_work(struct work_struct *work)
4890{
4891 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
4892
4893 mutex_lock(&mgr->qlock);
4894 if (!list_empty(&mgr->tx_msg_downq))
4895 process_single_down_tx_qlock(mgr);
4896 mutex_unlock(&mgr->qlock);
4897}
4898
4899static inline void
4900drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
4901{
4902 drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
4903
4904 if (port->connector) {
4905 drm_connector_unregister(port->connector);
4906 drm_connector_put(port->connector);
4907 }
4908
4909 drm_dp_mst_put_port_malloc(port);
4910}
4911
4912static inline void
4913drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
4914{
4915 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
4916 struct drm_dp_mst_port *port, *port_tmp;
4917 struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
4918 bool wake_tx = false;
4919
4920 mutex_lock(&mgr->lock);
4921 list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
4922 list_del(&port->next);
4923 drm_dp_mst_topology_put_port(port);
4924 }
4925 mutex_unlock(&mgr->lock);
4926
4927 /* drop any tx slot msg */
4928 mutex_lock(&mstb->mgr->qlock);
4929 list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
4930 if (txmsg->dst != mstb)
4931 continue;
4932
4933 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
4934 list_del(&txmsg->next);
4935 wake_tx = true;
4936 }
4937 mutex_unlock(&mstb->mgr->qlock);
4938
4939 if (wake_tx)
4940 wake_up_all(&mstb->mgr->tx_waitq);
4941
4942 drm_dp_mst_put_mstb_malloc(mstb);
4943}
4944
4945static void drm_dp_delayed_destroy_work(struct work_struct *work)
4946{
4947 struct drm_dp_mst_topology_mgr *mgr =
4948 container_of(work, struct drm_dp_mst_topology_mgr,
4949 delayed_destroy_work);
4950 bool send_hotplug = false, go_again;
4951
4952 /*
4953 * Not a regular list traverse as we have to drop the destroy
4954 * connector lock before destroying the mstb/port, to avoid AB->BA
4955 * ordering between this lock and the config mutex.
4956 */
4957 do {
4958 go_again = false;
4959
4960 for (;;) {
4961 struct drm_dp_mst_branch *mstb;
4962
4963 mutex_lock(&mgr->delayed_destroy_lock);
4964 mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
4965 struct drm_dp_mst_branch,
4966 destroy_next);
4967 if (mstb)
4968 list_del(&mstb->destroy_next);
4969 mutex_unlock(&mgr->delayed_destroy_lock);
4970
4971 if (!mstb)
4972 break;
4973
4974 drm_dp_delayed_destroy_mstb(mstb);
4975 go_again = true;
4976 }
4977
4978 for (;;) {
4979 struct drm_dp_mst_port *port;
4980
4981 mutex_lock(&mgr->delayed_destroy_lock);
4982 port = list_first_entry_or_null(&mgr->destroy_port_list,
4983 struct drm_dp_mst_port,
4984 next);
4985 if (port)
4986 list_del(&port->next);
4987 mutex_unlock(&mgr->delayed_destroy_lock);
4988
4989 if (!port)
4990 break;
4991
4992 drm_dp_delayed_destroy_port(port);
4993 send_hotplug = true;
4994 go_again = true;
4995 }
4996 } while (go_again);
4997
4998 if (send_hotplug)
4999 drm_kms_helper_hotplug_event(mgr->dev);
5000}
5001
5002static struct drm_private_state *
5003drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
5004{
5005 struct drm_dp_mst_topology_state *state, *old_state =
5006 to_dp_mst_topology_state(obj->state);
5007 struct drm_dp_mst_atomic_payload *pos, *payload;
5008
5009 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
5010 if (!state)
5011 return NULL;
5012
5013 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
5014
5015 INIT_LIST_HEAD(&state->payloads);
5016 state->commit_deps = NULL;
5017 state->num_commit_deps = 0;
5018 state->pending_crtc_mask = 0;
5019
5020 list_for_each_entry(pos, &old_state->payloads, next) {
5021 /* Prune leftover freed timeslot allocations */
5022 if (pos->delete)
5023 continue;
5024
5025 payload = kmemdup(pos, sizeof(*payload), GFP_KERNEL);
5026 if (!payload)
5027 goto fail;
5028
5029 drm_dp_mst_get_port_malloc(payload->port);
5030 list_add(&payload->next, &state->payloads);
5031 }
5032
5033 return &state->base;
5034
5035fail:
5036 list_for_each_entry_safe(pos, payload, &state->payloads, next) {
5037 drm_dp_mst_put_port_malloc(pos->port);
5038 kfree(pos);
5039 }
5040 kfree(state);
5041
5042 return NULL;
5043}
5044
5045static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
5046 struct drm_private_state *state)
5047{
5048 struct drm_dp_mst_topology_state *mst_state =
5049 to_dp_mst_topology_state(state);
5050 struct drm_dp_mst_atomic_payload *pos, *tmp;
5051 int i;
5052
5053 list_for_each_entry_safe(pos, tmp, &mst_state->payloads, next) {
5054 /* We only keep references to ports with active payloads */
5055 if (!pos->delete)
5056 drm_dp_mst_put_port_malloc(pos->port);
5057 kfree(pos);
5058 }
5059
5060 for (i = 0; i < mst_state->num_commit_deps; i++)
5061 drm_crtc_commit_put(mst_state->commit_deps[i]);
5062
5063 kfree(mst_state->commit_deps);
5064 kfree(mst_state);
5065}
5066
5067static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
5068 struct drm_dp_mst_branch *branch)
5069{
5070 while (port->parent) {
5071 if (port->parent == branch)
5072 return true;
5073
5074 if (port->parent->port_parent)
5075 port = port->parent->port_parent;
5076 else
5077 break;
5078 }
5079 return false;
5080}
5081
5082static int
5083drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
5084 struct drm_dp_mst_topology_state *state);
5085
5086static int
5087drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
5088 struct drm_dp_mst_topology_state *state)
5089{
5090 struct drm_dp_mst_atomic_payload *payload;
5091 struct drm_dp_mst_port *port;
5092 int pbn_used = 0, ret;
5093 bool found = false;
5094
5095 /* Check that we have at least one port in our state that's downstream
5096 * of this branch, otherwise we can skip this branch
5097 */
5098 list_for_each_entry(payload, &state->payloads, next) {
5099 if (!payload->pbn ||
5100 !drm_dp_mst_port_downstream_of_branch(payload->port, mstb))
5101 continue;
5102
5103 found = true;
5104 break;
5105 }
5106 if (!found)
5107 return 0;
5108
5109 if (mstb->port_parent)
5110 drm_dbg_atomic(mstb->mgr->dev,
5111 "[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
5112 mstb->port_parent->parent, mstb->port_parent, mstb);
5113 else
5114 drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb)__drm_dev_dbg(((void *)0), (mstb->mgr->dev) ? (mstb->
mgr->dev)->dev : ((void *)0), DRM_UT_ATOMIC, "[MSTB:%p] Checking bandwidth limits\n"
, mstb)
;
5115
5116 list_for_each_entry(port, &mstb->ports, next) {
5117 ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
5118 if (ret < 0)
5119 return ret;
5120
5121 pbn_used += ret;
5122 }
5123
5124 return pbn_used;
5125}
5126
5127static int
5128drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
5129 struct drm_dp_mst_topology_state *state)
5130{
5131 struct drm_dp_mst_atomic_payload *payload;
5132 int pbn_used = 0;
5133
5134 if (port->pdt == DP_PEER_DEVICE_NONE)
5135 return 0;
5136
5137 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
5138 payload = drm_atomic_get_mst_payload_state(state, port);
5139 if (!payload)
5140 return 0;
5141
5142 /*
5143 * This could happen if the sink deasserted its HPD line, but
5144 * the branch device still reports it as attached (PDT != NONE).
5145 */
5146 if (!port->full_pbn) {
5147 drm_dbg_atomic(port->mgr->dev,
5148 "[MSTB:%p] [MST PORT:%p] no BW available for the port\n",
5149 port->parent, port);
5150 return -EINVAL;
5151 }
5152
5153 pbn_used = payload->pbn;
5154 } else {
5155 pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
5156 state);
5157 if (pbn_used <= 0)
5158 return pbn_used;
5159 }
5160
5161 if (pbn_used > port->full_pbn) {
5162 drm_dbg_atomic(port->mgr->dev,
5163 "[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
5164 port->parent, port, pbn_used, port->full_pbn);
5165 return -ENOSPC;
5166 }
5167
5168 drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",__drm_dev_dbg(((void *)0), (port->mgr->dev) ? (port->
mgr->dev)->dev : ((void *)0), DRM_UT_ATOMIC, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n"
, port->parent, port, pbn_used, port->full_pbn)
5169 port->parent, port, pbn_used, port->full_pbn)__drm_dev_dbg(((void *)0), (port->mgr->dev) ? (port->
mgr->dev)->dev : ((void *)0), DRM_UT_ATOMIC, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n"
, port->parent, port, pbn_used, port->full_pbn)
;
5170
5171 return pbn_used;
5172}
5173
5174static inline int
5175drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr,
5176 struct drm_dp_mst_topology_state *mst_state)
5177{
5178 struct drm_dp_mst_atomic_payload *payload;
5179 int avail_slots = mst_state->total_avail_slots, payload_count = 0;
5180
5181 list_for_each_entry(payload, &mst_state->payloads, next) {
5182 /* Releasing payloads is always OK - even if the port is gone */
5183 if (payload->delete) {
5184 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all time slots\n",__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_ATOMIC, "[MST PORT:%p] releases all time slots\n"
, payload->port)
5185 payload->port)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_ATOMIC, "[MST PORT:%p] releases all time slots\n"
, payload->port)
;
5186 continue;
5187 }
5188
5189 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d time slots\n",__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_ATOMIC, "[MST PORT:%p] requires %d time slots\n"
, payload->port, payload->time_slots)
5190 payload->port, payload->time_slots)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_ATOMIC, "[MST PORT:%p] requires %d time slots\n"
, payload->port, payload->time_slots)
;
5191
5192 avail_slots -= payload->time_slots;
5193 if (avail_slots < 0) {
5194 drm_dbg_atomic(mgr->dev,
5195 "[MST PORT:%p] not enough time slots in mst state %p (avail=%d)\n",
5196 payload->port, mst_state, avail_slots + payload->time_slots);
5197 return -ENOSPC;
5198 }
5199
5200 if (++payload_count > mgr->max_payloads) {
5201 drm_dbg_atomic(mgr->dev,
5202 "[MST MGR:%p] state %p has too many payloads (max=%d)\n",
5203 mgr, mst_state, mgr->max_payloads);
5204 return -EINVAL;
5205 }
5206
5207 /* Assign a VCPI */
5208 if (!payload->vcpi) {
5209 payload->vcpi = ffz(mst_state->payload_mask) + 1;
5210 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] assigned VCPI #%d\n",
5211 payload->port, payload->vcpi);
5212 mst_state->payload_mask |= BIT(payload->vcpi - 1);
5213 }
5214 }
5215
5216 if (!payload_count)
5217 mst_state->pbn_div = 0;
5218
5219 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n",__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_ATOMIC, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n"
, mgr, mst_state, mst_state->pbn_div, avail_slots, mst_state
->total_avail_slots - avail_slots)
5220 mgr, mst_state, mst_state->pbn_div, avail_slots,__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_ATOMIC, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n"
, mgr, mst_state, mst_state->pbn_div, avail_slots, mst_state
->total_avail_slots - avail_slots)
5221 mst_state->total_avail_slots - avail_slots)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_ATOMIC, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n"
, mgr, mst_state, mst_state->pbn_div, avail_slots, mst_state
->total_avail_slots - avail_slots)
;
5222
5223 return 0;
5224}
5225
5226/**
5227 * drm_dp_mst_add_affected_dsc_crtcs
5228 * @state: Pointer to the new struct drm_dp_mst_topology_state
5229 * @mgr: MST topology manager
5230 *
5231 * Whenever there is a change in the MST topology, the
5232 * DSC configuration has to be recalculated, so we need to
5233 * trigger a modeset on all affected CRTCs in that
5234 * topology.
5235 *
5236 * See also:
5237 * drm_dp_mst_atomic_enable_dsc()
5238 */
5239int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
5240{
5241 struct drm_dp_mst_topology_state *mst_state;
5242 struct drm_dp_mst_atomic_payload *pos;
5243 struct drm_connector *connector;
5244 struct drm_connector_state *conn_state;
5245 struct drm_crtc *crtc;
5246 struct drm_crtc_state *crtc_state;
5247
5248 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
5249
5250 if (IS_ERR(mst_state))
5251 return PTR_ERR(mst_state);
5252
5253 list_for_each_entry(pos, &mst_state->payloads, next) {
5254
5255 connector = pos->port->connector;
5256
5257 if (!connector)
5258 return -EINVAL;
5259
5260 conn_state = drm_atomic_get_connector_state(state, connector);
5261
5262 if (IS_ERR(conn_state))
5263 return PTR_ERR(conn_state);
5264
5265 crtc = conn_state->crtc;
5266
5267 if (!crtc)
5268 continue;
5269
5270 if (!drm_dp_mst_dsc_aux_for_port(pos->port))
5271 continue;
5272
5273 crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
5274
5275 if (IS_ERR(crtc_state))
5276 return PTR_ERR(crtc_state);
5277
5278 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_ATOMIC, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n"
, mgr, crtc)
5279 mgr, crtc)__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_ATOMIC, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n"
, mgr, crtc)
;
5280
5281 crtc_state->mode_changed = true;
5282 }
5283 return 0;
5284}
5285EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
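As a rough illustration of the calling convention described in the kernel-doc above, a driver's &drm_mode_config_funcs.atomic_check implementation might invoke this helper as sketched below; this is a minimal sketch, and the names other than drm_dp_mst_add_affected_dsc_crtcs() and drm_atomic_helper_check() (example_atomic_check, example_priv, mst_mgr) are hypothetical:

static int example_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	struct example_priv *priv = dev->dev_private; /* hypothetical driver state */
	int ret;

	/* Force a modeset on every CRTC sharing the MST topology before
	 * running the core helper checks. */
	ret = drm_dp_mst_add_affected_dsc_crtcs(state, &priv->mst_mgr);
	if (ret)
		return ret;

	return drm_atomic_helper_check(dev, state);
}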
5286
5287/**
5288 * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
5289 * @state: Pointer to the new drm_atomic_state
5290 * @port: Pointer to the affected MST Port
5291 * @pbn: Newly recalculated bw required for link with DSC enabled
5292 * @enable: Boolean flag to enable or disable DSC on the port
5293 *
5294 * This function enables or disables DSC on the given port
5295 * by recalculating its time slot allocation from the pbn
5296 * provided, and sets the dsc_enabled flag to keep track of
5297 * which ports have DSC enabled.
5298 *
5299 */
5300int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
5301 struct drm_dp_mst_port *port,
5302 int pbn, bool enable)
5303{
5304 struct drm_dp_mst_topology_state *mst_state;
5305 struct drm_dp_mst_atomic_payload *payload;
5306 int time_slots = 0;
5307
5308 mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
5309 if (IS_ERR(mst_state))
5310 return PTR_ERR(mst_state);
5311
5312 payload = drm_atomic_get_mst_payload_state(mst_state, port);
5313 if (!payload) {
5314 drm_dbg_atomic(state->dev,
5315 "[MST PORT:%p] Couldn't find payload in mst state %p\n",
5316 port, mst_state);
5317 return -EINVAL;
5318 }
5319
5320 if (payload->dsc_enabled == enable) {
5321 drm_dbg_atomic(state->dev,
5322 "[MST PORT:%p] DSC flag is already set to %d, returning %d time slots\n",
5323 port, enable, payload->time_slots);
5324 time_slots = payload->time_slots;
5325 }
5326
5327 if (enable) {
5328 time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn);
5329 drm_dbg_atomic(state->dev,
5330 "[MST PORT:%p] Enabling DSC flag, reallocating %d time slots on the port\n",
5331 port, time_slots);
5332 if (time_slots < 0)
5333 return -EINVAL;
5334 }
5335
5336 payload->dsc_enabled = enable;
5337
5338 return time_slots;
5339}
5340EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
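For illustration only, a caller that has just computed a DSC-compressed bandwidth figure might toggle the flag as below; this is a sketch, and the state/port/dsc_pbn variables are assumed to come from the driver's own atomic-check context:

	int time_slots;

	/* Recompute the port's allocation with the DSC-compressed PBN and
	 * record that DSC is now on; a negative value is an error code,
	 * otherwise it is the new number of time slots. */
	time_slots = drm_dp_mst_atomic_enable_dsc(state, port, dsc_pbn, true);
	if (time_slots < 0)
		return time_slots;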
5341
5342/**
5343 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
5344 * atomic update is valid
5345 * @state: Pointer to the new &struct drm_dp_mst_topology_state
5346 *
5347 * Checks the given topology state for an atomic update to ensure that it's
5348 * valid. This includes checking whether there's enough bandwidth to support
5349 * the new timeslot allocations in the atomic update.
5350 *
5351 * Any atomic drivers supporting DP MST must make sure to call this after
5352 * checking the rest of their state in their
5353 * &drm_mode_config_funcs.atomic_check() callback.
5354 *
5355 * See also:
5356 * drm_dp_atomic_find_time_slots()
5357 * drm_dp_atomic_release_time_slots()
5358 *
5359 * Returns:
5360 *
5361 * 0 if the new state is valid, negative error code otherwise.
5362 */
5363int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
5364{
5365 struct drm_dp_mst_topology_mgr *mgr;
5366 struct drm_dp_mst_topology_state *mst_state;
5367 int i, ret = 0;
5368
5369 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
5370 if (!mgr->mst_state)
5371 continue;
5372
5373 ret = drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state);
5374 if (ret)
5375 break;
5376
5377 mutex_lock(&mgr->lock);
5378 ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
5379 mst_state);
5380 mutex_unlock(&mgr->lock);
5381 if (ret < 0)
5382 break;
5383 else
5384 ret = 0;
5385 }
5386
5387 return ret;
5388}
5389EXPORT_SYMBOL(drm_dp_mst_atomic_check);
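The kernel-doc above prescribes the call order: the rest of the driver's state checks first, this helper last. A minimal sketch honouring that order (the function name example_mode_config_atomic_check is hypothetical):

static int example_mode_config_atomic_check(struct drm_device *dev,
					    struct drm_atomic_state *state)
{
	int ret;

	/* Validate planes/CRTCs/connectors first ... */
	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	/* ... then validate MST bandwidth and timeslot allocations last. */
	return drm_dp_mst_atomic_check(state);
}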
5390
5391const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
5392 .atomic_duplicate_state = drm_dp_mst_duplicate_state,
5393 .atomic_destroy_state = drm_dp_mst_destroy_state,
5394};
5395EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
5396
5397/**
5398 * drm_atomic_get_mst_topology_state: get MST topology state
5399 * @state: global atomic state
5400 * @mgr: MST topology manager, also the private object in this case
5401 *
5402 * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
5403 * state vtable so that the private object state returned is that of a MST
5404 * topology object.
5405 *
5406 * RETURNS:
5407 *
5408 * The MST topology state or error pointer.
5409 */
5410struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
5411 struct drm_dp_mst_topology_mgr *mgr)
5412{
5413 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
5415EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
5416
5417/**
5418 * drm_atomic_get_old_mst_topology_state: get old MST topology state in atomic state, if any
5419 * @state: global atomic state
5420 * @mgr: MST topology manager, also the private object in this case
5421 *
5422 * This function wraps drm_atomic_get_old_private_obj_state() passing in the MST atomic
5423 * state vtable so that the private object state returned is that of a MST
5424 * topology object.
5425 *
5426 * Returns:
5427 *
5428 * The old MST topology state, or NULL if there's no topology state for this MST mgr
5429 * in the global atomic state
5430 */
5431struct drm_dp_mst_topology_state *
5432drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
5433 struct drm_dp_mst_topology_mgr *mgr)
5434{
5435 struct drm_private_state *old_priv_state =
5436 drm_atomic_get_old_private_obj_state(state, &mgr->base);
5437
5438 return old_priv_state ? to_dp_mst_topology_state(old_priv_state) : NULL;
5439}
5440EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state);
5441
5442/**
5443 * drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
5444 * @state: global atomic state
5445 * @mgr: MST topology manager, also the private object in this case
5446 *
5447 * This function wraps drm_atomic_get_new_private_obj_state() passing in the MST atomic
5448 * state vtable so that the private object state returned is that of a MST
5449 * topology object.
5450 *
5451 * Returns:
5452 *
5453 * The new MST topology state, or NULL if there's no topology state for this MST mgr
5454 * in the global atomic state
5455 */
5456struct drm_dp_mst_topology_state *
5457drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
5458 struct drm_dp_mst_topology_mgr *mgr)
5459{
5460 struct drm_private_state *new_priv_state =
5461 drm_atomic_get_new_private_obj_state(state, &mgr->base);
5462
5463 return new_priv_state ? to_dp_mst_topology_state(new_priv_state) : NULL;
5465EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
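A typical consumer in a commit path fetches both snapshots and tolerates a NULL return when this atomic state never touched the topology's private object. A sketch, where everything except the two getters (example_update_payloads in particular) is hypothetical:

	struct drm_dp_mst_topology_state *old_mst_state =
		drm_atomic_get_old_mst_topology_state(state, mgr);
	struct drm_dp_mst_topology_state *new_mst_state =
		drm_atomic_get_new_mst_topology_state(state, mgr);

	/* Either pointer may be NULL if the topology was not part of
	 * this update. */
	if (old_mst_state && new_mst_state)
		example_update_payloads(old_mst_state, new_mst_state); /* hypothetical */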
5466
5467/**
5468 * drm_dp_mst_topology_mgr_init - initialise a topology manager
5469 * @mgr: manager struct to initialise
5470 * @dev: device providing this structure - for i2c addition.
5471 * @aux: DP helper aux channel to talk to this device
5472 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
5473 * @max_payloads: maximum number of payloads this GPU can source
5474 * @conn_base_id: the connector object ID the MST device is connected to.
5475 *
5476 * Return 0 for success, or negative error code on failure
5477 */
5478int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
5479 struct drm_device *dev, struct drm_dp_aux *aux,
5480 int max_dpcd_transaction_bytes, int max_payloads,
5481 int conn_base_id)
5482{
5483 struct drm_dp_mst_topology_state *mst_state;
5484
5485 rw_init(&mgr->lock, "mst")_rw_init_flags(&mgr->lock, "mst", 0, ((void *)0));
5486 rw_init(&mgr->qlock, "mstq")_rw_init_flags(&mgr->qlock, "mstq", 0, ((void *)0));
5487 rw_init(&mgr->delayed_destroy_lock, "mstdc")_rw_init_flags(&mgr->delayed_destroy_lock, "mstdc", 0,
((void *)0))
;
5488 rw_init(&mgr->up_req_lock, "mstup")_rw_init_flags(&mgr->up_req_lock, "mstup", 0, ((void *
)0))
;
5489 rw_init(&mgr->probe_lock, "mstprb")_rw_init_flags(&mgr->probe_lock, "mstprb", 0, ((void *
)0))
;
5490#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)0
5491 rw_init(&mgr->topology_ref_history_lock, "msttr")_rw_init_flags(&mgr->topology_ref_history_lock, "msttr"
, 0, ((void *)0))
;
5492 stack_depot_init();
5493#endif
5494 INIT_LIST_HEAD(&mgr->tx_msg_downq);
5495 INIT_LIST_HEAD(&mgr->destroy_port_list);
5496 INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
5497 INIT_LIST_HEAD(&mgr->up_req_list);
5498
5499 /*
5500 * delayed_destroy_work will be queued on a dedicated WQ, so that any
5501 * requeuing will be also flushed when deiniting the topology manager.
5502 */
5503 mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
5504 if (mgr->delayed_destroy_wq == NULL)
5505 return -ENOMEM;
5506
5507 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
5508 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
5509 INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
5510 INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
5511 init_waitqueue_head(&mgr->tx_waitq);
5512 mgr->dev = dev;
5513 mgr->aux = aux;
5514 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
5515 mgr->max_payloads = max_payloads;
5516 mgr->conn_base_id = conn_base_id;
5517
5518 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
5519 if (mst_state == NULL)
5520 return -ENOMEM;
5521
5522 mst_state->total_avail_slots = 63;
5523 mst_state->start_slot = 1;
5524
5525 mst_state->mgr = mgr;
5526 INIT_LIST_HEAD(&mst_state->payloads);
5527
5528 drm_atomic_private_obj_init(dev, &mgr->base,
5529 &mst_state->base,
5530 &drm_dp_mst_topology_state_funcs);
5531
5532 return 0;
5533}
5534EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
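For reference, a driver typically initialises the manager once at connector setup, roughly as sketched here; priv, mst_mgr and dp_aux are hypothetical driver fields, and the 16/6 limits are arbitrary example values, not recommendations:

	int ret;

	/* 16-byte DPCD transactions and 6 payloads are example limits;
	 * real drivers use their hardware's values. On success the driver
	 * must eventually call drm_dp_mst_topology_mgr_destroy(). */
	ret = drm_dp_mst_topology_mgr_init(&priv->mst_mgr, dev, &priv->dp_aux,
					   16, 6, connector->base.id);
	if (ret)
		return ret;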
5535
5536/**
5537 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
5538 * @mgr: manager to destroy
5539 */
5540void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
5541{
5542 drm_dp_mst_topology_mgr_set_mst(mgr, false);
5543 flush_work(&mgr->work);
5544 /* The following will also drain any requeued work on the WQ. */
5545 if (mgr->delayed_destroy_wq) {
5546 destroy_workqueue(mgr->delayed_destroy_wq);
5547 mgr->delayed_destroy_wq = NULL;
5548 }
5549 mgr->dev = NULL;
5550 mgr->aux = NULL;
5551 drm_atomic_private_obj_fini(&mgr->base);
5552 mgr->funcs = NULL;
5553
5554 mutex_destroy(&mgr->delayed_destroy_lock);
5555 mutex_destroy(&mgr->qlock);
5556 mutex_destroy(&mgr->lock);
5557 mutex_destroy(&mgr->up_req_lock);
5558 mutex_destroy(&mgr->probe_lock);
5559#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5560 mutex_destroy(&mgr->topology_ref_history_lock);
5561#endif
5562}
5563EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
5564
5565static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
5566{
5567 int i;
5568
5569 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
5570 return false;
5571
5572 for (i = 0; i < num - 1; i++) {
5573 if (msgs[i].flags & I2C_M_RD ||
5574 msgs[i].len > 0xff)
5575 return false;
5576 }
5577
5578 return msgs[num - 1].flags & I2C_M_RD &&
5579 msgs[num - 1].len <= 0xff;
5580}
5581
5582static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
5583{
5584 int i;
5585
5586 for (i = 0; i < num - 1; i++) {
5587 if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
5588 msgs[i].len > 0xff)
5589 return false;
5590 }
5591
5592 return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
5593}
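Taken together, the two validators accept exactly the transfer shapes the sideband protocol can encode: for a read, up to DP_REMOTE_I2C_READ_MAX_TRANSACTIONS short writes followed by a single short read; for a write, a run of short stop-terminated writes. A sketch of a transfer that passes remote_i2c_read_ok() (the 0x50 address and buffer sizes are illustrative, EDID-style values):

	u8 offset = 0x00;
	u8 edid[128];
	struct i2c_msg msgs[] = {
		/* one sub-0xff write transaction ... */
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
		/* ... followed by exactly one sub-0xff read */
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
	};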
5594
5595static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
5596 struct drm_dp_mst_port *port,
5597 struct i2c_msg *msgs, int num)
5598{
5599 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5600 unsigned int i;
5601 struct drm_dp_sideband_msg_req_body msg;
5602 struct drm_dp_sideband_msg_tx *txmsg = NULL;
5603 int ret;
5604
5605 memset(&msg, 0, sizeof(msg));
5606 msg.req_type = DP_REMOTE_I2C_READ;
5607 msg.u.i2c_read.num_transactions = num - 1;
5608 msg.u.i2c_read.port_number = port->port_num;
5609 for (i = 0; i < num - 1; i++) {
5610 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
5611 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
5612 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
5613 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
5614 }
5615 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
5616 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
5617
5618 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5619 if (!txmsg) {
5620 ret = -ENOMEM;
5621 goto out;
5622 }
5623
5624 txmsg->dst = mstb;
5625 drm_dp_encode_sideband_req(&msg, txmsg);
5626
5627 drm_dp_queue_down_tx(mgr, txmsg);
5628
5629 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5630 if (ret > 0) {
5631
5632 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5633 ret = -EREMOTEIO;
5634 goto out;
5635 }
5636 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
5637 ret = -EIO;
5638 goto out;
5639 }
5640 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
5641 ret = num;
5642 }
5643out:
5644 kfree(txmsg);
5645 return ret;
5646}
5647
5648static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
5649 struct drm_dp_mst_port *port,
5650 struct i2c_msg *msgs, int num)
5651{
5652 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5653 unsigned int i;
5654 struct drm_dp_sideband_msg_req_body msg;
5655 struct drm_dp_sideband_msg_tx *txmsg = NULL;
5656 int ret;
5657
5658 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5659 if (!txmsg) {
5660 ret = -ENOMEM;
5661 goto out;
5662 }
5663 for (i = 0; i < num; i++) {
5664 memset(&msg, 0, sizeof(msg));
5665 msg.req_type = DP_REMOTE_I2C_WRITE;
5666 msg.u.i2c_write.port_number = port->port_num;
5667 msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
5668 msg.u.i2c_write.num_bytes = msgs[i].len;
5669 msg.u.i2c_write.bytes = msgs[i].buf;
5670
5671 memset(txmsg, 0, sizeof(*txmsg));
5672 txmsg->dst = mstb;
5673
5674 drm_dp_encode_sideband_req(&msg, txmsg);
5675 drm_dp_queue_down_tx(mgr, txmsg);
5676
5677 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5678 if (ret > 0) {
5679 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5680 ret = -EREMOTEIO;
5681 goto out;
5682 }
5683 } else {
5684 goto out;
5685 }
5686 }
5687 ret = num;
5688out:
5689 kfree(txmsg);
5690 return ret;
5691}
5692
5693/* I2C device */
5694static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
5695 struct i2c_msg *msgs, int num)
5696{
5697 struct drm_dp_aux *aux = adapter->algo_data;
5698 struct drm_dp_mst_port *port =
5699 container_of(aux, struct drm_dp_mst_port, aux);
5700 struct drm_dp_mst_branch *mstb;
5701 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5702 int ret;
5703
5704 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
5705 if (!mstb)
5706 return -EREMOTEIO;
5707
5708 if (remote_i2c_read_ok(msgs, num)) {
5709 ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
5710 } else if (remote_i2c_write_ok(msgs, num)) {
5711 ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
5712 } else {
5713 drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n")__drm_dev_dbg(((void *)0), (mgr->dev) ? (mgr->dev)->
dev : ((void *)0), DRM_UT_KMS, "Unsupported I2C transaction for MST device\n"
)
;
5714 ret = -EIO5;
5715 }
5716
5717 drm_dp_mst_topology_put_mstb(mstb);
5718 return ret;
5719}
5720
5721static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
5722{
5723 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
5724 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
5725 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
5726 I2C_FUNC_10BIT_ADDR;
5727}
5728
5729static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
5730 .functionality = drm_dp_mst_i2c_functionality,
5731 .master_xfer = drm_dp_mst_i2c_xfer,
5732};
5733
5734/**
5735 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
5736 * @port: The port to add the I2C bus on
5737 *
5738 * Returns 0 on success or a negative error code on failure.
5739 */
5740static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
5741{
5742 struct drm_dp_aux *aux = &port->aux;
5743#ifdef __linux__
5744 struct device *parent_dev = port->mgr->dev->dev;
5745#endif
5746
5747 aux->ddc.algo = &drm_dp_mst_i2c_algo;
5748 aux->ddc.algo_data = aux;
5749 aux->ddc.retries = 3;
5750
5751#ifdef __linux__
5752 aux->ddc.class = I2C_CLASS_DDC;
5753 aux->ddc.owner = THIS_MODULE;
5754 /* FIXME: set the kdev of the port's connector as parent */
5755 aux->ddc.dev.parent = parent_dev;
5756 aux->ddc.dev.of_node = parent_dev->of_node;
5757#endif
5758
5759 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
5760 sizeof(aux->ddc.name));
5761
5762 return i2c_add_adapter(&aux->ddc);
5763}
5764
5765/**
5766 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
5767 * @port: The port to remove the I2C bus from
5768 */
5769static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
5770{
5771 i2c_del_adapter(&port->aux.ddc);
5772}
5773
5774/**
5775 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
5776 * @port: The port to check
5777 *
5778 * A single physical MST hub object can be represented in the topology
5779 * by multiple branches, with virtual ports between those branches.
5780 *
5781 * As of DP1.4, an MST hub with internal (virtual) ports must expose
5782 * certain DPCD registers over those ports. See sections 2.6.1.1.1
5783 * and 2.6.1.1.2 of Display Port specification v1.4 for details.
5784 *
5785 * May acquire mgr->lock
5786 *
5787 * Returns:
5788 * true if the port is a virtual DP peer device, false otherwise
5789 */
5790static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
5791{
5792 struct drm_dp_mst_port *downstream_port;
5793
5794 if (!port || port->dpcd_rev < DP_DPCD_REV_14)
5795 return false;
5796
5797 /* Virtual DP Sink (Internal Display Panel) */
5798 if (port->port_num >= 8)
5799 return true;
5800
5801 /* DP-to-HDMI Protocol Converter */
5802 if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
5803 !port->mcs &&
5804 port->ldps)
5805 return true;
5806
5807 /* DP-to-DP */
5808 mutex_lock(&port->mgr->lock);
5809 if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
5810 port->mstb &&
5811 port->mstb->num_ports == 2) {
5812 list_for_each_entry(downstream_port, &port->mstb->ports, next) {
5813 if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
5814 !downstream_port->input) {
5815 mutex_unlock(&port->mgr->lock);
5816 return true;
5817 }
5818 }
5819 }
5820 mutex_unlock(&port->mgr->lock);
5821
5822 return false;
5823}
5824
5825/**
5826 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
5827 * @port: The port to check. A leaf of the MST tree with an attached display.
5828 *
5829 * Depending on the situation, DSC may be enabled via the endpoint aux,
5830 * the immediately upstream aux, or the connector's physical aux.
5831 *
5832 * This is both the correct aux to read DSC_CAPABILITY and the
5833 * correct aux to write DSC_ENABLED.
5834 *
5835 * This operation can be expensive (up to four aux reads), so
5836 * the caller should cache the return.
5837 *
5838 * Returns:
5839 * NULL if DSC cannot be enabled on this port, otherwise the aux device
5840 */
5841struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
5842{
5843 struct drm_dp_mst_port *immediate_upstream_port;
5844 struct drm_dp_mst_port *fec_port;
5845 struct drm_dp_desc desc = {};
5846 u8 endpoint_fec;
5847 u8 endpoint_dsc;
5848
5849 if (!port)
5850 return NULL;
5851
5852 if (port->parent->port_parent)
5853 immediate_upstream_port = port->parent->port_parent;
5854 else
5855 immediate_upstream_port = NULL;
5856
5857 fec_port = immediate_upstream_port;
5858 while (fec_port) {
5859 /*
5860 * Each physical link (i.e. not a virtual port) between the
5861 * output and the primary device must support FEC
5862 */
5863 if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
5864 !fec_port->fec_capable)
5865 return NULL;
5866
5867 fec_port = fec_port->parent->port_parent;
5868 }
5869
5870 /* DP-to-DP peer device */
5871 if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
5872 u8 upstream_dsc;
5873
5874 if (drm_dp_dpcd_read(&port->aux,
5875 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5876 return NULL;
5877 if (drm_dp_dpcd_read(&port->aux,
5878 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5879 return NULL;
5880 if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
5881 DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
5882 return NULL;
5883
5884 /* Endpoint decompression with DP-to-DP peer device */
5885 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5886 (endpoint_fec & DP_FEC_CAPABLE) &&
5887 (upstream_dsc & DP_DSC_PASSTHROUGH_IS_SUPPORTED)) {
5888 port->passthrough_aux = &immediate_upstream_port->aux;
5889 return &port->aux;
5890 }
5891
5892 /* Virtual DPCD decompression with DP-to-DP peer device */
5893 return &immediate_upstream_port->aux;
5894 }
5895
5896 /* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
5897 if (drm_dp_mst_is_virtual_dpcd(port))
5898 return &port->aux;
5899
5900 /*
5901 * Synaptics quirk
5902 * Applies to ports for which:
5903 * - Physical aux has Synaptics OUI
5904 * - DPv1.4 or higher
5905 * - Port is on primary branch device
5906 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
5907 */
5908 if (drm_dp_read_desc(port->mgr->aux, &desc, true))
5909 return NULL;
5910
5911 if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
5912 port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
5913 port->parent == port->mgr->mst_primary) {
5914 u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
5915
5916 if (drm_dp_read_dpcd_caps(port->mgr->aux, dpcd_ext) < 0)
5917 return NULL;
5918
5919 if ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
5920 ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK)
5921 != DP_DWN_STRM_PORT_TYPE_ANALOG))
5922 return port->mgr->aux;
5923 }
5924
5925 /*
5926 * The check below verifies if the MST sink
5927 * connected to the GPU is capable of DSC -
5928 * therefore the endpoint needs to be
5929 * both DSC and FEC capable.
5930 */
5931 if (drm_dp_dpcd_read(&port->aux,
5932 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5933 return NULL;
5934 if (drm_dp_dpcd_read(&port->aux,
5935 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5936 return NULL;
5937 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5938 (endpoint_fec & DP_FEC_CAPABLE))
5939 return &port->aux;
5940
5941 return NULL;
5942}
5943EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
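Since the kernel-doc above notes the lookup can cost several aux reads, callers are expected to resolve it once and cache the result. A hedged sketch, where example_connector and its dsc_aux field are hypothetical driver state:

	/* Resolve once and cache; a NULL result means DSC cannot be
	 * enabled anywhere on this path. */
	if (!example_connector->dsc_aux)
		example_connector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);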