| File: | dev/pci/drm/i915/display/icl_dsi.c |
| Warning: | line 139, column 11: Dereference of null pointer |
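The flagged statement is line 139, `tmp |= *data++ << 8 * j;` in `add_payld_to_queue()`. `data` is `pkt.payload`, forwarded unchecked from `dsi_send_pkt_payld()` (line 199), and the analyzer follows a path where the packet carries `payload == NULL` together with a non-zero `payload_length`, so the first loop iteration dereferences a null pointer. Whether any caller can actually build such a packet is not visible in this file, so the report may be a false positive; still, an early return in `dsi_send_pkt_payld()` would make the path impossible. The sketch below is one hypothetical way to silence the report, not necessarily how the driver maintainers would address it:

```c
/*
 * Hypothetical guard, not part of the file listed below: treat a packet
 * without a payload as "nothing to queue" before walking the payload
 * bytes, so the payload == NULL path followed by the analyzer can never
 * reach the *data dereference on line 139.
 */
static int dsi_send_pkt_payld(struct intel_dsi_host *host,
        struct mipi_dsi_packet pkt)
{
    struct intel_dsi *intel_dsi = host->intel_dsi;
    struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);

    /* no payload to send: succeed without touching the queue */
    if (!pkt.payload || pkt.payload_length == 0)
        return 0;

    /* payload queue can accept 256 bytes, check limit */
    if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) {
        drm_err(&i915->drm, "payload size exceeds max queue limit\n");
        return -1;
    }

    /* load data into command payload queue */
    if (!add_payld_to_queue(host, pkt.payload, pkt.payload_length)) {
        drm_err(&i915->drm, "adding payload to queue failed\n");
        return -1;
    }

    return 0;
}
```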
| 1 | /* |
| 2 |  * Copyright © 2018 Intel Corporation |
| 3 |  * |
| 4 |  * Permission is hereby granted, free of charge, to any person obtaining a |
| 5 |  * copy of this software and associated documentation files (the "Software"), |
| 6 |  * to deal in the Software without restriction, including without limitation |
| 7 |  * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 8 |  * and/or sell copies of the Software, and to permit persons to whom the |
| 9 |  * Software is furnished to do so, subject to the following conditions: |
| 10 |  * |
| 11 |  * The above copyright notice and this permission notice (including the next |
| 12 |  * paragraph) shall be included in all copies or substantial portions of the |
| 13 |  * Software. |
| 14 |  * |
| 15 |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 16 |  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 17 |  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 18 |  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 19 |  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
| 20 |  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
| 21 |  * DEALINGS IN THE SOFTWARE. |
| 22 |  * |
| 23 |  * Authors: |
| 24 |  *   Madhav Chauhan <madhav.chauhan@intel.com> |
| 25 |  *   Jani Nikula <jani.nikula@intel.com> |
| 26 |  */ |
| 27 | |
| 28 | #include <drm/drm_atomic_helper.h> |
| 29 | #include <drm/drm_mipi_dsi.h> |
| 30 | |
| 31 | #include "intel_atomic.h" |
| 32 | #include "intel_combo_phy.h" |
| 33 | #include "intel_connector.h" |
| 34 | #include "intel_ddi.h" |
| 35 | #include "intel_dsi.h" |
| 36 | #include "intel_panel.h" |
| 37 | #include "intel_vdsc.h" |
| 38 | |
| 39 | static int header_credits_available(struct drm_i915_private *dev_priv, |
| 40 |         enum transcoder dsi_trans) |
| 41 | { |
| 42 |     return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK) |
| 43 |         >> FREE_HEADER_CREDIT_SHIFT; |
| 44 | } |
| 45 | |
| 46 | static int payload_credits_available(struct drm_i915_private *dev_priv, |
| 47 |         enum transcoder dsi_trans) |
| 48 | { |
| 49 |     return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK) |
| 50 |         >> FREE_PLOAD_CREDIT_SHIFT; |
| 51 | } |
| 52 | |
| 53 | static void wait_for_header_credits(struct drm_i915_private *dev_priv, |
| 54 |         enum transcoder dsi_trans) |
| 55 | { |
| 56 |     if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >= |
| 57 |             MAX_HEADER_CREDIT, 100)) |
| 58 |         drm_err(&dev_priv->drm, "DSI header credits not released\n"); |
| 59 | } |
| 60 | |
| 61 | static void wait_for_payload_credits(struct drm_i915_private *dev_priv, |
| 62 |         enum transcoder dsi_trans) |
| 63 | { |
| 64 |     if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >= |
| 65 |             MAX_PLOAD_CREDIT, 100)) |
| 66 |         drm_err(&dev_priv->drm, "DSI payload credits not released\n"); |
| 67 | } |
| 68 | |
| 69 | static enum transcoder dsi_port_to_transcoder(enum port port) |
| 70 | { |
| 71 |     if (port == PORT_A) |
| 72 |         return TRANSCODER_DSI_0; |
| 73 |     else |
| 74 |         return TRANSCODER_DSI_1; |
| 75 | } |
| 76 | |
| 77 | static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) |
| 78 | { |
| 79 |     struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
| 80 |     struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); |
| 81 |     struct mipi_dsi_device *dsi; |
| 82 |     enum port port; |
| 83 |     enum transcoder dsi_trans; |
| 84 |     int ret; |
| 85 | |
| 86 |     /* wait for header/payload credits to be released */ |
| 87 |     for_each_dsi_port(port, intel_dsi->ports) { |
| 88 |         dsi_trans = dsi_port_to_transcoder(port); |
| 89 |         wait_for_header_credits(dev_priv, dsi_trans); |
| 90 |         wait_for_payload_credits(dev_priv, dsi_trans); |
| 91 |     } |
| 92 | |
| 93 |     /* send nop DCS command */ |
| 94 |     for_each_dsi_port(port, intel_dsi->ports) { |
| 95 |         dsi = intel_dsi->dsi_hosts[port]->device; |
| 96 |         dsi->mode_flags |= MIPI_DSI_MODE_LPM; |
| 97 |         dsi->channel = 0; |
| 98 |         ret = mipi_dsi_dcs_nop(dsi); |
| 99 |         if (ret < 0) |
| 100 |             drm_err(&dev_priv->drm, |
| 101 |                 "error sending DCS NOP command\n"); |
| 102 |     } |
| 103 | |
| 104 |     /* wait for header credits to be released */ |
| 105 |     for_each_dsi_port(port, intel_dsi->ports) { |
| 106 |         dsi_trans = dsi_port_to_transcoder(port); |
| 107 |         wait_for_header_credits(dev_priv, dsi_trans); |
| 108 |     } |
| 109 | |
| 110 |     /* wait for LP TX in progress bit to be cleared */ |
| 111 |     for_each_dsi_port(port, intel_dsi->ports) { |
| 112 |         dsi_trans = dsi_port_to_transcoder(port); |
| 113 |         if (wait_for_us(!(intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) & |
| 114 |                 LPTX_IN_PROGRESS), 20)) |
| 115 |             drm_err(&dev_priv->drm, "LPTX bit not cleared\n"); |
| 116 |     } |
| 117 | } |
| 118 | |
| 119 | static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data, |
| 120 |         u32 len) |
| 121 | { |
| 122 |     struct intel_dsi *intel_dsi = host->intel_dsi; |
| 123 |     struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); |
| 124 |     enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); |
| 125 |     int free_credits; |
| 126 |     int i, j; |
| 127 | |
| 128 |     for (i = 0; i < len; i += 4) { |
| 129 |         u32 tmp = 0; |
| 130 | |
| 131 |         free_credits = payload_credits_available(dev_priv, dsi_trans); |
| 132 |         if (free_credits < 1) { |
| 133 |             drm_err(&dev_priv->drm, |
| 134 |                 "Payload credit not available\n"); |
| 135 |             return false; |
| 136 |         } |
| 137 | |
| 138 |         for (j = 0; j < min_t(u32, len - i, 4); j++) |
| 139 |             tmp |= *data++ << 8 * j; |
| 140 | |
| 141 |         intel_de_write(dev_priv, DSI_CMD_TXPYLD(dsi_trans), tmp); |
| 142 |     } |
| 143 | |
| 144 |     return true; |
| 145 | } |
| 146 | |
| 147 | static int dsi_send_pkt_hdr(struct intel_dsi_host *host, |
| 148 |         struct mipi_dsi_packet pkt, bool enable_lpdt) |
| 149 | { |
| 150 |     struct intel_dsi *intel_dsi = host->intel_dsi; |
| 151 |     struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); |
| 152 |     enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); |
| 153 |     u32 tmp; |
| 154 |     int free_credits; |
| 155 | |
| 156 |     /* check if header credit available */ |
| 157 |     free_credits = header_credits_available(dev_priv, dsi_trans); |
| 158 |     if (free_credits < 1) { |
| 159 |         drm_err(&dev_priv->drm, |
| 160 |             "send pkt header failed, not enough hdr credits\n"); |
| 161 |         return -1; |
| 162 |     } |
| 163 | |
| 164 |     tmp = intel_de_read(dev_priv, DSI_CMD_TXHDR(dsi_trans)); |
| 165 | |
| 166 |     if (pkt.payload) |
| 167 |         tmp |= PAYLOAD_PRESENT; |
| 168 |     else |
| 169 |         tmp &= ~PAYLOAD_PRESENT; |
| 170 | |
| 171 |     tmp &= ~VBLANK_FENCE; |
| 172 | |
| 173 |     if (enable_lpdt) |
| 174 |         tmp |= LP_DATA_TRANSFER; |
| 175 | |
| 176 |     tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK); |
| 177 |     tmp |= ((pkt.header[0] & VC_MASK) << VC_SHIFT); |
| 178 |     tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT); |
| 179 |     tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT); |
| 180 |     tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT); |
| 181 |     intel_de_write(dev_priv, DSI_CMD_TXHDR(dsi_trans), tmp); |
| 182 | |
| 183 |     return 0; |
| 184 | } |
| 185 | |
| 186 | static int dsi_send_pkt_payld(struct intel_dsi_host *host, |
| 187 |         struct mipi_dsi_packet pkt) |
| 188 | { |
| 189 |     struct intel_dsi *intel_dsi = host->intel_dsi; |
| 190 |     struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); |
| 191 | |
| 192 |     /* payload queue can accept *256 bytes*, check limit */ |
| 193 |     if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) { |
| 194 |         drm_err(&i915->drm, "payload size exceeds max queue limit\n"); |
| 195 |         return -1; |
| 196 |     } |
| 197 | |
| 198 |     /* load data into command payload queue */ |
| 199 |     if (!add_payld_to_queue(host, pkt.payload, |
| 200 |             pkt.payload_length)) { |
| 201 |         drm_err(&i915->drm, "adding payload to queue failed\n"); |
| 202 |         return -1; |
| 203 |     } |
| 204 | |
| 205 |     return 0; |
| 206 | } |
| 207 | |
| 208 | static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) |
| 209 | { |
| 210 |     struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
| 211 |     struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); |
| 212 |     enum phy phy; |
| 213 |     u32 tmp; |
| 214 |     int lane; |
| 215 | |
| 216 |     for_each_dsi_phy(phy, intel_dsi->phys) { |
| 217 |         /* |
| 218 |          * Program voltage swing and pre-emphasis level values as per |
| 219 |          * table in BSPEC under DDI buffer programing |
| 220 |          */ |
| 221 |         tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); |
| 222 |         tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); |
| 223 |         tmp |= SCALING_MODE_SEL(0x2); |
| 224 |         tmp |= TAP2_DISABLE | TAP3_DISABLE; |
| 225 |         tmp |= RTERM_SELECT(0x6); |
| 226 |         intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); |
| 227 | |
| 228 |         tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy)); |
| 229 |         tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); |
| 230 |         tmp |= SCALING_MODE_SEL(0x2); |
| 231 |         tmp |= TAP2_DISABLE | TAP3_DISABLE; |
| 232 |         tmp |= RTERM_SELECT(0x6); |
| 233 |         intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp); |
| 234 | |
| 235 |         tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy)); |
| 236 |         tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | |
| 237 |              RCOMP_SCALAR_MASK); |
| 238 |         tmp |= SWING_SEL_UPPER(0x2); |
| 239 |         tmp |= SWING_SEL_LOWER(0x2); |
| 240 |         tmp |= RCOMP_SCALAR(0x98); |
| 241 |         intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp); |
| 242 | |
| 243 |         tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy)); |
| 244 |         tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | |
| 245 |              RCOMP_SCALAR_MASK); |
| 246 |         tmp |= SWING_SEL_UPPER(0x2); |
| 247 |         tmp |= SWING_SEL_LOWER(0x2); |
| 248 |         tmp |= RCOMP_SCALAR(0x98); |
| 249 |         intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp); |
| 250 | |
| 251 |         tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy)); |
| 252 |         tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | |
| 253 |              CURSOR_COEFF_MASK); |
| 254 |         tmp |= POST_CURSOR_1(0x0); |
| 255 |         tmp |= POST_CURSOR_2(0x0); |
| 256 |         tmp |= CURSOR_COEFF(0x3f); |
| 257 |         intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp); |
| 258 | |
| 259 |         for (lane = 0; lane <= 3; lane++) { |
| 260 |             /* Bspec: must not use GRP register for write */ |
| 261 |             tmp = intel_de_read(dev_priv, |
| 262 |                 ICL_PORT_TX_DW4_LN(lane, phy)); |
| 263 |             tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | |
| 264 |                  CURSOR_COEFF_MASK); |
| 265 |             tmp |= POST_CURSOR_1(0x0); |
| 266 |             tmp |= POST_CURSOR_2(0x0); |
| 267 |             tmp |= CURSOR_COEFF(0x3f); |
| 268 |             intel_de_write(dev_priv, |
| 269 |                 ICL_PORT_TX_DW4_LN(lane, phy), tmp); |
| 270 |         } |
| 271 |     } |
| 272 | } |
| 273 | |
| 274 | static void configure_dual_link_mode(struct intel_encoder *encoder, |
| 275 |         const struct intel_crtc_state *pipe_config) |
| 276 | { |
| 277 |     struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
| 278 |     struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); |
| 279 |     u32 dss_ctl1; |
| 280 | |
| 281 |     dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1); |
| 282 |     dss_ctl1 |= SPLITTER_ENABLE; |
| 283 |     dss_ctl1 &= ~OVERLAP_PIXELS_MASK; |
| 284 |     dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap); |
| 285 | |
| 286 |     if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { |
| 287 |         const struct drm_display_mode *adjusted_mode = |
| 288 |             &pipe_config->hw.adjusted_mode; |
| 289 |         u32 dss_ctl2; |
| 290 |         u16 hactive = adjusted_mode->crtc_hdisplay; |
| 291 |         u16 dl_buffer_depth; |
| 292 | |
| 293 |         dss_ctl1 &= ~DUAL_LINK_MODE_INTERLEAVE; |
| 294 |         dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap; |
| 295 | |
| 296 |         if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH) |
| 297 |             drm_err(&dev_priv->drm, |
| 298 |                 "DL buffer depth exceed max value\n"); |
| 299 | |
| 300 |         dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK; |
| 301 |         dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); |
| 302 |         dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2); |
| 303 |         dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK; |
| 304 |         dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); |
| 305 |         intel_de_write(dev_priv, DSS_CTL2, dss_ctl2); |
| 306 |     } else { |
| 307 |         /* Interleave */ |
| 308 |         dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE; |
| 309 |     } |
| 310 | |
| 311 |     intel_de_write(dev_priv, DSS_CTL1, dss_ctl1); |
| 312 | } |
| 313 | |
| 314 | /* aka DSI 8X clock */ |
| 315 | static int afe_clk(struct intel_encoder *encoder, |
| 316 |         const struct intel_crtc_state *crtc_state) |
| 317 | { |
| 318 |     struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); |
| 319 |     int bpp; |
| 320 | |
| 321 |     if (crtc_state->dsc.compression_enable) |
| 322 |         bpp = crtc_state->dsc.compressed_bpp; |
| 323 |     else |
| 324 |         bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); |
| 325 | |
| 326 |     return DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp, intel_dsi->lane_count); |
| 327 | } |
| 328 | |
| 329 | static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder, |
| 330 |         const struct intel_crtc_state *crtc_state) |
| 331 | { |
| 332 |     struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
| 333 |     struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); |
| 334 |     enum port port; |
| 335 |     int afe_clk_khz; |
| 336 |     u32 esc_clk_div_m; |
| 337 | |
| 338 |     afe_clk_khz = afe_clk(encoder, crtc_state); |
| 339 |     esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK); |
| 340 | |
| 341 |     for_each_dsi_port(port, intel_dsi->ports) { |
| 342 |         intel_de_write(dev_priv, ICL_DSI_ESC_CLK_DIV(port), |
| 343 |             esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); |
| 344 |         intel_de_posting_read(dev_priv, ICL_DSI_ESC_CLK_DIV(port)); |
| 345 |     } |
| 346 | |
| 347 |     for_each_dsi_port(port, intel_dsi->ports) { |
| 348 |         intel_de_write(dev_priv, ICL_DPHY_ESC_CLK_DIV(port), |
| 349 |             esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); |
| 350 |         intel_de_posting_read(dev_priv, ICL_DPHY_ESC_CLK_DIV(port)); |
| 351 |     } |
| 352 | } |
| 353 | |
| 354 | static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv, |
| 355 |         struct intel_dsi *intel_dsi) |
| 356 | { |
| 357 |     enum port port; |
| 358 | |
| 359 |     for_each_dsi_port(port, intel_dsi->ports) { |
| 360 |         drm_WARN_ON(&dev_priv->drm, intel_dsi->io_wakeref[port]); |
| 361 |         intel_dsi->io_wakeref[port] = |
| 362 |             intel_display_power_get(dev_priv, |
| 363 |                 port == PORT_A ? |
| 364 |                 POWER_DOMAIN_PORT_DDI_A_IO : |
| 365 |                 POWER_DOMAIN_PORT_DDI_B_IO); |
| 366 |     } |
| 367 | } |
| 368 | |
| 369 | static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) |
| 370 | { |
| 371 |     struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
| 372 |     struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); |
| 373 |     enum port port; |
| 374 |     u32 tmp; |
| 375 | |
| 376 |     for_each_dsi_port(port, intel_dsi->ports) { |
| 377 |         tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port)); |
| 378 |         tmp |= COMBO_PHY_MODE_DSI; |
| 379 |         intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp); |
| 380 |     } |
| 381 | |
| 382 |     get_dsi_io_power_domains(dev_priv, intel_dsi); |
| 383 | } |
| 384 | |
| 385 | static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) |
| 386 | { |
| 387 |     struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
| 388 |     struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); |
| 389 |     enum phy phy; |
| 390 | |
| 391 |     for_each_dsi_phy(phy, intel_dsi->phys) |
| 392 |         intel_combo_phy_power_up_lanes(dev_priv, phy, true, |
| 393 |             intel_dsi->lane_count, false); |
| 394 | } |
| 395 | |
| 396 | static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) |
| 397 | { |
| 398 |     struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
| 399 |     struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); |
| 400 |     enum phy phy; |
| 401 |     u32 tmp; |
| 402 |     int lane; |
| 403 | |
| 404 |     /* Step 4b(i) set loadgen select for transmit and aux lanes */ |
| 405 |     for_each_dsi_phy(phy, intel_dsi->phys) { |
| 406 |         tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy)); |
| 407 |         tmp &= ~LOADGEN_SELECT; |
| 408 |         intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp); |
| 409 |         for (lane = 0; lane <= 3; lane++) { |
| 410 |             tmp = intel_de_read(dev_priv, |
| 411 |                 ICL_PORT_TX_DW4_LN(lane, phy)); |
| 412 |             tmp &= ~LOADGEN_SELECT; |
| 413 |             if (lane != 2) |
| 414 |                 tmp |= LOADGEN_SELECT; |
| 415 |             intel_de_write(dev_priv, |
| 416 |                 ICL_PORT_TX_DW4_LN(lane, phy), tmp); |
| 417 |         } |
| 418 |     } |
| 419 | |
| 420 |     /* Step 4b(ii) set latency optimization for transmit and aux lanes */ |
| 421 |     for_each_dsi_phy(phy, intel_dsi->phys) { |
| 422 |         tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy)); |
| 423 |         tmp &= ~FRC_LATENCY_OPTIM_MASK; |
| 424 |         tmp |= FRC_LATENCY_OPTIM_VAL(0x5); |
| 425 |         intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp); |
| 426 |         tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy)); |
| 427 |         tmp &= ~FRC_LATENCY_OPTIM_MASK; |
| 428 |         tmp |= FRC_LATENCY_OPTIM_VAL(0x5); |
| 429 |         intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp); |
| 430 | |
| 431 |         /* For EHL, TGL, set latency optimization for PCS_DW1 lanes */ |
| 432 |         if (IS_ELKHARTLAKE(dev_priv) || (INTEL_GEN(dev_priv) >= 12)) { |
| 433 |             tmp = intel_de_read(dev_priv, |
| 434 |                 ICL_PORT_PCS_DW1_AUX(phy)); |
| 435 |             tmp &= ~LATENCY_OPTIM_MASK; |
| 436 |             tmp |= LATENCY_OPTIM_VAL(0); |
| 437 |             intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), |
| 438 |                 tmp); |
| 439 | |
| 440 |             tmp = intel_de_read(dev_priv, |
| 441 |                 ICL_PORT_PCS_DW1_LN0(phy)); |
| 442 |             tmp &= ~LATENCY_OPTIM_MASK; |
| 443 |             tmp |= LATENCY_OPTIM_VAL(0x1); |
| 444 |             intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), |
| 445 |                 tmp); |
| 446 |         } |
| 447 |     } |
| 448 | |
| 449 | } |
| 450 | |
| 451 | static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) |
| 452 | { |
| 453 |     struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
| 454 |     struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); |
| 455 |     u32 tmp; |
| 456 |     enum phy phy; |
| 457 | |
| 458 |     /* clear common keeper enable bit */ |
| 459 |     for_each_dsi_phy(phy, intel_dsi->phys) { |
| 460 |         tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy)); |
| 461 |         tmp &= ~COMMON_KEEPER_EN; |
| 462 |         intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp); |
| 463 |         tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy)); |
| 464 |         tmp &= ~COMMON_KEEPER_EN; |
| 465 |         intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), tmp); |
| 466 |     } |
| 467 | |
| 468 |     /* |
| 469 |      * Set SUS Clock Config bitfield to 11b |
| 470 |      * Note: loadgen select program is done |
| 471 |      * as part of lane phy sequence configuration |
| 472 |      */ |
| 473 |     for_each_dsi_phy(phy, intel_dsi->phys) { |
| 474 |         tmp = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy)); |
| 475 |         tmp |= SUS_CLOCK_CONFIG; |
| 476 |         intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), tmp); |
| 477 |     } |
| 478 | |
| 479 |     /* Clear training enable to change swing values */ |
| 480 |     for_each_dsi_phy(phy, intel_dsi->phys) { |
| 481 |         tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); |
| 482 |         tmp &= ~TX_TRAINING_EN; |
| 483 |         intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); |
| 484 |         tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy)); |
| 485 |         tmp &= ~TX_TRAINING_EN; |
| 486 |         intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp); |
| 487 |     } |
| 488 | |
| 489 |     /* Program swing and de-emphasis */ |
| 490 |     dsi_program_swing_and_deemphasis(encoder); |
| 491 | |
| 492 |     /* Set training enable to trigger update */ |
| 493 |     for_each_dsi_phy(phy, intel_dsi->phys) { |
| 494 |         tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); |
| 495 |         tmp |= TX_TRAINING_EN; |
| 496 |         intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); |
| 497 |         tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy)); |
| 498 |         tmp |= TX_TRAINING_EN; |
| 499 |         intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp); |
| 500 |     } |
| 501 | } |
| 502 | |
| 503 | static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder) |
| 504 | { |
| 505 |     struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
| 506 |     struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); |
| 507 |     u32 tmp; |
| 508 |     enum port port; |
| 509 | |
| 510 |     for_each_dsi_port(port, intel_dsi->ports) { |
| 511 |         tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port)); |
| 512 |         tmp |= DDI_BUF_CTL_ENABLE; |
| 513 |         intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp); |
| 514 | |
| 515 |         if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & |
| 516 |                 DDI_BUF_IS_IDLE), |
| 517 |                 500)) |
| 518 |             drm_err(&dev_priv->drm, "DDI port:%c buffer idle\n", |
| 519 |                 port_name(port)); |
| 520 |     } |
| 521 | } |
| 522 | |
| 523 | static void |
| 524 | gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, |
| 525 |         const struct intel_crtc_state *crtc_state) |
| 526 | { |
| 527 |     struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
| 528 |     struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); |
| 529 |     u32 tmp; |
| 530 |     enum port port; |
| 531 |     enum phy phy; |
| 532 | |
| 533 |     /* Program T-INIT master registers */ |
| 534 |     for_each_dsi_port(port, intel_dsi->ports) { |
| 535 |         tmp = intel_de_read(dev_priv, ICL_DSI_T_INIT_MASTER(port)); |
| 536 |         tmp &= ~MASTER_INIT_TIMER_MASK; |
| 537 |         tmp |= intel_dsi->init_count; |
| 538 |         intel_de_write(dev_priv, ICL_DSI_T_INIT_MASTER(port), tmp); |
| 539 |     } |
| 540 | |
| 541 |     /* Program DPHY clock lanes timings */ |
| 542 |     for_each_dsi_port(port, intel_dsi->ports) { |
| 543 |         intel_de_write(dev_priv, DPHY_CLK_TIMING_PARAM(port), |
| 544 |             intel_dsi->dphy_reg); |
| 545 | |
| 546 |         /* shadow register inside display core */ |
| 547 |         intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port), |
| 548 |             intel_dsi->dphy_reg); |
| 549 |     } |
| 550 | |
| 551 |     /* Program DPHY data lanes timings */ |
| 552 |     for_each_dsi_port(port, intel_dsi->ports) { |
| 553 |         intel_de_write(dev_priv, DPHY_DATA_TIMING_PARAM(port), |
| 554 |             intel_dsi->dphy_data_lane_reg); |
| 555 | |
| 556 |         /* shadow register inside display core */ |
| 557 |         intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port), |
| 558 |             intel_dsi->dphy_data_lane_reg); |
| 559 |     } |
| 560 | |
| 561 |     /* |
| 562 |      * If DSI link operating at or below an 800 MHz, |
| 563 |      * TA_SURE should be override and programmed to |
| 564 |      * a value '0' inside TA_PARAM_REGISTERS otherwise |
| 565 |      * leave all fields at HW default values. |
| 566 |      */ |
| 567 |     if (IS_GEN(dev_priv, 11)) { |
| 568 |         if (afe_clk(encoder, crtc_state) <= 800000) { |
| 569 |             for_each_dsi_port(port, intel_dsi->ports) { |
| 570 |                 tmp = intel_de_read(dev_priv, |
| 571 |                     DPHY_TA_TIMING_PARAM(port)); |
| 572 |                 tmp &= ~TA_SURE_MASK; |
| 573 |                 tmp |= TA_SURE_OVERRIDE | TA_SURE(0); |
| 574 |                 intel_de_write(dev_priv, |
| 575 |                     DPHY_TA_TIMING_PARAM(port), |
| 576 |                     tmp); |
| 577 | |
| 578 |                 /* shadow register inside display core */ |
| 579 |                 tmp = intel_de_read(dev_priv, |
| 580 |                     DSI_TA_TIMING_PARAM(port)); |
| 581 |                 tmp &= ~TA_SURE_MASK; |
| 582 |                 tmp |= TA_SURE_OVERRIDE | TA_SURE(0); |
| 583 |                 intel_de_write(dev_priv, |
| 584 |                     DSI_TA_TIMING_PARAM(port), tmp); |
| 585 |             } |
| 586 |         } |
| 587 |     } |
| 588 | |
| 589 |     if (IS_ELKHARTLAKE(dev_priv)) { |
| 590 |         for_each_dsi_phy(phy, intel_dsi->phys) { |
| 591 |             tmp = intel_de_read(dev_priv, ICL_DPHY_CHKN(phy)); |
| 592 |             tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP; |
| 593 |             intel_de_write(dev_priv, ICL_DPHY_CHKN(phy), tmp); |
| 594 |         } |
| 595 |     } |
| 596 | } |
| 597 | |
| 598 | static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) |
| 599 | { |
| 600 |     struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
| 601 |     struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); |
| 602 |     u32 tmp; |
| 603 |     enum phy phy; |
| 604 | |
| 605 |     mutex_lock(&dev_priv->dpll.lock); |
| 606 |     tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); |
| 607 |     for_each_dsi_phy(phy, intel_dsi->phys) |
| 608 |         tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); |
| 609 | |
| 610 |     intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp); |
| 611 |     mutex_unlock(&dev_priv->dpll.lock); |
| 612 | } |
| 613 | |
| 614 | static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) |
| 615 | { |
| 616 |     struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
| 617 |     struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); |
| 618 |     u32 tmp; |
| 619 |     enum phy phy; |
| 620 | |
| 621 |     mutex_lock(&dev_priv->dpll.lock); |
| 622 |     tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); |
| 623 |     for_each_dsi_phy(phy, intel_dsi->phys) |
| 624 |         tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); |
| 625 | |
| 626 |     intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp); |
| 627 |     mutex_unlock(&dev_priv->dpll.lock); |
| 628 | } |
| 629 | ||||
| 630 | static void gen11_dsi_map_pll(struct intel_encoder *encoder, | |||
| 631 | const struct intel_crtc_state *crtc_state) | |||
| 632 | { | |||
| 633 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(encoder->base.dev); | |||
| 634 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); | |||
| 635 | struct intel_shared_dpll *pll = crtc_state->shared_dpll; | |||
| 636 | enum phy phy; | |||
| 637 | u32 val; | |||
| 638 | ||||
| 639 | mutex_lock(&dev_priv->dpll.lock)rw_enter_write(&dev_priv->dpll.lock); | |||
| 640 | ||||
| 641 | val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0((const i915_reg_t){ .reg = (0x164280) })); | |||
| 642 | for_each_dsi_phy(phy, intel_dsi->phys)for ((phy) = PHY_A; (phy) < I915_MAX_PHYS; (phy)++) if (!( (intel_dsi->phys) & (1UL << (phy)))) {} else { | |||
| 643 | val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy)(3 << ((phy) * 2)); | |||
| 644 | val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy)((pll->info->id) << ((phy) * 2)); | |||
| 645 | } | |||
| 646 | intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0((const i915_reg_t){ .reg = (0x164280) }), val); | |||
| 647 | ||||
| 648 | for_each_dsi_phy(phy, intel_dsi->phys)for ((phy) = PHY_A; (phy) < I915_MAX_PHYS; (phy)++) if (!( (intel_dsi->phys) & (1UL << (phy)))) {} else { | |||
| 649 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 12) | |||
| 650 | val |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)(1 << (((const u32 []){ 10, 11, 24 })[phy])); | |||
| 651 | else | |||
| 652 | val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)(1 << (((const u32 []){ 10, 11, 24 })[phy])); | |||
| 653 | } | |||
| 654 | intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0((const i915_reg_t){ .reg = (0x164280) }), val); | |||
| 655 | ||||
| 656 | intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0((const i915_reg_t){ .reg = (0x164280) })); | |||
| 657 | ||||
| 658 | mutex_unlock(&dev_priv->dpll.lock)rw_exit_write(&dev_priv->dpll.lock); | |||
| 659 | } | |||
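| | /* | |||
| | * Editor's note (illustrative example, not part of the original source): | |||
| | * each combo PHY owns a 2-bit clock-select field in ICL_DPCLKA_CFGCR0, | |||
| | * so mapping shared DPLL id 1 onto PHY_A (assuming PHY_A == 0) programs | |||
| | * ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(1, PHY_A) == 1 << 0 into bits 1:0. | |||
| | */ | |||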
| 660 | ||||
| 661 | static void | |||
| 662 | gen11_dsi_configure_transcoder(struct intel_encoder *encoder, | |||
| 663 | const struct intel_crtc_state *pipe_config) | |||
| 664 | { | |||
| 665 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(encoder->base.dev); | |||
| 666 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); | |||
| 667 | struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc)({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = (pipe_config->uapi.crtc); (struct intel_crtc *)( (char * )__mptr - __builtin_offsetof(struct intel_crtc, base) );}); | |||
| 668 | enum pipe pipe = intel_crtc->pipe; | |||
| 669 | u32 tmp; | |||
| 670 | enum port port; | |||
| 671 | enum transcoder dsi_trans; | |||
| 672 | ||||
| 673 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 674 | dsi_trans = dsi_port_to_transcoder(port); | |||
| 675 | tmp = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)((const i915_reg_t){ .reg = (((0x6b030) + ((dsi_trans) - TRANSCODER_DSI_0 ) * ((0x6b830) - (0x6b030)))) })); | |||
| 676 | ||||
| 677 | if (intel_dsi->eotp_pkt) | |||
| 678 | tmp &= ~EOTP_DISABLED(1 << 0); | |||
| 679 | else | |||
| 680 | tmp |= EOTP_DISABLED(1 << 0); | |||
| 681 | ||||
| 682 | /* enable link calibration if freq > 1.5Gbps */ | |||
| 683 | if (afe_clk(encoder, pipe_config) >= 1500 * 1000) { | |||
| 684 | tmp &= ~LINK_CALIBRATION_MASK(0x3 << 4); | |||
| 685 | tmp |= CALIBRATION_ENABLED_INITIAL_ONLY(0x2 << 4); | |||
| 686 | } | |||
| 687 | ||||
| 688 | /* configure continuous clock */ | |||
| 689 | tmp &= ~CONTINUOUS_CLK_MASK(0x3 << 8); | |||
| 690 | if (intel_dsi->clock_stop) | |||
| 691 | tmp |= CLK_ENTER_LP_AFTER_DATA(0x0 << 8); | |||
| 692 | else | |||
| 693 | tmp |= CLK_HS_CONTINUOUS(0x3 << 8); | |||
| 694 | ||||
| 695 | /* configure buffer threshold limit to minimum */ | |||
| 696 | tmp &= ~PIX_BUF_THRESHOLD_MASK(0x3 << 10); | |||
| 697 | tmp |= PIX_BUF_THRESHOLD_1_4(0x0 << 10); | |||
| 698 | ||||
| 699 | /* set virtual channel to '0' */ | |||
| 700 | tmp &= ~PIX_VIRT_CHAN_MASK(0x3 << 12); | |||
| 701 | tmp |= PIX_VIRT_CHAN(0)((0) << 12); | |||
| 702 | ||||
| 703 | /* program BGR transmission */ | |||
| 704 | if (intel_dsi->bgr_enabled) | |||
| 705 | tmp |= BGR_TRANSMISSION(1 << 15); | |||
| 706 | ||||
| 707 | /* select pixel format */ | |||
| 708 | tmp &= ~PIX_FMT_MASK(0x3 << 16); | |||
| 709 | if (pipe_config->dsc.compression_enable) { | |||
| 710 | tmp |= PIX_FMT_COMPRESSED(0x6 << 16); | |||
| 711 | } else { | |||
| 712 | switch (intel_dsi->pixel_format) { | |||
| 713 | default: | |||
| 714 | MISSING_CASE(intel_dsi->pixel_format)({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n" , "intel_dsi->pixel_format", (long)(intel_dsi->pixel_format )); __builtin_expect(!!(__ret), 0); }); | |||
| 715 | fallthroughdo {} while (0); | |||
| 716 | case MIPI_DSI_FMT_RGB565: | |||
| 717 | tmp |= PIX_FMT_RGB565(0x0 << 16); | |||
| 718 | break; | |||
| 719 | case MIPI_DSI_FMT_RGB666_PACKED: | |||
| 720 | tmp |= PIX_FMT_RGB666_PACKED(0x1 << 16); | |||
| 721 | break; | |||
| 722 | case MIPI_DSI_FMT_RGB666: | |||
| 723 | tmp |= PIX_FMT_RGB666_LOOSE(0x2 << 16); | |||
| 724 | break; | |||
| 725 | case MIPI_DSI_FMT_RGB888: | |||
| 726 | tmp |= PIX_FMT_RGB888(0x3 << 16); | |||
| 727 | break; | |||
| 728 | } | |||
| 729 | } | |||
| 730 | ||||
| 731 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 12) { | |||
| 732 | if (is_vid_mode(intel_dsi)) | |||
| 733 | tmp |= BLANKING_PACKET_ENABLE(1 << 2); | |||
| 734 | } | |||
| 735 | ||||
| 736 | /* program DSI operation mode */ | |||
| 737 | if (is_vid_mode(intel_dsi)) { | |||
| 738 | tmp &= ~OP_MODE_MASK(0x3 << 28); | |||
| 739 | switch (intel_dsi->video_mode_format) { | |||
| 740 | default: | |||
| 741 | MISSING_CASE(intel_dsi->video_mode_format)({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n" , "intel_dsi->video_mode_format", (long)(intel_dsi->video_mode_format )); __builtin_expect(!!(__ret), 0); }); | |||
| 742 | fallthroughdo {} while (0); | |||
| 743 | case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS(2 << 0): | |||
| 744 | tmp |= VIDEO_MODE_SYNC_EVENT(0x2 << 28); | |||
| 745 | break; | |||
| 746 | case VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE(1 << 0): | |||
| 747 | tmp |= VIDEO_MODE_SYNC_PULSE(0x3 << 28); | |||
| 748 | break; | |||
| 749 | } | |||
| 750 | } else { | |||
| 751 | /* | |||
| 752 | * FIXME: Retrieve this info from VBT. | |||
| 753 | * As per the spec when dsi transcoder is operating | |||
| 754 | * in TE GATE mode, TE comes from GPIO | |||
| 755 | * which is UTIL PIN for DSI 0. | |||
| 756 | * It is also assumed that this GPIO is not used | |||
| 757 | * for any other purpose. | |||
| 758 | */ | |||
| 759 | tmp &= ~OP_MODE_MASK(0x3 << 28); | |||
| 760 | tmp |= CMD_MODE_TE_GATE(0x1 << 28); | |||
| 761 | tmp |= TE_SOURCE_GPIO(1 << 27); | |||
| 762 | } | |||
| 763 | ||||
| 764 | intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)((const i915_reg_t){ .reg = (((0x6b030) + ((dsi_trans) - TRANSCODER_DSI_0 ) * ((0x6b830) - (0x6b030)))) }), tmp); | |||
| 765 | } | |||
| 766 | ||||
| 767 | /* enable port sync mode if dual link */ | |||
| 768 | if (intel_dsi->dual_link) { | |||
| 769 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 770 | dsi_trans = dsi_port_to_transcoder(port); | |||
| 771 | tmp = intel_de_read(dev_priv, | |||
| 772 | TRANS_DDI_FUNC_CTL2(dsi_trans)((const i915_reg_t){ .reg = (((&(dev_priv)->__info)-> trans_offsets[(dsi_trans)] - (&(dev_priv)->__info)-> trans_offsets[TRANSCODER_A] + (0x60404) + ((&(dev_priv)-> __info)->display_mmio_offset))) })); | |||
| 773 | tmp |= PORT_SYNC_MODE_ENABLE((u32)((1UL << (4)) + 0)); | |||
| 774 | intel_de_write(dev_priv, | |||
| 775 | TRANS_DDI_FUNC_CTL2(dsi_trans)((const i915_reg_t){ .reg = (((&(dev_priv)->__info)-> trans_offsets[(dsi_trans)] - (&(dev_priv)->__info)-> trans_offsets[TRANSCODER_A] + (0x60404) + ((&(dev_priv)-> __info)->display_mmio_offset))) }), tmp); | |||
| 776 | } | |||
| 777 | ||||
| 778 | /* configure stream splitting */ | |||
| 779 | configure_dual_link_mode(encoder, pipe_config); | |||
| 780 | } | |||
| 781 | ||||
| 782 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 783 | dsi_trans = dsi_port_to_transcoder(port); | |||
| 784 | ||||
| 785 | /* select data lane width */ | |||
| 786 | tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans)((const i915_reg_t){ .reg = (((&(dev_priv)->__info)-> trans_offsets[(dsi_trans)] - (&(dev_priv)->__info)-> trans_offsets[TRANSCODER_A] + (0x60400) + ((&(dev_priv)-> __info)->display_mmio_offset))) })); | |||
| 787 | tmp &= ~DDI_PORT_WIDTH_MASK(7 << 1); | |||
| 788 | tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count)(((intel_dsi->lane_count) - 1) << 1); | |||
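| | /* | |||
| | * Editor's note (illustrative example, not part of the original source): | |||
| | * DDI_PORT_WIDTH() encodes lane_count - 1, so a 4-lane DSI link writes | |||
| | * the value 3 into the 3-bit field masked by DDI_PORT_WIDTH_MASK. | |||
| | */ | |||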
| 789 | ||||
| 790 | /* select input pipe */ | |||
| 791 | tmp &= ~TRANS_DDI_EDP_INPUT_MASK(7 << 12); | |||
| 792 | switch (pipe) { | |||
| 793 | default: | |||
| 794 | MISSING_CASE(pipe)({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n" , "pipe", (long)(pipe)); __builtin_expect(!!(__ret), 0); }); | |||
| 795 | fallthroughdo {} while (0); | |||
| 796 | case PIPE_A: | |||
| 797 | tmp |= TRANS_DDI_EDP_INPUT_A_ON(0 << 12); | |||
| 798 | break; | |||
| 799 | case PIPE_B: | |||
| 800 | tmp |= TRANS_DDI_EDP_INPUT_B_ONOFF(5 << 12); | |||
| 801 | break; | |||
| 802 | case PIPE_C: | |||
| 803 | tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF(6 << 12); | |||
| 804 | break; | |||
| 805 | case PIPE_D: | |||
| 806 | tmp |= TRANS_DDI_EDP_INPUT_D_ONOFF(7 << 12); | |||
| 807 | break; | |||
| 808 | } | |||
| 809 | ||||
| 810 | /* enable DDI buffer */ | |||
| 811 | tmp |= TRANS_DDI_FUNC_ENABLE(1 << 31); | |||
| 812 | intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans)((const i915_reg_t){ .reg = (((&(dev_priv)->__info)-> trans_offsets[(dsi_trans)] - (&(dev_priv)->__info)-> trans_offsets[TRANSCODER_A] + (0x60400) + ((&(dev_priv)-> __info)->display_mmio_offset))) }), tmp); | |||
| 813 | } | |||
| 814 | ||||
| 815 | /* wait for link ready */ | |||
| 816 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 817 | dsi_trans = dsi_port_to_transcoder(port); | |||
| 818 | if (wait_for_us((intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)) & | |||
| 819 | LINK_READY), 2500)) | |||
| 820 | drm_err(&dev_priv->drm, "DSI link not ready\n"); | |||
| 821 | } | |||
| 822 | } | |||
| 823 | ||||
| 824 | static void | |||
| 825 | gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, | |||
| 826 | const struct intel_crtc_state *crtc_state) | |||
| 827 | { | |||
| 828 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(encoder->base.dev); | |||
| 829 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); | |||
| 830 | const struct drm_display_mode *adjusted_mode = | |||
| 831 | &crtc_state->hw.adjusted_mode; | |||
| 832 | enum port port; | |||
| 833 | enum transcoder dsi_trans; | |||
| 834 | /* horizontal timings */ | |||
| 835 | u16 htotal, hactive, hsync_start, hsync_end, hsync_size; | |||
| 836 | u16 hback_porch; | |||
| 837 | /* vertical timings */ | |||
| 838 | u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift; | |||
| 839 | int mul = 1, div = 1; | |||
| 840 | ||||
| 841 | /* | |||
| 842 | * Adjust horizontal timings (htotal, hsync_start, hsync_end) to account | |||
| 843 | * for slower link speed if DSC is enabled. | |||
| 844 | * | |||
| 845 | * The compression frequency ratio is the ratio between compressed and | |||
| 846 | * non-compressed link speeds, and simplifies down to the ratio between | |||
| 847 | * compressed and non-compressed bpp. | |||
| 848 | */ | |||
| 849 | if (crtc_state->dsc.compression_enable) { | |||
| 850 | mul = crtc_state->dsc.compressed_bpp; | |||
| 851 | div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); | |||
| 852 | } | |||
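| | /* | |||
| | * Editor's note (worked example, not part of the original source): for | |||
| | * an uncompressed MIPI_DSI_FMT_RGB888 stream (24 bpp) with DSC giving | |||
| | * compressed_bpp == 12, mul/div == 12/24, so every horizontal timing | |||
| | * computed below is scaled (rounded up) to half its uncompressed value. | |||
| | */ | |||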
| 853 | ||||
| 854 | hactive = adjusted_mode->crtc_hdisplay; | |||
| 855 | ||||
| 856 | if (is_vid_mode(intel_dsi)) | |||
| 857 | htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div)(((adjusted_mode->crtc_htotal * mul) + ((div) - 1)) / (div )); | |||
| 858 | else | |||
| 859 | htotal = DIV_ROUND_UP((hactive + 160) * mul, div)((((hactive + 160) * mul) + ((div) - 1)) / (div)); | |||
| 860 | ||||
| 861 | hsync_start = DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div)(((adjusted_mode->crtc_hsync_start * mul) + ((div) - 1)) / (div)); | |||
| 862 | hsync_end = DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div)(((adjusted_mode->crtc_hsync_end * mul) + ((div) - 1)) / ( div)); | |||
| 863 | hsync_size = hsync_end - hsync_start; | |||
| 864 | hback_porch = (adjusted_mode->crtc_htotal - | |||
| 865 | adjusted_mode->crtc_hsync_end); | |||
| 866 | vactive = adjusted_mode->crtc_vdisplay; | |||
| 867 | ||||
| 868 | if (is_vid_mode(intel_dsi)) { | |||
| 869 | vtotal = adjusted_mode->crtc_vtotal; | |||
| 870 | } else { | |||
| 871 | int bpp, line_time_us, byte_clk_period_ns; | |||
| 872 | ||||
| 873 | if (crtc_state->dsc.compression_enable) | |||
| 874 | bpp = crtc_state->dsc.compressed_bpp; | |||
| 875 | else | |||
| 876 | bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); | |||
| 877 | ||||
| 878 | byte_clk_period_ns = 1000000 / afe_clk(encoder, crtc_state); | |||
| 879 | line_time_us = (htotal * (bpp / 8) * byte_clk_period_ns) / (1000 * intel_dsi->lane_count); | |||
| 880 | vtotal = vactive + DIV_ROUND_UP(400, line_time_us)(((400) + ((line_time_us) - 1)) / (line_time_us)); | |||
| 881 | } | |||
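| | /* | |||
| | * Editor's note (worked example with assumed values, not part of the | |||
| | * original source): with bpp == 24, lane_count == 4, htotal == 1000 and | |||
| | * afe_clk() == 500000 (kHz), byte_clk_period_ns == 1000000 / 500000 == 2, | |||
| | * line_time_us == (1000 * 3 * 2) / (1000 * 4) == 1, and the command-mode | |||
| | * vtotal becomes vactive + DIV_ROUND_UP(400, 1) == vactive + 400 lines. | |||
| | */ | |||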
| 882 | vsync_start = adjusted_mode->crtc_vsync_start; | |||
| 883 | vsync_end = adjusted_mode->crtc_vsync_end; | |||
| 884 | vsync_shift = hsync_start - htotal / 2; | |||
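| | /* | |||
| | * Editor's note (illustrative example, not part of the original source): | |||
| | * with hsync_start == 2100 and htotal == 2200, vsync_shift == 2100 - | |||
| | * 2200 / 2 == 1000; the value is only written to VSYNCSHIFT for video | |||
| | * mode further below. | |||
| | */ | |||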
| 885 | ||||
| 886 | if (intel_dsi->dual_link) { | |||
| 887 | hactive /= 2; | |||
| 888 | if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK1) | |||
| 889 | hactive += intel_dsi->pixel_overlap; | |||
| 890 | htotal /= 2; | |||
| 891 | } | |||
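| | /* | |||
| | * Editor's note (illustrative example, not part of the original source): | |||
| | * in dual-link operation each port drives half the line, e.g. a 1920 | |||
| | * pixel wide mode becomes hactive == 960 per port (plus pixel_overlap | |||
| | * for the front/back split), with htotal halved to match. | |||
| | */ | |||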
| 892 | ||||
| 893 | /* minimum hactive as per bspec: 256 pixels */ | |||
| 894 | if (adjusted_mode->crtc_hdisplay < 256) | |||
| 895 | drm_err(&dev_priv->drm, "hactive is less than 256 pixels\n"); | |||
| 896 | ||||
| 897 | /* if RGB666 format, then hactive must be multiple of 4 pixels */ | |||
| 898 | if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0) | |||
| 899 | drm_err(&dev_priv->drm, | |||
| 900 | "hactive pixels are not a multiple of 4\n"); | |||
| 901 | ||||
| 902 | /* program TRANS_HTOTAL register */ | |||
| 903 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 904 | dsi_trans = dsi_port_to_transcoder(port); | |||
| 905 | intel_de_write(dev_priv, HTOTAL(dsi_trans)((const i915_reg_t){ .reg = (((&(dev_priv)->__info)-> trans_offsets[(dsi_trans)] - (&(dev_priv)->__info)-> trans_offsets[TRANSCODER_A] + (0x60000) + ((&(dev_priv)-> __info)->display_mmio_offset))) }), | |||
| 906 | (hactive - 1) | ((htotal - 1) << 16)); | |||
| 907 | } | |||
| 908 | ||||
| 909 | /* TRANS_HSYNC register to be programmed only for video mode */ | |||
| 910 | if (is_vid_mode(intel_dsi)) { | |||
| 911 | if (intel_dsi->video_mode_format == | |||
| 912 | VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE(1 << 0)) { | |||
| 913 | /* BSPEC: hsync size should be at least 16 pixels */ | |||
| 914 | if (hsync_size < 16) | |||
| 915 | drm_err(&dev_priv->drm, | |||
| 916 | "hsync size < 16 pixels\n"); | |||
| 917 | } | |||
| 918 | ||||
| 919 | if (hback_porch < 16) | |||
| 920 | drm_err(&dev_priv->drm, "hback porch < 16 pixels\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "hback porch < 16 pixels\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); | |||
| 921 | ||||
| 922 | if (intel_dsi->dual_link) { | |||
| 923 | hsync_start /= 2; | |||
| 924 | hsync_end /= 2; | |||
| 925 | } | |||
| 926 | ||||
| 927 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 928 | dsi_trans = dsi_port_to_transcoder(port); | |||
| 929 | intel_de_write(dev_priv, HSYNC(dsi_trans)((const i915_reg_t){ .reg = (((&(dev_priv)->__info)-> trans_offsets[(dsi_trans)] - (&(dev_priv)->__info)-> trans_offsets[TRANSCODER_A] + (0x60008) + ((&(dev_priv)-> __info)->display_mmio_offset))) }), | |||
| 930 | (hsync_start - 1) | ((hsync_end - 1) << 16)); | |||
| 931 | } | |||
| 932 | } | |||
| 933 | ||||
| 934 | /* program TRANS_VTOTAL register */ | |||
| 935 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 936 | dsi_trans = dsi_port_to_transcoder(port); | |||
| 937 | /* | |||
| 938 | * FIXME: Programming this by assuming progressive mode, since | |||
| 939 | * non-interlaced info from VBT is not saved inside | |||
| 940 | * struct drm_display_mode. | |||
| 941 | * For interlaced mode: program the required pixel value minus 2. | |||
| 942 | */ | |||
| 943 | intel_de_write(dev_priv, VTOTAL(dsi_trans)((const i915_reg_t){ .reg = (((&(dev_priv)->__info)-> trans_offsets[(dsi_trans)] - (&(dev_priv)->__info)-> trans_offsets[TRANSCODER_A] + (0x6000c) + ((&(dev_priv)-> __info)->display_mmio_offset))) }), | |||
| 944 | (vactive - 1) | ((vtotal - 1) << 16)); | |||
| 945 | } | |||
| 946 | ||||
| 947 | if (vsync_end < vsync_start || vsync_end > vtotal) | |||
| 948 | drm_err(&dev_priv->drm, "Invalid vsync_end value\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Invalid vsync_end value\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); | |||
| 949 | ||||
| 950 | if (vsync_start < vactive) | |||
| 951 | drm_err(&dev_priv->drm, "vsync_start less than vactive\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "vsync_start less than vactive\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); | |||
| 952 | ||||
| 953 | /* program TRANS_VSYNC register for video mode only */ | |||
| 954 | if (is_vid_mode(intel_dsi)) { | |||
| 955 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 956 | dsi_trans = dsi_port_to_transcoder(port); | |||
| 957 | intel_de_write(dev_priv, VSYNC(dsi_trans)((const i915_reg_t){ .reg = (((&(dev_priv)->__info)-> trans_offsets[(dsi_trans)] - (&(dev_priv)->__info)-> trans_offsets[TRANSCODER_A] + (0x60014) + ((&(dev_priv)-> __info)->display_mmio_offset))) }), | |||
| 958 | (vsync_start - 1) | ((vsync_end - 1) << 16)); | |||
| 959 | } | |||
| 960 | } | |||
| 961 | ||||
| 962 | /* | |||
| 963 | * FIXME: It has to be programmed only for video modes and interlaced | |||
| 964 | * modes. Put the check condition here once interlaced | |||
| 965 | * info is available, as described above. | |||
| 966 | * program TRANS_VSYNCSHIFT register | |||
| 967 | */ | |||
| 968 | if (is_vid_mode(intel_dsi)) { | |||
| 969 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 970 | dsi_trans = dsi_port_to_transcoder(port); | |||
| 971 | intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans)((const i915_reg_t){ .reg = (((&(dev_priv)->__info)-> trans_offsets[(dsi_trans)] - (&(dev_priv)->__info)-> trans_offsets[TRANSCODER_A] + (0x60028) + ((&(dev_priv)-> __info)->display_mmio_offset))) }), | |||
| 972 | vsync_shift); | |||
| 973 | } | |||
| 974 | } | |||
| 975 | ||||
| 976 | /* program TRANS_VBLANK register, should be same as vtotal programmed */ | |||
| 977 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 12) { | |||
| 978 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 979 | dsi_trans = dsi_port_to_transcoder(port); | |||
| 980 | intel_de_write(dev_priv, VBLANK(dsi_trans)((const i915_reg_t){ .reg = (((&(dev_priv)->__info)-> trans_offsets[(dsi_trans)] - (&(dev_priv)->__info)-> trans_offsets[TRANSCODER_A] + (0x60010) + ((&(dev_priv)-> __info)->display_mmio_offset))) }), | |||
| 981 | (vactive - 1) | ((vtotal - 1) << 16)); | |||
| 982 | } | |||
| 983 | } | |||
| 984 | } | |||
| 985 | ||||
| 986 | static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) | |||
| 987 | { | |||
| 988 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(encoder->base.dev); | |||
| 989 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); | |||
| 990 | enum port port; | |||
| 991 | enum transcoder dsi_trans; | |||
| 992 | u32 tmp; | |||
| 993 | ||||
| 994 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 995 | dsi_trans = dsi_port_to_transcoder(port); | |||
| 996 | tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)-> pipe_offsets[dsi_trans] - (&(dev_priv)->__info)->pipe_offsets [PIPE_A] + (0x70008) + ((&(dev_priv)->__info)->display_mmio_offset )) })); | |||
| 997 | tmp |= PIPECONF_ENABLE(1 << 31); | |||
| 998 | intel_de_write(dev_priv, PIPECONF(dsi_trans)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)-> pipe_offsets[dsi_trans] - (&(dev_priv)->__info)->pipe_offsets [PIPE_A] + (0x70008) + ((&(dev_priv)->__info)->display_mmio_offset )) }), tmp); | |||
| 999 | ||||
| 1000 | /* wait for transcoder to be enabled */ | |||
| 1001 | if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)-> pipe_offsets[dsi_trans] - (&(dev_priv)->__info)->pipe_offsets [PIPE_A] + (0x70008) + ((&(dev_priv)->__info)->display_mmio_offset )) }), | |||
| 1002 | I965_PIPECONF_ACTIVE(1 << 30), 10)) | |||
| 1003 | drm_err(&dev_priv->drm, | |||
| 1004 | "DSI transcoder not enabled\n"); | |||
| 1005 | } | |||
| 1006 | } | |||
| 1007 | ||||
| 1008 | static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, | |||
| 1009 | const struct intel_crtc_state *crtc_state) | |||
| 1010 | { | |||
| 1011 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(encoder->base.dev); | |||
| 1012 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); | |||
| 1013 | enum port port; | |||
| 1014 | enum transcoder dsi_trans; | |||
| 1015 | u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul; | |||
| 1016 | ||||
| 1017 | /* | |||
| 1018 | * escape clock count calculation: | |||
| 1019 | * BYTE_CLK_COUNT = TIME_NS/(8 * UI) | |||
| 1020 | * UI (nsec) = (10^6)/Bitrate | |||
| 1021 | * TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6)/ Bitrate | |||
| 1022 | * ESCAPE_CLK_COUNT = TIME_NS/ESC_CLK_NS | |||
| 1023 | */ | |||
| 1024 | divisor = intel_dsi_tlpx_ns(intel_dsi) * afe_clk(encoder, crtc_state) * 1000; | |||
| 1025 | mul = 8 * 1000000; | |||
| 1026 | hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul,(((intel_dsi->hs_tx_timeout * mul) + ((divisor) - 1)) / (divisor )) | |||
| 1027 | divisor)(((intel_dsi->hs_tx_timeout * mul) + ((divisor) - 1)) / (divisor )); | |||
| 1028 | lp_rx_timeout = DIV_ROUND_UP(intel_dsi->lp_rx_timeout * mul, divisor)(((intel_dsi->lp_rx_timeout * mul) + ((divisor) - 1)) / (divisor )); | |||
| 1029 | ta_timeout = DIV_ROUND_UP(intel_dsi->turn_arnd_val * mul, divisor)(((intel_dsi->turn_arnd_val * mul) + ((divisor) - 1)) / (divisor )); | |||
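| | /* | |||
| | * Editor's note (worked example with assumed values, not part of the | |||
| | * original source): the three conversions above all reduce to | |||
| | *   escape_clks = DIV_ROUND_UP(timeout * 8 * 10^6, tlpx_ns * afe_clk_khz * 1000) | |||
| | * e.g. with intel_dsi_tlpx_ns() == 10, afe_clk() == 100000 (kHz) and | |||
| | * hs_tx_timeout == 250, divisor == 10^9 and the programmed value is | |||
| | * DIV_ROUND_UP(250 * 8000000, 10^9) == 2 escape clocks. | |||
| | */ | |||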
| 1030 | ||||
| 1031 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 1032 | dsi_trans = dsi_port_to_transcoder(port); | |||
| 1033 | ||||
| 1034 | /* program hst_tx_timeout */ | |||
| 1035 | tmp = intel_de_read(dev_priv, DSI_HSTX_TO(dsi_trans)((const i915_reg_t){ .reg = (((0x6b044) + ((dsi_trans) - TRANSCODER_DSI_0 ) * ((0x6b844) - (0x6b044)))) })); | |||
| 1036 | tmp &= ~HSTX_TIMEOUT_VALUE_MASK(0xffff << 16); | |||
| 1037 | tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout)((hs_tx_timeout) << 16); | |||
| 1038 | intel_de_write(dev_priv, DSI_HSTX_TO(dsi_trans)((const i915_reg_t){ .reg = (((0x6b044) + ((dsi_trans) - TRANSCODER_DSI_0 ) * ((0x6b844) - (0x6b044)))) }), tmp); | |||
| 1039 | ||||
| 1040 | /* FIXME: DSI_CALIB_TO */ | |||
| 1041 | ||||
| 1042 | /* program lp_rx_host timeout */ | |||
| 1043 | tmp = intel_de_read(dev_priv, DSI_LPRX_HOST_TO(dsi_trans)((const i915_reg_t){ .reg = (((0x6b048) + ((dsi_trans) - TRANSCODER_DSI_0 ) * ((0x6b848) - (0x6b048)))) })); | |||
| 1044 | tmp &= ~LPRX_TIMEOUT_VALUE_MASK(0xffff << 0); | |||
| 1045 | tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout)((lp_rx_timeout) << 0); | |||
| 1046 | intel_de_write(dev_priv, DSI_LPRX_HOST_TO(dsi_trans)((const i915_reg_t){ .reg = (((0x6b048) + ((dsi_trans) - TRANSCODER_DSI_0 ) * ((0x6b848) - (0x6b048)))) }), tmp); | |||
| 1047 | ||||
| 1048 | /* FIXME: DSI_PWAIT_TO */ | |||
| 1049 | ||||
| 1050 | /* program turn around timeout */ | |||
| 1051 | tmp = intel_de_read(dev_priv, DSI_TA_TO(dsi_trans)((const i915_reg_t){ .reg = (((0x6b04c) + ((dsi_trans) - TRANSCODER_DSI_0 ) * ((0x6b84c) - (0x6b04c)))) })); | |||
| 1052 | tmp &= ~TA_TIMEOUT_VALUE_MASK(0xffff << 0); | |||
| 1053 | tmp |= TA_TIMEOUT_VALUE(ta_timeout)((ta_timeout) << 0); | |||
| 1054 | intel_de_write(dev_priv, DSI_TA_TO(dsi_trans)((const i915_reg_t){ .reg = (((0x6b04c) + ((dsi_trans) - TRANSCODER_DSI_0 ) * ((0x6b84c) - (0x6b04c)))) }), tmp); | |||
| 1055 | } | |||
| 1056 | } | |||
| 1057 | ||||
| 1058 | static void gen11_dsi_config_util_pin(struct intel_encoder *encoder, | |||
| 1059 | bool_Bool enable) | |||
| 1060 | { | |||
| 1061 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(encoder->base.dev); | |||
| 1062 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); | |||
| 1063 | u32 tmp; | |||
| 1064 | ||||
| 1065 | /* | |||
| 1066 | * used as TE i/p for DSI0, | |||
| 1067 | * for dual link/DSI1 TE is from slave DSI1 | |||
| 1068 | * through GPIO. | |||
| 1069 | */ | |||
| 1070 | if (is_vid_mode(intel_dsi) || (intel_dsi->ports & BIT(PORT_B)(1UL << (PORT_B)))) | |||
| 1071 | return; | |||
| 1072 | ||||
| 1073 | tmp = intel_de_read(dev_priv, UTIL_PIN_CTL((const i915_reg_t){ .reg = (0x48400) })); | |||
| 1074 | ||||
| 1075 | if (enable) { | |||
| 1076 | tmp |= UTIL_PIN_DIRECTION_INPUT(1 << 19); | |||
| 1077 | tmp |= UTIL_PIN_ENABLE(1 << 31); | |||
| 1078 | } else { | |||
| 1079 | tmp &= ~UTIL_PIN_ENABLE(1 << 31); | |||
| 1080 | } | |||
| 1081 | intel_de_write(dev_priv, UTIL_PIN_CTL((const i915_reg_t){ .reg = (0x48400) }), tmp); | |||
| 1082 | } | |||
| 1083 | ||||
| 1084 | static void | |||
| 1085 | gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, | |||
| 1086 | const struct intel_crtc_state *crtc_state) | |||
| 1087 | { | |||
| 1088 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(encoder->base.dev); | |||
| 1089 | ||||
| 1090 | /* step 4a: power up all lanes of the DDI used by DSI */ | |||
| 1091 | gen11_dsi_power_up_lanes(encoder); | |||
| 1092 | ||||
| 1093 | /* step 4b: configure lane sequencing of the Combo-PHY transmitters */ | |||
| 1094 | gen11_dsi_config_phy_lanes_sequence(encoder); | |||
| 1095 | ||||
| 1096 | /* step 4c: configure voltage swing and skew */ | |||
| 1097 | gen11_dsi_voltage_swing_program_seq(encoder); | |||
| 1098 | ||||
| 1099 | /* enable DDI buffer */ | |||
| 1100 | gen11_dsi_enable_ddi_buffer(encoder); | |||
| 1101 | ||||
| 1102 | /* setup D-PHY timings */ | |||
| 1103 | gen11_dsi_setup_dphy_timings(encoder, crtc_state); | |||
| 1104 | ||||
| 1105 | /* Since transcoder is configured to take events from GPIO */ | |||
| 1106 | gen11_dsi_config_util_pin(encoder, true1); | |||
| 1107 | ||||
| 1108 | /* step 4h: setup DSI protocol timeouts */ | |||
| 1109 | gen11_dsi_setup_timeouts(encoder, crtc_state); | |||
| 1110 | ||||
| 1111 | /* Step (4h, 4i, 4j, 4k): Configure transcoder */ | |||
| 1112 | gen11_dsi_configure_transcoder(encoder, crtc_state); | |||
| 1113 | ||||
| 1114 | /* Step 4l: Gate DDI clocks */ | |||
| 1115 | if (IS_GEN(dev_priv, 11)(0 + (&(dev_priv)->__info)->gen == (11))) | |||
| 1116 | gen11_dsi_gate_clocks(encoder); | |||
| 1117 | } | |||
| 1118 | ||||
| 1119 | static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) | |||
| 1120 | { | |||
| 1121 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(encoder->base.dev); | |||
| 1122 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); | |||
| 1123 | struct mipi_dsi_device *dsi; | |||
| 1124 | enum port port; | |||
| 1125 | enum transcoder dsi_trans; | |||
| 1126 | u32 tmp; | |||
| 1127 | int ret; | |||
| 1128 | ||||
| 1129 | /* set maximum return packet size */ | |||
| 1130 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 1131 | dsi_trans = dsi_port_to_transcoder(port); | |||
| 1132 | ||||
| 1133 | /* | |||
| 1134 | * FIXME: This uses the number of DW's currently in the payload | |||
| 1135 | * receive queue. This is probably not what we want here. | |||
| 1136 | */ | |||
| 1137 | tmp = intel_de_read(dev_priv, DSI_CMD_RXCTL(dsi_trans)((const i915_reg_t){ .reg = (((0x6b0d4) + ((dsi_trans) - TRANSCODER_DSI_0 ) * ((0x6b8d4) - (0x6b0d4)))) })); | |||
| 1138 | tmp &= NUMBER_RX_PLOAD_DW_MASK(0xff << 0); | |||
| 1139 | /* multiply "Number Rx Payload DW" by 4 to get max value */ | |||
| 1140 | tmp = tmp * 4; | |||
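| | /* | |||
| | * Editor's note (illustrative example, not part of the original source): | |||
| | * NUMBER_RX_PLOAD_DW counts 32-bit dwords, so a read-back of 16 dwords | |||
| | * becomes a maximum return packet size of 16 * 4 == 64 bytes passed to | |||
| | * mipi_dsi_set_maximum_return_packet_size(). | |||
| | */ | |||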
| 1141 | dsi = intel_dsi->dsi_hosts[port]->device; | |||
| 1142 | ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp); | |||
| 1143 | if (ret < 0) | |||
| 1144 | drm_err(&dev_priv->drm, | |||
| 1145 | "error setting max return pkt size %d\n", tmp); | |||
| 1146 | } | |||
| 1147 | ||||
| 1148 | /* panel power on related mipi dsi vbt sequences */ | |||
| 1149 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON); | |||
| 1150 | intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay); | |||
| 1151 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); | |||
| 1152 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP); | |||
| 1153 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON); | |||
| 1154 | ||||
| 1155 | /* ensure all panel commands dispatched before enabling transcoder */ | |||
| 1156 | wait_for_cmds_dispatched_to_panel(encoder); | |||
| 1157 | } | |||
| 1158 | ||||
| 1159 | static void gen11_dsi_pre_pll_enable(struct intel_atomic_state *state, | |||
| 1160 | struct intel_encoder *encoder, | |||
| 1161 | const struct intel_crtc_state *crtc_state, | |||
| 1162 | const struct drm_connector_state *conn_state) | |||
| 1163 | { | |||
| 1164 | /* step2: enable IO power */ | |||
| 1165 | gen11_dsi_enable_io_power(encoder); | |||
| 1166 | ||||
| 1167 | /* step3: enable DSI PLL */ | |||
| 1168 | gen11_dsi_program_esc_clk_div(encoder, crtc_state); | |||
| 1169 | } | |||
| 1170 | ||||
| 1171 | static void gen11_dsi_pre_enable(struct intel_atomic_state *state, | |||
| 1172 | struct intel_encoder *encoder, | |||
| 1173 | const struct intel_crtc_state *pipe_config, | |||
| 1174 | const struct drm_connector_state *conn_state) | |||
| 1175 | { | |||
| 1176 | /* step3b */ | |||
| 1177 | gen11_dsi_map_pll(encoder, pipe_config); | |||
| 1178 | ||||
| 1179 | /* step4: enable DSI port and DPHY */ | |||
| 1180 | gen11_dsi_enable_port_and_phy(encoder, pipe_config); | |||
| 1181 | ||||
| 1182 | /* step5: program and powerup panel */ | |||
| 1183 | gen11_dsi_powerup_panel(encoder); | |||
| 1184 | ||||
| 1185 | intel_dsc_enable(encoder, pipe_config); | |||
| 1186 | ||||
| 1187 | /* step6c: configure transcoder timings */ | |||
| 1188 | gen11_dsi_set_transcoder_timings(encoder, pipe_config); | |||
| 1189 | } | |||
| 1190 | ||||
| 1191 | static void gen11_dsi_enable(struct intel_atomic_state *state, | |||
| 1192 | struct intel_encoder *encoder, | |||
| 1193 | const struct intel_crtc_state *crtc_state, | |||
| 1194 | const struct drm_connector_state *conn_state) | |||
| 1195 | { | |||
| 1196 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); | |||
| 1197 | ||||
| 1198 | drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder)({ int __ret = !!((crtc_state->has_pch_encoder)); if (__ret ) printf("%s %s: " "%s", dev_driver_string(((state->base.dev ))->dev), "", "drm_WARN_ON(" "crtc_state->has_pch_encoder" ")"); __builtin_expect(!!(__ret), 0); }); | |||
| 1199 | ||||
| 1200 | /* step6d: enable dsi transcoder */ | |||
| 1201 | gen11_dsi_enable_transcoder(encoder); | |||
| 1202 | ||||
| 1203 | /* step7: enable backlight */ | |||
| 1204 | intel_panel_enable_backlight(crtc_state, conn_state); | |||
| 1205 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON); | |||
| 1206 | ||||
| 1207 | intel_crtc_vblank_on(crtc_state); | |||
| 1208 | } | |||
| 1209 | ||||
| 1210 | static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) | |||
| 1211 | { | |||
| 1212 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(encoder->base.dev); | |||
| 1213 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); | |||
| 1214 | enum port port; | |||
| 1215 | enum transcoder dsi_trans; | |||
| 1216 | u32 tmp; | |||
| 1217 | ||||
| 1218 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 1219 | dsi_trans = dsi_port_to_transcoder(port); | |||
| 1220 | ||||
| 1221 | /* disable transcoder */ | |||
| 1222 | tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)-> pipe_offsets[dsi_trans] - (&(dev_priv)->__info)->pipe_offsets [PIPE_A] + (0x70008) + ((&(dev_priv)->__info)->display_mmio_offset )) })); | |||
| 1223 | tmp &= ~PIPECONF_ENABLE(1 << 31); | |||
| 1224 | intel_de_write(dev_priv, PIPECONF(dsi_trans)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)-> pipe_offsets[dsi_trans] - (&(dev_priv)->__info)->pipe_offsets [PIPE_A] + (0x70008) + ((&(dev_priv)->__info)->display_mmio_offset )) }), tmp); | |||
| 1225 | ||||
| 1226 | /* wait for transcoder to be disabled */ | |||
| 1227 | if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)-> pipe_offsets[dsi_trans] - (&(dev_priv)->__info)->pipe_offsets [PIPE_A] + (0x70008) + ((&(dev_priv)->__info)->display_mmio_offset )) }), | |||
| 1228 | I965_PIPECONF_ACTIVE(1 << 30), 50)) | |||
| 1229 | drm_err(&dev_priv->drm, | |||
| 1230 | "DSI transcoder not disabled\n"); | |||
| 1231 | } | |||
| 1232 | } | |||
| 1233 | ||||
| 1234 | static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder) | |||
| 1235 | { | |||
| 1236 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); | |||
| 1237 | ||||
| 1238 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF); | |||
| 1239 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET); | |||
| 1240 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF); | |||
| 1241 | ||||
| 1242 | /* ensure cmds dispatched to panel */ | |||
| 1243 | wait_for_cmds_dispatched_to_panel(encoder); | |||
| 1244 | } | |||
| 1245 | ||||
| 1246 | static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) | |||
| 1247 | { | |||
| 1248 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(encoder->base.dev); | |||
| 1249 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); | |||
| 1250 | enum port port; | |||
| 1251 | enum transcoder dsi_trans; | |||
| 1252 | u32 tmp; | |||
| 1253 | ||||
| 1254 | /* disable periodic update mode */ | |||
| 1255 | if (is_cmd_mode(intel_dsi)) { | |||
| 1256 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 1257 | tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port)((const i915_reg_t){ .reg = (((0x6b034) + (port) * ((0x6b834) - (0x6b034)))) })); | |||
| 1258 | tmp &= ~DSI_PERIODIC_FRAME_UPDATE_ENABLE(1 << 29); | |||
| 1259 | intel_de_write(dev_priv, DSI_CMD_FRMCTL(port)((const i915_reg_t){ .reg = (((0x6b034) + (port) * ((0x6b834) - (0x6b034)))) }), tmp); | |||
| 1260 | } | |||
| 1261 | } | |||
| 1262 | ||||
| 1263 | /* put dsi link in ULPS */ | |||
| 1264 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 1265 | dsi_trans = dsi_port_to_transcoder(port); | |||
| 1266 | tmp = intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)((const i915_reg_t){ .reg = (((0x6b0d8) + ((dsi_trans) - TRANSCODER_DSI_0 ) * ((0x6b8d8) - (0x6b0d8)))) })); | |||
| 1267 | tmp |= LINK_ENTER_ULPS(1 << 0); | |||
| 1268 | tmp &= ~LINK_ULPS_TYPE_LP11(1 << 8); | |||
| 1269 | intel_de_write(dev_priv, DSI_LP_MSG(dsi_trans)((const i915_reg_t){ .reg = (((0x6b0d8) + ((dsi_trans) - TRANSCODER_DSI_0 ) * ((0x6b8d8) - (0x6b0d8)))) }), tmp); | |||
| 1270 | ||||
| 1271 | if (wait_for_us((intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) & | |||
| 1272 | LINK_IN_ULPS), | |||
| 1273 | 10)) | |||
| 1274 | drm_err(&dev_priv->drm, "DSI link not in ULPS\n"); | |||
| 1275 | } | |||
| 1276 | ||||
| 1277 | /* disable ddi function */ | |||
| 1278 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 1279 | dsi_trans = dsi_port_to_transcoder(port); | |||
| 1280 | tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans)((const i915_reg_t){ .reg = (((&(dev_priv)->__info)-> trans_offsets[(dsi_trans)] - (&(dev_priv)->__info)-> trans_offsets[TRANSCODER_A] + (0x60400) + ((&(dev_priv)-> __info)->display_mmio_offset))) })); | |||
| 1281 | tmp &= ~TRANS_DDI_FUNC_ENABLE(1 << 31); | |||
| 1282 | intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans)((const i915_reg_t){ .reg = (((&(dev_priv)->__info)-> trans_offsets[(dsi_trans)] - (&(dev_priv)->__info)-> trans_offsets[TRANSCODER_A] + (0x60400) + ((&(dev_priv)-> __info)->display_mmio_offset))) }), tmp); | |||
| 1283 | } | |||
| 1284 | ||||
| 1285 | /* disable port sync mode if dual link */ | |||
| 1286 | if (intel_dsi->dual_link) { | |||
| 1287 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 1288 | dsi_trans = dsi_port_to_transcoder(port); | |||
| 1289 | tmp = intel_de_read(dev_priv, | |||
| 1290 | TRANS_DDI_FUNC_CTL2(dsi_trans)((const i915_reg_t){ .reg = (((&(dev_priv)->__info)-> trans_offsets[(dsi_trans)] - (&(dev_priv)->__info)-> trans_offsets[TRANSCODER_A] + (0x60404) + ((&(dev_priv)-> __info)->display_mmio_offset))) })); | |||
| 1291 | tmp &= ~PORT_SYNC_MODE_ENABLE((u32)((1UL << (4)) + 0)); | |||
| 1292 | intel_de_write(dev_priv, | |||
| 1293 | TRANS_DDI_FUNC_CTL2(dsi_trans)((const i915_reg_t){ .reg = (((&(dev_priv)->__info)-> trans_offsets[(dsi_trans)] - (&(dev_priv)->__info)-> trans_offsets[TRANSCODER_A] + (0x60404) + ((&(dev_priv)-> __info)->display_mmio_offset))) }), tmp); | |||
| 1294 | } | |||
| 1295 | } | |||
| 1296 | } | |||
| 1297 | ||||
| 1298 | static void gen11_dsi_disable_port(struct intel_encoder *encoder) | |||
| 1299 | { | |||
| 1300 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(encoder->base.dev); | |||
| 1301 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); | |||
| 1302 | u32 tmp; | |||
| 1303 | enum port port; | |||
| 1304 | ||||
| 1305 | gen11_dsi_ungate_clocks(encoder); | |||
| 1306 | for_each_dsi_port(port, intel_dsi->ports)for ((port) = PORT_A; (port) < I915_MAX_PORTS; (port)++) if (!((intel_dsi->ports) & (1UL << (port)))) {} else { | |||
| 1307 | tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port)((const i915_reg_t){ .reg = (((0x64000) + (port) * ((0x64100) - (0x64000)))) })); | |||
| 1308 | tmp &= ~DDI_BUF_CTL_ENABLE(1 << 31); | |||
| 1309 | intel_de_write(dev_priv, DDI_BUF_CTL(port)((const i915_reg_t){ .reg = (((0x64000) + (port) * ((0x64100) - (0x64000)))) }), tmp); | |||
| 1310 | ||||
| 1311 | if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) & | |||
| 1312 | DDI_BUF_IS_IDLE), | |||
| 1313 | 8)) | |||
| 1314 | drm_err(&dev_priv->drm, | |||
| 1315 | "DDI port:%c buffer not idle\n", | |||
| 1316 | port_name(port)); | |||
| 1317 | } | |||
| 1318 | gen11_dsi_gate_clocks(encoder); | |||
| 1319 | } | |||
| 1320 | ||||
| 1321 | static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) | |||
| 1322 | { | |||
| 1323 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
| 1324 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
| 1325 | enum port port;
| 1326 | u32 tmp;
| 1327 |
| 1328 | for_each_dsi_port(port, intel_dsi->ports) {
| 1329 | intel_wakeref_t wakeref;
| 1330 |
| 1331 | wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
| 1332 | intel_display_power_put(dev_priv,
| 1333 | port == PORT_A ?
| 1334 | POWER_DOMAIN_PORT_DDI_A_IO :
| 1335 | POWER_DOMAIN_PORT_DDI_B_IO,
| 1336 | wakeref);
| 1337 | }
| 1338 |
| 1339 | /* set mode to DDI */
| 1340 | for_each_dsi_port(port, intel_dsi->ports) {
| 1341 | tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
| 1342 | tmp &= ~COMBO_PHY_MODE_DSI;
| 1343 | intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
| 1344 | } | |||
| 1345 | } | |||
| 1346 | ||||
| 1347 | static void gen11_dsi_disable(struct intel_atomic_state *state, | |||
| 1348 | struct intel_encoder *encoder, | |||
| 1349 | const struct intel_crtc_state *old_crtc_state, | |||
| 1350 | const struct drm_connector_state *old_conn_state) | |||
| 1351 | { | |||
| 1352 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); | |||
| 1353 | ||||
| 1354 | /* step1: turn off backlight */ | |||
| 1355 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF); | |||
| 1356 | intel_panel_disable_backlight(old_conn_state); | |||
| 1357 | ||||
| 1358 | /* step2d,e: disable transcoder and wait */ | |||
| 1359 | gen11_dsi_disable_transcoder(encoder); | |||
| 1360 | ||||
| 1361 | /* step2f,g: powerdown panel */ | |||
| 1362 | gen11_dsi_powerdown_panel(encoder); | |||
| 1363 | ||||
| 1364 | /* step2h,i,j: deconfigure transcoder */
| 1365 | gen11_dsi_deconfigure_trancoder(encoder); | |||
| 1366 | ||||
| 1367 | /* step3: disable port */ | |||
| 1368 | gen11_dsi_disable_port(encoder); | |||
| 1369 | ||||
| 1370 | gen11_dsi_config_util_pin(encoder, false);
| 1371 | ||||
| 1372 | /* step4: disable IO power */ | |||
| 1373 | gen11_dsi_disable_io_power(encoder); | |||
| 1374 | } | |||
| 1375 | ||||
| 1376 | static void gen11_dsi_post_disable(struct intel_atomic_state *state, | |||
| 1377 | struct intel_encoder *encoder, | |||
| 1378 | const struct intel_crtc_state *old_crtc_state, | |||
| 1379 | const struct drm_connector_state *old_conn_state) | |||
| 1380 | { | |||
| 1381 | intel_crtc_vblank_off(old_crtc_state); | |||
| 1382 | ||||
| 1383 | intel_dsc_disable(old_crtc_state); | |||
| 1384 | ||||
| 1385 | skl_scaler_disable(old_crtc_state); | |||
| 1386 | } | |||
| 1387 | ||||
| 1388 | static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector, | |||
| 1389 | struct drm_display_mode *mode) | |||
| 1390 | { | |||
| 1391 | /* FIXME: DSC? */ | |||
| 1392 | return intel_dsi_mode_valid(connector, mode); | |||
| 1393 | } | |||
| 1394 | ||||
| 1395 | static void gen11_dsi_get_timings(struct intel_encoder *encoder, | |||
| 1396 | struct intel_crtc_state *pipe_config) | |||
| 1397 | { | |||
| 1398 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); | |||
| 1399 | struct drm_display_mode *adjusted_mode = | |||
| 1400 | &pipe_config->hw.adjusted_mode; | |||
| 1401 | ||||
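| | /* DSC case below: undo the compression on the horizontal timings by scaling them with (uncompressed bpp / compressed bpp). */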
| 1402 | if (pipe_config->dsc.compressed_bpp) { | |||
| 1403 | int div = pipe_config->dsc.compressed_bpp; | |||
| 1404 | int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); | |||
| 1405 | ||||
| 1406 | adjusted_mode->crtc_htotal = | |||
| 1407 | DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
| 1408 | adjusted_mode->crtc_hsync_start =
| 1409 | DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div);
| 1410 | adjusted_mode->crtc_hsync_end =
| 1411 | DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div);
| 1412 | }
| 1413 |
| 1414 | if (intel_dsi->dual_link) {
| 1415 | adjusted_mode->crtc_hdisplay *= 2;
| 1416 | if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
| 1417 | adjusted_mode->crtc_hdisplay -=
| 1418 | intel_dsi->pixel_overlap;
| 1419 | adjusted_mode->crtc_htotal *= 2;
| 1420 | }
| 1421 | adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay;
| 1422 | adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal;
| 1423 |
| 1424 | if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
| 1425 | if (intel_dsi->dual_link) { | |||
| 1426 | adjusted_mode->crtc_hsync_start *= 2; | |||
| 1427 | adjusted_mode->crtc_hsync_end *= 2; | |||
| 1428 | } | |||
| 1429 | } | |||
| 1430 | adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay; | |||
| 1431 | adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal; | |||
| 1432 | } | |||
| 1433 | ||||
| 1434 | static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi)
| 1435 | {
| 1436 | struct drm_device *dev = intel_dsi->base.base.dev;
| 1437 | struct drm_i915_private *dev_priv = to_i915(dev);
| 1438 | enum transcoder dsi_trans;
| 1439 | u32 val;
| 1440 |
| 1441 | if (intel_dsi->ports == BIT(PORT_B))
| 1442 | dsi_trans = TRANSCODER_DSI_1;
| 1443 | else
| 1444 | dsi_trans = TRANSCODER_DSI_0;
| 1445 |
| 1446 | val = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans));
| 1447 | return (val & DSI_PERIODIC_FRAME_UPDATE_ENABLE);
| 1448 | } | |||
| 1449 | ||||
| 1450 | static void gen11_dsi_get_config(struct intel_encoder *encoder, | |||
| 1451 | struct intel_crtc_state *pipe_config) | |||
| 1452 | { | |||
| 1453 | struct drm_i915_private *i915 = to_i915(encoder->base.dev);
| 1454 | struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
| 1455 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
| 1456 |
| 1457 | intel_dsc_get_config(encoder, pipe_config);
| 1458 |
| 1459 | /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
| 1460 | pipe_config->port_clock = intel_dpll_get_freq(i915,
| 1461 | pipe_config->shared_dpll);
| 1462 |
| 1463 | pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk;
| 1464 | if (intel_dsi->dual_link)
| 1465 | pipe_config->hw.adjusted_mode.crtc_clock *= 2;
| 1466 |
| 1467 | gen11_dsi_get_timings(encoder, pipe_config);
| 1468 | pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
| 1469 | pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
| 1470 |
| 1471 | if (gen11_dsi_is_periodic_cmd_mode(intel_dsi))
| 1472 | pipe_config->mode_flags |= I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
| 1473 | } | |||
| 1474 | ||||
| 1475 | static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, | |||
| 1476 | struct intel_crtc_state *crtc_state) | |||
| 1477 | { | |||
| 1478 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
| 1479 | struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
| 1480 | int dsc_max_bpc = INTEL_GEN(dev_priv) >= 12 ? 12 : 10;
| 1481 | bool use_dsc;
| 1482 | int ret;
| 1483 |
| 1484 | use_dsc = intel_bios_get_dsc_params(encoder, crtc_state, dsc_max_bpc);
| 1485 | if (!use_dsc)
| 1486 | return 0;
| 1487 |
| 1488 | if (crtc_state->pipe_bpp < 8 * 3)
| 1489 | return -EINVAL;
| 1490 |
| 1491 | /* FIXME: split only when necessary */
| 1492 | if (crtc_state->dsc.slice_count > 1)
| 1493 | crtc_state->dsc.dsc_split = true;
| 1494 |
| 1495 | vdsc_cfg->convert_rgb = true;
| 1496 | ||||
| 1497 | ret = intel_dsc_compute_params(encoder, crtc_state); | |||
| 1498 | if (ret) | |||
| 1499 | return ret; | |||
| 1500 | ||||
| 1501 | /* DSI specific sanity checks on the common code */ | |||
| 1502 | drm_WARN_ON(&dev_priv->drm, vdsc_cfg->vbr_enable);
| 1503 | drm_WARN_ON(&dev_priv->drm, vdsc_cfg->simple_422);
| 1504 | drm_WARN_ON(&dev_priv->drm,
| 1505 | vdsc_cfg->pic_width % vdsc_cfg->slice_width);
| 1506 | drm_WARN_ON(&dev_priv->drm, vdsc_cfg->slice_height < 8);
| 1507 | drm_WARN_ON(&dev_priv->drm,
| 1508 | vdsc_cfg->pic_height % vdsc_cfg->slice_height);
| 1509 |
| 1510 | ret = drm_dsc_compute_rc_parameters(vdsc_cfg);
| 1511 | if (ret)
| 1512 | return ret;
| 1513 |
| 1514 | crtc_state->dsc.compression_enable = true;
| 1515 | ||||
| 1516 | return 0; | |||
| 1517 | } | |||
| 1518 | ||||
| 1519 | static int gen11_dsi_compute_config(struct intel_encoder *encoder, | |||
| 1520 | struct intel_crtc_state *pipe_config, | |||
| 1521 | struct drm_connector_state *conn_state) | |||
| 1522 | { | |||
| 1523 | struct drm_i915_private *i915 = to_i915(encoder->base.dev);
| 1524 | struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
| 1525 | base);
| 1526 | struct intel_connector *intel_connector = intel_dsi->attached_connector; | |||
| 1527 | const struct drm_display_mode *fixed_mode = | |||
| 1528 | intel_connector->panel.fixed_mode; | |||
| 1529 | struct drm_display_mode *adjusted_mode = | |||
| 1530 | &pipe_config->hw.adjusted_mode; | |||
| 1531 | int ret; | |||
| 1532 | ||||
| 1533 | pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; | |||
| 1534 | intel_fixed_panel_mode(fixed_mode, adjusted_mode); | |||
| 1535 | ||||
| 1536 | ret = intel_pch_panel_fitting(pipe_config, conn_state); | |||
| 1537 | if (ret) | |||
| 1538 | return ret; | |||
| 1539 | ||||
| 1540 | adjusted_mode->flags = 0; | |||
| 1541 | ||||
| 1542 | /* Dual link goes to transcoder DSI '0' */
| 1543 | if (intel_dsi->ports == BIT(PORT_B))
| 1544 | pipe_config->cpu_transcoder = TRANSCODER_DSI_1; | |||
| 1545 | else | |||
| 1546 | pipe_config->cpu_transcoder = TRANSCODER_DSI_0; | |||
| 1547 | ||||
| 1548 | if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB888) | |||
| 1549 | pipe_config->pipe_bpp = 24; | |||
| 1550 | else | |||
| 1551 | pipe_config->pipe_bpp = 18; | |||
| 1552 | ||||
| 1553 | pipe_config->clock_set = true;
| 1554 |
| 1555 | if (gen11_dsi_dsc_compute_config(encoder, pipe_config))
| 1556 | drm_dbg_kms(&i915->drm, "Attempting to use DSC failed\n");
| 1557 | ||||
| 1558 | pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5; | |||
| 1559 | ||||
| 1560 | /* | |||
| 1561 | * In case of TE GATE cmd mode, we | |||
| 1562 | * receive TE from the slave if | |||
| 1563 | * dual link is enabled | |||
| 1564 | */ | |||
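| | /* TE0 corresponds to the PORT_A link and TE1 to the PORT_B link, so the dual-link case sets both flags. */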
| 1565 | if (is_cmd_mode(intel_dsi)) { | |||
| 1566 | if (intel_dsi->ports == (BIT(PORT_B) | BIT(PORT_A)))
| 1567 | pipe_config->mode_flags |=
| 1568 | I915_MODE_FLAG_DSI_USE_TE1 |
| 1569 | I915_MODE_FLAG_DSI_USE_TE0;
| 1570 | else if (intel_dsi->ports == BIT(PORT_B))
| 1571 | pipe_config->mode_flags |=
| 1572 | I915_MODE_FLAG_DSI_USE_TE1;
| 1573 | else
| 1574 | pipe_config->mode_flags |=
| 1575 | I915_MODE_FLAG_DSI_USE_TE0;
| 1576 | } | |||
| 1577 | ||||
| 1578 | return 0; | |||
| 1579 | } | |||
| 1580 | ||||
| 1581 | static void gen11_dsi_get_power_domains(struct intel_encoder *encoder, | |||
| 1582 | struct intel_crtc_state *crtc_state) | |||
| 1583 | { | |||
| 1584 | struct drm_i915_private *i915 = to_i915(encoder->base.dev);
| 1585 | ||||
| 1586 | get_dsi_io_power_domains(i915, | |||
| 1587 | enc_to_intel_dsi(encoder)); | |||
| 1588 | } | |||
| 1589 | ||||
| 1590 | static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
| 1591 | enum pipe *pipe)
| 1592 | {
| 1593 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
| 1594 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
| 1595 | enum transcoder dsi_trans;
| 1596 | intel_wakeref_t wakeref;
| 1597 | enum port port;
| 1598 | bool ret = false;
| 1599 | u32 tmp;
| 1600 |
| 1601 | wakeref = intel_display_power_get_if_enabled(dev_priv,
| 1602 | encoder->power_domain);
| 1603 | if (!wakeref)
| 1604 | return false;
| 1605 | ||||
| 1606 | for_each_dsi_port(port, intel_dsi->ports) {
| 1607 | dsi_trans = dsi_port_to_transcoder(port);
| 1608 | tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
| 1609 | switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
| 1610 | case TRANS_DDI_EDP_INPUT_A_ON:
| 1611 | *pipe = PIPE_A;
| 1612 | break;
| 1613 | case TRANS_DDI_EDP_INPUT_B_ONOFF:
| 1614 | *pipe = PIPE_B;
| 1615 | break;
| 1616 | case TRANS_DDI_EDP_INPUT_C_ONOFF:
| 1617 | *pipe = PIPE_C;
| 1618 | break;
| 1619 | case TRANS_DDI_EDP_INPUT_D_ONOFF:
| 1620 | *pipe = PIPE_D;
| 1621 | break;
| 1622 | default:
| 1623 | drm_err(&dev_priv->drm, "Invalid PIPE input\n");
| 1624 | goto out;
| 1625 | }
| 1626 |
| 1627 | tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
| 1628 | ret = tmp & PIPECONF_ENABLE;
| 1629 | } | |||
| 1630 | out: | |||
| 1631 | intel_display_power_put(dev_priv, encoder->power_domain, wakeref); | |||
| 1632 | return ret; | |||
| 1633 | } | |||
| 1634 | ||||
| 1635 | static void gen11_dsi_encoder_destroy(struct drm_encoder *encoder) | |||
| 1636 | { | |||
| 1637 | intel_encoder_destroy(encoder); | |||
| 1638 | } | |||
| 1639 | ||||
| 1640 | static const struct drm_encoder_funcs gen11_dsi_encoder_funcs = { | |||
| 1641 | .destroy = gen11_dsi_encoder_destroy, | |||
| 1642 | }; | |||
| 1643 | ||||
| 1644 | static const struct drm_connector_funcs gen11_dsi_connector_funcs = { | |||
| 1645 | .detect = intel_panel_detect, | |||
| 1646 | .late_register = intel_connector_register, | |||
| 1647 | .early_unregister = intel_connector_unregister, | |||
| 1648 | .destroy = intel_connector_destroy, | |||
| 1649 | .fill_modes = drm_helper_probe_single_connector_modes, | |||
| 1650 | .atomic_get_property = intel_digital_connector_atomic_get_property, | |||
| 1651 | .atomic_set_property = intel_digital_connector_atomic_set_property, | |||
| 1652 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | |||
| 1653 | .atomic_duplicate_state = intel_digital_connector_duplicate_state, | |||
| 1654 | }; | |||
| 1655 | ||||
| 1656 | static const struct drm_connector_helper_funcs gen11_dsi_connector_helper_funcs = { | |||
| 1657 | .get_modes = intel_dsi_get_modes, | |||
| 1658 | .mode_valid = gen11_dsi_mode_valid, | |||
| 1659 | .atomic_check = intel_digital_connector_atomic_check, | |||
| 1660 | }; | |||
| 1661 | ||||
| 1662 | static int gen11_dsi_host_attach(struct mipi_dsi_host *host, | |||
| 1663 | struct mipi_dsi_device *dsi) | |||
| 1664 | { | |||
| 1665 | return 0; | |||
| 1666 | } | |||
| 1667 | ||||
| 1668 | static int gen11_dsi_host_detach(struct mipi_dsi_host *host, | |||
| 1669 | struct mipi_dsi_device *dsi) | |||
| 1670 | { | |||
| 1671 | return 0; | |||
| 1672 | } | |||
| 1673 | ||||
| 1674 | static ssize_t gen11_dsi_host_transfer(struct mipi_dsi_host *host, | |||
| 1675 | const struct mipi_dsi_msg *msg) | |||
| 1676 | { | |||
| 1677 | struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host); | |||
| 1678 | struct mipi_dsi_packet dsi_pkt; | |||
| 1679 | ssize_t ret; | |||
| 1680 | bool enable_lpdt = false;
| 1681 |
| 1682 | ret = mipi_dsi_create_packet(&dsi_pkt, msg);
| 1683 | if (ret < 0)
| 1684 | return ret;
| 1685 |
| 1686 | if (msg->flags & MIPI_DSI_MSG_USE_LPM)
| 1687 | enable_lpdt = true;
| 1688 |
| 1689 | /* send packet header */
| 1690 | ret = dsi_send_pkt_hdr(intel_dsi_host, dsi_pkt, enable_lpdt);
| 1691 | if (ret < 0)
| 1692 | return ret;
| 1693 | ||||
| 1694 | /* only long packet contains payload */ | |||
| 1695 | if (mipi_dsi_packet_format_is_long(msg->type)) { | |||
| 1696 | ret = dsi_send_pkt_payld(intel_dsi_host, dsi_pkt); | |||
| 1697 | if (ret < 0) | |||
| 1698 | return ret; | |||
| 1699 | } | |||
| 1700 | ||||
| 1701 | //TODO: add payload receive code if needed | |||
| 1702 | ||||
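| | /* Report the number of bytes handled: the 4-byte DSI packet header plus any long-packet payload. */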
| 1703 | ret = sizeof(dsi_pkt.header) + dsi_pkt.payload_length; | |||
| 1704 | ||||
| 1705 | return ret; | |||
| 1706 | } | |||
| 1707 | ||||
| 1708 | static const struct mipi_dsi_host_ops gen11_dsi_host_ops = { | |||
| 1709 | .attach = gen11_dsi_host_attach, | |||
| 1710 | .detach = gen11_dsi_host_detach, | |||
| 1711 | .transfer = gen11_dsi_host_transfer, | |||
| 1712 | }; | |||
| 1713 | ||||
| 1714 | #define ICL_PREPARE_CNT_MAX 0x7
| 1715 | #define ICL_CLK_ZERO_CNT_MAX 0xf
| 1716 | #define ICL_TRAIL_CNT_MAX 0x7
| 1717 | #define ICL_TCLK_PRE_CNT_MAX 0x3
| 1718 | #define ICL_TCLK_POST_CNT_MAX 0x7
| 1719 | #define ICL_HS_ZERO_CNT_MAX 0xf
| 1720 | #define ICL_EXIT_ZERO_CNT_MAX 0x7
| 1721 | ||||
| 1722 | static void icl_dphy_param_init(struct intel_dsi *intel_dsi) | |||
| 1723 | { | |||
| 1724 | struct drm_device *dev = intel_dsi->base.base.dev; | |||
| 1725 | struct drm_i915_private *dev_priv = to_i915(dev);
| 1726 | struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; | |||
| 1727 | u32 tlpx_ns; | |||
| 1728 | u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt; | |||
| 1729 | u32 ths_prepare_ns, tclk_trail_ns; | |||
| 1730 | u32 hs_zero_cnt; | |||
| 1731 | u32 tclk_pre_cnt, tclk_post_cnt; | |||
| 1732 | ||||
| 1733 | tlpx_ns = intel_dsi_tlpx_ns(intel_dsi); | |||
| 1734 | ||||
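| | /* The counts below convert VBT timing values (ns) into escape clock (TLPX) periods, rounding up; prepare_cnt additionally carries two fraction bits (quarter periods). */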
| 1735 | tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
| 1736 | ths_prepare_ns = max(mipi_config->ths_prepare,
| 1737 | mipi_config->tclk_prepare);
| 1738 | ||||
| 1739 | /* | |||
| 1740 | * prepare cnt in escape clocks | |||
| 1741 | * this field represents a hexadecimal value with a precision | |||
| 1742 | * of 1.2 – i.e. the most significant bit is the integer | |||
| 1743 | * and the least significant 2 bits are fraction bits. | |||
| 1744 | * so, the field can represent a range of 0.25 to 1.75 | |||
| 1745 | */ | |||
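| | /* e.g. 0b001 encodes 0.25 and 0b111 (ICL_PREPARE_CNT_MAX) encodes 1.75 escape clocks, i.e. value / 4 */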
| 1746 | prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
| 1747 | if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
| 1748 | drm_dbg_kms(&dev_priv->drm, "prepare_cnt out of range (%d)\n",
| 1749 | prepare_cnt);
| 1750 | prepare_cnt = ICL_PREPARE_CNT_MAX;
| 1751 | }
| 1752 |
| 1753 | /* clk zero count in escape clocks */
| 1754 | clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
| 1755 | ths_prepare_ns, tlpx_ns);
| 1756 | if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
| 1757 | drm_dbg_kms(&dev_priv->drm,
| 1758 | "clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
| 1759 | clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
| 1760 | }
| 1761 |
| 1762 | /* trail cnt in escape clocks */
| 1763 | trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
| 1764 | if (trail_cnt > ICL_TRAIL_CNT_MAX) {
| 1765 | drm_dbg_kms(&dev_priv->drm, "trail_cnt out of range (%d)\n",
| 1766 | trail_cnt);
| 1767 | trail_cnt = ICL_TRAIL_CNT_MAX;
| 1768 | }
| 1769 |
| 1770 | /* tclk pre count in escape clocks */
| 1771 | tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
| 1772 | if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
| 1773 | drm_dbg_kms(&dev_priv->drm,
| 1774 | "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
| 1775 | tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
| 1776 | }
| 1777 |
| 1778 | /* tclk post count in escape clocks */
| 1779 | tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
| 1780 | if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
| 1781 | drm_dbg_kms(&dev_priv->drm,
| 1782 | "tclk_post_cnt out of range (%d)\n",
| 1783 | tclk_post_cnt);
| 1784 | tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
| 1785 | }
| 1786 |
| 1787 | /* hs zero cnt in escape clocks */
| 1788 | hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
| 1789 | ths_prepare_ns, tlpx_ns);
| 1790 | if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
| 1791 | drm_dbg_kms(&dev_priv->drm, "hs_zero_cnt out of range (%d)\n",
| 1792 | hs_zero_cnt);
| 1793 | hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
| 1794 | }
| 1795 |
| 1796 | /* hs exit zero cnt in escape clocks */
| 1797 | exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
| 1798 | if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
| 1799 | drm_dbg_kms(&dev_priv->drm,
| 1800 | "exit_zero_cnt out of range (%d)\n",
| 1801 | exit_zero_cnt);
| 1802 | exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
| 1803 | } | |||
| 1804 | ||||
| 1805 | /* clock lane dphy timings */ | |||
| 1806 | intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
| 1807 | CLK_PREPARE(prepare_cnt) |
| 1808 | CLK_ZERO_OVERRIDE |
| 1809 | CLK_ZERO(clk_zero_cnt) |
| 1810 | CLK_PRE_OVERRIDE |
| 1811 | CLK_PRE(tclk_pre_cnt) |
| 1812 | CLK_POST_OVERRIDE |
| 1813 | CLK_POST(tclk_post_cnt) |
| 1814 | CLK_TRAIL_OVERRIDE |
| 1815 | CLK_TRAIL(trail_cnt));
| 1816 |
| 1817 | /* data lanes dphy timings */
| 1818 | intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
| 1819 | HS_PREPARE(prepare_cnt) |
| 1820 | HS_ZERO_OVERRIDE |
| 1821 | HS_ZERO(hs_zero_cnt) |
| 1822 | HS_TRAIL_OVERRIDE |
| 1823 | HS_TRAIL(trail_cnt) |
| 1824 | HS_EXIT_OVERRIDE |
| 1825 | HS_EXIT(exit_zero_cnt));
| 1826 | ||||
| 1827 | intel_dsi_log_params(intel_dsi); | |||
| 1828 | } | |||
| 1829 | ||||
| 1830 | static void icl_dsi_add_properties(struct intel_connector *connector) | |||
| 1831 | { | |||
| 1832 | u32 allowed_scalers; | |||
| 1833 | ||||
| 1834 | allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) |
| 1835 | BIT(DRM_MODE_SCALE_FULLSCREEN) |
| 1836 | BIT(DRM_MODE_SCALE_CENTER);
| 1837 |
| 1838 | drm_connector_attach_scaling_mode_property(&connector->base,
| 1839 | allowed_scalers);
| 1840 |
| 1841 | connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
| 1842 | ||||
| 1843 | drm_connector_set_panel_orientation_with_quirk(&connector->base, | |||
| 1844 | intel_dsi_get_panel_orientation(connector), | |||
| 1845 | connector->panel.fixed_mode->hdisplay, | |||
| 1846 | connector->panel.fixed_mode->vdisplay); | |||
| 1847 | } | |||
| 1848 | ||||
| 1849 | void icl_dsi_init(struct drm_i915_private *dev_priv)
| 1850 | { | |||
| 1851 | struct drm_device *dev = &dev_priv->drm; | |||
| 1852 | struct intel_dsi *intel_dsi; | |||
| 1853 | struct intel_encoder *encoder; | |||
| 1854 | struct intel_connector *intel_connector; | |||
| 1855 | struct drm_connector *connector; | |||
| 1856 | struct drm_display_mode *fixed_mode; | |||
| 1857 | enum port port; | |||
| 1858 | ||||
| 1859 | if (!intel_bios_is_dsi_present(dev_priv, &port)) | |||
| 1860 | return; | |||
| 1861 | ||||
| 1862 | intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
| 1863 | if (!intel_dsi) | |||
| 1864 | return; | |||
| 1865 | ||||
| 1866 | intel_connector = intel_connector_alloc(); | |||
| 1867 | if (!intel_connector) { | |||
| 1868 | kfree(intel_dsi); | |||
| 1869 | return; | |||
| 1870 | } | |||
| 1871 | ||||
| 1872 | encoder = &intel_dsi->base; | |||
| 1873 | intel_dsi->attached_connector = intel_connector; | |||
| 1874 | connector = &intel_connector->base; | |||
| 1875 | ||||
| 1876 | /* register DSI encoder with DRM subsystem */ | |||
| 1877 | drm_encoder_init(dev, &encoder->base, &gen11_dsi_encoder_funcs, | |||
| 1878 | DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
| 1879 | ||||
| 1880 | encoder->pre_pll_enable = gen11_dsi_pre_pll_enable; | |||
| 1881 | encoder->pre_enable = gen11_dsi_pre_enable; | |||
| 1882 | encoder->enable = gen11_dsi_enable; | |||
| 1883 | encoder->disable = gen11_dsi_disable; | |||
| 1884 | encoder->post_disable = gen11_dsi_post_disable; | |||
| 1885 | encoder->port = port; | |||
| 1886 | encoder->get_config = gen11_dsi_get_config; | |||
| 1887 | encoder->update_pipe = intel_panel_update_backlight; | |||
| 1888 | encoder->compute_config = gen11_dsi_compute_config; | |||
| 1889 | encoder->get_hw_state = gen11_dsi_get_hw_state; | |||
| 1890 | encoder->type = INTEL_OUTPUT_DSI; | |||
| 1891 | encoder->cloneable = 0; | |||
| 1892 | encoder->pipe_mask = ~0; | |||
| 1893 | encoder->power_domain = POWER_DOMAIN_PORT_DSI; | |||
| 1894 | encoder->get_power_domains = gen11_dsi_get_power_domains; | |||
| 1895 | ||||
| 1896 | /* register DSI connector with DRM subsystem */ | |||
| 1897 | drm_connector_init(dev, connector, &gen11_dsi_connector_funcs, | |||
| 1898 | DRM_MODE_CONNECTOR_DSI);
| 1899 | drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs);
| 1900 | connector->display_info.subpixel_order = SubPixelHorizontalRGB;
| 1901 | connector->interlace_allowed = false;
| 1902 | connector->doublescan_allowed = false;
| 1903 | intel_connector->get_hw_state = intel_connector_get_hw_state; | |||
| 1904 | ||||
| 1905 | /* attach connector to encoder */ | |||
| 1906 | intel_connector_attach_encoder(intel_connector, encoder); | |||
| 1907 | ||||
| 1908 | mutex_lock(&dev->mode_config.mutex);
| 1909 | fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
| 1910 | mutex_unlock(&dev->mode_config.mutex);
| 1911 |
| 1912 | if (!fixed_mode) {
| 1913 | drm_err(&dev_priv->drm, "DSI fixed mode info missing\n");
| 1914 | goto err;
| 1915 | }
| 1916 |
| 1917 | intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
| 1918 | intel_panel_setup_backlight(connector, INVALID_PIPE);
| 1919 |
| 1920 | if (dev_priv->vbt.dsi.config->dual_link)
| 1921 | intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
| 1922 | else
| 1923 | intel_dsi->ports = BIT(port);
| 1924 | ||||
| 1925 | intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports; | |||
| 1926 | intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports; | |||
| 1927 | ||||
| 1928 | for_each_dsi_port(port, intel_dsi->ports) {
| 1929 | struct intel_dsi_host *host; | |||
| 1930 | ||||
| 1931 | host = intel_dsi_host_init(intel_dsi, &gen11_dsi_host_ops, port); | |||
| 1932 | if (!host) | |||
| 1933 | goto err; | |||
| 1934 | ||||
| 1935 | intel_dsi->dsi_hosts[port] = host; | |||
| 1936 | } | |||
| 1937 | ||||
| 1938 | if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
| 1939 | drm_dbg_kms(&dev_priv->drm, "no device found\n");
| 1940 | goto err; | |||
| 1941 | } | |||
| 1942 | ||||
| 1943 | icl_dphy_param_init(intel_dsi); | |||
| 1944 | ||||
| 1945 | icl_dsi_add_properties(intel_connector); | |||
| 1946 | return; | |||
| 1947 | ||||
| 1948 | err: | |||
| 1949 | drm_connector_cleanup(connector); | |||
| 1950 | drm_encoder_cleanup(&encoder->base); | |||
| 1951 | kfree(intel_dsi); | |||
| 1952 | kfree(intel_connector); | |||
| 1953 | } |