File: dev/pci/drm/amd/amdgpu/hdp_v6_0.c
Warning: line 51, column 17: Although the value stored to 'hdp_clk_cntl1' is used in the enclosing expression, the value is never actually read from 'hdp_clk_cntl1'
  1 | /*
  2 |  * Copyright 2020 Advanced Micro Devices, Inc.
  3 |  *
  4 |  * Permission is hereby granted, free of charge, to any person obtaining a
  5 |  * copy of this software and associated documentation files (the "Software"),
  6 |  * to deal in the Software without restriction, including without limitation
  7 |  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 |  * and/or sell copies of the Software, and to permit persons to whom the
  9 |  * Software is furnished to do so, subject to the following conditions:
 10 |  *
 11 |  * The above copyright notice and this permission notice shall be included in
 12 |  * all copies or substantial portions of the Software.
 13 |  *
 14 |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 |  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 |  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 17 |  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 |  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 |  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 |  * OTHER DEALINGS IN THE SOFTWARE.
 21 |  *
 22 |  */
 23 | #include "amdgpu.h"
 24 | #include "amdgpu_atombios.h"
 25 | #include "hdp_v6_0.h"
 26 |
 27 | #include "hdp/hdp_6_0_0_offset.h"
 28 | #include "hdp/hdp_6_0_0_sh_mask.h"
 29 | #include <uapi/linux/kfd_ioctl.h>
 30 |
 31 | static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
 32 |                                struct amdgpu_ring *ring)
 33 | {
 34 |         if (!ring || !ring->funcs->emit_wreg)
 35 |                 WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
 36 |         else
 37 |                 amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
 38 | }
 39 |
 40 | static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,
 41 |                                          bool enable)
 42 | {
 43 |         uint32_t hdp_clk_cntl, hdp_clk_cntl1;
 44 |         uint32_t hdp_mem_pwr_cntl;
 45 |
 46 |         if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
 47 |                                 AMD_CG_SUPPORT_HDP_DS |
 48 |                                 AMD_CG_SUPPORT_HDP_SD)))
 49 |                 return;
 50 |
 51 |         hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
    |                        ^ Although the value stored to 'hdp_clk_cntl1' is used in the enclosing expression, the value is never actually read from 'hdp_clk_cntl1'
 52 |         hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
 53 |
 54 |         /* Before doing clock/power mode switch,
 55 |          * forced on IPH & RC clock */
 56 |         hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
 57 |                                      RC_MEM_CLK_SOFT_OVERRIDE, 1);
 58 |         WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
 59 |
 60 |         /* disable clock and power gating before any changing */
 61 |         hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
 62 |                                          ATOMIC_MEM_POWER_CTRL_EN, 0);
 63 |         hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
 64 |                                          ATOMIC_MEM_POWER_LS_EN, 0);
 65 |         hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
 66 |                                          ATOMIC_MEM_POWER_DS_EN, 0);
 67 |         hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
 68 |                                          ATOMIC_MEM_POWER_SD_EN, 0);
 69 |         hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
 70 |                                          RC_MEM_POWER_CTRL_EN, 0);
 71 |         hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
 72 |                                          RC_MEM_POWER_LS_EN, 0);
 73 |         hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
 74 |                                          RC_MEM_POWER_DS_EN, 0);
 75 |         hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
 76 |                                          RC_MEM_POWER_SD_EN, 0);
 77 |         WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
 78 |
 79 |         /* Already disabled above. The actions below are for "enabled" only */
 80 |         if (enable) {
 81 |                 /* only one clock gating mode (LS/DS/SD) can be enabled */
 82 |                 if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
 83 |                         hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
 84 |                                                          HDP_MEM_POWER_CTRL,
 85 |                                                          ATOMIC_MEM_POWER_SD_EN, 1);
 86 |                         hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
 87 |                                                          HDP_MEM_POWER_CTRL,
 88 |                                                          RC_MEM_POWER_SD_EN, 1);
 89 |                 } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
 90 |                         hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
 91 |                                                          HDP_MEM_POWER_CTRL,
 92 |                                                          ATOMIC_MEM_POWER_LS_EN, 1);
 93 |                         hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
 94 |                                                          HDP_MEM_POWER_CTRL,
 95 |                                                          RC_MEM_POWER_LS_EN, 1);
 96 |                 } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
 97 |                         hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
 98 |                                                          HDP_MEM_POWER_CTRL,
 99 |                                                          ATOMIC_MEM_POWER_DS_EN, 1);
100 |                         hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
101 |                                                          HDP_MEM_POWER_CTRL,
102 |                                                          RC_MEM_POWER_DS_EN, 1);
103 |                 }
104 |
105 |                 /* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
106 |                  * be set for SRAM LS/DS/SD */
107 |                 if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
108 |                                       AMD_CG_SUPPORT_HDP_SD)) {
109 |                         hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
110 |                                                          ATOMIC_MEM_POWER_CTRL_EN, 1);
111 |                         hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
112 |                                                          RC_MEM_POWER_CTRL_EN, 1);
113 |                         WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
114 |                 }
115 |         }
116 |
117 |         /* disable IPH & RC clock override after clock/power mode changing */
118 |         hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
119 |                                      RC_MEM_CLK_SOFT_OVERRIDE, 0);
120 |         WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
121 | }
122 |
123 | static void hdp_v6_0_get_clockgating_state(struct amdgpu_device *adev,
124 |                                            u64 *flags)
125 | {
126 |         uint32_t tmp;
127 |
128 |         /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
129 |         tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
130 |         if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK)
131 |                 *flags |= AMD_CG_SUPPORT_HDP_LS;
132 |         else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK)
133 |                 *flags |= AMD_CG_SUPPORT_HDP_DS;
134 |         else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK)
135 |                 *flags |= AMD_CG_SUPPORT_HDP_SD;
136 | }
137 |
138 | const struct amdgpu_hdp_funcs hdp_v6_0_funcs = {
139 |         .flush_hdp = hdp_v6_0_flush_hdp,
140 |         .update_clock_gating = hdp_v6_0_update_clock_gating,
141 |         .get_clock_gating_state = hdp_v6_0_get_clockgating_state,
142 | };
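
About the warning: at line 51 the value read from regHDP_CLK_CNTL is stored into both hdp_clk_cntl and hdp_clk_cntl1, but hdp_clk_cntl1 is referenced nowhere else in this file (only its declaration at line 43 and the store at line 51), so the second store is dead. The sketch below shows one minimal way the report could be addressed, simply dropping the unused variable; it is offered as an illustration only, not as the change actually applied in the driver. All identifiers are taken from the file itself.

/* Illustrative cleanup sketch: remove the never-read hdp_clk_cntl1 so the
 * HDP_CLK_CNTL value is stored exactly once. Register programming is
 * unchanged, because every later use of the value goes through hdp_clk_cntl. */
static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,
                                         bool enable)
{
        uint32_t hdp_clk_cntl;          /* hdp_clk_cntl1 dropped: it was never read */
        uint32_t hdp_mem_pwr_cntl;

        if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
                                AMD_CG_SUPPORT_HDP_DS |
                                AMD_CG_SUPPORT_HDP_SD)))
                return;

        hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
        hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);

        /* ... remainder identical to lines 54-120 of the listing above ... */
}

If the duplicate assignment was ever intended to preserve the original register value for a later restore, that would require an actual read of hdp_clk_cntl1; nothing in this file performs one, which is exactly what the analyzer is pointing out.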