| File: | dev/pci/drm/amd/amdgpu/mmhub_v2_0.c |
| Warning: | line 594, column 8 The left expression of the compound assignment is an uninitialized value. The computed value will also be garbage |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
| 1 | /* | |||
| 2 | * Copyright 2019 Advanced Micro Devices, Inc. | |||
| 3 | * | |||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | |||
| 5 | * copy of this software and associated documentation files (the "Software"), | |||
| 6 | * to deal in the Software without restriction, including without limitation | |||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | |||
| 9 | * Software is furnished to do so, subject to the following conditions: | |||
| 10 | * | |||
| 11 | * The above copyright notice and this permission notice shall be included in | |||
| 12 | * all copies or substantial portions of the Software. | |||
| 13 | * | |||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | |||
| 21 | * | |||
| 22 | */ | |||
| 23 | ||||
| 24 | #include "amdgpu.h" | |||
| 25 | #include "mmhub_v2_0.h" | |||
| 26 | ||||
| 27 | #include "mmhub/mmhub_2_0_0_offset.h" | |||
| 28 | #include "mmhub/mmhub_2_0_0_sh_mask.h" | |||
| 29 | #include "mmhub/mmhub_2_0_0_default.h" | |||
| 30 | #include "navi10_enum.h" | |||
| 31 | ||||
| 32 | #include "gc/gc_10_1_0_offset.h" | |||
| 33 | #include "soc15_common.h" | |||
| 34 | ||||
/* Sienna Cichlid relocated DAGB0_CNTL_MISC2; use local offsets until the
 * generated register headers provide them. (Extraction had fused the macro
 * names with their values; restored to valid defines.) */
#define mmDAGB0_CNTL_MISC2_Sienna_Cichlid          0x0070
#define mmDAGB0_CNTL_MISC2_Sienna_Cichlid_BASE_IDX 0
| 37 | ||||
/*
 * MMHUB client-ID -> name table for Navi1x (MMHUB 2.0.0 / 2.0.2).
 * First index is the CID field of MMVM_L2_PROTECTION_FAULT_STATUS,
 * second index is the RW bit (0 = read client, 1 = write client).
 * Unlisted entries remain NULL and are reported as "unknown".
 */
static const char *mmhub_client_ids_navi1x[][2] = {
	[3][0]  = "DCEDMC", [3][1]  = "DCEDMC",
	[4][0]  = "DCEVGA", [4][1]  = "DCEXFC",
	[5][0]  = "MP0",    [5][1]  = "DCEVGA",
	[6][0]  = "MP1",    [6][1]  = "DCEDWB",
	[7][1]  = "MP0",
	[8][1]  = "MP1",
	[9][1]  = "DBGU1",
	[10][1] = "DBGU0",
	[11][1] = "XDP",
	[13][0] = "VMC",
	[14][0] = "HDP",    [14][1] = "HDP",
	[15][0] = "OSS",    [15][1] = "OSS",
	[16][0] = "VCNU",   [16][1] = "VCNU",
	[17][0] = "JPEG",   [17][1] = "JPEG",
	[18][0] = "VCN",    [18][1] = "VCN",
};
| 64 | ||||
/*
 * MMHUB client-ID -> name table for Sienna Cichlid (MMHUB 2.1.0 / 2.1.1).
 * Indexed by [CID][RW] from MMVM_L2_PROTECTION_FAULT_STATUS; NULL slots
 * are reported as "unknown".  Note the lone read client at CID 43
 * (written as 32+11 upstream): VCN0.
 */
static const char *mmhub_client_ids_sienna_cichlid[][2] = {
	[0][1]  = "DBGU0",
	[1][1]  = "DBGU1",
	[2][1]  = "DCEDWB",
	[3][0]  = "DCEDMC", [3][1]  = "DCEDMC",
	[4][0]  = "DCEVGA", [4][1]  = "DCEVGA",
	[5][0]  = "MP0",    [5][1]  = "MP0",
	[6][0]  = "MP1",    [6][1]  = "MP1",
	[7][1]  = "XDP",
	[8][0]  = "VMC",
	[9][0]  = "VCNU0",  [9][1]  = "VCNU0",
	[10][0] = "JPEG",   [10][1] = "JPEG",
	[11][1] = "VCN0",
	[12][0] = "VCNU1",  [12][1] = "VCNU1",
	[13][0] = "VCN1",   [13][1] = "VCN1",
	[14][0] = "HDP",    [14][1] = "HDP",
	[15][0] = "OSS",    [15][1] = "OSS",
	[43][0] = "VCN0",
};
| 94 | ||||
/*
 * MMHUB client-ID -> name table for Beige Goby (MMHUB 2.1.2).
 * Indexed by [CID][RW] from MMVM_L2_PROTECTION_FAULT_STATUS; NULL slots
 * are reported as "unknown".
 */
static const char *mmhub_client_ids_beige_goby[][2] = {
	[0][1]  = "DBGU0",
	[1][1]  = "DBGU1",
	[2][1]  = "DCEDWB",
	[3][0]  = "DCEDMC", [3][1]  = "DCEDMC",
	[4][0]  = "DCEVGA", [4][1]  = "DCEVGA",
	[5][0]  = "MP0",    [5][1]  = "MP0",
	[6][0]  = "MP1",    [6][1]  = "MP1",
	[7][1]  = "XDP",
	[8][0]  = "VMC",
	[9][0]  = "VCNU0",  [9][1]  = "VCNU0",
	[11][0] = "VCN0",   [11][1] = "VCN0",
	[14][0] = "HDP",    [14][1] = "HDP",
	[15][0] = "OSS",    [15][1] = "OSS",
};
| 118 | ||||
/*
 * Build the MMVM_INVALIDATE_ENG0_REQ value for a legacy-mode TLB flush
 * of @vmid with the given @flush_type.
 *
 * Field layout (masks/shifts match the mmhub_2_0_0_sh_mask.h expansions
 * visible in the original REG_SET_FIELD sequence):
 *   PER_VMID_INVALIDATE_REQ  bits 15:0  (one-hot per VMID)
 *   FLUSH_TYPE               bits 18:16
 *   INVALIDATE_L2_PTES       bit 19
 *   INVALIDATE_L2_PDE0       bit 20
 *   INVALIDATE_L2_PDE1       bit 21
 *   INVALIDATE_L2_PDE2       bit 22
 *   INVALIDATE_L1_PTES       bit 23
 *   CLEAR_PROTECTION_FAULT_STATUS_ADDR  bit 24 (deliberately left 0)
 */
static uint32_t mmhub_v2_0_get_invalidate_req(unsigned int vmid,
					      uint32_t flush_type)
{
	uint32_t req;

	/* invalidate using legacy mode on vmid */
	req  = 0x0000FFFFu & (1u << vmid);         /* PER_VMID_INVALIDATE_REQ */
	req |= 0x00070000u & (flush_type << 0x10); /* FLUSH_TYPE */
	req |= 0x00080000u;                        /* INVALIDATE_L2_PTES = 1 */
	req |= 0x00100000u;                        /* INVALIDATE_L2_PDE0 = 1 */
	req |= 0x00200000u;                        /* INVALIDATE_L2_PDE1 = 1 */
	req |= 0x00400000u;                        /* INVALIDATE_L2_PDE2 = 1 */
	req |= 0x00800000u;                        /* INVALIDATE_L1_PTES = 1 */
	/* CLEAR_PROTECTION_FAULT_STATUS_ADDR stays 0 */

	return req;
}
| 138 | ||||
/*
 * Decode and log an MMVM_L2_PROTECTION_FAULT_STATUS value.
 *
 * Extracts the client ID (CID) and read/write (RW) fields, maps them to a
 * human-readable client name using the per-ASIC tables above (selected by
 * the MMHUB IP version), and dumps the remaining status fields via dev_err.
 */
static void
mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
					    uint32_t status)
{
	uint32_t cid, rw;
	const char *mmhub_cid = NULL;

	/* CID: bits 17:9, RW: bit 18 of the fault status register. */
	cid = REG_GET_FIELD(status,
			    MMVM_L2_PROTECTION_FAULT_STATUS, CID);
	rw = REG_GET_FIELD(status,
			   MMVM_L2_PROTECTION_FAULT_STATUS, RW);

	dev_err(adev->dev,
		"MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	/* Pick the client-name table matching this ASIC's MMHUB version. */
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(2, 0, 0):
	case IP_VERSION(2, 0, 2):
		mmhub_cid = mmhub_client_ids_navi1x[cid][rw];
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
		mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw];
		break;
	case IP_VERSION(2, 1, 2):
		mmhub_cid = mmhub_client_ids_beige_goby[cid][rw];
		break;
	default:
		mmhub_cid = NULL;
		break;
	}
	/* Table entries may be NULL for unlisted CIDs; report "unknown". */
	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
		mmhub_cid ? mmhub_cid : "unknown", cid);
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
}
| 186 | ||||
/*
 * Program the page-table base address (LO32/HI32) for @vmid's context
 * registers on MMHUB 0, using the RLC-safe write path (required when
 * running as an SR-IOV virtual function).
 */
static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
					uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];

	/* Per-context registers are ctx_addr_distance apart, so offset by
	 * vmid from the CONTEXT0 register. */
	WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
				hub->ctx_addr_distance * vmid,
				lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
				hub->ctx_addr_distance * vmid,
				upper_32_bits(page_table_base));
}
| 200 | ||||
/*
 * Program the GART aperture for VMID0: page-table base plus the start and
 * end GPU virtual addresses of the aperture.
 */
static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);

	/* Addresses are programmed in 4K-page units split across a LO32/HI32
	 * register pair: low word is addr >> 12, high word is addr >> 44. */
	WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			 (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			 (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			 (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			 (u32)(adev->gmc.gart_end >> 44));
}
| 217 | ||||
/*
 * Program the MMHUB system aperture: AGP BAR, system aperture low/high
 * logical page numbers, the default page address, and the protection-fault
 * default address.
 */
static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;
	uint32_t tmp;

	/* Under SR-IOV the host (PF) owns these registers; skip them. */
	if (!amdgpu_sriov_vf(adev)) {
		/* Program the AGP BAR */
		WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0);
		/* AGP bottom/top are in 16MB units (>> 24). */
		WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
		WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

		/* Program the system aperture low logical page number.
		 * The aperture must cover both FB and AGP ranges, hence
		 * min()/max() of the two; units are 256KB pages (>> 18). */
		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
			     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
	}

	/* Set default page address (the VRAM scratch page, as an MC
	 * physical address). */
	value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault": faulting accesses are redirected to
	 * the dummy page. */
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	/* Allow PTE-read retry during active page migration. */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}
| 254 | ||||
/*
 * Configure the MMHUB L1 TLB: enable it, set system access mode and
 * memory type for system-aperture accesses.
 */
static void mmhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	/* Do not allow unmapped accesses through the system aperture. */
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* UC, uncached */

	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);
}
| 273 | ||||
| 274 | static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev) | |||
| 275 | { | |||
| 276 | uint32_t tmp; | |||
| 277 | ||||
| 278 | /* These registers are not accessible to VF-SRIOV. | |||
| 279 | * The PF will program them instead. | |||
| 280 | */ | |||
| 281 | if (amdgpu_sriov_vf(adev)((adev)->virt.caps & (1 << 2))) | |||
| 282 | return; | |||
| 283 | ||||
| 284 | /* Setup L2 cache */ | |||
| 285 | tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[MMHUB_HWIP][0 ][0] + 0x0680, 0, MMHUB_HWIP) : amdgpu_device_rreg(adev, (adev ->reg_offset[MMHUB_HWIP][0][0] + 0x0680), 0)); | |||
| 286 | tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1)(((tmp) & ~0x00000001L) | (0x00000001L & ((1) << 0x0))); | |||
| 287 | tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0)(((tmp) & ~0x00000002L) | (0x00000002L & ((0) << 0x1))); | |||
| 288 | tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,(((tmp) & ~0x00000800L) | (0x00000800L & ((1) << 0xb))) | |||
| 289 | ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1)(((tmp) & ~0x00000800L) | (0x00000800L & ((1) << 0xb))); | |||
| 290 | /* XXX for emulation, Refer to closed source code.*/ | |||
| 291 | tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,(((tmp) & ~0x00000100L) | (0x00000100L & ((0) << 0x8))) | |||
| 292 | 0)(((tmp) & ~0x00000100L) | (0x00000100L & ((0) << 0x8))); | |||
| 293 | tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0)(((tmp) & ~0x00040000L) | (0x00040000L & ((0) << 0x12))); | |||
| 294 | tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1)(((tmp) & ~0x00180000L) | (0x00180000L & ((1) << 0x13))); | |||
| 295 | tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0)(((tmp) & ~0x03E00000L) | (0x03E00000L & ((0) << 0x15))); | |||
| 296 | WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0680), tmp, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev , ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0680)), (tmp), 0 )); | |||
| 297 | ||||
| 298 | tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[MMHUB_HWIP][0 ][0] + 0x0681, 0, MMHUB_HWIP) : amdgpu_device_rreg(adev, (adev ->reg_offset[MMHUB_HWIP][0][0] + 0x0681), 0)); | |||
| 299 | tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1)(((tmp) & ~0x00000001L) | (0x00000001L & ((1) << 0x0))); | |||
| 300 | tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1)(((tmp) & ~0x00000002L) | (0x00000002L & ((1) << 0x1))); | |||
| 301 | WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0681), tmp, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev , ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0681)), (tmp), 0 )); | |||
| 302 | ||||
| 303 | tmp = mmMMVM_L2_CNTL3_DEFAULT0x80100007; | |||
| 304 | if (adev->gmc.translate_further) { | |||
| 305 | tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12)(((tmp) & ~0x0000003FL) | (0x0000003FL & ((12) << 0x0))); | |||
| 306 | tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,(((tmp) & ~0x000F8000L) | (0x000F8000L & ((9) << 0xf))) | |||
| 307 | L2_CACHE_BIGK_FRAGMENT_SIZE, 9)(((tmp) & ~0x000F8000L) | (0x000F8000L & ((9) << 0xf))); | |||
| 308 | } else { | |||
| 309 | tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9)(((tmp) & ~0x0000003FL) | (0x0000003FL & ((9) << 0x0))); | |||
| 310 | tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,(((tmp) & ~0x000F8000L) | (0x000F8000L & ((6) << 0xf))) | |||
| 311 | L2_CACHE_BIGK_FRAGMENT_SIZE, 6)(((tmp) & ~0x000F8000L) | (0x000F8000L & ((6) << 0xf))); | |||
| 312 | } | |||
| 313 | WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0682), tmp, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev , ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0682)), (tmp), 0 )); | |||
| 314 | ||||
| 315 | tmp = mmMMVM_L2_CNTL4_DEFAULT0x000000c1; | |||
| 316 | tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0)(((tmp) & ~0x00000040L) | (0x00000040L & ((0) << 0x6))); | |||
| 317 | tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0)(((tmp) & ~0x00000080L) | (0x00000080L & ((0) << 0x7))); | |||
| 318 | WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL4, tmp)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0698), tmp, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev , ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0698)), (tmp), 0 )); | |||
| 319 | ||||
| 320 | tmp = mmMMVM_L2_CNTL5_DEFAULT0x00003fe0; | |||
| 321 | tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0)(((tmp) & ~0x0000001FL) | (0x0000001FL & ((0) << 0x0))); | |||
| 322 | WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL5, tmp)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x06a1), tmp, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev , ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x06a1)), (tmp), 0 )); | |||
| 323 | } | |||
| 324 | ||||
| 325 | static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev) | |||
| 326 | { | |||
| 327 | uint32_t tmp; | |||
| 328 | ||||
| 329 | tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[MMHUB_HWIP][0 ][0] + 0x06c0, 0, MMHUB_HWIP) : amdgpu_device_rreg(adev, (adev ->reg_offset[MMHUB_HWIP][0][0] + 0x06c0), 0)); | |||
| 330 | tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1)(((tmp) & ~0x00000001L) | (0x00000001L & ((1) << 0x0))); | |||
| 331 | tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0)(((tmp) & ~0x00000006L) | (0x00000006L & ((0) << 0x1))); | |||
| 332 | tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,(((tmp) & ~0x00000080L) | (0x00000080L & ((0) << 0x7))) | |||
| 333 | RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0)(((tmp) & ~0x00000080L) | (0x00000080L & ((0) << 0x7))); | |||
| 334 | WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp)do { uint32_t target_reg = adev->reg_offset[MMHUB_HWIP][0] [0] + 0x06c0; ((((adev)->virt.caps & (1 << 2)) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, target_reg, tmp, (1<<2), MMHUB_HWIP ) : amdgpu_device_wreg(adev, (target_reg), (tmp), 0)); } while (0); | |||
| 335 | } | |||
| 336 | ||||
| 337 | static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev) | |||
| 338 | { | |||
| 339 | /* These registers are not accessible to VF-SRIOV. | |||
| 340 | * The PF will program them instead. | |||
| 341 | */ | |||
| 342 | if (amdgpu_sriov_vf(adev)((adev)->virt.caps & (1 << 2))) | |||
| 343 | return; | |||
| 344 | ||||
| 345 | WREG32_SOC15(MMHUB, 0,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0692), 0xFFFFFFFF, 0, MMHUB_HWIP) : amdgpu_device_wreg (adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0692)), (0xFFFFFFFF ), 0)) | |||
| 346 | mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0692), 0xFFFFFFFF, 0, MMHUB_HWIP) : amdgpu_device_wreg (adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0692)), (0xFFFFFFFF ), 0)) | |||
| 347 | 0xFFFFFFFF)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0692), 0xFFFFFFFF, 0, MMHUB_HWIP) : amdgpu_device_wreg (adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0692)), (0xFFFFFFFF ), 0)); | |||
| 348 | WREG32_SOC15(MMHUB, 0,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0693), 0x0000000F, 0, MMHUB_HWIP) : amdgpu_device_wreg (adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0693)), (0x0000000F ), 0)) | |||
| 349 | mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0693), 0x0000000F, 0, MMHUB_HWIP) : amdgpu_device_wreg (adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0693)), (0x0000000F ), 0)) | |||
| 350 | 0x0000000F)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0693), 0x0000000F, 0, MMHUB_HWIP) : amdgpu_device_wreg (adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0693)), (0x0000000F ), 0)); | |||
| 351 | ||||
| 352 | WREG32_SOC15(MMHUB, 0,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0694), 0, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0694)), (0), 0)) | |||
| 353 | mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0694), 0, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0694)), (0), 0)); | |||
| 354 | WREG32_SOC15(MMHUB, 0,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0695), 0, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0695)), (0), 0)) | |||
| 355 | mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0695), 0, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0695)), (0), 0)); | |||
| 356 | ||||
| 357 | WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0696), 0, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0696)), (0), 0)) | |||
| 358 | 0)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0696), 0, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0696)), (0), 0)); | |||
| 359 | WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0697), 0, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0697)), (0), 0)) | |||
| 360 | 0)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0697), 0, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0697)), (0), 0)); | |||
| 361 | } | |||
| 362 | ||||
| 363 | static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) | |||
| 364 | { | |||
| 365 | struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_01]; | |||
| 366 | int i; | |||
| 367 | uint32_t tmp; | |||
| 368 | ||||
| 369 | for (i = 0; i <= 14; i++) { | |||
| 370 | tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x06c1) + i, 0, MMHUB_HWIP) : amdgpu_device_rreg(adev , ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x06c1) + i), 0)); | |||
| 371 | tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1)(((tmp) & ~0x00000001L) | (0x00000001L & ((1) << 0x0))); | |||
| 372 | tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,(((tmp) & ~0x00000006L) | (0x00000006L & ((adev->vm_manager .num_level) << 0x1))) | |||
| 373 | adev->vm_manager.num_level)(((tmp) & ~0x00000006L) | (0x00000006L & ((adev->vm_manager .num_level) << 0x1))); | |||
| 374 | tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,(((tmp) & ~0x00000400L) | (0x00000400L & ((1) << 0xa))) | |||
| 375 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1)(((tmp) & ~0x00000400L) | (0x00000400L & ((1) << 0xa))); | |||
| 376 | tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,(((tmp) & ~0x00001000L) | (0x00001000L & ((1) << 0xc))) | |||
| 377 | DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,(((tmp) & ~0x00001000L) | (0x00001000L & ((1) << 0xc))) | |||
| 378 | 1)(((tmp) & ~0x00001000L) | (0x00001000L & ((1) << 0xc))); | |||
| 379 | tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,(((tmp) & ~0x00004000L) | (0x00004000L & ((1) << 0xe))) | |||
| 380 | PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1)(((tmp) & ~0x00004000L) | (0x00004000L & ((1) << 0xe))); | |||
| 381 | tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,(((tmp) & ~0x00010000L) | (0x00010000L & ((1) << 0x10))) | |||
| 382 | VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1)(((tmp) & ~0x00010000L) | (0x00010000L & ((1) << 0x10))); | |||
| 383 | tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,(((tmp) & ~0x00040000L) | (0x00040000L & ((1) << 0x12))) | |||
| 384 | READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1)(((tmp) & ~0x00040000L) | (0x00040000L & ((1) << 0x12))); | |||
| 385 | tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,(((tmp) & ~0x00100000L) | (0x00100000L & ((1) << 0x14))) | |||
| 386 | WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1)(((tmp) & ~0x00100000L) | (0x00100000L & ((1) << 0x14))); | |||
| 387 | tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,(((tmp) & ~0x00400000L) | (0x00400000L & ((1) << 0x16))) | |||
| 388 | EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1)(((tmp) & ~0x00400000L) | (0x00400000L & ((1) << 0x16))); | |||
| 389 | tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,(((tmp) & ~0x00000078L) | (0x00000078L & ((adev->vm_manager .block_size - 9) << 0x3))) | |||
| 390 | PAGE_TABLE_BLOCK_SIZE,(((tmp) & ~0x00000078L) | (0x00000078L & ((adev->vm_manager .block_size - 9) << 0x3))) | |||
| 391 | adev->vm_manager.block_size - 9)(((tmp) & ~0x00000078L) | (0x00000078L & ((adev->vm_manager .block_size - 9) << 0x3))); | |||
| 392 | /* Send no-retry XNACK on fault to suppress VM fault storm. */ | |||
| 393 | tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,(((tmp) & ~0x00000080L) | (0x00000080L & ((!adev-> gmc.noretry) << 0x7))) | |||
| 394 | RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,(((tmp) & ~0x00000080L) | (0x00000080L & ((!adev-> gmc.noretry) << 0x7))) | |||
| 395 | !adev->gmc.noretry)(((tmp) & ~0x00000080L) | (0x00000080L & ((!adev-> gmc.noretry) << 0x7))); | |||
| 396 | WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x06c1) + i * hub->ctx_distance, tmp, (1<<2) , MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset [MMHUB_HWIP][0][0] + 0x06c1) + i * hub->ctx_distance), (tmp ), 0)) | |||
| 397 | i * hub->ctx_distance, tmp)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x06c1) + i * hub->ctx_distance, tmp, (1<<2) , MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset [MMHUB_HWIP][0][0] + 0x06c1) + i * hub->ctx_distance), (tmp ), 0)); | |||
| 398 | WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x074d) + i * hub->ctx_addr_distance, 0, (1<< 2), MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset [MMHUB_HWIP][0][0] + 0x074d) + i * hub->ctx_addr_distance) , (0), 0)) | |||
| 399 | i * hub->ctx_addr_distance, 0)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x074d) + i * hub->ctx_addr_distance, 0, (1<< 2), MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset [MMHUB_HWIP][0][0] + 0x074d) + i * hub->ctx_addr_distance) , (0), 0)); | |||
| 400 | WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x074e) + i * hub->ctx_addr_distance, 0, (1<< 2), MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset [MMHUB_HWIP][0][0] + 0x074e) + i * hub->ctx_addr_distance) , (0), 0)) | |||
| 401 | i * hub->ctx_addr_distance, 0)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x074e) + i * hub->ctx_addr_distance, 0, (1<< 2), MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset [MMHUB_HWIP][0][0] + 0x074e) + i * hub->ctx_addr_distance) , (0), 0)); | |||
| 402 | WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x076d) + i * hub->ctx_addr_distance, ((u32)(adev-> vm_manager.max_pfn - 1)), (1<<2), MMHUB_HWIP) : amdgpu_device_wreg (adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x076d) + i * hub->ctx_addr_distance), (((u32)(adev->vm_manager.max_pfn - 1))), 0)) | |||
| 403 | i * hub->ctx_addr_distance,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x076d) + i * hub->ctx_addr_distance, ((u32)(adev-> vm_manager.max_pfn - 1)), (1<<2), MMHUB_HWIP) : amdgpu_device_wreg (adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x076d) + i * hub->ctx_addr_distance), (((u32)(adev->vm_manager.max_pfn - 1))), 0)) | |||
| 404 | lower_32_bits(adev->vm_manager.max_pfn - 1))((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x076d) + i * hub->ctx_addr_distance, ((u32)(adev-> vm_manager.max_pfn - 1)), (1<<2), MMHUB_HWIP) : amdgpu_device_wreg (adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x076d) + i * hub->ctx_addr_distance), (((u32)(adev->vm_manager.max_pfn - 1))), 0)); | |||
| 405 | WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x076e) + i * hub->ctx_addr_distance, ((u32)(((adev ->vm_manager.max_pfn - 1) >> 16) >> 16)), (1<< 2), MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset [MMHUB_HWIP][0][0] + 0x076e) + i * hub->ctx_addr_distance) , (((u32)(((adev->vm_manager.max_pfn - 1) >> 16) >> 16))), 0)) | |||
| 406 | i * hub->ctx_addr_distance,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x076e) + i * hub->ctx_addr_distance, ((u32)(((adev ->vm_manager.max_pfn - 1) >> 16) >> 16)), (1<< 2), MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset [MMHUB_HWIP][0][0] + 0x076e) + i * hub->ctx_addr_distance) , (((u32)(((adev->vm_manager.max_pfn - 1) >> 16) >> 16))), 0)) | |||
| 407 | upper_32_bits(adev->vm_manager.max_pfn - 1))((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x076e) + i * hub->ctx_addr_distance, ((u32)(((adev ->vm_manager.max_pfn - 1) >> 16) >> 16)), (1<< 2), MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset [MMHUB_HWIP][0][0] + 0x076e) + i * hub->ctx_addr_distance) , (((u32)(((adev->vm_manager.max_pfn - 1) >> 16) >> 16))), 0)); | |||
| 408 | } | |||
| 409 | ||||
| 410 | hub->vm_cntx_cntl = tmp; | |||
| 411 | } | |||
| 412 | ||||
| 413 | static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev) | |||
| 414 | { | |||
| 415 | struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_01]; | |||
| 416 | unsigned i; | |||
| 417 | ||||
| 418 | for (i = 0; i < 18; ++i) { | |||
| 419 | WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0707) + i * hub->eng_addr_distance, 0xffffffff, ( 1<<2), MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev-> reg_offset[MMHUB_HWIP][0][0] + 0x0707) + i * hub->eng_addr_distance ), (0xffffffff), 0)) | |||
| 420 | i * hub->eng_addr_distance, 0xffffffff)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0707) + i * hub->eng_addr_distance, 0xffffffff, ( 1<<2), MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev-> reg_offset[MMHUB_HWIP][0][0] + 0x0707) + i * hub->eng_addr_distance ), (0xffffffff), 0)); | |||
| 421 | WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0708) + i * hub->eng_addr_distance, 0x1f, (1<< 2), MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset [MMHUB_HWIP][0][0] + 0x0708) + i * hub->eng_addr_distance) , (0x1f), 0)) | |||
| 422 | i * hub->eng_addr_distance, 0x1f)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0708) + i * hub->eng_addr_distance, 0x1f, (1<< 2), MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset [MMHUB_HWIP][0][0] + 0x0708) + i * hub->eng_addr_distance) , (0x1f), 0)); | |||
| 423 | } | |||
| 424 | } | |||
| 425 | ||||
/* Bring up the MMHUB GART: program apertures, TLB and L2 cache, then
 * enable the system domain and per-VMID contexts.  Always succeeds.
 */
static int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
	/* GART Enable. */
	mmhub_v2_0_init_gart_aperture_regs(adev);
	mmhub_v2_0_init_system_aperture_regs(adev);
	mmhub_v2_0_init_tlb_regs(adev);
	mmhub_v2_0_init_cache_regs(adev);

	mmhub_v2_0_enable_system_domain(adev);
	mmhub_v2_0_disable_identity_aperture(adev);
	mmhub_v2_0_setup_vmid_config(adev);
	mmhub_v2_0_program_invalidation(adev);

	return 0;
}
| 441 | ||||
| 442 | static void mmhub_v2_0_gart_disable(struct amdgpu_device *adev) | |||
| 443 | { | |||
| 444 | struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_01]; | |||
| 445 | u32 tmp; | |||
| 446 | u32 i; | |||
| 447 | ||||
| 448 | /* Disable all tables */ | |||
| 449 | for (i = 0; i < AMDGPU_NUM_VMID16; i++) | |||
| 450 | WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_CNTL,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x06c0) + i * hub->ctx_distance, 0, (1<<2), MMHUB_HWIP ) : amdgpu_device_wreg(adev, ((adev->reg_offset[MMHUB_HWIP ][0][0] + 0x06c0) + i * hub->ctx_distance), (0), 0)) | |||
| 451 | i * hub->ctx_distance, 0)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x06c0) + i * hub->ctx_distance, 0, (1<<2), MMHUB_HWIP ) : amdgpu_device_wreg(adev, ((adev->reg_offset[MMHUB_HWIP ][0][0] + 0x06c0) + i * hub->ctx_distance), (0), 0)); | |||
| 452 | ||||
| 453 | /* Setup TLB control */ | |||
| 454 | tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[MMHUB_HWIP][0 ][0] + 0x0873, 0, MMHUB_HWIP) : amdgpu_device_rreg(adev, (adev ->reg_offset[MMHUB_HWIP][0][0] + 0x0873), 0)); | |||
| 455 | tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0)(((tmp) & ~0x00000001L) | (0x00000001L & ((0) << 0x0))); | |||
| 456 | tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,(((tmp) & ~0x00000040L) | (0x00000040L & ((0) << 0x6))) | |||
| 457 | ENABLE_ADVANCED_DRIVER_MODEL, 0)(((tmp) & ~0x00000040L) | (0x00000040L & ((0) << 0x6))); | |||
| 458 | WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0873), tmp, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev , ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0873)), (tmp), 0 )); | |||
| 459 | ||||
| 460 | /* Setup L2 cache */ | |||
| 461 | tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[MMHUB_HWIP][0 ][0] + 0x0680, 0, MMHUB_HWIP) : amdgpu_device_rreg(adev, (adev ->reg_offset[MMHUB_HWIP][0][0] + 0x0680), 0)); | |||
| 462 | tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 0)(((tmp) & ~0x00000001L) | (0x00000001L & ((0) << 0x0))); | |||
| 463 | WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0680), tmp, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev , ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0680)), (tmp), 0 )); | |||
| 464 | WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, 0)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0682), 0, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0682)), (0), 0)); | |||
| 465 | } | |||
| 466 | ||||
| 467 | /** | |||
| 468 | * mmhub_v2_0_set_fault_enable_default - update GART/VM fault handling | |||
| 469 | * | |||
| 470 | * @adev: amdgpu_device pointer | |||
| 471 | * @value: true redirects VM faults to the default page | |||
| 472 | */ | |||
| 473 | static void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool_Bool value) | |||
| 474 | { | |||
| 475 | u32 tmp; | |||
| 476 | ||||
| 477 | /* These registers are not accessible to VF-SRIOV. | |||
| 478 | * The PF will program them instead. | |||
| 479 | */ | |||
| 480 | if (amdgpu_sriov_vf(adev)((adev)->virt.caps & (1 << 2))) | |||
| 481 | return; | |||
| 482 | ||||
| 483 | tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[MMHUB_HWIP][0 ][0] + 0x0688, 0, MMHUB_HWIP) : amdgpu_device_rreg(adev, (adev ->reg_offset[MMHUB_HWIP][0][0] + 0x0688), 0)); | |||
| 484 | tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,(((tmp) & ~0x00000004L) | (0x00000004L & ((value) << 0x2))) | |||
| 485 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value)(((tmp) & ~0x00000004L) | (0x00000004L & ((value) << 0x2))); | |||
| 486 | tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,(((tmp) & ~0x00000008L) | (0x00000008L & ((value) << 0x3))) | |||
| 487 | PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value)(((tmp) & ~0x00000008L) | (0x00000008L & ((value) << 0x3))); | |||
| 488 | tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,(((tmp) & ~0x00000010L) | (0x00000010L & ((value) << 0x4))) | |||
| 489 | PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value)(((tmp) & ~0x00000010L) | (0x00000010L & ((value) << 0x4))); | |||
| 490 | tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,(((tmp) & ~0x00000020L) | (0x00000020L & ((value) << 0x5))) | |||
| 491 | PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value)(((tmp) & ~0x00000020L) | (0x00000020L & ((value) << 0x5))); | |||
| 492 | tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,(((tmp) & ~0x00000040L) | (0x00000040L & ((value) << 0x6))) | |||
| 493 | TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,(((tmp) & ~0x00000040L) | (0x00000040L & ((value) << 0x6))) | |||
| 494 | value)(((tmp) & ~0x00000040L) | (0x00000040L & ((value) << 0x6))); | |||
| 495 | tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,(((tmp) & ~0x00000080L) | (0x00000080L & ((value) << 0x7))) | |||
| 496 | NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value)(((tmp) & ~0x00000080L) | (0x00000080L & ((value) << 0x7))); | |||
| 497 | tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,(((tmp) & ~0x00000100L) | (0x00000100L & ((value) << 0x8))) | |||
| 498 | DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value)(((tmp) & ~0x00000100L) | (0x00000100L & ((value) << 0x8))); | |||
| 499 | tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,(((tmp) & ~0x00000200L) | (0x00000200L & ((value) << 0x9))) | |||
| 500 | VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value)(((tmp) & ~0x00000200L) | (0x00000200L & ((value) << 0x9))); | |||
| 501 | tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,(((tmp) & ~0x00000400L) | (0x00000400L & ((value) << 0xa))) | |||
| 502 | READ_PROTECTION_FAULT_ENABLE_DEFAULT, value)(((tmp) & ~0x00000400L) | (0x00000400L & ((value) << 0xa))); | |||
| 503 | tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,(((tmp) & ~0x00000800L) | (0x00000800L & ((value) << 0xb))) | |||
| 504 | WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value)(((tmp) & ~0x00000800L) | (0x00000800L & ((value) << 0xb))); | |||
| 505 | tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,(((tmp) & ~0x00001000L) | (0x00001000L & ((value) << 0xc))) | |||
| 506 | EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value)(((tmp) & ~0x00001000L) | (0x00001000L & ((value) << 0xc))); | |||
| 507 | if (!value) { | |||
| 508 | tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,(((tmp) & ~0x40000000L) | (0x40000000L & ((1) << 0x1e))) | |||
| 509 | CRASH_ON_NO_RETRY_FAULT, 1)(((tmp) & ~0x40000000L) | (0x40000000L & ((1) << 0x1e))); | |||
| 510 | tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,(((tmp) & ~0x80000000L) | (0x80000000L & ((1) << 0x1f))) | |||
| 511 | CRASH_ON_RETRY_FAULT, 1)(((tmp) & ~0x80000000L) | (0x80000000L & ((1) << 0x1f))); | |||
| 512 | } | |||
| 513 | WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL, tmp)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0688), tmp, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev , ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0688)), (tmp), 0 )); | |||
| 514 | } | |||
| 515 | ||||
| 516 | static const struct amdgpu_vmhub_funcs mmhub_v2_0_vmhub_funcs = { | |||
| 517 | .print_l2_protection_fault_status = mmhub_v2_0_print_l2_protection_fault_status, | |||
| 518 | .get_invalidate_req = mmhub_v2_0_get_invalidate_req, | |||
| 519 | }; | |||
| 520 | ||||
| 521 | static void mmhub_v2_0_init(struct amdgpu_device *adev) | |||
| 522 | { | |||
| 523 | struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_01]; | |||
| 524 | ||||
| 525 | hub->ctx0_ptb_addr_lo32 = | |||
| 526 | SOC15_REG_OFFSET(MMHUB, 0,(adev->reg_offset[MMHUB_HWIP][0][0] + 0x072b) | |||
| 527 | mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32)(adev->reg_offset[MMHUB_HWIP][0][0] + 0x072b); | |||
| 528 | hub->ctx0_ptb_addr_hi32 = | |||
| 529 | SOC15_REG_OFFSET(MMHUB, 0,(adev->reg_offset[MMHUB_HWIP][0][0] + 0x072c) | |||
| 530 | mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32)(adev->reg_offset[MMHUB_HWIP][0][0] + 0x072c); | |||
| 531 | hub->vm_inv_eng0_sem = | |||
| 532 | SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_SEM)(adev->reg_offset[MMHUB_HWIP][0][0] + 0x06d1); | |||
| 533 | hub->vm_inv_eng0_req = | |||
| 534 | SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ)(adev->reg_offset[MMHUB_HWIP][0][0] + 0x06e3); | |||
| 535 | hub->vm_inv_eng0_ack = | |||
| 536 | SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ACK)(adev->reg_offset[MMHUB_HWIP][0][0] + 0x06f5); | |||
| 537 | hub->vm_context0_cntl = | |||
| 538 | SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL)(adev->reg_offset[MMHUB_HWIP][0][0] + 0x06c0); | |||
| 539 | hub->vm_l2_pro_fault_status = | |||
| 540 | SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_STATUS)(adev->reg_offset[MMHUB_HWIP][0][0] + 0x068c); | |||
| 541 | hub->vm_l2_pro_fault_cntl = | |||
| 542 | SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL)(adev->reg_offset[MMHUB_HWIP][0][0] + 0x0688); | |||
| 543 | ||||
| 544 | hub->ctx_distance = mmMMVM_CONTEXT1_CNTL0x06c1 - mmMMVM_CONTEXT0_CNTL0x06c0; | |||
| 545 | hub->ctx_addr_distance = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO320x072d - | |||
| 546 | mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO320x072b; | |||
| 547 | hub->eng_distance = mmMMVM_INVALIDATE_ENG1_REQ0x06e4 - | |||
| 548 | mmMMVM_INVALIDATE_ENG0_REQ0x06e3; | |||
| 549 | hub->eng_addr_distance = mmMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO320x0709 - | |||
| 550 | mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO320x0707; | |||
| 551 | ||||
| 552 | hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK0x00000200L | | |||
| 553 | MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK0x00000800L | | |||
| 554 | MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK0x00002000L | | |||
| 555 | MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK0x00008000L | | |||
| 556 | MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK0x00020000L | | |||
| 557 | MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK0x00080000L | | |||
| 558 | MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK0x00200000L; | |||
| 559 | ||||
| 560 | hub->vmhub_funcs = &mmhub_v2_0_vmhub_funcs; | |||
| 561 | } | |||
| 562 | ||||
| 563 | static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, | |||
| 564 | bool_Bool enable) | |||
| 565 | { | |||
| 566 | uint32_t def, data, def1, data1; | |||
| 567 | ||||
| 568 | if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG(1ULL << 9))) | |||
| 569 | return; | |||
| 570 | ||||
| 571 | switch (adev->ip_versions[MMHUB_HWIP][0]) { | |||
| 572 | case IP_VERSION(2, 1, 0)(((2) << 16) | ((1) << 8) | (0)): | |||
| 573 | case IP_VERSION(2, 1, 1)(((2) << 16) | ((1) << 8) | (1)): | |||
| 574 | case IP_VERSION(2, 1, 2)(((2) << 16) | ((1) << 8) | (2)): | |||
| 575 | def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[MMHUB_HWIP][0 ][0] + 0x0070, 0, MMHUB_HWIP) : amdgpu_device_rreg(adev, (adev ->reg_offset[MMHUB_HWIP][0][0] + 0x0070), 0)); | |||
| 576 | break; | |||
| 577 | default: | |||
| 578 | def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[MMHUB_HWIP][0 ][0] + 0x064a, 0, MMHUB_HWIP) : amdgpu_device_rreg(adev, (adev ->reg_offset[MMHUB_HWIP][0][0] + 0x064a), 0)); | |||
| 579 | def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[MMHUB_HWIP][0 ][0] + 0x0071, 0, MMHUB_HWIP) : amdgpu_device_rreg(adev, (adev ->reg_offset[MMHUB_HWIP][0][0] + 0x0071), 0)); | |||
| 580 | break; | |||
| 581 | } | |||
| 582 | ||||
| 583 | if (enable
| |||
| 584 | data |= MM_ATC_L2_MISC_CG__ENABLE_MASK0x00040000L; | |||
| 585 | ||||
| 586 | data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK0x00000004L | | |||
| 587 | DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK0x00000008L | | |||
| 588 | DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK0x00000010L | | |||
| 589 | DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK0x00000020L | | |||
| 590 | DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK0x00000040L | | |||
| 591 | DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK0x00000080L); | |||
| 592 | ||||
| 593 | } else { | |||
| 594 | data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK0x00040000L; | |||
| ||||
| 595 | ||||
| 596 | data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK0x00000004L | | |||
| 597 | DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK0x00000008L | | |||
| 598 | DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK0x00000010L | | |||
| 599 | DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK0x00000020L | | |||
| 600 | DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK0x00000040L | | |||
| 601 | DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK0x00000080L); | |||
| 602 | } | |||
| 603 | ||||
| 604 | switch (adev->ip_versions[MMHUB_HWIP][0]) { | |||
| 605 | case IP_VERSION(2, 1, 0)(((2) << 16) | ((1) << 8) | (0)): | |||
| 606 | case IP_VERSION(2, 1, 1)(((2) << 16) | ((1) << 8) | (1)): | |||
| 607 | case IP_VERSION(2, 1, 2)(((2) << 16) | ((1) << 8) | (2)): | |||
| 608 | if (def1 != data1) | |||
| 609 | WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid, data1)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0070), data1, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev , ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0070)), (data1) , 0)); | |||
| 610 | break; | |||
| 611 | default: | |||
| 612 | if (def != data) | |||
| 613 | WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x064a), data, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev , ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x064a)), (data), 0)); | |||
| 614 | if (def1 != data1) | |||
| 615 | WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x0071), data1, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev , ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x0071)), (data1) , 0)); | |||
| 616 | break; | |||
| 617 | } | |||
| 618 | } | |||
| 619 | ||||
| 620 | static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *adev, | |||
| 621 | bool_Bool enable) | |||
| 622 | { | |||
| 623 | uint32_t def, data; | |||
| 624 | ||||
| 625 | if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_LS(1ULL << 8))) | |||
| 626 | return; | |||
| 627 | ||||
| 628 | switch (adev->ip_versions[MMHUB_HWIP][0]) { | |||
| 629 | case IP_VERSION(2, 1, 0)(((2) << 16) | ((1) << 8) | (0)): | |||
| 630 | case IP_VERSION(2, 1, 1)(((2) << 16) | ((1) << 8) | (1)): | |||
| 631 | case IP_VERSION(2, 1, 2)(((2) << 16) | ((1) << 8) | (2)): | |||
| 632 | /* There is no ATCL2 in MMHUB for 2.1.x */ | |||
| 633 | return; | |||
| 634 | default: | |||
| 635 | def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[MMHUB_HWIP][0 ][0] + 0x064a, 0, MMHUB_HWIP) : amdgpu_device_rreg(adev, (adev ->reg_offset[MMHUB_HWIP][0][0] + 0x064a), 0)); | |||
| 636 | break; | |||
| 637 | } | |||
| 638 | ||||
| 639 | if (enable) | |||
| 640 | data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK0x00080000L; | |||
| 641 | else | |||
| 642 | data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK0x00080000L; | |||
| 643 | ||||
| 644 | if (def != data) | |||
| 645 | WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[MMHUB_HWIP][ 0][0] + 0x064a), data, 0, MMHUB_HWIP) : amdgpu_device_wreg(adev , ((adev->reg_offset[MMHUB_HWIP][0][0] + 0x064a)), (data), 0)); | |||
| 646 | } | |||
| 647 | ||||
| 648 | static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, | |||
| 649 | enum amd_clockgating_state state) | |||
| 650 | { | |||
| 651 | if (amdgpu_sriov_vf(adev)((adev)->virt.caps & (1 << 2))) | |||
| ||||
| 652 | return 0; | |||
| 653 | ||||
| 654 | switch (adev->ip_versions[MMHUB_HWIP][0]) { | |||
| 655 | case IP_VERSION(2, 0, 0)(((2) << 16) | ((0) << 8) | (0)): | |||
| 656 | case IP_VERSION(2, 0, 2)(((2) << 16) | ((0) << 8) | (2)): | |||
| 657 | case IP_VERSION(2, 1, 0)(((2) << 16) | ((1) << 8) | (0)): | |||
| 658 | case IP_VERSION(2, 1, 1)(((2) << 16) | ((1) << 8) | (1)): | |||
| 659 | case IP_VERSION(2, 1, 2)(((2) << 16) | ((1) << 8) | (2)): | |||
| 660 | mmhub_v2_0_update_medium_grain_clock_gating(adev, | |||
| 661 | state == AMD_CG_STATE_GATE); | |||
| 662 | mmhub_v2_0_update_medium_grain_light_sleep(adev, | |||
| 663 | state == AMD_CG_STATE_GATE); | |||
| 664 | break; | |||
| 665 | default: | |||
| 666 | break; | |||
| 667 | } | |||
| 668 | ||||
| 669 | return 0; | |||
| 670 | } | |||
| 671 | ||||
| 672 | static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags) | |||
| 673 | { | |||
| 674 | int data, data1; | |||
| 675 | ||||
| 676 | if (amdgpu_sriov_vf(adev)((adev)->virt.caps & (1 << 2))) | |||
| 677 | *flags = 0; | |||
| 678 | ||||
| 679 | switch (adev->ip_versions[MMHUB_HWIP][0]) { | |||
| 680 | case IP_VERSION(2, 1, 0)(((2) << 16) | ((1) << 8) | (0)): | |||
| 681 | case IP_VERSION(2, 1, 1)(((2) << 16) | ((1) << 8) | (1)): | |||
| 682 | case IP_VERSION(2, 1, 2)(((2) << 16) | ((1) << 8) | (2)): | |||
| 683 | /* There is no ATCL2 in MMHUB for 2.1.x. Keep the status | |||
| 684 | * based on DAGB | |||
| 685 | */ | |||
| 686 | data = MM_ATC_L2_MISC_CG__ENABLE_MASK0x00040000L; | |||
| 687 | data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[MMHUB_HWIP][0 ][0] + 0x0070, 0, MMHUB_HWIP) : amdgpu_device_rreg(adev, (adev ->reg_offset[MMHUB_HWIP][0][0] + 0x0070), 0)); | |||
| 688 | break; | |||
| 689 | default: | |||
| 690 | data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[MMHUB_HWIP][0 ][0] + 0x064a, 0, MMHUB_HWIP) : amdgpu_device_rreg(adev, (adev ->reg_offset[MMHUB_HWIP][0][0] + 0x064a), 0)); | |||
| 691 | data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[MMHUB_HWIP][0 ][0] + 0x0071, 0, MMHUB_HWIP) : amdgpu_device_rreg(adev, (adev ->reg_offset[MMHUB_HWIP][0][0] + 0x0071), 0)); | |||
| 692 | break; | |||
| 693 | } | |||
| 694 | ||||
| 695 | /* AMD_CG_SUPPORT_MC_MGCG */ | |||
| 696 | if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK0x00040000L) && | |||
| 697 | !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK0x00000004L | | |||
| 698 | DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK0x00000008L | | |||
| 699 | DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK0x00000010L | | |||
| 700 | DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK0x00000020L | | |||
| 701 | DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK0x00000040L | | |||
| 702 | DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK0x00000080L))) | |||
| 703 | *flags |= AMD_CG_SUPPORT_MC_MGCG(1ULL << 9); | |||
| 704 | ||||
| 705 | /* AMD_CG_SUPPORT_MC_LS */ | |||
| 706 | if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK0x00080000L) | |||
| 707 | *flags |= AMD_CG_SUPPORT_MC_LS(1ULL << 8); | |||
| 708 | } | |||
| 709 | ||||
/* Public MMHUB 2.0 ops table consumed by the GMC code: hub init, GART
 * enable/disable, fault handling defaults, clock-gating control/query and
 * per-VMID page-table register setup.
 */
const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs = {
	.init = mmhub_v2_0_init,
	.gart_enable = mmhub_v2_0_gart_enable,
	.set_fault_enable_default = mmhub_v2_0_set_fault_enable_default,
	.gart_disable = mmhub_v2_0_gart_disable,
	.set_clockgating = mmhub_v2_0_set_clockgating,
	.get_clockgating = mmhub_v2_0_get_clockgating,
	.setup_vm_pt_regs = mmhub_v2_0_setup_vm_pt_regs,
};