| File: | dev/pci/drm/amd/amdgpu/amdgpu_ras.c |
| Warning: | line 763, column 3: 1st function call argument is an uninitialized value |
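The flagged call is the `kfree(info)` cleanup at the `out:` label of `amdgpu_ras_feature_enable()` (source line 763). `info` is only assigned by `kzalloc()` inside the `head->block == AMDGPU_RAS_BLOCK__GFX` branch, and between that branch and the cleanup the function passes `head` to other calls (`amdgpu_ras_is_feature_allowed()`, `psp_ras_enable_features()`), so a path-sensitive checker does not have to assume `head->block` still selects GFX when the guard before `kfree()` is evaluated; on such a path the pointer reaches `kfree()` uninitialized. Below is a minimal sketch of that pattern with hypothetical names, not the driver code itself; whether a given checker actually flags it depends on how much of the call chain it can see:

```c
#include <stdlib.h>

struct config { int mode; };

/* Defined elsewhere; as far as the caller's analysis is concerned, this call
 * may change cfg->mode between the two guarded sections below. */
void process(struct config *cfg);

int do_work(struct config *cfg)
{
	char *buf;                      /* not initialized on every path */

	if (cfg->mode == 1)
		buf = malloc(64);

	process(cfg);                   /* cfg->mode no longer known to be unchanged */

	if (cfg->mode == 1)
		free(buf);              /* report: 1st argument may be uninitialized */

	return 0;
}
```

Initializing the pointer to NULL at declaration (kfree(NULL)/free(NULL) is a no-op) or keeping the allocation and the free inside a single guarded block is the usual way to address this kind of report.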
| 1 | /* | |||
| 2 | * Copyright 2018 Advanced Micro Devices, Inc. | |||
| 3 | * | |||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | |||
| 5 | * copy of this software and associated documentation files (the "Software"), | |||
| 6 | * to deal in the Software without restriction, including without limitation | |||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | |||
| 9 | * Software is furnished to do so, subject to the following conditions: | |||
| 10 | * | |||
| 11 | * The above copyright notice and this permission notice shall be included in | |||
| 12 | * all copies or substantial portions of the Software. | |||
| 13 | * | |||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | |||
| 21 | * | |||
| 22 | * | |||
| 23 | */ | |||
| 24 | #include <linux/debugfs.h> | |||
| 25 | #include <linux/list.h> | |||
| 26 | #include <linux/module.h> | |||
| 27 | #include <linux/uaccess.h> | |||
| 28 | #include <linux/reboot.h> | |||
| 29 | #include <linux/syscalls.h> | |||
| 30 | #include <linux/pm_runtime.h> | |||
| 31 | ||||
| 32 | #include "amdgpu.h" | |||
| 33 | #include "amdgpu_ras.h" | |||
| 34 | #include "amdgpu_atomfirmware.h" | |||
| 35 | #include "amdgpu_xgmi.h" | |||
| 36 | #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" | |||
| 37 | #include "atom.h" | |||
| 38 | #include "amdgpu_reset.h" | |||
| 39 | ||||
| 40 | #ifdef CONFIG_X86_MCE_AMD | |||
| 41 | #include <asm/mce.h> | |||
| 42 | ||||
| 43 | static bool notifier_registered; |
| 44 | #endif | |||
| 45 | static const char *RAS_FS_NAME = "ras"; | |||
| 46 | ||||
| 47 | const char *ras_error_string[] = { | |||
| 48 | "none", | |||
| 49 | "parity", | |||
| 50 | "single_correctable", | |||
| 51 | "multi_uncorrectable", | |||
| 52 | "poison", | |||
| 53 | }; | |||
| 54 | ||||
| 55 | const char *ras_block_string[] = { | |||
| 56 | "umc", | |||
| 57 | "sdma", | |||
| 58 | "gfx", | |||
| 59 | "mmhub", | |||
| 60 | "athub", | |||
| 61 | "pcie_bif", | |||
| 62 | "hdp", | |||
| 63 | "xgmi_wafl", | |||
| 64 | "df", | |||
| 65 | "smn", | |||
| 66 | "sem", | |||
| 67 | "mp0", | |||
| 68 | "mp1", | |||
| 69 | "fuse", | |||
| 70 | "mca", | |||
| 71 | "vcn", | |||
| 72 | "jpeg", | |||
| 73 | }; | |||
| 74 | ||||
| 75 | const char *ras_mca_block_string[] = { | |||
| 76 | "mca_mp0", | |||
| 77 | "mca_mp1", | |||
| 78 | "mca_mpio", | |||
| 79 | "mca_iohc", | |||
| 80 | }; | |||
| 81 | ||||
| 82 | struct amdgpu_ras_block_list { | |||
| 83 | /* ras block link */ | |||
| 84 | struct list_head node; | |||
| 85 | ||||
| 86 | struct amdgpu_ras_block_object *ras_obj; | |||
| 87 | }; | |||
| 88 | ||||
| 89 | const char *get_ras_block_str(struct ras_common_if *ras_block) | |||
| 90 | { | |||
| 91 | if (!ras_block) | |||
| 92 | return "NULL"; | |||
| 93 | ||||
| 94 | if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT) |
| 95 | return "OUT OF RANGE"; | |||
| 96 | ||||
| 97 | if (ras_block->block == AMDGPU_RAS_BLOCK__MCA) | |||
| 98 | return ras_mca_block_string[ras_block->sub_block_index]; | |||
| 99 | ||||
| 100 | return ras_block_string[ras_block->block]; | |||
| 101 | } | |||
| 102 | ||||
| 103 | #define ras_block_str(_BLOCK_) \ |
| 104 | (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range") |
| 105 | |
| 106 | #define ras_err_str(i) (ras_error_string[ffs(i)]) |
| 107 | |
| 108 | #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS) |
| 109 | |
| 110 | /* inject address is 52 bits */ |
| 111 | #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) |
| 112 | |
| 113 | /* typical ECC bad page rate is 1 bad page per 100MB VRAM */ |
| 114 | #define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL) |
| 115 | ||||
| 116 | enum amdgpu_ras_retire_page_reservation { | |||
| 117 | AMDGPU_RAS_RETIRE_PAGE_RESERVED, | |||
| 118 | AMDGPU_RAS_RETIRE_PAGE_PENDING, | |||
| 119 | AMDGPU_RAS_RETIRE_PAGE_FAULT, | |||
| 120 | }; | |||
| 121 | ||||
| 122 | atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0); |
| 123 | ||||
| 124 | static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, |
| 125 | uint64_t addr); |
| 126 | static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, |
| 127 | uint64_t addr); | |||
| 128 | #ifdef CONFIG_X86_MCE_AMD | |||
| 129 | static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev); | |||
| 130 | struct mce_notifier_adev_list { | |||
| 131 | struct amdgpu_device *devs[MAX_GPU_INSTANCE]; |
| 132 | int num_gpu; | |||
| 133 | }; | |||
| 134 | static struct mce_notifier_adev_list mce_adev_list; | |||
| 135 | #endif | |||
| 136 | ||||
| 137 | void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready) |
| 138 | { | |||
| 139 | if (adev && amdgpu_ras_get_context(adev)) | |||
| 140 | amdgpu_ras_get_context(adev)->error_query_ready = ready; | |||
| 141 | } | |||
| 142 | ||||
| 143 | static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev) |
| 144 | { | |||
| 145 | if (adev && amdgpu_ras_get_context(adev)) | |||
| 146 | return amdgpu_ras_get_context(adev)->error_query_ready; | |||
| 147 | ||||
| 148 | return false; |
| 149 | } | |||
| 150 | ||||
| 151 | static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address) | |||
| 152 | { | |||
| 153 | struct ras_err_data err_data = {0, 0, 0, NULL}; |
| 154 | struct eeprom_table_record err_rec; | |||
| 155 | ||||
| 156 | if ((address >= adev->gmc.mc_vram_size) || | |||
| 157 | (address >= RAS_UMC_INJECT_ADDR_LIMIT)) { |
| 158 | dev_warn(adev->dev, |
| 159 | "RAS WARN: input address 0x%llx is invalid.\n", |
| 160 | address); |
| 161 | return -EINVAL; |
| 162 | } | |||
| 163 | ||||
| 164 | if (amdgpu_ras_check_bad_page(adev, address)) { | |||
| 165 | dev_warn(adev->dev, |
| 166 | "RAS WARN: 0x%llx has already been marked as bad page!\n", |
| 167 | address); |
| 168 | return 0; | |||
| 169 | } | |||
| 170 | ||||
| 171 | memset(&err_rec, 0x0, sizeof(struct eeprom_table_record)); |
| 172 | err_data.err_addr = &err_rec; | |||
| 173 | amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0); | |||
| 174 | ||||
| 175 | if (amdgpu_bad_page_threshold != 0) { | |||
| 176 | amdgpu_ras_add_bad_pages(adev, err_data.err_addr, | |||
| 177 | err_data.err_addr_cnt); | |||
| 178 | amdgpu_ras_save_bad_pages(adev); | |||
| 179 | } | |||
| 180 | ||||
| 181 | dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n")printf("drm:pid%d:%s *WARNING* " "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); | |||
| 182 | dev_warn(adev->dev, "Clear EEPROM:\n")printf("drm:pid%d:%s *WARNING* " "Clear EEPROM:\n", ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof (struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p-> ps_pid, __func__); | |||
| 183 | dev_warn(adev->dev, " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n")printf("drm:pid%d:%s *WARNING* " " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); | |||
| 184 | ||||
| 185 | return 0; | |||
| 186 | } | |||
| 187 | ||||
| 188 | #ifdef __linux__ | |||
| 189 | ||||
| 190 | static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, | |||
| 191 | size_t size, loff_t *pos) | |||
| 192 | { | |||
| 193 | struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private; | |||
| 194 | struct ras_query_if info = { | |||
| 195 | .head = obj->head, | |||
| 196 | }; | |||
| 197 | ssize_t s; | |||
| 198 | char val[128]; | |||
| 199 | ||||
| 200 | if (amdgpu_ras_query_error_status(obj->adev, &info)) | |||
| 201 | return -EINVAL; |
| 202 | |
| 203 | /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */ |
| 204 | if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && |
| 205 | obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { |
| 206 | if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) |
| 207 | dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); |
| 208 | } | |||
| 209 | ||||
| 210 | s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n", | |||
| 211 | "ue", info.ue_count, | |||
| 212 | "ce", info.ce_count); | |||
| 213 | if (*pos >= s) | |||
| 214 | return 0; | |||
| 215 | ||||
| 216 | s -= *pos; | |||
| 217 | s = min_t(u64, s, size); |
| 218 | |
| 219 | |
| 220 | if (copy_to_user(buf, &val[*pos], s)) |
| 221 | return -EINVAL; |
| 222 | ||||
| 223 | *pos += s; | |||
| 224 | ||||
| 225 | return s; | |||
| 226 | } | |||
| 227 | ||||
| 228 | static const struct file_operations amdgpu_ras_debugfs_ops = { | |||
| 229 | .owner = THIS_MODULE, |
| 230 | .read = amdgpu_ras_debugfs_read, |
| 231 | .write = NULL, |
| 232 | .llseek = default_llseek | |||
| 233 | }; | |||
| 234 | ||||
| 235 | static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id) | |||
| 236 | { | |||
| 237 | int i; | |||
| 238 | ||||
| 239 | for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) { |
| 240 | *block_id = i; | |||
| 241 | if (strcmp(name, ras_block_string[i]) == 0) | |||
| 242 | return 0; | |||
| 243 | } | |||
| 244 | return -EINVAL; |
| 245 | } | |||
| 246 | ||||
| 247 | static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, | |||
| 248 | const char __user *buf, size_t size, | |||
| 249 | loff_t *pos, struct ras_debug_if *data) | |||
| 250 | { | |||
| 251 | ssize_t s = min_t(u64, 64, size); |
| 252 | char str[65]; | |||
| 253 | char block_name[33]; | |||
| 254 | char err[9] = "ue"; | |||
| 255 | int op = -1; | |||
| 256 | int block_id; | |||
| 257 | uint32_t sub_block; | |||
| 258 | u64 address, value; | |||
| 259 | ||||
| 260 | if (*pos) | |||
| 261 | return -EINVAL; |
| 262 | *pos = size; |
| 263 | |
| 264 | memset(str, 0, sizeof(str)); |
| 265 | memset(data, 0, sizeof(*data)); |
| 266 | |
| 267 | if (copy_from_user(str, buf, s)) |
| 268 | return -EINVAL; |
| 269 | ||||
| 270 | if (sscanf(str, "disable %32s", block_name) == 1) | |||
| 271 | op = 0; | |||
| 272 | else if (sscanf(str, "enable %32s %8s", block_name, err) == 2) | |||
| 273 | op = 1; | |||
| 274 | else if (sscanf(str, "inject %32s %8s", block_name, err) == 2) | |||
| 275 | op = 2; | |||
| 276 | else if (strstr(str, "retire_page") != NULL) |
| 277 | op = 3; | |||
| 278 | else if (str[0] && str[1] && str[2] && str[3]) | |||
| 279 | /* ascii string, but commands are not matched. */ | |||
| 280 | return -EINVAL; |
| 281 | ||||
| 282 | if (op != -1) { | |||
| 283 | if (op == 3) { | |||
| 284 | if (sscanf(str, "%*s 0x%llx", &address) != 1 && | |||
| 285 | sscanf(str, "%*s %llu", &address) != 1) | |||
| 286 | return -EINVAL; |
| 287 | ||||
| 288 | data->op = op; | |||
| 289 | data->inject.address = address; | |||
| 290 | ||||
| 291 | return 0; | |||
| 292 | } | |||
| 293 | ||||
| 294 | if (amdgpu_ras_find_block_id_by_name(block_name, &block_id)) | |||
| 295 | return -EINVAL; |
| 296 | |
| 297 | data->head.block = block_id; |
| 298 | /* only ue and ce errors are supported */ |
| 299 | if (!memcmp("ue", err, 2)) |
| 300 | data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; |
| 301 | else if (!memcmp("ce", err, 2)) |
| 302 | data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE; |
| 303 | else |
| 304 | return -EINVAL; |
| 305 | ||||
| 306 | data->op = op; | |||
| 307 | ||||
| 308 | if (op == 2) { | |||
| 309 | if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx", | |||
| 310 | &sub_block, &address, &value) != 3 && | |||
| 311 | sscanf(str, "%*s %*s %*s %u %llu %llu", | |||
| 312 | &sub_block, &address, &value) != 3) | |||
| 313 | return -EINVAL; |
| 314 | data->head.sub_block_index = sub_block; | |||
| 315 | data->inject.address = address; | |||
| 316 | data->inject.value = value; | |||
| 317 | } | |||
| 318 | } else { | |||
| 319 | if (size < sizeof(*data)) | |||
| 320 | return -EINVAL; |
| 321 | |
| 322 | if (copy_from_user(data, buf, sizeof(*data))) |
| 323 | return -EINVAL; |
| 324 | } | |||
| 325 | ||||
| 326 | return 0; | |||
| 327 | } | |||
| 328 | ||||
| 329 | /** | |||
| 330 | * DOC: AMDGPU RAS debugfs control interface | |||
| 331 | * | |||
| 332 | * The control interface accepts struct ras_debug_if which has two members. | |||
| 333 | * | |||
| 334 | * First member: ras_debug_if::head or ras_debug_if::inject. | |||
| 335 | * | |||
| 336 | * head is used to indicate which IP block will be under control. | |||
| 337 | * | |||
| 338 | * head has four members, they are block, type, sub_block_index, name. | |||
| 339 | * block: which IP will be under control. | |||
| 340 | * type: what kind of error will be enabled/disabled/injected. | |||
| 341 | * sub_block_index: some IPs have subcomponents, say, GFX, sDMA. |
| 342 | * name: the name of IP. | |||
| 343 | * | |||
| 344 | * inject has two more members than head, they are address, value. | |||
| 345 | * As their names indicate, inject operation will write the | |||
| 346 | * value to the address. | |||
| 347 | * | |||
| 348 | * The second member: struct ras_debug_if::op. | |||
| 349 | * It has three kinds of operations. | |||
| 350 | * | |||
| 351 | * - 0: disable RAS on the block. Take ::head as its data. | |||
| 352 | * - 1: enable RAS on the block. Take ::head as its data. | |||
| 353 | * - 2: inject errors on the block. Take ::inject as its data. | |||
| 354 | * | |||
| 355 | * How to use the interface? | |||
| 356 | * | |||
| 357 | * In a program | |||
| 358 | * | |||
| 359 | * Copy the struct ras_debug_if in your code and initialize it. | |||
| 360 | * Write the struct to the control interface. | |||
| 361 | * | |||
| 362 | * From shell | |||
| 363 | * | |||
| 364 | * .. code-block:: bash | |||
| 365 | * | |||
| 366 | * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl | |||
| 367 | * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl | |||
| 368 | * echo "inject <block> <error> <sub-block> <address> <value> > /sys/kernel/debug/dri/<N>/ras/ras_ctrl | |||
| 369 | * | |||
| 370 | * Where N, is the card which you want to affect. | |||
| 371 | * | |||
| 372 | * "disable" requires only the block. | |||
| 373 | * "enable" requires the block and error type. | |||
| 374 | * "inject" requires the block, error type, address, and value. | |||
| 375 | * | |||
| 376 | * The block is one of: umc, sdma, gfx, etc. | |||
| 377 | * see ras_block_string[] for details | |||
| 378 | * | |||
| 379 | * The error type is one of: ue, ce, where, | |||
| 380 | * ue is multi-uncorrectable | |||
| 381 | * ce is single-correctable | |||
| 382 | * | |||
| 383 | * The sub-block is the sub-block index; pass 0 if there is no sub-block. |
| 384 | * The address and value are hexadecimal numbers, leading 0x is optional. | |||
| 385 | * | |||
| 386 | * For instance, | |||
| 387 | * | |||
| 388 | * .. code-block:: bash | |||
| 389 | * | |||
| 390 | * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl | |||
| 391 | * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl | |||
| 392 | * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl | |||
| 393 | * | |||
| 394 | * How to check the result of the operation? | |||
| 395 | * | |||
| 396 | * To check disable/enable, see "ras" features at, | |||
| 397 | * /sys/class/drm/card[0/1/2...]/device/ras/features | |||
| 398 | * | |||
| 399 | * To check inject, see the corresponding error count at, | |||
| 400 | * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count | |||
| 401 | * | |||
| 402 | * .. note:: | |||
| 403 | * Operations are only allowed on blocks which are supported. | |||
| 404 | * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask | |||
| 405 | * to see which blocks support RAS on a particular asic. | |||
| 406 | * | |||
| 407 | */ | |||
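As a concrete illustration of the "In a program" usage described in the DOC comment above, a small user-space helper can fill the structure and write it to the control file. The sketch below is illustrative only: the `*_mirror` types merely mimic the head/inject/op members documented above, and a real tool must use the actual `struct ras_debug_if` definition from the kernel's amdgpu_ras.h so the binary layout matches; the numeric block/type values are assumptions that must be checked against the kernel enums.

```c
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative mirrors of the structures documented above (head carries
 * block, type, sub_block_index, name; inject adds address and value; op
 * selects the operation). The authoritative layout is in amdgpu_ras.h. */
struct ras_common_if_mirror {
	int32_t  block;
	int32_t  type;
	uint32_t sub_block_index;
	char     name[32];
};

struct ras_inject_if_mirror {
	struct ras_common_if_mirror head;
	uint64_t address;
	uint64_t value;
};

struct ras_debug_if_mirror {
	union {
		struct ras_common_if_mirror head;
		struct ras_inject_if_mirror inject;
	};
	int32_t op;                       /* 0 = disable, 1 = enable, 2 = inject */
};

int main(void)
{
	struct ras_debug_if_mirror data = {0};
	int fd;

	data.op = 1;                      /* enable RAS on the block              */
	data.head.block = 0;              /* assumed AMDGPU_RAS_BLOCK__UMC ("umc") */
	data.head.type = 4;               /* assumed AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE
	                                   * (a bit value; ras_err_str() above indexes
	                                   * the name table with ffs())             */

	fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
	if (fd < 0) {
		perror("open ras_ctrl");
		return 1;
	}
	if (write(fd, &data, sizeof(data)) != (ssize_t)sizeof(data))
		perror("write ras_ctrl");
	close(fd);
	return 0;
}
```

For one-off testing the shell commands shown above remain the simpler path; the binary form is mainly useful when driving many operations from a test harness.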
| 408 | static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, | |||
| 409 | const char __user *buf, | |||
| 410 | size_t size, loff_t *pos) | |||
| 411 | { | |||
| 412 | struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; | |||
| 413 | struct ras_debug_if data; | |||
| 414 | int ret = 0; | |||
| 415 | ||||
| 416 | if (!amdgpu_ras_get_error_query_ready(adev)) { | |||
| 417 | dev_warn(adev->dev, "RAS WARN: error injection "printf("drm:pid%d:%s *WARNING* " "RAS WARN: error injection " "currently inaccessible\n", ({struct cpu_info *__ci; asm volatile ("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid , __func__) | |||
| 418 | "currently inaccessible\n")printf("drm:pid%d:%s *WARNING* " "RAS WARN: error injection " "currently inaccessible\n", ({struct cpu_info *__ci; asm volatile ("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid , __func__); | |||
| 419 | return size; | |||
| 420 | } | |||
| 421 | ||||
| 422 | ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data); | |||
| 423 | if (ret) | |||
| 424 | return ret; | |||
| 425 | ||||
| 426 | if (data.op == 3) { | |||
| 427 | ret = amdgpu_reserve_page_direct(adev, data.inject.address); | |||
| 428 | if (!ret) | |||
| 429 | return size; | |||
| 430 | else | |||
| 431 | return ret; | |||
| 432 | } | |||
| 433 | ||||
| 434 | if (!amdgpu_ras_is_supported(adev, data.head.block)) | |||
| 435 | return -EINVAL; |
| 436 | ||||
| 437 | switch (data.op) { | |||
| 438 | case 0: | |||
| 439 | ret = amdgpu_ras_feature_enable(adev, &data.head, 0); | |||
| 440 | break; | |||
| 441 | case 1: | |||
| 442 | ret = amdgpu_ras_feature_enable(adev, &data.head, 1); | |||
| 443 | break; | |||
| 444 | case 2: | |||
| 445 | if ((data.inject.address >= adev->gmc.mc_vram_size) || | |||
| 446 | (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) { |
| 447 | dev_warn(adev->dev, "RAS WARN: input address " |
| 448 | "0x%llx is invalid.", |
| 449 | data.inject.address); |
| 450 | ret = -EINVAL; |
| 451 | break; | |||
| 452 | } | |||
| 453 | ||||
| 454 | /* umc ce/ue error injection for a bad page is not allowed */ | |||
| 455 | if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) && | |||
| 456 | amdgpu_ras_check_bad_page(adev, data.inject.address)) { | |||
| 457 | dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "printf("drm:pid%d:%s *WARNING* " "RAS WARN: inject: 0x%llx has " "already been marked as bad!\n", ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof (struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p-> ps_pid, __func__ , data.inject.address) | |||
| 458 | "already been marked as bad!\n",printf("drm:pid%d:%s *WARNING* " "RAS WARN: inject: 0x%llx has " "already been marked as bad!\n", ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof (struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p-> ps_pid, __func__ , data.inject.address) | |||
| 459 | data.inject.address)printf("drm:pid%d:%s *WARNING* " "RAS WARN: inject: 0x%llx has " "already been marked as bad!\n", ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof (struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p-> ps_pid, __func__ , data.inject.address); | |||
| 460 | break; | |||
| 461 | } | |||
| 462 | ||||
| 463 | /* data.inject.address is offset instead of absolute gpu address */ | |||
| 464 | ret = amdgpu_ras_error_inject(adev, &data.inject); | |||
| 465 | break; | |||
| 466 | default: | |||
| 467 | ret = -EINVAL; |
| 468 | break; | |||
| 469 | } | |||
| 470 | ||||
| 471 | if (ret) | |||
| 472 | return ret; | |||
| 473 | ||||
| 474 | return size; | |||
| 475 | } | |||
| 476 | ||||
| 477 | /** | |||
| 478 | * DOC: AMDGPU RAS debugfs EEPROM table reset interface | |||
| 479 | * | |||
| 480 | * Some boards contain an EEPROM which is used to persistently store a list of | |||
| 481 | * bad pages which have experienced ECC errors in VRAM. This interface provides |
| 482 | * a way to reset the EEPROM, e.g., after testing error injection. | |||
| 483 | * | |||
| 484 | * Usage: | |||
| 485 | * | |||
| 486 | * .. code-block:: bash | |||
| 487 | * | |||
| 488 | * echo 1 > ../ras/ras_eeprom_reset | |||
| 489 | * | |||
| 490 | * will reset EEPROM table to 0 entries. | |||
| 491 | * | |||
| 492 | */ | |||
| 493 | static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, | |||
| 494 | const char __user *buf, | |||
| 495 | size_t size, loff_t *pos) | |||
| 496 | { | |||
| 497 | struct amdgpu_device *adev = | |||
| 498 | (struct amdgpu_device *)file_inode(f)->i_private; | |||
| 499 | int ret; | |||
| 500 | ||||
| 501 | ret = amdgpu_ras_eeprom_reset_table( | |||
| 502 | &(amdgpu_ras_get_context(adev)->eeprom_control)); | |||
| 503 | ||||
| 504 | if (!ret) { | |||
| 505 | /* Something was written to EEPROM. | |||
| 506 | */ | |||
| 507 | amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS; |
| 508 | return size; | |||
| 509 | } else { | |||
| 510 | return ret; | |||
| 511 | } | |||
| 512 | } | |||
| 513 | ||||
| 514 | static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = { | |||
| 515 | .owner = THIS_MODULE, |
| 516 | .read = NULL, |
| 517 | .write = amdgpu_ras_debugfs_ctrl_write, |
| 518 | .llseek = default_llseek |
| 519 | }; |
| 520 | |
| 521 | static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = { |
| 522 | .owner = THIS_MODULE, |
| 523 | .read = NULL, |
| 524 | .write = amdgpu_ras_debugfs_eeprom_write, | |||
| 525 | .llseek = default_llseek | |||
| 526 | }; | |||
| 527 | ||||
| 528 | /** | |||
| 529 | * DOC: AMDGPU RAS sysfs Error Count Interface | |||
| 530 | * | |||
| 531 | * It allows the user to read the error count for each IP block on the gpu through | |||
| 532 | * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count | |||
| 533 | * | |||
| 534 | * It outputs multiple lines which report the uncorrected (ue) and corrected |
| 535 | * (ce) error counts. | |||
| 536 | * | |||
| 537 | * The format of one line is below, | |||
| 538 | * | |||
| 539 | * [ce|ue]: count | |||
| 540 | * | |||
| 541 | * Example: | |||
| 542 | * | |||
| 543 | * .. code-block:: bash | |||
| 544 | * | |||
| 545 | * ue: 0 | |||
| 546 | * ce: 1 | |||
| 547 | * | |||
| 548 | */ | |||
| 549 | static ssize_t amdgpu_ras_sysfs_read(struct device *dev, | |||
| 550 | struct device_attribute *attr, char *buf) | |||
| 551 | { | |||
| 552 | struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr); |
| 553 | struct ras_query_if info = { | |||
| 554 | .head = obj->head, | |||
| 555 | }; | |||
| 556 | ||||
| 557 | if (!amdgpu_ras_get_error_query_ready(obj->adev)) | |||
| 558 | return sysfs_emit(buf, "Query currently inaccessible\n"); | |||
| 559 | ||||
| 560 | if (amdgpu_ras_query_error_status(obj->adev, &info)) | |||
| 561 | return -EINVAL; |
| 562 | |
| 563 | if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && |
| 564 | obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { |
| 565 | if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) |
| 566 | dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); |
| 567 | } | |||
| 568 | ||||
| 569 | return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count, | |||
| 570 | "ce", info.ce_count); | |||
| 571 | } | |||
| 572 | ||||
| 573 | #endif /* __linux__ */ | |||
| 574 | ||||
| 575 | /* obj begin */ | |||
| 576 | ||||
| 577 | #define get_obj(obj) do { (obj)->use++; } while (0) |
| 578 | #define alive_obj(obj) ((obj)->use) |
| 579 | ||||
| 580 | static inline void put_obj(struct ras_manager *obj) | |||
| 581 | { | |||
| 582 | if (obj && (--obj->use == 0)) | |||
| 583 | list_del(&obj->node); | |||
| 584 | if (obj && (obj->use < 0)) | |||
| 585 | DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head))__drm_err("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str (&obj->head)); | |||
| 586 | } | |||
| 587 | ||||
| 588 | /* make one obj and return it. */ | |||
| 589 | static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, | |||
| 590 | struct ras_common_if *head) | |||
| 591 | { | |||
| 592 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 593 | struct ras_manager *obj; | |||
| 594 | ||||
| 595 | if (!adev->ras_enabled || !con) | |||
| 596 | return NULL; |
| 597 | ||||
| 598 | if (head->block >= AMDGPU_RAS_BLOCK_COUNT) |
| 599 | return NULL; |
| 600 | ||||
| 601 | if (head->block == AMDGPU_RAS_BLOCK__MCA) { | |||
| 602 | if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST) | |||
| 603 | return NULL; |
| 604 | ||||
| 605 | obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; | |||
| 606 | } else | |||
| 607 | obj = &con->objs[head->block]; | |||
| 608 | ||||
| 609 | /* already exist. return obj? */ | |||
| 610 | if (alive_obj(obj)) |
| 611 | return NULL; |
| 612 | ||||
| 613 | obj->head = *head; | |||
| 614 | obj->adev = adev; | |||
| 615 | list_add(&obj->node, &con->head); | |||
| 616 | get_obj(obj); |
| 617 | ||||
| 618 | return obj; | |||
| 619 | } | |||
| 620 | ||||
| 621 | /* return an obj equal to head, or the first when head is NULL */ | |||
| 622 | struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, | |||
| 623 | struct ras_common_if *head) | |||
| 624 | { | |||
| 625 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 626 | struct ras_manager *obj; | |||
| 627 | int i; | |||
| 628 | ||||
| 629 | if (!adev->ras_enabled || !con) | |||
| 630 | return NULL; |
| 631 | |
| 632 | if (head) { |
| 633 | if (head->block >= AMDGPU_RAS_BLOCK_COUNT) |
| 634 | return NULL; |
| 635 | |
| 636 | if (head->block == AMDGPU_RAS_BLOCK__MCA) { |
| 637 | if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST) |
| 638 | return NULL; |
| 639 | ||||
| 640 | obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; | |||
| 641 | } else | |||
| 642 | obj = &con->objs[head->block]; | |||
| 643 | ||||
| 644 | if (alive_obj(obj)) |
| 645 | return obj; |
| 646 | } else { |
| 647 | for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { |
| 648 | obj = &con->objs[i]; |
| 649 | if (alive_obj(obj)) |
| 650 | return obj; |
| 651 | } |
| 652 | } |
| 653 | |
| 654 | return NULL; |
| 655 | } | |||
| 656 | /* obj end */ | |||
| 657 | ||||
| 658 | /* feature ctl begin */ | |||
| 659 | static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev, | |||
| 660 | struct ras_common_if *head) | |||
| 661 | { | |||
| 662 | return adev->ras_hw_enabled & BIT(head->block); |
| 663 | } | |||
| 664 | ||||
| 665 | static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev, | |||
| 666 | struct ras_common_if *head) | |||
| 667 | { | |||
| 668 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 669 | ||||
| 670 | return con->features & BIT(head->block); |
| 671 | } | |||
| 672 | ||||
| 673 | /* | |||
| 674 | * if obj is not created, then create one. | |||
| 675 | * set feature enable flag. | |||
| 676 | */ | |||
| 677 | static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev, | |||
| 678 | struct ras_common_if *head, int enable) | |||
| 679 | { | |||
| 680 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 681 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); | |||
| 682 | ||||
| 683 | /* If hardware does not support ras, then do not create obj. | |||
| 684 | * But if hardware supports ras, we can create the obj. |
| 685 | * Ras framework checks con->hw_supported to see if it needs to do |
| 686 | * corresponding initialization. |
| 687 | * IP checks con->support to see if it needs to disable ras. |
| 688 | */ | |||
| 689 | if (!amdgpu_ras_is_feature_allowed(adev, head)) | |||
| 690 | return 0; | |||
| 691 | ||||
| 692 | if (enable) { | |||
| 693 | if (!obj) { | |||
| 694 | obj = amdgpu_ras_create_obj(adev, head); | |||
| 695 | if (!obj) | |||
| 696 | return -EINVAL; |
| 697 | } else { |
| 698 | /* In case we create obj somewhere else */ |
| 699 | get_obj(obj); |
| 700 | } |
| 701 | con->features |= BIT(head->block); |
| 702 | } else { |
| 703 | if (obj && amdgpu_ras_is_feature_enabled(adev, head)) { |
| 704 | con->features &= ~BIT(head->block); |
| 705 | put_obj(obj); | |||
| 706 | } | |||
| 707 | } | |||
| 708 | ||||
| 709 | return 0; | |||
| 710 | } | |||
| 711 | ||||
| 712 | /* wrapper of psp_ras_enable_features */ | |||
| 713 | int amdgpu_ras_feature_enable(struct amdgpu_device *adev, | |||
| 714 | struct ras_common_if *head, bool enable) |
| 715 | { | |||
| 716 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 717 | union ta_ras_cmd_input *info; | |||
| 718 | int ret; | |||
| 719 | ||||
| 720 | if (!con) |
| 721 | return -EINVAL; |
| 722 | ||||
| 723 | if (head->block == AMDGPU_RAS_BLOCK__GFX) { | |||
| 724 | info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL); |
| 725 | if (!info) | |||
| 726 | return -ENOMEM; |
| 727 | ||||
| 728 | if (!enable) { | |||
| 729 | info->disable_features = (struct ta_ras_disable_features_input) { | |||
| 730 | .block_id = amdgpu_ras_block_to_ta(head->block), | |||
| 731 | .error_type = amdgpu_ras_error_to_ta(head->type), | |||
| 732 | }; | |||
| 733 | } else { | |||
| 734 | info->enable_features = (struct ta_ras_enable_features_input) { | |||
| 735 | .block_id = amdgpu_ras_block_to_ta(head->block), | |||
| 736 | .error_type = amdgpu_ras_error_to_ta(head->type), | |||
| 737 | }; | |||
| 738 | } | |||
| 739 | } | |||
| 740 | ||||
| 741 | /* Do not enable if it is not allowed. */ | |||
| 742 | WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head)); |
| 743 | ||||
| 744 | /* Only enable ras feature operation handle on host side */ | |||
| 745 | if (head->block == AMDGPU_RAS_BLOCK__GFX && |
| 746 | !amdgpu_sriov_vf(adev) && |
| 747 | !amdgpu_ras_intr_triggered()) { | |||
| 748 | ret = psp_ras_enable_features(&adev->psp, info, enable); | |||
| 749 | if (ret) { | |||
| 750 | dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",printf("drm:pid%d:%s *ERROR* " "ras %s %s failed poison:%d ret:%d\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , enable ? "enable":"disable", get_ras_block_str(head), amdgpu_ras_is_poison_mode_supported (adev), ret) | |||
| 751 | enable ? "enable":"disable",printf("drm:pid%d:%s *ERROR* " "ras %s %s failed poison:%d ret:%d\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , enable ? "enable":"disable", get_ras_block_str(head), amdgpu_ras_is_poison_mode_supported (adev), ret) | |||
| 752 | get_ras_block_str(head),printf("drm:pid%d:%s *ERROR* " "ras %s %s failed poison:%d ret:%d\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , enable ? "enable":"disable", get_ras_block_str(head), amdgpu_ras_is_poison_mode_supported (adev), ret) | |||
| 753 | amdgpu_ras_is_poison_mode_supported(adev), ret)printf("drm:pid%d:%s *ERROR* " "ras %s %s failed poison:%d ret:%d\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , enable ? "enable":"disable", get_ras_block_str(head), amdgpu_ras_is_poison_mode_supported (adev), ret); | |||
| 754 | goto out; | |||
| 755 | } | |||
| 756 | } | |||
| 757 | ||||
| 758 | /* setup the obj */ | |||
| 759 | __amdgpu_ras_feature_enable(adev, head, enable); | |||
| 760 | ret = 0; | |||
| 761 | out: | |||
| 762 | if (head->block == AMDGPU_RAS_BLOCK__GFX) | |||
| 763 | kfree(info); | |||
| 764 | return ret; | |||
| 765 | } | |||
| 766 | ||||
| 767 | /* Only used in device probe stage and called only once. */ | |||
| 768 | int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, | |||
| 769 | struct ras_common_if *head, bool enable) |
| 770 | { |
| 771 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
| 772 | int ret; |
| 773 | |
| 774 | if (!con) |
| 775 | return -EINVAL; |
| 776 | |
| 777 | if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { |
| 778 | if (enable) { | |||
| 779 | /* There is no harm to issue a ras TA cmd regardless of | |||
| 780 | * the current ras state. |
| 781 | * If current state == target state, it will do nothing | |||
| 782 | * But sometimes it requests driver to reset and repost | |||
| 783 | * with error code -EAGAIN. | |||
| 784 | */ | |||
| 785 | ret = amdgpu_ras_feature_enable(adev, head, 1); | |||
| 786 | /* With old ras TA, we might fail to enable ras. | |||
| 787 | * Log it and just setup the object. | |||
| 788 | * TODO need remove this WA in the future. | |||
| 789 | */ | |||
| 790 | if (ret == -EINVAL) { |
| 791 | ret = __amdgpu_ras_feature_enable(adev, head, 1); |
| 792 | if (!ret) |
| 793 | dev_info(adev->dev, |
| 794 | "RAS INFO: %s setup object\n", |
| 795 | get_ras_block_str(head)); |
| 796 | } | |||
| 797 | } else { | |||
| 798 | /* setup the object then issue a ras TA disable cmd.*/ | |||
| 799 | ret = __amdgpu_ras_feature_enable(adev, head, 1); | |||
| 800 | if (ret) | |||
| 801 | return ret; | |||
| 802 | ||||
| 803 | /* gfx block ras disable cmd must be sent to ras-ta */ |
| 804 | if (head->block == AMDGPU_RAS_BLOCK__GFX) |
| 805 | con->features |= BIT(head->block); |
| 806 | ||||
| 807 | ret = amdgpu_ras_feature_enable(adev, head, 0); | |||
| 808 | ||||
| 809 | /* clean gfx block ras features flag */ | |||
| 810 | if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX) | |||
| 811 | con->features &= ~BIT(head->block); |
| 812 | } | |||
| 813 | } else | |||
| 814 | ret = amdgpu_ras_feature_enable(adev, head, enable); | |||
| 815 | ||||
| 816 | return ret; | |||
| 817 | } | |||
| 818 | ||||
| 819 | static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev, | |||
| 820 | bool bypass) |
| 821 | { | |||
| 822 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 823 | struct ras_manager *obj, *tmp; | |||
| 824 | ||||
| 825 | list_for_each_entry_safe(obj, tmp, &con->head, node) { |
| 826 | /* bypass psp. | |||
| 827 | * aka just release the obj and corresponding flags | |||
| 828 | */ | |||
| 829 | if (bypass) { |
| 830 | if (__amdgpu_ras_feature_enable(adev, &obj->head, 0)) | |||
| 831 | break; | |||
| 832 | } else { | |||
| 833 | if (amdgpu_ras_feature_enable(adev, &obj->head, 0)) | |||
| 834 | break; | |||
| 835 | } | |||
| 836 | } | |||
| 837 | ||||
| 838 | return con->features; | |||
| 839 | } | |||
| 840 | ||||
| 841 | static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, | |||
| 842 | bool bypass) |
| 843 | { | |||
| 844 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 845 | int i; | |||
| 846 | const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE; | |||
| 847 | ||||
| 848 | for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { |
| 849 | struct ras_common_if head = { | |||
| 850 | .block = i, | |||
| 851 | .type = default_ras_type, | |||
| 852 | .sub_block_index = 0, | |||
| 853 | }; | |||
| 854 | ||||
| 855 | if (i == AMDGPU_RAS_BLOCK__MCA) | |||
| 856 | continue; | |||
| 857 | ||||
| 858 | if (bypass) { | |||
| 859 | /* | |||
| 860 | * bypass psp. vbios enable ras for us. | |||
| 861 | * so just create the obj | |||
| 862 | */ | |||
| 863 | if (__amdgpu_ras_feature_enable(adev, &head, 1)) | |||
| 864 | break; | |||
| 865 | } else { | |||
| 866 | if (amdgpu_ras_feature_enable(adev, &head, 1)) | |||
| 867 | break; | |||
| 868 | } | |||
| 869 | } | |||
| 870 | ||||
| 871 | for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { |
| 872 | struct ras_common_if head = { | |||
| 873 | .block = AMDGPU_RAS_BLOCK__MCA, | |||
| 874 | .type = default_ras_type, | |||
| 875 | .sub_block_index = i, | |||
| 876 | }; | |||
| 877 | ||||
| 878 | if (bypass) { | |||
| 879 | /* | |||
| 880 | * bypass psp. vbios enable ras for us. | |||
| 881 | * so just create the obj | |||
| 882 | */ | |||
| 883 | if (__amdgpu_ras_feature_enable(adev, &head, 1)) | |||
| 884 | break; | |||
| 885 | } else { | |||
| 886 | if (amdgpu_ras_feature_enable(adev, &head, 1)) | |||
| 887 | break; | |||
| 888 | } | |||
| 889 | } | |||
| 890 | ||||
| 891 | return con->features; | |||
| 892 | } | |||
| 893 | /* feature ctl end */ | |||
| 894 | ||||
| 895 | static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj, | |||
| 896 | enum amdgpu_ras_block block) | |||
| 897 | { | |||
| 898 | if (!block_obj) | |||
| 899 | return -EINVAL; |
| 900 | ||||
| 901 | if (block_obj->ras_comm.block == block) | |||
| 902 | return 0; | |||
| 903 | ||||
| 904 | return -EINVAL; |
| 905 | } | |||
| 906 | ||||
| 907 | static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev, | |||
| 908 | enum amdgpu_ras_block block, uint32_t sub_block_index) | |||
| 909 | { | |||
| 910 | struct amdgpu_ras_block_list *node, *tmp; | |||
| 911 | struct amdgpu_ras_block_object *obj; | |||
| 912 | ||||
| 913 | if (block >= AMDGPU_RAS_BLOCK__LAST) | |||
| 914 | return NULL; |
| 915 | |
| 916 | if (!amdgpu_ras_is_supported(adev, block)) |
| 917 | return NULL; |
| 918 | |
| 919 | list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { |
| 920 | if (!node->ras_obj) { |
| 921 | dev_warn(adev->dev, "Warning: abnormal ras list node.\n"); |
| 922 | continue; | |||
| 923 | } | |||
| 924 | ||||
| 925 | obj = node->ras_obj; | |||
| 926 | if (obj->ras_block_match) { | |||
| 927 | if (obj->ras_block_match(obj, block, sub_block_index) == 0) | |||
| 928 | return obj; | |||
| 929 | } else { | |||
| 930 | if (amdgpu_ras_block_match_default(obj, block) == 0) | |||
| 931 | return obj; | |||
| 932 | } | |||
| 933 | } | |||
| 934 | ||||
| 935 | return NULL; |
| 936 | } | |||
| 937 | ||||
| 938 | static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data) | |||
| 939 | { | |||
| 940 | struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); | |||
| 941 | int ret = 0; | |||
| 942 | ||||
| 943 | /* | |||
| 944 | * choosing right query method according to | |||
| 945 | * whether smu support query error information | |||
| 946 | */ | |||
| 947 | ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc)); | |||
| 948 | if (ret == -EOPNOTSUPP) { |
| 949 | if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && | |||
| 950 | adev->umc.ras->ras_block.hw_ops->query_ras_error_count) | |||
| 951 | adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data); | |||
| 952 | ||||
| 953 | /* umc query_ras_error_address is also responsible for clearing | |||
| 954 | * error status | |||
| 955 | */ | |||
| 956 | if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && | |||
| 957 | adev->umc.ras->ras_block.hw_ops->query_ras_error_address) | |||
| 958 | adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data); | |||
| 959 | } else if (!ret) { | |||
| 960 | if (adev->umc.ras && | |||
| 961 | adev->umc.ras->ecc_info_query_ras_error_count) | |||
| 962 | adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data); | |||
| 963 | ||||
| 964 | if (adev->umc.ras && | |||
| 965 | adev->umc.ras->ecc_info_query_ras_error_address) | |||
| 966 | adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data); | |||
| 967 | } | |||
| 968 | } | |||
| 969 | ||||
| 970 | /* query/inject/cure begin */ | |||
| 971 | int amdgpu_ras_query_error_status(struct amdgpu_device *adev, | |||
| 972 | struct ras_query_if *info) | |||
| 973 | { | |||
| 974 | struct amdgpu_ras_block_object *block_obj = NULL; |
| 975 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); |
| 976 | struct ras_err_data err_data = {0, 0, 0, NULL}; |
| 977 | |
| 978 | if (!obj) |
| 979 | return -EINVAL; |
| 980 | ||||
| 981 | if (info->head.block == AMDGPU_RAS_BLOCK__UMC) { | |||
| 982 | amdgpu_ras_get_ecc_info(adev, &err_data); | |||
| 983 | } else { | |||
| 984 | block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0); | |||
| 985 | if (!block_obj || !block_obj->hw_ops) { | |||
| 986 | dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", |
| 987 | get_ras_block_str(&info->head)); |
| 988 | return -EINVAL; |
| 989 | } | |||
| 990 | ||||
| 991 | if (block_obj->hw_ops->query_ras_error_count) | |||
| 992 | block_obj->hw_ops->query_ras_error_count(adev, &err_data); | |||
| 993 | ||||
| 994 | if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) || | |||
| 995 | (info->head.block == AMDGPU_RAS_BLOCK__GFX) || | |||
| 996 | (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) { | |||
| 997 | if (block_obj->hw_ops->query_ras_error_status) | |||
| 998 | block_obj->hw_ops->query_ras_error_status(adev); | |||
| 999 | } | |||
| 1000 | } | |||
| 1001 | ||||
| 1002 | obj->err_data.ue_count += err_data.ue_count; | |||
| 1003 | obj->err_data.ce_count += err_data.ce_count; | |||
| 1004 | ||||
| 1005 | info->ue_count = obj->err_data.ue_count; | |||
| 1006 | info->ce_count = obj->err_data.ce_count; | |||
| 1007 | ||||
| 1008 | if (err_data.ce_count) { | |||
| 1009 | if (adev->smuio.funcs && | |||
| 1010 | adev->smuio.funcs->get_socket_id && | |||
| 1011 | adev->smuio.funcs->get_die_id) { | |||
| 1012 | dev_info(adev->dev, "socket: %d, die: %d "do { } while(0) | |||
| 1013 | "%ld correctable hardware errors "do { } while(0) | |||
| 1014 | "detected in %s block, no user "do { } while(0) | |||
| 1015 | "action is needed.\n",do { } while(0) | |||
| 1016 | adev->smuio.funcs->get_socket_id(adev),do { } while(0) | |||
| 1017 | adev->smuio.funcs->get_die_id(adev),do { } while(0) | |||
| 1018 | obj->err_data.ce_count,do { } while(0) | |||
| 1019 | get_ras_block_str(&info->head))do { } while(0); | |||
| 1020 | } else { | |||
| 1021 | dev_info(adev->dev, "%ld correctable hardware errors "do { } while(0) | |||
| 1022 | "detected in %s block, no user "do { } while(0) | |||
| 1023 | "action is needed.\n",do { } while(0) | |||
| 1024 | obj->err_data.ce_count,do { } while(0) | |||
| 1025 | get_ras_block_str(&info->head))do { } while(0); | |||
| 1026 | } | |||
| 1027 | } | |||
| 1028 | if (err_data.ue_count) { | |||
| 1029 | if (adev->smuio.funcs && | |||
| 1030 | adev->smuio.funcs->get_socket_id && | |||
| 1031 | adev->smuio.funcs->get_die_id) { | |||
| 1032 | dev_info(adev->dev, "socket: %d, die: %d "do { } while(0) | |||
| 1033 | "%ld uncorrectable hardware errors "do { } while(0) | |||
| 1034 | "detected in %s block\n",do { } while(0) | |||
| 1035 | adev->smuio.funcs->get_socket_id(adev),do { } while(0) | |||
| 1036 | adev->smuio.funcs->get_die_id(adev),do { } while(0) | |||
| 1037 | obj->err_data.ue_count,do { } while(0) | |||
| 1038 | get_ras_block_str(&info->head))do { } while(0); | |||
| 1039 | } else { | |||
| 1040 | dev_info(adev->dev, "%ld uncorrectable hardware errors "do { } while(0) | |||
| 1041 | "detected in %s block\n",do { } while(0) | |||
| 1042 | obj->err_data.ue_count,do { } while(0) | |||
| 1043 | get_ras_block_str(&info->head))do { } while(0); | |||
| 1044 | } | |||
| 1045 | } | |||
| 1046 | ||||
| 1047 | return 0; | |||
| 1048 | } | |||
| 1049 | ||||
| 1050 | int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, | |||
| 1051 | enum amdgpu_ras_block block) | |||
| 1052 | { | |||
| 1053 | struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0); | |||
| 1054 | ||||
| 1055 | if (!amdgpu_ras_is_supported(adev, block)) | |||
| 1056 | return -EINVAL; |
| 1057 | |
| 1058 | if (!block_obj || !block_obj->hw_ops) { |
| 1059 | dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", |
| 1060 | ras_block_str(block)); |
| 1061 | return -EINVAL; |
| 1062 | } | |||
| 1063 | ||||
| 1064 | if (block_obj->hw_ops->reset_ras_error_count) | |||
| 1065 | block_obj->hw_ops->reset_ras_error_count(adev); | |||
| 1066 | ||||
| 1067 | if ((block == AMDGPU_RAS_BLOCK__GFX) || | |||
| 1068 | (block == AMDGPU_RAS_BLOCK__MMHUB)) { | |||
| 1069 | if (block_obj->hw_ops->reset_ras_error_status) | |||
| 1070 | block_obj->hw_ops->reset_ras_error_status(adev); | |||
| 1071 | } | |||
| 1072 | ||||
| 1073 | return 0; | |||
| 1074 | } | |||
| 1075 | ||||
| 1076 | /* wrapper of psp_ras_trigger_error */ | |||
| 1077 | int amdgpu_ras_error_inject(struct amdgpu_device *adev, | |||
| 1078 | struct ras_inject_if *info) | |||
| 1079 | { | |||
| 1080 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); | |||
| 1081 | struct ta_ras_trigger_error_input block_info = { | |||
| 1082 | .block_id = amdgpu_ras_block_to_ta(info->head.block), | |||
| 1083 | .inject_error_type = amdgpu_ras_error_to_ta(info->head.type), | |||
| 1084 | .sub_block_index = info->head.sub_block_index, | |||
| 1085 | .address = info->address, | |||
| 1086 | .value = info->value, | |||
| 1087 | }; | |||
| 1088 | int ret = -EINVAL; |
| 1089 | struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, | |||
| 1090 | info->head.block, | |||
| 1091 | info->head.sub_block_index); | |||
| 1092 | ||||
| 1093 | if (!obj) | |||
| 1094 | return -EINVAL; |
| 1095 | |
| 1096 | if (!block_obj || !block_obj->hw_ops) { |
| 1097 | dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", |
| 1098 | get_ras_block_str(&info->head)); |
| 1099 | return -EINVAL; |
| 1100 | } | |||
| 1101 | ||||
| 1102 | /* Calculate XGMI relative offset */ | |||
| 1103 | if (adev->gmc.xgmi.num_physical_nodes > 1) { | |||
| 1104 | block_info.address = | |||
| 1105 | amdgpu_xgmi_get_relative_phy_addr(adev, | |||
| 1106 | block_info.address); | |||
| 1107 | } | |||
| 1108 | ||||
| 1109 | if (info->head.block == AMDGPU_RAS_BLOCK__GFX) { | |||
| 1110 | if (block_obj->hw_ops->ras_error_inject) | |||
| 1111 | ret = block_obj->hw_ops->ras_error_inject(adev, info); | |||
| 1112 | } else { | |||
| 1113 | /* If defined special ras_error_inject(e.g: xgmi), implement special ras_error_inject */ | |||
| 1114 | if (block_obj->hw_ops->ras_error_inject) | |||
| 1115 | ret = block_obj->hw_ops->ras_error_inject(adev, &block_info); | |||
| 1116 | else /*If not defined .ras_error_inject, use default ras_error_inject*/ | |||
| 1117 | ret = psp_ras_trigger_error(&adev->psp, &block_info); | |||
| 1118 | } | |||
| 1119 | ||||
| 1120 | if (ret) | |||
| 1121 | dev_err(adev->dev, "ras inject %s failed %d\n",printf("drm:pid%d:%s *ERROR* " "ras inject %s failed %d\n", ( {struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , get_ras_block_str (&info->head), ret) | |||
| 1122 | get_ras_block_str(&info->head), ret)printf("drm:pid%d:%s *ERROR* " "ras inject %s failed %d\n", ( {struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , get_ras_block_str (&info->head), ret); | |||
| 1123 | ||||
| 1124 | return ret; | |||
| 1125 | } | |||
| 1126 | ||||
| 1127 | /** | |||
| 1128 | * amdgpu_ras_query_error_count -- Get error counts of all IPs | |||
| 1129 | * @adev: pointer to AMD GPU device | |||
| 1130 | * @ce_count: pointer to an integer to be set to the count of correctable errors. |
| 1131 | * @ue_count: pointer to an integer to be set to the count of uncorrectable |
| 1132 | * errors. |
| 1133 | * | |||
| 1134 | * If set, @ce_count or @ue_count, count and return the corresponding | |||
| 1135 | * error counts in those integer pointers. Return 0 if the device | |||
| 1136 | * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS. | |||
| 1137 | */ | |||
| 1138 | int amdgpu_ras_query_error_count(struct amdgpu_device *adev, | |||
| 1139 | unsigned long *ce_count, | |||
| 1140 | unsigned long *ue_count) | |||
| 1141 | { | |||
| 1142 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 1143 | struct ras_manager *obj; | |||
| 1144 | unsigned long ce, ue; | |||
| 1145 | ||||
| 1146 | if (!adev->ras_enabled || !con) | |||
| 1147 | return -EOPNOTSUPP; |
| 1148 | ||||
| 1149 | /* Don't count since no reporting. | |||
| 1150 | */ | |||
| 1151 | if (!ce_count && !ue_count) | |||
| 1152 | return 0; | |||
| 1153 | ||||
| 1154 | ce = 0; | |||
| 1155 | ue = 0; | |||
| 1156 | list_for_each_entry(obj, &con->head, node) { |
| 1157 | struct ras_query_if info = { | |||
| 1158 | .head = obj->head, | |||
| 1159 | }; | |||
| 1160 | int res; | |||
| 1161 | ||||
| 1162 | res = amdgpu_ras_query_error_status(adev, &info); | |||
| 1163 | if (res) | |||
| 1164 | return res; | |||
| 1165 | ||||
| 1166 | if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2)(((11) << 16) | ((0) << 8) | (2)) && | |||
| 1167 | adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)(((11) << 16) | ((0) << 8) | (4))) { | |||
| 1168 | if (amdgpu_ras_reset_error_status(adev, info.head.block)) | |||
| 1169 | dev_warn(adev->dev, "Failed to reset error counter and error status")printf("drm:pid%d:%s *WARNING* " "Failed to reset error counter and error status" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); | |||
| 1170 | } | |||
| 1171 | ||||
| 1172 | ce += info.ce_count; | |||
| 1173 | ue += info.ue_count; | |||
| 1174 | } | |||
| 1175 | ||||
| 1176 | if (ce_count) | |||
| 1177 | *ce_count = ce; | |||
| 1178 | ||||
| 1179 | if (ue_count) | |||
| 1180 | *ue_count = ue; | |||
| 1181 | ||||
| 1182 | return 0; | |||
| 1183 | } | |||
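/*
 * Illustrative sketch only (not part of the driver): one way a caller could
 * consume amdgpu_ras_query_error_count(). The helper name and the log wording
 * below are made up for this example.
 */
#if 0	/* example, not compiled */
static void example_log_ras_totals(struct amdgpu_device *adev)
{
	unsigned long ce = 0, ue = 0;

	/* Returns -EOPNOTSUPP when the device does not support RAS. */
	if (!amdgpu_ras_query_error_count(adev, &ce, &ue))
		dev_info(adev->dev, "RAS totals: %lu correctable, %lu uncorrectable\n",
			 ce, ue);
}
#endif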
| 1184 | /* query/inject/cure end */ | |||
| 1185 | ||||
| 1186 | #ifdef __linux__ | |||
| 1187 | ||||
| 1188 | /* sysfs begin */ | |||
| 1189 | ||||
| 1190 | static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, | |||
| 1191 | struct ras_badpage **bps, unsigned int *count); | |||
| 1192 | ||||
| 1193 | static char *amdgpu_ras_badpage_flags_str(unsigned int flags) | |||
| 1194 | { | |||
| 1195 | switch (flags) { | |||
| 1196 | case AMDGPU_RAS_RETIRE_PAGE_RESERVED: | |||
| 1197 | return "R"; | |||
| 1198 | case AMDGPU_RAS_RETIRE_PAGE_PENDING: | |||
| 1199 | return "P"; | |||
| 1200 | case AMDGPU_RAS_RETIRE_PAGE_FAULT: | |||
| 1201 | default: | |||
| 1202 | return "F"; | |||
| 1203 | } | |||
| 1204 | } | |||
| 1205 | ||||
| 1206 | /** | |||
| 1207 | * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface | |||
| 1208 | * | |||
| 1209 | * It allows the user to read the bad pages of vram on the gpu through | |||
| 1210 | * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages | |||
| 1211 | * | |||
| 1212 | * It outputs multiple lines, and each line stands for one gpu page. | |||
| 1213 | * | |||
| 1214 | * The format of one line is as follows: | |||
| 1215 | * gpu pfn : gpu page size : flags | |||
| 1216 | * | |||
| 1217 | * gpu pfn and gpu page size are printed in hex format. | |||
| 1218 | * flags can be one of the following characters: | |||
| 1219 | * | |||
| 1220 | * R: reserved, this gpu page is reserved and cannot be used. | |||
| 1221 | * | |||
| 1222 | * P: pending for reservation, this gpu page is marked as bad and will be | |||
| 1223 | * reserved in the next window of page_reserve. | |||
| 1224 | * | |||
| 1225 | * F: unable to reserve. this gpu page couldn't be reserved for some reason. | |||
| 1226 | * | |||
| 1227 | * Examples: | |||
| 1228 | * | |||
| 1229 | * .. code-block:: bash | |||
| 1230 | * | |||
| 1231 | * 0x00000001 : 0x00001000 : R | |||
| 1232 | * 0x00000002 : 0x00001000 : P | |||
| 1233 | * | |||
| 1234 | */ | |||
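/*
 * For illustration only (the card index below is an assumption), the file can
 * be read with standard tools, e.g.:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages
 */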
| 1235 | ||||
| 1236 | static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f, | |||
| 1237 | struct kobject *kobj, struct bin_attribute *attr, | |||
| 1238 | char *buf, loff_t ppos, size_t count) | |||
| 1239 | { | |||
| 1240 | struct amdgpu_ras *con = | |||
| 1241 | container_of(attr, struct amdgpu_ras, badpages_attr)({ const __typeof( ((struct amdgpu_ras *)0)->badpages_attr ) *__mptr = (attr); (struct amdgpu_ras *)( (char *)__mptr - __builtin_offsetof (struct amdgpu_ras, badpages_attr) );}); | |||
| 1242 | struct amdgpu_device *adev = con->adev; | |||
| 1243 | const unsigned int element_size = | |||
| 1244 | sizeof("0xabcdabcd : 0x12345678 : R\n") - 1; | |||
| 1245 | unsigned int start = div64_ul(ppos + element_size - 1, element_size); | |||
| 1246 | unsigned int end = div64_ul(ppos + count - 1, element_size); | |||
| 1247 | ssize_t s = 0; | |||
| 1248 | struct ras_badpage *bps = NULL((void *)0); | |||
| 1249 | unsigned int bps_count = 0; | |||
| 1250 | ||||
| 1251 | memset(buf, 0, count)__builtin_memset((buf), (0), (count)); | |||
| 1252 | ||||
| 1253 | if (amdgpu_ras_badpages_read(adev, &bps, &bps_count)) | |||
| 1254 | return 0; | |||
| 1255 | ||||
| 1256 | for (; start < end && start < bps_count; start++) | |||
| 1257 | s += scnprintf(&buf[s], element_size + 1,snprintf(&buf[s], element_size + 1, "0x%08x : 0x%08x : %1s\n" , bps[start].bp, bps[start].size, amdgpu_ras_badpage_flags_str (bps[start].flags)) | |||
| 1258 | "0x%08x : 0x%08x : %1s\n",snprintf(&buf[s], element_size + 1, "0x%08x : 0x%08x : %1s\n" , bps[start].bp, bps[start].size, amdgpu_ras_badpage_flags_str (bps[start].flags)) | |||
| 1259 | bps[start].bp,snprintf(&buf[s], element_size + 1, "0x%08x : 0x%08x : %1s\n" , bps[start].bp, bps[start].size, amdgpu_ras_badpage_flags_str (bps[start].flags)) | |||
| 1260 | bps[start].size,snprintf(&buf[s], element_size + 1, "0x%08x : 0x%08x : %1s\n" , bps[start].bp, bps[start].size, amdgpu_ras_badpage_flags_str (bps[start].flags)) | |||
| 1261 | amdgpu_ras_badpage_flags_str(bps[start].flags))snprintf(&buf[s], element_size + 1, "0x%08x : 0x%08x : %1s\n" , bps[start].bp, bps[start].size, amdgpu_ras_badpage_flags_str (bps[start].flags)); | |||
| 1262 | ||||
| 1263 | kfree(bps); | |||
| 1264 | ||||
| 1265 | return s; | |||
| 1266 | } | |||
| 1267 | ||||
| 1268 | static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev, | |||
| 1269 | struct device_attribute *attr, char *buf) | |||
| 1270 | { | |||
| 1271 | struct amdgpu_ras *con = | |||
| 1272 | container_of(attr, struct amdgpu_ras, features_attr)({ const __typeof( ((struct amdgpu_ras *)0)->features_attr ) *__mptr = (attr); (struct amdgpu_ras *)( (char *)__mptr - __builtin_offsetof (struct amdgpu_ras, features_attr) );}); | |||
| 1273 | ||||
| 1274 | return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features)snprintf(buf, (1 << 12), "feature mask: 0x%x\n", con-> features); | |||
| 1275 | } | |||
| 1276 | ||||
| 1277 | static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev) | |||
| 1278 | { | |||
| 1279 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 1280 | ||||
| 1281 | if (adev->dev->kobj.sd) | |||
| 1282 | sysfs_remove_file_from_group(&adev->dev->kobj, | |||
| 1283 | &con->badpages_attr.attr, | |||
| 1284 | RAS_FS_NAME); | |||
| 1285 | } | |||
| 1286 | ||||
| 1287 | static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev) | |||
| 1288 | { | |||
| 1289 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 1290 | struct attribute *attrs[] = { | |||
| 1291 | &con->features_attr.attr, | |||
| 1292 | NULL((void *)0) | |||
| 1293 | }; | |||
| 1294 | struct attribute_group group = { | |||
| 1295 | .name = RAS_FS_NAME, | |||
| 1296 | .attrs = attrs, | |||
| 1297 | }; | |||
| 1298 | ||||
| 1299 | if (adev->dev->kobj.sd) | |||
| 1300 | sysfs_remove_group(&adev->dev->kobj, &group); | |||
| 1301 | ||||
| 1302 | return 0; | |||
| 1303 | } | |||
| 1304 | ||||
| 1305 | #endif /* __linux__ */ | |||
| 1306 | ||||
| 1307 | int amdgpu_ras_sysfs_create(struct amdgpu_device *adev, | |||
| 1308 | struct ras_common_if *head) | |||
| 1309 | { | |||
| 1310 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); | |||
| 1311 | ||||
| 1312 | if (!obj || obj->attr_inuse) | |||
| 1313 | return -EINVAL22; | |||
| 1314 | ||||
| 1315 | STUB()do { printf("%s: stub\n", __func__); } while(0); | |||
| 1316 | return -ENOSYS78; | |||
| 1317 | #ifdef notyet | |||
| 1318 | get_obj(obj)do { (obj)->use++; } while (0); | |||
| 1319 | ||||
| 1320 | snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name), | |||
| 1321 | "%s_err_count", head->name); | |||
| 1322 | ||||
| 1323 | obj->sysfs_attr = (struct device_attribute){ | |||
| 1324 | .attr = { | |||
| 1325 | .name = obj->fs_data.sysfs_name, | |||
| 1326 | .mode = S_IRUGO, | |||
| 1327 | }, | |||
| 1328 | .show = amdgpu_ras_sysfs_read, | |||
| 1329 | }; | |||
| 1330 | sysfs_attr_init(&obj->sysfs_attr.attr); | |||
| 1331 | ||||
| 1332 | if (sysfs_add_file_to_group(&adev->dev->kobj, | |||
| 1333 | &obj->sysfs_attr.attr, | |||
| 1334 | RAS_FS_NAME)) { | |||
| 1335 | put_obj(obj); | |||
| 1336 | return -EINVAL22; | |||
| 1337 | } | |||
| 1338 | ||||
| 1339 | obj->attr_inuse = 1; | |||
| 1340 | ||||
| 1341 | return 0; | |||
| 1342 | #endif | |||
| 1343 | } | |||
| 1344 | ||||
| 1345 | int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, | |||
| 1346 | struct ras_common_if *head) | |||
| 1347 | { | |||
| 1348 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); | |||
| 1349 | ||||
| 1350 | if (!obj || !obj->attr_inuse) | |||
| 1351 | return -EINVAL22; | |||
| 1352 | ||||
| 1353 | #ifdef __linux__ | |||
| 1354 | if (adev->dev->kobj.sd) | |||
| 1355 | sysfs_remove_file_from_group(&adev->dev->kobj, | |||
| 1356 | &obj->sysfs_attr.attr, | |||
| 1357 | RAS_FS_NAME); | |||
| 1358 | #endif | |||
| 1359 | obj->attr_inuse = 0; | |||
| 1360 | put_obj(obj); | |||
| 1361 | ||||
| 1362 | return 0; | |||
| 1363 | } | |||
| 1364 | ||||
| 1365 | #ifdef __linux__ | |||
| 1366 | ||||
| 1367 | static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) | |||
| 1368 | { | |||
| 1369 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 1370 | struct ras_manager *obj, *tmp; | |||
| 1371 | ||||
| 1372 | list_for_each_entry_safe(obj, tmp, &con->head, node)for (obj = ({ const __typeof( ((__typeof(*obj) *)0)->node ) *__mptr = ((&con->head)->next); (__typeof(*obj) *) ( (char *)__mptr - __builtin_offsetof(__typeof(*obj), node) ) ;}), tmp = ({ const __typeof( ((__typeof(*obj) *)0)->node ) *__mptr = (obj->node.next); (__typeof(*obj) *)( (char *)__mptr - __builtin_offsetof(__typeof(*obj), node) );}); &obj-> node != (&con->head); obj = tmp, tmp = ({ const __typeof ( ((__typeof(*tmp) *)0)->node ) *__mptr = (tmp->node.next ); (__typeof(*tmp) *)( (char *)__mptr - __builtin_offsetof(__typeof (*tmp), node) );})) { | |||
| 1373 | amdgpu_ras_sysfs_remove(adev, &obj->head); | |||
| 1374 | } | |||
| 1375 | ||||
| 1376 | if (amdgpu_bad_page_threshold != 0) | |||
| 1377 | amdgpu_ras_sysfs_remove_bad_page_node(adev); | |||
| 1378 | ||||
| 1379 | amdgpu_ras_sysfs_remove_feature_node(adev); | |||
| 1380 | ||||
| 1381 | return 0; | |||
| 1382 | } | |||
| 1383 | /* sysfs end */ | |||
| 1384 | ||||
| 1385 | /** | |||
| 1386 | * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors | |||
| 1387 | * | |||
| 1388 | * Normally when there is an uncorrectable error, the driver will reset | |||
| 1389 | * the GPU to recover. However, in the event of an unrecoverable error, | |||
| 1390 | * the driver provides an interface to automatically reboot the system | |||
| 1391 | * when such an error occurs. | |||
| 1392 | * | |||
| 1393 | * The following file in debugfs provides that interface: | |||
| 1394 | * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot | |||
| 1395 | * | |||
| 1396 | * Usage: | |||
| 1397 | * | |||
| 1398 | * .. code-block:: bash | |||
| 1399 | * | |||
| 1400 | * echo true > .../ras/auto_reboot | |||
| 1401 | * | |||
| 1402 | */ | |||
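/*
 * Conversely (assuming the usual debugfs boolean parsing), writing "false" or
 * "0" to the same file turns the automatic reboot behavior back off:
 *
 * .. code-block:: bash
 *
 *	echo false > .../ras/auto_reboot
 */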
| 1403 | /* debugfs begin */ | |||
| 1404 | static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) | |||
| 1405 | { | |||
| 1406 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 1407 | struct drm_minor *minor = adev_to_drm(adev)->primary; | |||
| 1408 | struct dentry *dir; | |||
| 1409 | ||||
| 1410 | dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root)ERR_PTR(-78); | |||
| 1411 | debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,ERR_PTR(-78) | |||
| 1412 | &amdgpu_ras_debugfs_ctrl_ops)ERR_PTR(-78); | |||
| 1413 | debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,ERR_PTR(-78) | |||
| 1414 | &amdgpu_ras_debugfs_eeprom_ops)ERR_PTR(-78); | |||
| 1415 | debugfs_create_u32("bad_page_cnt_threshold", 0444, dir, | |||
| 1416 | &con->bad_page_cnt_threshold); | |||
| 1417 | debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled); | |||
| 1418 | debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled); | |||
| 1419 | debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,ERR_PTR(-78) | |||
| 1420 | &amdgpu_ras_debugfs_eeprom_size_ops)ERR_PTR(-78); | |||
| 1421 | con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",ERR_PTR(-78) | |||
| 1422 | S_IRUGO, dir, adev,ERR_PTR(-78) | |||
| 1423 | &amdgpu_ras_debugfs_eeprom_table_ops)ERR_PTR(-78); | |||
| 1424 | amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control); | |||
| 1425 | ||||
| 1426 | /* | |||
| 1427 | * After an uncorrectable error happens, GPU recovery will usually be | |||
| 1428 | * scheduled. But because GPU recovery is known to sometimes fail to | |||
| 1429 | * bring the GPU back, the interface below gives the user a direct way | |||
| 1430 | * to reboot the system automatically when an ERREVENT_ATHUB_INTERRUPT | |||
| 1431 | * is generated in such a case. The normal GPU recovery routine will | |||
| 1432 | * then never be called. | |||
| 1433 | */ | |||
| 1434 | debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot); | |||
| 1435 | ||||
| 1436 | /* | |||
| 1437 | * The user can set this so that the hardware error count registers of | |||
| 1438 | * the RAS IPs are not cleaned up during ras recovery. | |||
| 1439 | */ | |||
| 1440 | debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir, | |||
| 1441 | &con->disable_ras_err_cnt_harvest); | |||
| 1442 | return dir; | |||
| 1443 | } | |||
| 1444 | ||||
| 1445 | static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, | |||
| 1446 | struct ras_fs_if *head, | |||
| 1447 | struct dentry *dir) | |||
| 1448 | { | |||
| 1449 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head); | |||
| 1450 | ||||
| 1451 | if (!obj || !dir) | |||
| 1452 | return; | |||
| 1453 | ||||
| 1454 | get_obj(obj)do { (obj)->use++; } while (0); | |||
| 1455 | ||||
| 1456 | memcpy(obj->fs_data.debugfs_name,__builtin_memcpy((obj->fs_data.debugfs_name), (head->debugfs_name ), (sizeof(obj->fs_data.debugfs_name))) | |||
| 1457 | head->debugfs_name,__builtin_memcpy((obj->fs_data.debugfs_name), (head->debugfs_name ), (sizeof(obj->fs_data.debugfs_name))) | |||
| 1458 | sizeof(obj->fs_data.debugfs_name))__builtin_memcpy((obj->fs_data.debugfs_name), (head->debugfs_name ), (sizeof(obj->fs_data.debugfs_name))); | |||
| 1459 | ||||
| 1460 | debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,ERR_PTR(-78) | |||
| 1461 | obj, &amdgpu_ras_debugfs_ops)ERR_PTR(-78); | |||
| 1462 | } | |||
| 1463 | ||||
| 1464 | void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) | |||
| 1465 | { | |||
| 1466 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 1467 | struct dentry *dir; | |||
| 1468 | struct ras_manager *obj; | |||
| 1469 | struct ras_fs_if fs_info; | |||
| 1470 | ||||
| 1471 | /* | |||
| 1472 | * this won't be called in the resume path, so there is no need to check | |||
| 1473 | * the suspend and gpu reset status | |||
| 1474 | */ | |||
| 1475 | if (!IS_ENABLED(CONFIG_DEBUG_FS)0 || !con) | |||
| 1476 | return; | |||
| 1477 | ||||
| 1478 | dir = amdgpu_ras_debugfs_create_ctrl_node(adev); | |||
| 1479 | ||||
| 1480 | list_for_each_entry(obj, &con->head, node)for (obj = ({ const __typeof( ((__typeof(*obj) *)0)->node ) *__mptr = ((&con->head)->next); (__typeof(*obj) *) ( (char *)__mptr - __builtin_offsetof(__typeof(*obj), node) ) ;}); &obj->node != (&con->head); obj = ({ const __typeof( ((__typeof(*obj) *)0)->node ) *__mptr = (obj-> node.next); (__typeof(*obj) *)( (char *)__mptr - __builtin_offsetof (__typeof(*obj), node) );})) { | |||
| 1481 | if (amdgpu_ras_is_supported(adev, obj->head.block) && | |||
| 1482 | (obj->attr_inuse == 1)) { | |||
| 1483 | sprintf(fs_info.debugfs_name, "%s_err_inject", | |||
| 1484 | get_ras_block_str(&obj->head)); | |||
| 1485 | fs_info.head = obj->head; | |||
| 1486 | amdgpu_ras_debugfs_create(adev, &fs_info, dir); | |||
| 1487 | } | |||
| 1488 | } | |||
| 1489 | } | |||
| 1490 | ||||
| 1491 | /* debugfs end */ | |||
| 1492 | ||||
| 1493 | /* ras fs */ | |||
| 1494 | static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO, | |||
| 1495 | amdgpu_ras_sysfs_badpages_read, NULL((void *)0), 0); | |||
| 1496 | #endif /* __linux__ */ | |||
| 1497 | static DEVICE_ATTR(features, S_IRUGO,struct device_attribute dev_attr_features | |||
| 1498 | amdgpu_ras_sysfs_features_read, NULL)struct device_attribute dev_attr_features; | |||
| 1499 | static int amdgpu_ras_fs_init(struct amdgpu_device *adev) | |||
| 1500 | { | |||
| 1501 | #ifdef __linux__ | |||
| 1502 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 1503 | struct attribute_group group = { | |||
| 1504 | .name = RAS_FS_NAME, | |||
| 1505 | }; | |||
| 1506 | struct attribute *attrs[] = { | |||
| 1507 | &con->features_attr.attr, | |||
| 1508 | NULL((void *)0) | |||
| 1509 | }; | |||
| 1510 | struct bin_attribute *bin_attrs[] = { | |||
| 1511 | NULL((void *)0), | |||
| 1512 | NULL((void *)0), | |||
| 1513 | }; | |||
| 1514 | int r; | |||
| 1515 | ||||
| 1516 | /* add features entry */ | |||
| 1517 | con->features_attr = dev_attr_features; | |||
| 1518 | group.attrs = attrs; | |||
| 1519 | sysfs_attr_init(attrs[0]); | |||
| 1520 | ||||
| 1521 | if (amdgpu_bad_page_threshold != 0) { | |||
| 1522 | /* add bad_page_features entry */ | |||
| 1523 | bin_attr_gpu_vram_bad_pages.private = NULL((void *)0); | |||
| 1524 | con->badpages_attr = bin_attr_gpu_vram_bad_pages; | |||
| 1525 | bin_attrs[0] = &con->badpages_attr; | |||
| 1526 | group.bin_attrs = bin_attrs; | |||
| 1527 | sysfs_bin_attr_init(bin_attrs[0]); | |||
| 1528 | } | |||
| 1529 | ||||
| 1530 | r = sysfs_create_group(&adev->dev->kobj, &group)0; | |||
| 1531 | if (r) | |||
| 1532 | dev_err(adev->dev, "Failed to create RAS sysfs group!")printf("drm:pid%d:%s *ERROR* " "Failed to create RAS sysfs group!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); | |||
| 1533 | #endif | |||
| 1534 | ||||
| 1535 | return 0; | |||
| 1536 | } | |||
| 1537 | ||||
| 1538 | static int amdgpu_ras_fs_fini(struct amdgpu_device *adev) | |||
| 1539 | { | |||
| 1540 | #ifdef __linux__ | |||
| 1541 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 1542 | struct ras_manager *con_obj, *ip_obj, *tmp; | |||
| 1543 | ||||
| 1544 | if (IS_ENABLED(CONFIG_DEBUG_FS)0) { | |||
| 1545 | list_for_each_entry_safe(con_obj, tmp, &con->head, node)for (con_obj = ({ const __typeof( ((__typeof(*con_obj) *)0)-> node ) *__mptr = ((&con->head)->next); (__typeof(*con_obj ) *)( (char *)__mptr - __builtin_offsetof(__typeof(*con_obj), node) );}), tmp = ({ const __typeof( ((__typeof(*con_obj) *) 0)->node ) *__mptr = (con_obj->node.next); (__typeof(*con_obj ) *)( (char *)__mptr - __builtin_offsetof(__typeof(*con_obj), node) );}); &con_obj->node != (&con->head); con_obj = tmp, tmp = ({ const __typeof( ((__typeof(*tmp) *)0)->node ) *__mptr = (tmp->node.next); (__typeof(*tmp) *)( (char * )__mptr - __builtin_offsetof(__typeof(*tmp), node) );})) { | |||
| 1546 | ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head); | |||
| 1547 | if (ip_obj) | |||
| 1548 | put_obj(ip_obj); | |||
| 1549 | } | |||
| 1550 | } | |||
| 1551 | ||||
| 1552 | amdgpu_ras_sysfs_remove_all(adev); | |||
| 1553 | #endif | |||
| 1554 | return 0; | |||
| 1555 | } | |||
| 1556 | /* ras fs end */ | |||
| 1557 | ||||
| 1558 | /* ih begin */ | |||
| 1559 | ||||
| 1560 | /* For hardware that cannot enable the bif ring for both the ras_controller_irq | |||
| 1561 | * and ras_err_event_athub_irq ih cookies, the driver has to poll the status | |||
| 1562 | * register to check whether the interrupt has been triggered, and properly | |||
| 1563 | * ack the interrupt if it is there | |||
| 1564 | */ | |||
| 1565 | void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev) | |||
| 1566 | { | |||
| 1567 | /* Fatal error events are handled on host side */ | |||
| 1568 | if (amdgpu_sriov_vf(adev)((adev)->virt.caps & (1 << 2)) || | |||
| 1569 | !amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) | |||
| 1570 | return; | |||
| 1571 | ||||
| 1572 | if (adev->nbio.ras && | |||
| 1573 | adev->nbio.ras->handle_ras_controller_intr_no_bifring) | |||
| 1574 | adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev); | |||
| 1575 | ||||
| 1576 | if (adev->nbio.ras && | |||
| 1577 | adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring) | |||
| 1578 | adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev); | |||
| 1579 | } | |||
| 1580 | ||||
| 1581 | static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj, | |||
| 1582 | struct amdgpu_iv_entry *entry) | |||
| 1583 | { | |||
| 1584 | bool_Bool poison_stat = false0; | |||
| 1585 | struct amdgpu_device *adev = obj->adev; | |||
| 1586 | struct ras_err_data err_data = {0, 0, 0, NULL((void *)0)}; | |||
| 1587 | struct amdgpu_ras_block_object *block_obj = | |||
| 1588 | amdgpu_ras_get_ras_block(adev, obj->head.block, 0); | |||
| 1589 | ||||
| 1590 | if (!block_obj || !block_obj->hw_ops) | |||
| 1591 | return; | |||
| 1592 | ||||
| 1593 | /* both query_poison_status and handle_poison_consumption are optional, | |||
| 1594 | * but at least one of them should be implemented if we need a poison | |||
| 1595 | * consumption handler | |||
| 1596 | */ | |||
| 1597 | if (block_obj->hw_ops->query_poison_status) { | |||
| 1598 | poison_stat = block_obj->hw_ops->query_poison_status(adev); | |||
| 1599 | if (!poison_stat) { | |||
| 1600 | /* Not poison consumption interrupt, no need to handle it */ | |||
| 1601 | dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",do { } while(0) | |||
| 1602 | block_obj->ras_comm.name)do { } while(0); | |||
| 1603 | ||||
| 1604 | return; | |||
| 1605 | } | |||
| 1606 | } | |||
| 1607 | ||||
| 1608 | if (!adev->gmc.xgmi.connected_to_cpu) | |||
| 1609 | amdgpu_umc_poison_handler(adev, &err_data, false0); | |||
| 1610 | ||||
| 1611 | if (block_obj->hw_ops->handle_poison_consumption) | |||
| 1612 | poison_stat = block_obj->hw_ops->handle_poison_consumption(adev); | |||
| 1613 | ||||
| 1614 | /* gpu reset is fallback for failed and default cases */ | |||
| 1615 | if (poison_stat) { | |||
| 1616 | dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",do { } while(0) | |||
| 1617 | block_obj->ras_comm.name)do { } while(0); | |||
| 1618 | amdgpu_ras_reset_gpu(adev); | |||
| 1619 | } | |||
| 1620 | } | |||
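/*
 * Illustrative sketch only: a hypothetical IP block could satisfy the
 * "at least one of the two optional hooks" contract described above with an
 * hw_ops table like the one below. The example_* names are made up, and the
 * ops structure type is assumed to be struct amdgpu_ras_block_hw_ops.
 */
#if 0	/* example, not compiled */
static const struct amdgpu_ras_block_hw_ops example_hw_ops = {
	/* return true only when the IH really was a poison consumption */
	.query_poison_status = example_query_poison_status,
	/* return true when a full GPU reset should be used as the fallback */
	.handle_poison_consumption = example_handle_poison_consumption,
};
#endif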
| 1621 | ||||
| 1622 | static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj, | |||
| 1623 | struct amdgpu_iv_entry *entry) | |||
| 1624 | { | |||
| 1625 | dev_info(obj->adev->dev,do { } while(0) | |||
| 1626 | "Poison is created, no user action is needed.\n")do { } while(0); | |||
| 1627 | } | |||
| 1628 | ||||
| 1629 | static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj, | |||
| 1630 | struct amdgpu_iv_entry *entry) | |||
| 1631 | { | |||
| 1632 | struct ras_ih_data *data = &obj->ih_data; | |||
| 1633 | struct ras_err_data err_data = {0, 0, 0, NULL((void *)0)}; | |||
| 1634 | int ret; | |||
| 1635 | ||||
| 1636 | if (!data->cb) | |||
| 1637 | return; | |||
| 1638 | ||||
| 1639 | /* Let the IP handle its data; we may need to get the output | |||
| 1640 | * from the callback to update the error type/count, etc. | |||
| 1641 | */ | |||
| 1642 | ret = data->cb(obj->adev, &err_data, entry); | |||
| 1643 | /* A ue will trigger an interrupt, and in that case | |||
| 1644 | * we need to do a reset to recover the whole system. | |||
| 1645 | * But leave that recovery to the IP; here we just dispatch | |||
| 1646 | * the error. | |||
| 1647 | */ | |||
| 1648 | if (ret == AMDGPU_RAS_SUCCESS) { | |||
| 1649 | /* these counts may be left as 0 if | |||
| 1650 | * some blocks do not count error numbers | |||
| 1651 | */ | |||
| 1652 | obj->err_data.ue_count += err_data.ue_count; | |||
| 1653 | obj->err_data.ce_count += err_data.ce_count; | |||
| 1654 | } | |||
| 1655 | } | |||
| 1656 | ||||
| 1657 | static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) | |||
| 1658 | { | |||
| 1659 | struct ras_ih_data *data = &obj->ih_data; | |||
| 1660 | struct amdgpu_iv_entry entry; | |||
| 1661 | ||||
| 1662 | while (data->rptr != data->wptr) { | |||
| 1663 | rmb()do { __asm volatile("lfence" ::: "memory"); } while (0); | |||
| 1664 | memcpy(&entry, &data->ring[data->rptr],__builtin_memcpy((&entry), (&data->ring[data->rptr ]), (data->element_size)) | |||
| 1665 | data->element_size)__builtin_memcpy((&entry), (&data->ring[data->rptr ]), (data->element_size)); | |||
| 1666 | ||||
| 1667 | wmb()do { __asm volatile("sfence" ::: "memory"); } while (0); | |||
| 1668 | data->rptr = (data->aligned_element_size + | |||
| 1669 | data->rptr) % data->ring_size; | |||
| 1670 | ||||
| 1671 | if (amdgpu_ras_is_poison_mode_supported(obj->adev)) { | |||
| 1672 | if (obj->head.block == AMDGPU_RAS_BLOCK__UMC) | |||
| 1673 | amdgpu_ras_interrupt_poison_creation_handler(obj, &entry); | |||
| 1674 | else | |||
| 1675 | amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry); | |||
| 1676 | } else { | |||
| 1677 | if (obj->head.block == AMDGPU_RAS_BLOCK__UMC) | |||
| 1678 | amdgpu_ras_interrupt_umc_handler(obj, &entry); | |||
| 1679 | else | |||
| 1680 | dev_warn(obj->adev->dev,printf("drm:pid%d:%s *WARNING* " "No RAS interrupt handler for non-UMC block with poison disabled.\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__) | |||
| 1681 | "No RAS interrupt handler for non-UMC block with poison disabled.\n")printf("drm:pid%d:%s *WARNING* " "No RAS interrupt handler for non-UMC block with poison disabled.\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); | |||
| 1682 | } | |||
| 1683 | } | |||
| 1684 | } | |||
| 1685 | ||||
| 1686 | static void amdgpu_ras_interrupt_process_handler(struct work_struct *work) | |||
| 1687 | { | |||
| 1688 | struct ras_ih_data *data = | |||
| 1689 | container_of(work, struct ras_ih_data, ih_work)({ const __typeof( ((struct ras_ih_data *)0)->ih_work ) *__mptr = (work); (struct ras_ih_data *)( (char *)__mptr - __builtin_offsetof (struct ras_ih_data, ih_work) );}); | |||
| 1690 | struct ras_manager *obj = | |||
| 1691 | container_of(data, struct ras_manager, ih_data)({ const __typeof( ((struct ras_manager *)0)->ih_data ) *__mptr = (data); (struct ras_manager *)( (char *)__mptr - __builtin_offsetof (struct ras_manager, ih_data) );}); | |||
| 1692 | ||||
| 1693 | amdgpu_ras_interrupt_handler(obj); | |||
| 1694 | } | |||
| 1695 | ||||
| 1696 | int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, | |||
| 1697 | struct ras_dispatch_if *info) | |||
| 1698 | { | |||
| 1699 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); | |||
| 1700 | struct ras_ih_data *data = &obj->ih_data; | |||
| 1701 | ||||
| 1702 | if (!obj) | |||
| 1703 | return -EINVAL22; | |||
| 1704 | ||||
| 1705 | if (data->inuse == 0) | |||
| 1706 | return 0; | |||
| 1707 | ||||
| 1708 | /* Might be overflow... */ | |||
| 1709 | memcpy(&data->ring[data->wptr], info->entry,__builtin_memcpy((&data->ring[data->wptr]), (info-> entry), (data->element_size)) | |||
| 1710 | data->element_size)__builtin_memcpy((&data->ring[data->wptr]), (info-> entry), (data->element_size)); | |||
| 1711 | ||||
| 1712 | wmb()do { __asm volatile("sfence" ::: "memory"); } while (0); | |||
| 1713 | data->wptr = (data->aligned_element_size + | |||
| 1714 | data->wptr) % data->ring_size; | |||
| 1715 | ||||
| 1716 | schedule_work(&data->ih_work); | |||
| 1717 | ||||
| 1718 | return 0; | |||
| 1719 | } | |||
| 1720 | ||||
| 1721 | int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev, | |||
| 1722 | struct ras_common_if *head) | |||
| 1723 | { | |||
| 1724 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); | |||
| 1725 | struct ras_ih_data *data; | |||
| 1726 | ||||
| 1727 | if (!obj) | |||
| 1728 | return -EINVAL22; | |||
| 1729 | ||||
| 1730 | data = &obj->ih_data; | |||
| 1731 | if (data->inuse == 0) | |||
| 1732 | return 0; | |||
| 1733 | ||||
| 1734 | cancel_work_sync(&data->ih_work); | |||
| 1735 | ||||
| 1736 | kfree(data->ring); | |||
| 1737 | memset(data, 0, sizeof(*data))__builtin_memset((data), (0), (sizeof(*data))); | |||
| 1738 | put_obj(obj); | |||
| 1739 | ||||
| 1740 | return 0; | |||
| 1741 | } | |||
| 1742 | ||||
| 1743 | int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev, | |||
| 1744 | struct ras_common_if *head) | |||
| 1745 | { | |||
| 1746 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); | |||
| 1747 | struct ras_ih_data *data; | |||
| 1748 | struct amdgpu_ras_block_object *ras_obj; | |||
| 1749 | ||||
| 1750 | if (!obj) { | |||
| 1751 | /* in case we register the IH before enabling the ras feature */ | |||
| 1752 | obj = amdgpu_ras_create_obj(adev, head); | |||
| 1753 | if (!obj) | |||
| 1754 | return -EINVAL22; | |||
| 1755 | } else | |||
| 1756 | get_obj(obj)do { (obj)->use++; } while (0); | |||
| 1757 | ||||
| 1758 | ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm)({ const __typeof( ((struct amdgpu_ras_block_object *)0)-> ras_comm ) *__mptr = (head); (struct amdgpu_ras_block_object * )( (char *)__mptr - __builtin_offsetof(struct amdgpu_ras_block_object , ras_comm) );}); | |||
| 1759 | ||||
| 1760 | data = &obj->ih_data; | |||
| 1761 | /* add the callback, etc. */ | |||
| 1762 | *data = (struct ras_ih_data) { | |||
| 1763 | .inuse = 0, | |||
| 1764 | .cb = ras_obj->ras_cb, | |||
| 1765 | .element_size = sizeof(struct amdgpu_iv_entry), | |||
| 1766 | .rptr = 0, | |||
| 1767 | .wptr = 0, | |||
| 1768 | }; | |||
| 1769 | ||||
| 1770 | INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler); | |||
| 1771 | ||||
| 1772 | data->aligned_element_size = roundup2(data->element_size, 8)(((data->element_size) + ((8) - 1)) & (~((__typeof(data ->element_size))(8) - 1))); | |||
| 1773 | /* the ring can store 64 iv entries. */ | |||
| 1774 | data->ring_size = 64 * data->aligned_element_size; | |||
| 1775 | data->ring = kmalloc(data->ring_size, GFP_KERNEL(0x0001 | 0x0004)); | |||
| 1776 | if (!data->ring) { | |||
| 1777 | put_obj(obj); | |||
| 1778 | return -ENOMEM12; | |||
| 1779 | } | |||
| 1780 | ||||
| 1781 | /* IH is ready */ | |||
| 1782 | data->inuse = 1; | |||
| 1783 | ||||
| 1784 | return 0; | |||
| 1785 | } | |||
| 1786 | ||||
| 1787 | static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev) | |||
| 1788 | { | |||
| 1789 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 1790 | struct ras_manager *obj, *tmp; | |||
| 1791 | ||||
| 1792 | list_for_each_entry_safe(obj, tmp, &con->head, node)for (obj = ({ const __typeof( ((__typeof(*obj) *)0)->node ) *__mptr = ((&con->head)->next); (__typeof(*obj) *) ( (char *)__mptr - __builtin_offsetof(__typeof(*obj), node) ) ;}), tmp = ({ const __typeof( ((__typeof(*obj) *)0)->node ) *__mptr = (obj->node.next); (__typeof(*obj) *)( (char *)__mptr - __builtin_offsetof(__typeof(*obj), node) );}); &obj-> node != (&con->head); obj = tmp, tmp = ({ const __typeof ( ((__typeof(*tmp) *)0)->node ) *__mptr = (tmp->node.next ); (__typeof(*tmp) *)( (char *)__mptr - __builtin_offsetof(__typeof (*tmp), node) );})) { | |||
| 1793 | amdgpu_ras_interrupt_remove_handler(adev, &obj->head); | |||
| 1794 | } | |||
| 1795 | ||||
| 1796 | return 0; | |||
| 1797 | } | |||
| 1798 | /* ih end */ | |||
| 1799 | ||||
| 1800 | /* traverse all IPs except NBIO to query error counters */ | |||
| 1801 | static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) | |||
| 1802 | { | |||
| 1803 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 1804 | struct ras_manager *obj; | |||
| 1805 | ||||
| 1806 | if (!adev->ras_enabled || !con) | |||
| 1807 | return; | |||
| 1808 | ||||
| 1809 | list_for_each_entry(obj, &con->head, node)for (obj = ({ const __typeof( ((__typeof(*obj) *)0)->node ) *__mptr = ((&con->head)->next); (__typeof(*obj) *) ( (char *)__mptr - __builtin_offsetof(__typeof(*obj), node) ) ;}); &obj->node != (&con->head); obj = ({ const __typeof( ((__typeof(*obj) *)0)->node ) *__mptr = (obj-> node.next); (__typeof(*obj) *)( (char *)__mptr - __builtin_offsetof (__typeof(*obj), node) );})) { | |||
| 1810 | struct ras_query_if info = { | |||
| 1811 | .head = obj->head, | |||
| 1812 | }; | |||
| 1813 | ||||
| 1814 | /* | |||
| 1815 | * The PCIE_BIF IP has its own isr for the ras controller | |||
| 1816 | * interrupt, and the specific ras counter query will be | |||
| 1817 | * done in that isr. So skip such blocks in the common | |||
| 1818 | * sync flood interrupt isr path. | |||
| 1819 | */ | |||
| 1820 | if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF) | |||
| 1821 | continue; | |||
| 1822 | ||||
| 1823 | /* | |||
| 1824 | * this is a workaround for aldebaran: skip sending the msg to | |||
| 1825 | * the smu to get the ecc_info table, because smu handling of the | |||
| 1826 | * get-ecc-info request temporarily fails. It should be removed once | |||
| 1827 | * the smu fixes its handling of the ecc_info table. | |||
| 1828 | */ | |||
| 1829 | if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) && | |||
| 1830 | (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)(((13) << 16) | ((0) << 8) | (2)))) | |||
| 1831 | continue; | |||
| 1832 | ||||
| 1833 | amdgpu_ras_query_error_status(adev, &info); | |||
| 1834 | ||||
| 1835 | if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2)(((11) << 16) | ((0) << 8) | (2)) && | |||
| 1836 | adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)(((11) << 16) | ((0) << 8) | (4)) && | |||
| 1837 | adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)(((13) << 16) | ((0) << 8) | (0))) { | |||
| 1838 | if (amdgpu_ras_reset_error_status(adev, info.head.block)) | |||
| 1839 | dev_warn(adev->dev, "Failed to reset error counter and error status")printf("drm:pid%d:%s *WARNING* " "Failed to reset error counter and error status" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); | |||
| 1840 | } | |||
| 1841 | } | |||
| 1842 | } | |||
| 1843 | ||||
| 1844 | /* Parse RdRspStatus and WrRspStatus */ | |||
| 1845 | static void amdgpu_ras_error_status_query(struct amdgpu_device *adev, | |||
| 1846 | struct ras_query_if *info) | |||
| 1847 | { | |||
| 1848 | struct amdgpu_ras_block_object *block_obj; | |||
| 1849 | /* | |||
| 1850 | * Only two blocks need to query the read/write | |||
| 1851 | * RspStatus in the current state | |||
| 1852 | */ | |||
| 1853 | if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) && | |||
| 1854 | (info->head.block != AMDGPU_RAS_BLOCK__MMHUB)) | |||
| 1855 | return; | |||
| 1856 | ||||
| 1857 | block_obj = amdgpu_ras_get_ras_block(adev, | |||
| 1858 | info->head.block, | |||
| 1859 | info->head.sub_block_index); | |||
| 1860 | ||||
| 1861 | if (!block_obj || !block_obj->hw_ops) { | |||
| 1862 | dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",do { } while(0) | |||
| 1863 | get_ras_block_str(&info->head))do { } while(0); | |||
| 1864 | return; | |||
| 1865 | } | |||
| 1866 | ||||
| 1867 | if (block_obj->hw_ops->query_ras_error_status) | |||
| 1868 | block_obj->hw_ops->query_ras_error_status(adev); | |||
| 1869 | ||||
| 1870 | } | |||
| 1871 | ||||
| 1872 | static void amdgpu_ras_query_err_status(struct amdgpu_device *adev) | |||
| 1873 | { | |||
| 1874 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 1875 | struct ras_manager *obj; | |||
| 1876 | ||||
| 1877 | if (!adev->ras_enabled || !con) | |||
| 1878 | return; | |||
| 1879 | ||||
| 1880 | list_for_each_entry(obj, &con->head, node)for (obj = ({ const __typeof( ((__typeof(*obj) *)0)->node ) *__mptr = ((&con->head)->next); (__typeof(*obj) *) ( (char *)__mptr - __builtin_offsetof(__typeof(*obj), node) ) ;}); &obj->node != (&con->head); obj = ({ const __typeof( ((__typeof(*obj) *)0)->node ) *__mptr = (obj-> node.next); (__typeof(*obj) *)( (char *)__mptr - __builtin_offsetof (__typeof(*obj), node) );})) { | |||
| 1881 | struct ras_query_if info = { | |||
| 1882 | .head = obj->head, | |||
| 1883 | }; | |||
| 1884 | ||||
| 1885 | amdgpu_ras_error_status_query(adev, &info); | |||
| 1886 | } | |||
| 1887 | } | |||
| 1888 | ||||
| 1889 | /* recovery begin */ | |||
| 1890 | ||||
| 1891 | /* return 0 on success. | |||
| 1892 | * the caller needs to free bps. | |||
| 1893 | */ | |||
| 1894 | static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, | |||
| 1895 | struct ras_badpage **bps, unsigned int *count) | |||
| 1896 | { | |||
| 1897 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 1898 | struct ras_err_handler_data *data; | |||
| 1899 | int i = 0; | |||
| 1900 | int ret = 0, status; | |||
| 1901 | ||||
| 1902 | if (!con || !con->eh_data || !bps || !count) | |||
| 1903 | return -EINVAL22; | |||
| 1904 | ||||
| 1905 | mutex_lock(&con->recovery_lock)rw_enter_write(&con->recovery_lock); | |||
| 1906 | data = con->eh_data; | |||
| 1907 | if (!data || data->count == 0) { | |||
| 1908 | *bps = NULL((void *)0); | |||
| 1909 | ret = -EINVAL22; | |||
| 1910 | goto out; | |||
| 1911 | } | |||
| 1912 | ||||
| 1913 | *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL(0x0001 | 0x0004)); | |||
| 1914 | if (!*bps) { | |||
| 1915 | ret = -ENOMEM12; | |||
| 1916 | goto out; | |||
| 1917 | } | |||
| 1918 | ||||
| 1919 | for (; i < data->count; i++) { | |||
| 1920 | (*bps)[i] = (struct ras_badpage){ | |||
| 1921 | .bp = data->bps[i].retired_page, | |||
| 1922 | .size = AMDGPU_GPU_PAGE_SIZE4096, | |||
| 1923 | .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, | |||
| 1924 | }; | |||
| 1925 | status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr, | |||
| 1926 | data->bps[i].retired_page); | |||
| 1927 | if (status == -EBUSY16) | |||
| 1928 | (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; | |||
| 1929 | else if (status == -ENOENT2) | |||
| 1930 | (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; | |||
| 1931 | } | |||
| 1932 | ||||
| 1933 | *count = data->count; | |||
| 1934 | out: | |||
| 1935 | mutex_unlock(&con->recovery_lock)rw_exit_write(&con->recovery_lock); | |||
| 1936 | return ret; | |||
| 1937 | } | |||
| 1938 | ||||
| 1939 | static void amdgpu_ras_do_recovery(struct work_struct *work) | |||
| 1940 | { | |||
| 1941 | struct amdgpu_ras *ras = | |||
| 1942 | container_of(work, struct amdgpu_ras, recovery_work)({ const __typeof( ((struct amdgpu_ras *)0)->recovery_work ) *__mptr = (work); (struct amdgpu_ras *)( (char *)__mptr - __builtin_offsetof (struct amdgpu_ras, recovery_work) );}); | |||
| 1943 | struct amdgpu_device *remote_adev = NULL((void *)0); | |||
| 1944 | struct amdgpu_device *adev = ras->adev; | |||
| 1945 | struct list_head device_list, *device_list_handle = NULL((void *)0); | |||
| 1946 | ||||
| 1947 | if (!ras->disable_ras_err_cnt_harvest) { | |||
| 1948 | struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); | |||
| 1949 | ||||
| 1950 | /* Build list of devices to query RAS related errors */ | |||
| 1951 | if (hive && adev->gmc.xgmi.num_physical_nodes > 1) { | |||
| 1952 | device_list_handle = &hive->device_list; | |||
| 1953 | } else { | |||
| 1954 | INIT_LIST_HEAD(&device_list); | |||
| 1955 | list_add_tail(&adev->gmc.xgmi.head, &device_list); | |||
| 1956 | device_list_handle = &device_list; | |||
| 1957 | } | |||
| 1958 | ||||
| 1959 | list_for_each_entry(remote_adev,for (remote_adev = ({ const __typeof( ((__typeof(*remote_adev ) *)0)->gmc.xgmi.head ) *__mptr = ((device_list_handle)-> next); (__typeof(*remote_adev) *)( (char *)__mptr - __builtin_offsetof (__typeof(*remote_adev), gmc.xgmi.head) );}); &remote_adev ->gmc.xgmi.head != (device_list_handle); remote_adev = ({ const __typeof( ((__typeof(*remote_adev) *)0)->gmc.xgmi.head ) * __mptr = (remote_adev->gmc.xgmi.head.next); (__typeof(*remote_adev ) *)( (char *)__mptr - __builtin_offsetof(__typeof(*remote_adev ), gmc.xgmi.head) );})) | |||
| 1960 | device_list_handle, gmc.xgmi.head)for (remote_adev = ({ const __typeof( ((__typeof(*remote_adev ) *)0)->gmc.xgmi.head ) *__mptr = ((device_list_handle)-> next); (__typeof(*remote_adev) *)( (char *)__mptr - __builtin_offsetof (__typeof(*remote_adev), gmc.xgmi.head) );}); &remote_adev ->gmc.xgmi.head != (device_list_handle); remote_adev = ({ const __typeof( ((__typeof(*remote_adev) *)0)->gmc.xgmi.head ) * __mptr = (remote_adev->gmc.xgmi.head.next); (__typeof(*remote_adev ) *)( (char *)__mptr - __builtin_offsetof(__typeof(*remote_adev ), gmc.xgmi.head) );})) { | |||
| 1961 | amdgpu_ras_query_err_status(remote_adev); | |||
| 1962 | amdgpu_ras_log_on_err_counter(remote_adev); | |||
| 1963 | } | |||
| 1964 | ||||
| 1965 | amdgpu_put_xgmi_hive(hive); | |||
| 1966 | } | |||
| 1967 | ||||
| 1968 | if (amdgpu_device_should_recover_gpu(ras->adev)) { | |||
| 1969 | struct amdgpu_reset_context reset_context; | |||
| 1970 | memset(&reset_context, 0, sizeof(reset_context))__builtin_memset((&reset_context), (0), (sizeof(reset_context ))); | |||
| 1971 | ||||
| 1972 | reset_context.method = AMD_RESET_METHOD_NONE; | |||
| 1973 | reset_context.reset_req_dev = adev; | |||
| 1974 | clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); | |||
| 1975 | ||||
| 1976 | amdgpu_device_gpu_recover(ras->adev, NULL((void *)0), &reset_context); | |||
| 1977 | } | |||
| 1978 | atomic_set(&ras->in_recovery, 0)({ typeof(*(&ras->in_recovery)) __tmp = ((0)); *(volatile typeof(*(&ras->in_recovery)) *)&(*(&ras->in_recovery )) = __tmp; __tmp; }); | |||
| 1979 | } | |||
| 1980 | ||||
| 1981 | /* alloc/realloc bps array */ | |||
| 1982 | static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, | |||
| 1983 | struct ras_err_handler_data *data, int pages) | |||
| 1984 | { | |||
| 1985 | unsigned int old_space = data->count + data->space_left; | |||
| 1986 | unsigned int new_space = old_space + pages; | |||
| 1987 | unsigned int align_space = roundup2(new_space, 512)(((new_space) + ((512) - 1)) & (~((__typeof(new_space))(512 ) - 1))); | |||
| 1988 | void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL(0x0001 | 0x0004)); | |||
| 1989 | ||||
| 1990 | if (!bps) { | |||
| 1991 | return -ENOMEM12; | |||
| 1992 | } | |||
| 1993 | ||||
| 1994 | if (data->bps) { | |||
| 1995 | memcpy(bps, data->bps,__builtin_memcpy((bps), (data->bps), (data->count * sizeof (*data->bps))) | |||
| 1996 | data->count * sizeof(*data->bps))__builtin_memcpy((bps), (data->bps), (data->count * sizeof (*data->bps))); | |||
| 1997 | kfree(data->bps); | |||
| 1998 | } | |||
| 1999 | ||||
| 2000 | data->bps = bps; | |||
| 2001 | data->space_left += align_space - old_space; | |||
| 2002 | return 0; | |||
| 2003 | } | |||
| 2004 | ||||
| 2005 | /* it deals with vram only. */ | |||
| 2006 | int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, | |||
| 2007 | struct eeprom_table_record *bps, int pages) | |||
| 2008 | { | |||
| 2009 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 2010 | struct ras_err_handler_data *data; | |||
| 2011 | int ret = 0; | |||
| 2012 | uint32_t i; | |||
| 2013 | ||||
| 2014 | if (!con || !con->eh_data || !bps || pages <= 0) | |||
| 2015 | return 0; | |||
| 2016 | ||||
| 2017 | mutex_lock(&con->recovery_lock)rw_enter_write(&con->recovery_lock); | |||
| 2018 | data = con->eh_data; | |||
| 2019 | if (!data) | |||
| 2020 | goto out; | |||
| 2021 | ||||
| 2022 | for (i = 0; i < pages; i++) { | |||
| 2023 | if (amdgpu_ras_check_bad_page_unlock(con, | |||
| 2024 | bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT12)) | |||
| 2025 | continue; | |||
| 2026 | ||||
| 2027 | if (!data->space_left && | |||
| 2028 | amdgpu_ras_realloc_eh_data_space(adev, data, 256)) { | |||
| 2029 | ret = -ENOMEM12; | |||
| 2030 | goto out; | |||
| 2031 | } | |||
| 2032 | ||||
| 2033 | amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr, | |||
| 2034 | bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT12, | |||
| 2035 | AMDGPU_GPU_PAGE_SIZE4096); | |||
| 2036 | ||||
| 2037 | memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps))__builtin_memcpy((&data->bps[data->count]), (&bps [i]), (sizeof(*data->bps))); | |||
| 2038 | data->count++; | |||
| 2039 | data->space_left--; | |||
| 2040 | } | |||
| 2041 | out: | |||
| 2042 | mutex_unlock(&con->recovery_lock)rw_exit_write(&con->recovery_lock); | |||
| 2043 | ||||
| 2044 | return ret; | |||
| 2045 | } | |||
| 2046 | ||||
| 2047 | /* | |||
| 2048 | * write the error record array to eeprom; the function should be | |||
| 2049 | * protected by recovery_lock | |||
| 2050 | */ | |||
| 2051 | int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) | |||
| 2052 | { | |||
| 2053 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 2054 | struct ras_err_handler_data *data; | |||
| 2055 | struct amdgpu_ras_eeprom_control *control; | |||
| 2056 | int save_count; | |||
| 2057 | ||||
| 2058 | if (!con || !con->eh_data) | |||
| 2059 | return 0; | |||
| 2060 | ||||
| 2061 | mutex_lock(&con->recovery_lock)rw_enter_write(&con->recovery_lock); | |||
| 2062 | control = &con->eeprom_control; | |||
| 2063 | data = con->eh_data; | |||
| 2064 | save_count = data->count - control->ras_num_recs; | |||
| 2065 | mutex_unlock(&con->recovery_lock)rw_exit_write(&con->recovery_lock); | |||
| 2066 | /* only new entries are saved */ | |||
| 2067 | if (save_count > 0) { | |||
| 2068 | if (amdgpu_ras_eeprom_append(control, | |||
| 2069 | &data->bps[control->ras_num_recs], | |||
| 2070 | save_count)) { | |||
| 2071 | dev_err(adev->dev, "Failed to save EEPROM table data!")printf("drm:pid%d:%s *ERROR* " "Failed to save EEPROM table data!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); | |||
| 2072 | return -EIO5; | |||
| 2073 | } | |||
| 2074 | ||||
| 2075 | dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count)do { } while(0); | |||
| 2076 | } | |||
| 2077 | ||||
| 2078 | return 0; | |||
| 2079 | } | |||
| 2080 | ||||
| 2081 | /* | |||
| 2082 | * read the error record array from eeprom and reserve enough space | |||
| 2083 | * for storing new bad pages | |||
| 2084 | */ | |||
| 2085 | static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) | |||
| 2086 | { | |||
| 2087 | struct amdgpu_ras_eeprom_control *control = | |||
| 2088 | &adev->psp.ras_context.ras->eeprom_control; | |||
| 2089 | struct eeprom_table_record *bps; | |||
| 2090 | int ret; | |||
| 2091 | ||||
| 2092 | /* no bad page record, skip eeprom access */ | |||
| 2093 | if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0) | |||
| 2094 | return 0; | |||
| 2095 | ||||
| 2096 | bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL(0x0001 | 0x0004)); | |||
| 2097 | if (!bps) | |||
| 2098 | return -ENOMEM12; | |||
| 2099 | ||||
| 2100 | ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs); | |||
| 2101 | if (ret) | |||
| 2102 | dev_err(adev->dev, "Failed to load EEPROM table records!")printf("drm:pid%d:%s *ERROR* " "Failed to load EEPROM table records!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); | |||
| 2103 | else | |||
| 2104 | ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs); | |||
| 2105 | ||||
| 2106 | kfree(bps); | |||
| 2107 | return ret; | |||
| 2108 | } | |||
| 2109 | ||||
| 2110 | static bool_Bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, | |||
| 2111 | uint64_t addr) | |||
| 2112 | { | |||
| 2113 | struct ras_err_handler_data *data = con->eh_data; | |||
| 2114 | int i; | |||
| 2115 | ||||
| 2116 | addr >>= AMDGPU_GPU_PAGE_SHIFT12; | |||
| 2117 | for (i = 0; i < data->count; i++) | |||
| 2118 | if (addr == data->bps[i].retired_page) | |||
| 2119 | return true1; | |||
| 2120 | ||||
| 2121 | return false0; | |||
| 2122 | } | |||
| 2123 | ||||
| 2124 | /* | |||
| 2125 | * check if an address belongs to a bad page | |||
| 2126 | * | |||
| 2127 | * Note: this check is only for the umc block | |||
| 2128 | */ | |||
| 2129 | static bool_Bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, | |||
| 2130 | uint64_t addr) | |||
| 2131 | { | |||
| 2132 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 2133 | bool_Bool ret = false0; | |||
| 2134 | ||||
| 2135 | if (!con || !con->eh_data) | |||
| 2136 | return ret; | |||
| 2137 | ||||
| 2138 | mutex_lock(&con->recovery_lock)rw_enter_write(&con->recovery_lock); | |||
| 2139 | ret = amdgpu_ras_check_bad_page_unlock(con, addr); | |||
| 2140 | mutex_unlock(&con->recovery_lock)rw_exit_write(&con->recovery_lock); | |||
| 2141 | return ret; | |||
| 2142 | } | |||
| 2143 | ||||
| 2144 | static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev, | |||
| 2145 | uint32_t max_count) | |||
| 2146 | { | |||
| 2147 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 2148 | ||||
| 2149 | /* | |||
| 2150 | * Justification of the value of bad_page_cnt_threshold in the ras structure | |||
| 2151 | * | |||
| 2152 | * Generally, -1 <= amdgpu_bad_page_threshold <= max record length | |||
| 2153 | * in eeprom, which introduces two scenarios. | |||
| 2154 | * | |||
| 2155 | * Bad page retirement enablement: | |||
| 2156 | * - If amdgpu_bad_page_threshold = -1, | |||
| 2157 | * bad_page_cnt_threshold = typical value by formula. | |||
| 2158 | * | |||
| 2159 | * - When the value from user is 0 < amdgpu_bad_page_threshold < | |||
| 2160 | * max record length in eeprom, use it directly. | |||
| 2161 | * | |||
| 2162 | * Bad page retirement disablement: | |||
| 2163 | * - If amdgpu_bad_page_threshold = 0, bad page retirement | |||
| 2164 | * functionality is disabled, and bad_page_cnt_threshold will | |||
| 2165 | * have no effect. | |||
| 2166 | */ | |||
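/*
 * Worked example (illustrative numbers): with amdgpu_bad_page_threshold = -1
 * on a 16 GiB VRAM part, val = 16384 MiB / RAS_BAD_PAGE_COVER (100 MiB)
 * = 163 (integer division), so bad_page_cnt_threshold = min(163, max_count).
 */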
| 2167 | ||||
| 2168 | if (amdgpu_bad_page_threshold < 0) { | |||
| 2169 | u64 val = adev->gmc.mc_vram_size; | |||
| 2170 | ||||
| 2171 | do_div(val, RAS_BAD_PAGE_COVER)({ uint32_t __base = ((100 * 1024 * 1024ULL)); uint32_t __rem = ((uint64_t)(val)) % __base; (val) = ((uint64_t)(val)) / __base ; __rem; }); | |||
| 2172 | con->bad_page_cnt_threshold = min(lower_32_bits(val),(((((u32)(val)))<(max_count))?(((u32)(val))):(max_count)) | |||
| 2173 | max_count)(((((u32)(val)))<(max_count))?(((u32)(val))):(max_count)); | |||
| 2174 | } else { | |||
| 2175 | con->bad_page_cnt_threshold = min_t(int, max_count,({ int __min_a = (max_count); int __min_b = (amdgpu_bad_page_threshold ); __min_a < __min_b ? __min_a : __min_b; }) | |||
| 2176 | amdgpu_bad_page_threshold)({ int __min_a = (max_count); int __min_b = (amdgpu_bad_page_threshold ); __min_a < __min_b ? __min_a : __min_b; }); | |||
| 2177 | } | |||
| 2178 | } | |||
| 2179 | ||||
| 2180 | int amdgpu_ras_recovery_init(struct amdgpu_device *adev) | |||
| 2181 | { | |||
| 2182 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 2183 | struct ras_err_handler_data **data; | |||
| 2184 | u32 max_eeprom_records_count = 0; | |||
| 2185 | bool_Bool exc_err_limit = false0; | |||
| 2186 | int ret; | |||
| 2187 | ||||
| 2188 | if (!con || amdgpu_sriov_vf(adev)((adev)->virt.caps & (1 << 2))) | |||
| 2189 | return 0; | |||
| 2190 | ||||
| 2191 | /* Allow access to RAS EEPROM via debugfs, when the ASIC | |||
| 2192 | * supports RAS and debugfs is enabled, but when | |||
| 2193 | * adev->ras_enabled is unset, i.e. when the "ras_enable" | |||
| 2194 | * module parameter is set to 0. | |||
| 2195 | */ | |||
| 2196 | con->adev = adev; | |||
| 2197 | ||||
| 2198 | if (!adev->ras_enabled) | |||
| 2199 | return 0; | |||
| 2200 | ||||
| 2201 | data = &con->eh_data; | |||
| 2202 | *data = kmalloc(sizeof(**data), GFP_KERNEL(0x0001 | 0x0004) | __GFP_ZERO0x0008); | |||
| 2203 | if (!*data) { | |||
| 2204 | ret = -ENOMEM12; | |||
| 2205 | goto out; | |||
| 2206 | } | |||
| 2207 | ||||
| 2208 | rw_init(&con->recovery_lock, "rasrec")_rw_init_flags(&con->recovery_lock, "rasrec", 0, ((void *)0)); | |||
| 2209 | INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); | |||
| 2210 | atomic_set(&con->in_recovery, 0)({ typeof(*(&con->in_recovery)) __tmp = ((0)); *(volatile typeof(*(&con->in_recovery)) *)&(*(&con->in_recovery )) = __tmp; __tmp; }); | |||
| 2211 | con->eeprom_control.bad_channel_bitmap = 0; | |||
| 2212 | ||||
| 2213 | max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(); | |||
| 2214 | amdgpu_ras_validate_threshold(adev, max_eeprom_records_count); | |||
| 2215 | ||||
| 2216 | /* Todo: During testing the SMU might fail to read the eeprom through I2C | |||
| 2217 | * when the GPU is pending an XGMI reset at probe time | |||
| 2218 | * (mostly after the second bus reset), so skip it for now | |||
| 2219 | */ | |||
| 2220 | if (adev->gmc.xgmi.pending_reset) | |||
| 2221 | return 0; | |||
| 2222 | ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit); | |||
| 2223 | /* | |||
| 2224 | * This call fails when exc_err_limit is true or | |||
| 2225 | * ret != 0. | |||
| 2226 | */ | |||
| 2227 | if (exc_err_limit || ret) | |||
| 2228 | goto free; | |||
| 2229 | ||||
| 2230 | if (con->eeprom_control.ras_num_recs) { | |||
| 2231 | ret = amdgpu_ras_load_bad_pages(adev); | |||
| 2232 | if (ret) | |||
| 2233 | goto free; | |||
| 2234 | ||||
| 2235 | amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs); | |||
| 2236 | ||||
| 2237 | if (con->update_channel_flag == true1) { | |||
| 2238 | amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap); | |||
| 2239 | con->update_channel_flag = false0; | |||
| 2240 | } | |||
| 2241 | } | |||
| 2242 | ||||
| 2243 | #ifdef CONFIG_X86_MCE_AMD | |||
| 2244 | if ((adev->asic_type == CHIP_ALDEBARAN) && | |||
| 2245 | (adev->gmc.xgmi.connected_to_cpu)) | |||
| 2246 | amdgpu_register_bad_pages_mca_notifier(adev); | |||
| 2247 | #endif | |||
| 2248 | return 0; | |||
| 2249 | ||||
| 2250 | free: | |||
| 2251 | kfree((*data)->bps); | |||
| 2252 | kfree(*data); | |||
| 2253 | con->eh_data = NULL((void *)0); | |||
| 2254 | out: | |||
| 2255 | dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret)printf("drm:pid%d:%s *WARNING* " "Failed to initialize ras recovery! (%d)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , ret); | |||
| 2256 | ||||
| 2257 | /* | |||
| 2258 | * Except for the case where the error threshold is exceeded, other | |||
| 2259 | * failure cases in this function do not fail amdgpu driver init. | |||
| 2260 | */ | |||
| 2261 | if (!exc_err_limit) | |||
| 2262 | ret = 0; | |||
| 2263 | else | |||
| 2264 | ret = -EINVAL22; | |||
| 2265 | ||||
| 2266 | return ret; | |||
| 2267 | } | |||
| 2268 | ||||
| 2269 | static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) | |||
| 2270 | { | |||
| 2271 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 2272 | struct ras_err_handler_data *data = con->eh_data; | |||
| 2273 | ||||
| 2274 | /* recovery_init failed to init it, so fini is useless */ | |||
| 2275 | if (!data) | |||
| 2276 | return 0; | |||
| 2277 | ||||
| 2278 | cancel_work_sync(&con->recovery_work); | |||
| 2279 | ||||
| 2280 | mutex_lock(&con->recovery_lock)rw_enter_write(&con->recovery_lock); | |||
| 2281 | con->eh_data = NULL((void *)0); | |||
| 2282 | kfree(data->bps); | |||
| 2283 | kfree(data); | |||
| 2284 | mutex_unlock(&con->recovery_lock)rw_exit_write(&con->recovery_lock); | |||
| 2285 | ||||
| 2286 | return 0; | |||
| 2287 | } | |||
| 2288 | /* recovery end */ | |||
| 2289 | ||||
| 2290 | static bool_Bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) | |||
| 2291 | { | |||
| 2292 | if (amdgpu_sriov_vf(adev)((adev)->virt.caps & (1 << 2))) { | |||
| 2293 | switch (adev->ip_versions[MP0_HWIP][0]) { | |||
| 2294 | case IP_VERSION(13, 0, 2)(((13) << 16) | ((0) << 8) | (2)): | |||
| 2295 | return true1; | |||
| 2296 | default: | |||
| 2297 | return false0; | |||
| 2298 | } | |||
| 2299 | } | |||
| 2300 | ||||
| 2301 | if (adev->asic_type == CHIP_IP_DISCOVERY) { | |||
| 2302 | switch (adev->ip_versions[MP0_HWIP][0]) { | |||
| 2303 | case IP_VERSION(13, 0, 0)(((13) << 16) | ((0) << 8) | (0)): | |||
| 2304 | case IP_VERSION(13, 0, 10)(((13) << 16) | ((0) << 8) | (10)): | |||
| 2305 | return true1; | |||
| 2306 | default: | |||
| 2307 | return false0; | |||
| 2308 | } | |||
| 2309 | } | |||
| 2310 | ||||
| 2311 | return adev->asic_type == CHIP_VEGA10 || | |||
| 2312 | adev->asic_type == CHIP_VEGA20 || | |||
| 2313 | adev->asic_type == CHIP_ARCTURUS || | |||
| 2314 | adev->asic_type == CHIP_ALDEBARAN || | |||
| 2315 | adev->asic_type == CHIP_SIENNA_CICHLID; | |||
| 2316 | } | |||
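| | /* | |||
| | * Note on IP_VERSION(maj, min, rev) used above: it packs the three fields | |||
| | * into a single integer as (maj << 16) | (min << 8) | rev, which is the | |||
| | * encoding stored in adev->ip_versions[], so e.g. IP_VERSION(13, 0, 2) | |||
| | * compares equal to 0x0d0002. | |||
| | */ | |||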
| 2317 | ||||
| 2318 | /* | |||
| 2319 | * this is a workaround for the vega20 workstation sku: | |||
| 2320 | * force enable gfx ras and ignore the vbios gfx ras flag, | |||
| 2321 | * because GC EDC cannot be written | |||
| 2322 | */ | |||
| 2323 | static void amdgpu_ras_get_quirks(struct amdgpu_device *adev) | |||
| 2324 | { | |||
| 2325 | struct atom_context *ctx = adev->mode_info.atom_context; | |||
| 2326 | ||||
| 2327 | if (!ctx) | |||
| 2328 | return; | |||
| 2329 | ||||
| 2330 | if (strnstr(ctx->vbios_version, "D16406", | |||
| 2331 | sizeof(ctx->vbios_version)) || | |||
| 2332 | strnstr(ctx->vbios_version, "D36002", | |||
| 2333 | sizeof(ctx->vbios_version))) | |||
| 2334 | adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX); | |||
| 2335 | } | |||
| 2336 | ||||
| 2337 | /* | |||
| 2338 | * check the hardware's ras ability, which will be saved in hw_supported. | |||
| 2339 | * if the hardware does not support ras, we can skip some ras initialization | |||
| 2340 | * and forbid some ras operations from the IPs. | |||
| 2341 | * if software (say, a boot parameter) limits the ras ability, we still need | |||
| 2342 | * to allow the IPs to do some limited operations, like disable. In that case | |||
| 2343 | * we initialize ras as normal, but each function must check whether the | |||
| 2344 | * operation is allowed. | |||
| 2345 | */ | |||
| 2346 | static void amdgpu_ras_check_supported(struct amdgpu_device *adev) | |||
| 2347 | { | |||
| 2348 | adev->ras_hw_enabled = adev->ras_enabled = 0; | |||
| 2349 | ||||
| 2350 | if (!adev->is_atom_fw || | |||
| 2351 | !amdgpu_ras_asic_supported(adev)) | |||
| 2352 | return; | |||
| 2353 | ||||
| 2354 | if (!adev->gmc.xgmi.connected_to_cpu) { | |||
| 2355 | if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { | |||
| 2356 | dev_info(adev->dev, "MEM ECC is active.\n"); | |||
| 2357 | adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC | | |||
| 2358 | 1 << AMDGPU_RAS_BLOCK__DF); | |||
| 2359 | } else { | |||
| 2360 | dev_info(adev->dev, "MEM ECC is not presented.\n"); | |||
| 2361 | } | |||
| 2362 | ||||
| 2363 | if (amdgpu_atomfirmware_sram_ecc_supported(adev)) { | |||
| 2364 | dev_info(adev->dev, "SRAM ECC is active.\n"); | |||
| 2365 | if (!amdgpu_sriov_vf(adev)) { | |||
| 2366 | adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC | | |||
| 2367 | 1 << AMDGPU_RAS_BLOCK__DF); | |||
| 2368 | ||||
| 2369 | if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0)) | |||
| 2370 | adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN | | |||
| 2371 | 1 << AMDGPU_RAS_BLOCK__JPEG); | |||
| 2372 | else | |||
| 2373 | adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN | | |||
| 2374 | 1 << AMDGPU_RAS_BLOCK__JPEG); | |||
| 2375 | } else { | |||
| 2376 | adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF | | |||
| 2377 | 1 << AMDGPU_RAS_BLOCK__SDMA | | |||
| 2378 | 1 << AMDGPU_RAS_BLOCK__GFX); | |||
| 2379 | } | |||
| 2380 | } else { | |||
| 2381 | dev_info(adev->dev, "SRAM ECC is not presented.\n"); | |||
| 2382 | } | |||
| 2383 | } else { | |||
| 2384 | /* the driver only manages the RAS feature of a few IP blocks | |||
| 2385 | * when the GPU is connected to the CPU through XGMI */ | |||
| 2386 | adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX | | |||
| 2387 | 1 << AMDGPU_RAS_BLOCK__SDMA | | |||
| 2388 | 1 << AMDGPU_RAS_BLOCK__MMHUB); | |||
| 2389 | } | |||
| 2390 | ||||
| 2391 | amdgpu_ras_get_quirks(adev); | |||
| 2392 | ||||
| 2393 | /* hw_supported needs to be aligned with RAS block mask. */ | |||
| 2394 | adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK; | |||
| 2395 | ||||
| 2396 | adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 : | |||
| 2397 | adev->ras_hw_enabled & amdgpu_ras_mask; | |||
| 2398 | } | |||
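| | /* | |||
| | * In short: ras_hw_enabled is the per-block capability mask derived from the | |||
| | * hardware/vbios checks above, while ras_enabled is that mask further filtered | |||
| | * by the amdgpu_ras_enable and amdgpu_ras_mask options (boot/module parameters). | |||
| | */ | |||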
| 2399 | ||||
| 2400 | static void amdgpu_ras_counte_dw(struct work_struct *work) | |||
| 2401 | { | |||
| 2402 | struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, | |||
| 2403 | ras_counte_delay_work.work); | |||
| 2404 | struct amdgpu_device *adev = con->adev; | |||
| 2405 | struct drm_device *dev = adev_to_drm(adev); | |||
| 2406 | unsigned long ce_count, ue_count; | |||
| 2407 | int res; | |||
| 2408 | ||||
| 2409 | res = pm_runtime_get_sync(dev->dev); | |||
| 2410 | if (res < 0) | |||
| 2411 | goto Out; | |||
| 2412 | ||||
| 2413 | /* Cache new values. | |||
| 2414 | */ | |||
| 2415 | if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) { | |||
| 2416 | atomic_set(&con->ras_ce_count, ce_count); | |||
| 2417 | atomic_set(&con->ras_ue_count, ue_count); | |||
| 2418 | } | |||
| 2419 | ||||
| 2420 | pm_runtime_mark_last_busy(dev->dev); | |||
| 2421 | Out: | |||
| 2422 | pm_runtime_put_autosuspend(dev->dev); | |||
| 2423 | } | |||
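| | /* | |||
| | * The refresh above runs as delayed work (see INIT_DELAYED_WORK in | |||
| | * amdgpu_ras_init() below), presumably so it can take a runtime-PM reference | |||
| | * and sleep while querying the per-block error counts. | |||
| | */ | |||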
| 2424 | ||||
| 2425 | int amdgpu_ras_init(struct amdgpu_device *adev) | |||
| 2426 | { | |||
| 2427 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 2428 | int r; | |||
| 2429 | bool df_poison, umc_poison; | |||
| 2430 | ||||
| 2431 | if (con) | |||
| 2432 | return 0; | |||
| 2433 | ||||
| 2434 | con = kmalloc(sizeof(struct amdgpu_ras) + | |||
| 2435 | sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT + | |||
| 2436 | sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT, | |||
| 2437 | GFP_KERNEL|__GFP_ZERO); | |||
| 2438 | if (!con) | |||
| 2439 | return -ENOMEM; | |||
| 2440 | ||||
| 2441 | con->adev = adev; | |||
| 2442 | INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw); | |||
| 2443 | atomic_set(&con->ras_ce_count, 0); | |||
| 2444 | atomic_set(&con->ras_ue_count, 0); | |||
| 2445 | ||||
| 2446 | con->objs = (struct ras_manager *)(con + 1); | |||
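| | /* the single allocation above holds the amdgpu_ras context immediately | |||
| | * followed by the ras_manager array, so objs points right past the context */ | |||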
| 2447 | ||||
| 2448 | amdgpu_ras_set_context(adev, con); | |||
| 2449 | ||||
| 2450 | amdgpu_ras_check_supported(adev); | |||
| 2451 | ||||
| 2452 | if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) { | |||
| 2453 | /* set gfx block ras context feature for VEGA20 Gaming | |||
| 2454 | * send ras disable cmd to ras ta during ras late init. | |||
| 2455 | */ | |||
| 2456 | if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) { | |||
| 2457 | con->features |= BIT(AMDGPU_RAS_BLOCK__GFX); | |||
| 2458 | ||||
| 2459 | return 0; | |||
| 2460 | } | |||
| 2461 | ||||
| 2462 | r = 0; | |||
| 2463 | goto release_con; | |||
| 2464 | } | |||
| 2465 | ||||
| 2466 | con->update_channel_flag = false; | |||
| 2467 | con->features = 0; | |||
| 2468 | INIT_LIST_HEAD(&con->head); | |||
| 2469 | /* Might need get this flag from vbios. */ | |||
| 2470 | con->flags = RAS_DEFAULT_FLAGS; | |||
| 2471 | ||||
| 2472 | /* initialize nbio ras function ahead of any other | |||
| 2473 | * ras functions so hardware fatal error interrupt | |||
| 2474 | * can be enabled as early as possible */ | |||
| 2475 | switch (adev->asic_type) { | |||
| 2476 | case CHIP_VEGA20: | |||
| 2477 | case CHIP_ARCTURUS: | |||
| 2478 | case CHIP_ALDEBARAN: | |||
| 2479 | if (!adev->gmc.xgmi.connected_to_cpu) { | |||
| 2480 | adev->nbio.ras = &nbio_v7_4_ras; | |||
| 2481 | amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block); | |||
| 2482 | adev->nbio.ras_if = &adev->nbio.ras->ras_block.ras_comm; | |||
| 2483 | } | |||
| 2484 | break; | |||
| 2485 | default: | |||
| 2486 | /* nbio ras is not available */ | |||
| 2487 | break; | |||
| 2488 | } | |||
| 2489 | ||||
| 2490 | if (adev->nbio.ras && | |||
| 2491 | adev->nbio.ras->init_ras_controller_interrupt) { | |||
| 2492 | r = adev->nbio.ras->init_ras_controller_interrupt(adev); | |||
| 2493 | if (r) | |||
| 2494 | goto release_con; | |||
| 2495 | } | |||
| 2496 | ||||
| 2497 | if (adev->nbio.ras && | |||
| 2498 | adev->nbio.ras->init_ras_err_event_athub_interrupt) { | |||
| 2499 | r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev); | |||
| 2500 | if (r) | |||
| 2501 | goto release_con; | |||
| 2502 | } | |||
| 2503 | ||||
| 2504 | /* Init poison supported flag, the default value is false */ | |||
| 2505 | if (adev->gmc.xgmi.connected_to_cpu) { | |||
| 2506 | /* enabled by default when GPU is connected to CPU */ | |||
| 2507 | con->poison_supported = true; | |||
| 2508 | } | |||
| 2509 | else if (adev->df.funcs && | |||
| 2510 | adev->df.funcs->query_ras_poison_mode && | |||
| 2511 | adev->umc.ras && | |||
| 2512 | adev->umc.ras->query_ras_poison_mode) { | |||
| 2513 | df_poison = | |||
| 2514 | adev->df.funcs->query_ras_poison_mode(adev); | |||
| 2515 | umc_poison = | |||
| 2516 | adev->umc.ras->query_ras_poison_mode(adev); | |||
| 2517 | /* Only when poison is set in both DF and UMC can we support it */ | |||
| 2518 | if (df_poison && umc_poison) | |||
| 2519 | con->poison_supported = true; | |||
| 2520 | else if (df_poison != umc_poison) | |||
| 2521 | dev_warn(adev->dev, "Poison setting is inconsistent in DF/UMC(%d:%d)!\n", | |||
| 2522 | df_poison, umc_poison); | |||
| 2523 | } | |||
| 2524 | ||||
| 2525 | if (amdgpu_ras_fs_init(adev)) { | |||
| 2526 | r = -EINVAL; | |||
| 2527 | goto release_con; | |||
| 2528 | } | |||
| 2529 | ||||
| 2530 | dev_info(adev->dev, "RAS INFO: ras initialized successfully, " | |||
| 2531 | "hardware ability[%x] ras_mask[%x]\n", | |||
| 2532 | adev->ras_hw_enabled, adev->ras_enabled); | |||
| 2533 | ||||
| 2534 | return 0; | |||
| 2535 | release_con: | |||
| 2536 | amdgpu_ras_set_context(adev, NULL); | |||
| 2537 | kfree(con); | |||
| 2538 | ||||
| 2539 | return r; | |||
| 2540 | } | |||
| 2541 | ||||
| 2542 | int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev) | |||
| 2543 | { | |||
| 2544 | if (adev->gmc.xgmi.connected_to_cpu) | |||
| 2545 | return 1; | |||
| 2546 | return 0; | |||
| 2547 | } | |||
| 2548 | ||||
| 2549 | static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev, | |||
| 2550 | struct ras_common_if *ras_block) | |||
| 2551 | { | |||
| 2552 | struct ras_query_if info = { | |||
| 2553 | .head = *ras_block, | |||
| 2554 | }; | |||
| 2555 | ||||
| 2556 | if (!amdgpu_persistent_edc_harvesting_supported(adev)) | |||
| 2557 | return 0; | |||
| 2558 | ||||
| 2559 | if (amdgpu_ras_query_error_status(adev, &info) != 0) | |||
| 2560 | DRM_WARN("RAS init harvest failure"); | |||
| 2561 | ||||
| 2562 | if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0) | |||
| 2563 | DRM_WARN("RAS init harvest reset failure"); | |||
| 2564 | ||||
| 2565 | return 0; | |||
| 2566 | } | |||
| 2567 | ||||
| 2568 | bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev) | |||
| 2569 | { | |||
| 2570 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 2571 | ||||
| 2572 | if (!con) | |||
| 2573 | return false; | |||
| 2574 | ||||
| 2575 | return con->poison_supported; | |||
| 2576 | } | |||
| 2577 | ||||
| 2578 | /* helper function to handle common stuff in ip late init phase */ | |||
| 2579 | int amdgpu_ras_block_late_init(struct amdgpu_device *adev, | |||
| 2580 | struct ras_common_if *ras_block) | |||
| 2581 | { | |||
| 2582 | struct amdgpu_ras_block_object *ras_obj = NULL; | |||
| 2583 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 2584 | unsigned long ue_count, ce_count; | |||
| 2585 | int r; | |||
| 2586 | ||||
| 2587 | /* disable RAS feature per IP block if it is not supported */ | |||
| 2588 | if (!amdgpu_ras_is_supported(adev, ras_block->block)) { | |||
| 2589 | amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); | |||
| 2590 | return 0; | |||
| 2591 | } | |||
| 2592 | ||||
| 2593 | r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1); | |||
| 2594 | if (r) { | |||
| 2595 | if (adev->in_suspend || amdgpu_in_reset(adev)) { | |||
| 2596 | /* in the resume phase, if we fail to enable ras, | |||
| 2597 | * clean up all ras fs nodes and disable ras */ | |||
| 2598 | goto cleanup; | |||
| 2599 | } else | |||
| 2600 | return r; | |||
| 2601 | } | |||
| 2602 | ||||
| 2603 | /* check for errors on warm reset for ASICs with persistent EDC support */ | |||
| 2604 | amdgpu_persistent_edc_harvesting(adev, ras_block); | |||
| 2605 | ||||
| 2606 | /* in resume phase, no need to create ras fs node */ | |||
| 2607 | if (adev->in_suspend || amdgpu_in_reset(adev)) | |||
| 2608 | return 0; | |||
| 2609 | ||||
| 2610 | ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm); | |||
| 2611 | if (ras_obj->ras_cb || (ras_obj->hw_ops && | |||
| 2612 | (ras_obj->hw_ops->query_poison_status || | |||
| 2613 | ras_obj->hw_ops->handle_poison_consumption))) { | |||
| 2614 | r = amdgpu_ras_interrupt_add_handler(adev, ras_block); | |||
| 2615 | if (r) | |||
| 2616 | goto cleanup; | |||
| 2617 | } | |||
| 2618 | ||||
| 2619 | r = amdgpu_ras_sysfs_create(adev, ras_block); | |||
| 2620 | if (r) | |||
| 2621 | goto interrupt; | |||
| 2622 | ||||
| 2623 | /* Those are the cached values at init. | |||
| 2624 | */ | |||
| 2625 | if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) { | |||
| 2626 | atomic_set(&con->ras_ce_count, ce_count); | |||
| 2627 | atomic_set(&con->ras_ue_count, ue_count); | |||
| 2628 | } | |||
| 2629 | ||||
| 2630 | return 0; | |||
| 2631 | ||||
| 2632 | interrupt: | |||
| 2633 | if (ras_obj->ras_cb) | |||
| 2634 | amdgpu_ras_interrupt_remove_handler(adev, ras_block); | |||
| 2635 | cleanup: | |||
| 2636 | amdgpu_ras_feature_enable(adev, ras_block, 0); | |||
| 2637 | return r; | |||
| 2638 | } | |||
| 2639 | ||||
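| | /* Default late-init path used by amdgpu_ras_late_init() for blocks that do | |||
| | * not provide their own ras_late_init callback. */ | |||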
| 2640 | static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev, | |||
| 2641 | struct ras_common_if *ras_block) | |||
| 2642 | { | |||
| 2643 | return amdgpu_ras_block_late_init(adev, ras_block); | |||
| 2644 | } | |||
| 2645 | ||||
| 2646 | /* helper function to remove ras fs node and interrupt handler */ | |||
| 2647 | void amdgpu_ras_block_late_fini(struct amdgpu_device *adev, | |||
| 2648 | struct ras_common_if *ras_block) | |||
| 2649 | { | |||
| 2650 | struct amdgpu_ras_block_object *ras_obj; | |||
| 2651 | if (!ras_block) | |||
| 2652 | return; | |||
| 2653 | ||||
| 2654 | amdgpu_ras_sysfs_remove(adev, ras_block); | |||
| 2655 | ||||
| 2656 | ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm); | |||
| 2657 | if (ras_obj->ras_cb) | |||
| 2658 | amdgpu_ras_interrupt_remove_handler(adev, ras_block); | |||
| 2659 | } | |||
| 2660 | ||||
| 2661 | static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev, | |||
| 2662 | struct ras_common_if *ras_block) | |||
| 2663 | { | |||
| 2664 | return amdgpu_ras_block_late_fini(adev, ras_block); | |||
| 2665 | } | |||
| 2666 | ||||
| 2667 | /* do some init work after IP late init, as a dependency. | |||
| 2668 | * it runs in the resume, gpu reset and boot-up cases. | |||
| 2669 | */ | |||
| 2670 | void amdgpu_ras_resume(struct amdgpu_device *adev) | |||
| 2671 | { | |||
| 2672 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 2673 | struct ras_manager *obj, *tmp; | |||
| 2674 | ||||
| 2675 | if (!adev->ras_enabled || !con) { | |||
| 2676 | /* clean the ras context for VEGA20 Gaming after sending the ras disable cmd */ | |||
| 2677 | amdgpu_release_ras_context(adev); | |||
| 2678 | ||||
| 2679 | return; | |||
| 2680 | } | |||
| 2681 | ||||
| 2682 | if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { | |||
| 2683 | /* Set up all other IPs which are not implemented. One tricky | |||
| 2684 | * point is that an IP's actual ras error type should be | |||
| 2685 | * MULTI_UNCORRECTABLE, but since the driver does not handle it, | |||
| 2686 | * ERROR_NONE makes sense anyway. | |||
| 2687 | */ | |||
| 2688 | amdgpu_ras_enable_all_features(adev, 1); | |||
| 2689 | ||||
| 2690 | /* We enable ras on all hw_supported blocks, but a boot | |||
| 2691 | * parameter might disable some of them, and one or more IPs may | |||
| 2692 | * not be implemented yet. So we disable those on their behalf. | |||
| 2693 | */ | |||
| 2694 | list_for_each_entry_safe(obj, tmp, &con->head, node) { | |||
| 2695 | if (!amdgpu_ras_is_supported(adev, obj->head.block)) { | |||
| 2696 | amdgpu_ras_feature_enable(adev, &obj->head, 0); | |||
| 2697 | /* there should not be any reference. */ | |||
| 2698 | WARN_ON(alive_obj(obj)); | |||
| 2699 | } | |||
| 2700 | } | |||
| 2701 | } | |||
| 2702 | } | |||
| 2703 | ||||
| 2704 | void amdgpu_ras_suspend(struct amdgpu_device *adev) | |||
| 2705 | { | |||
| 2706 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 2707 | ||||
| 2708 | if (!adev->ras_enabled || !con) | |||
| 2709 | return; | |||
| 2710 | ||||
| 2711 | amdgpu_ras_disable_all_features(adev, 0); | |||
| 2712 | /* Make sure all ras objects are disabled. */ | |||
| 2713 | if (con->features) | |||
| 2714 | amdgpu_ras_disable_all_features(adev, 1); | |||
| 2715 | } | |||
| 2716 | ||||
| 2717 | int amdgpu_ras_late_init(struct amdgpu_device *adev) | |||
| 2718 | { | |||
| 2719 | struct amdgpu_ras_block_list *node, *tmp; | |||
| 2720 | struct amdgpu_ras_block_object *obj; | |||
| 2721 | int r; | |||
| 2722 | ||||
| 2723 | /* Guest side doesn't need to init the ras feature */ | |||
| 2724 | if (amdgpu_sriov_vf(adev)) | |||
| 2725 | return 0; | |||
| 2726 | ||||
| 2727 | list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { | |||
| 2728 | if (!node->ras_obj) { | |||
| 2729 | dev_warn(adev->dev, "Warning: abnormal ras list node.\n"); | |||
| 2730 | continue; | |||
| 2731 | } | |||
| 2732 | ||||
| 2733 | obj = node->ras_obj; | |||
| 2734 | if (obj->ras_late_init) { | |||
| 2735 | r = obj->ras_late_init(adev, &obj->ras_comm); | |||
| 2736 | if (r) { | |||
| 2737 | dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n", | |||
| 2738 | obj->ras_comm.name, r); | |||
| 2739 | return r; | |||
| 2740 | } | |||
| 2741 | } else | |||
| 2742 | amdgpu_ras_block_late_init_default(adev, &obj->ras_comm); | |||
| 2743 | } | |||
| 2744 | ||||
| 2745 | return 0; | |||
| 2746 | } | |||
| 2747 | ||||
| 2748 | /* do some fini work before IP fini, as a dependency */ | |||
| 2749 | int amdgpu_ras_pre_fini(struct amdgpu_device *adev) | |||
| 2750 | { | |||
| 2751 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 2752 | ||||
| 2753 | if (!adev->ras_enabled || !con) | |||
| 2754 | return 0; | |||
| 2755 | ||||
| 2756 | ||||
| 2757 | /* Need to disable ras on all IPs here before ip [hw/sw]fini */ | |||
| 2758 | if (con->features) | |||
| 2759 | amdgpu_ras_disable_all_features(adev, 0); | |||
| 2760 | amdgpu_ras_recovery_fini(adev); | |||
| 2761 | return 0; | |||
| 2762 | } | |||
| 2763 | ||||
| 2764 | int amdgpu_ras_fini(struct amdgpu_device *adev) | |||
| 2765 | { | |||
| 2766 | struct amdgpu_ras_block_list *ras_node, *tmp; | |||
| 2767 | struct amdgpu_ras_block_object *obj = NULL; | |||
| 2768 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 2769 | ||||
| 2770 | if (!adev->ras_enabled || !con) | |||
| 2771 | return 0; | |||
| 2772 | ||||
| 2773 | list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) { | |||
| 2774 | if (ras_node->ras_obj) { | |||
| 2775 | obj = ras_node->ras_obj; | |||
| 2776 | if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) && | |||
| 2777 | obj->ras_fini) | |||
| 2778 | obj->ras_fini(adev, &obj->ras_comm); | |||
| 2779 | else | |||
| 2780 | amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm); | |||
| 2781 | } | |||
| 2782 | ||||
| 2783 | /* Clear ras blocks from ras_list and free ras block list node */ | |||
| 2784 | list_del(&ras_node->node); | |||
| 2785 | kfree(ras_node); | |||
| 2786 | } | |||
| 2787 | ||||
| 2788 | amdgpu_ras_fs_fini(adev); | |||
| 2789 | amdgpu_ras_interrupt_remove_all(adev); | |||
| 2790 | ||||
| 2791 | WARN(con->features, "Feature mask is not cleared"); | |||
| 2792 | ||||
| 2793 | if (con->features) | |||
| 2794 | amdgpu_ras_disable_all_features(adev, 1); | |||
| 2795 | ||||
| 2796 | cancel_delayed_work_sync(&con->ras_counte_delay_work); | |||
| 2797 | ||||
| 2798 | amdgpu_ras_set_context(adev, NULL); | |||
| 2799 | kfree(con); | |||
| 2800 | ||||
| 2801 | return 0; | |||
| 2802 | } | |||
| 2803 | ||||
| 2804 | void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) | |||
| 2805 | { | |||
| 2806 | amdgpu_ras_check_supported(adev); | |||
| 2807 | if (!adev->ras_hw_enabled) | |||
| 2808 | return; | |||
| 2809 | ||||
| 2810 | if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { | |||
| 2811 | dev_info(adev->dev, "uncorrectable hardware error" | |||
| 2812 | "(ERREVENT_ATHUB_INTERRUPT) detected!\n"); | |||
| 2813 | ||||
| 2814 | amdgpu_ras_reset_gpu(adev); | |||
| 2815 | } | |||
| 2816 | } | |||
| 2817 | ||||
| 2818 | bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev) | |||
| 2819 | { | |||
| 2820 | if (adev->asic_type == CHIP_VEGA20 && | |||
| 2821 | adev->pm.fw_version <= 0x283400) { | |||
| 2822 | return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) && | |||
| 2823 | amdgpu_ras_intr_triggered(); | |||
| 2824 | } | |||
| 2825 | ||||
| 2826 | return false; | |||
| 2827 | } | |||
| 2828 | ||||
| 2829 | void amdgpu_release_ras_context(struct amdgpu_device *adev) | |||
| 2830 | { | |||
| 2831 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); | |||
| 2832 | ||||
| 2833 | if (!con) | |||
| 2834 | return; | |||
| 2835 | ||||
| 2836 | if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { | |||
| 2837 | con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX); | |||
| 2838 | amdgpu_ras_set_context(adev, NULL); | |||
| 2839 | kfree(con); | |||
| 2840 | } | |||
| 2841 | } | |||
| 2842 | ||||
| 2843 | #ifdef CONFIG_X86_MCE_AMD | |||
| 2844 | static struct amdgpu_device *find_adev(uint32_t node_id) | |||
| 2845 | { | |||
| 2846 | int i; | |||
| 2847 | struct amdgpu_device *adev = NULL; | |||
| 2848 | ||||
| 2849 | for (i = 0; i < mce_adev_list.num_gpu; i++) { | |||
| 2850 | adev = mce_adev_list.devs[i]; | |||
| 2851 | ||||
| 2852 | if (adev && adev->gmc.xgmi.connected_to_cpu && | |||
| 2853 | adev->gmc.xgmi.physical_node_id == node_id) | |||
| 2854 | break; | |||
| 2855 | adev = NULL; | |||
| 2856 | } | |||
| 2857 | ||||
| 2858 | return adev; | |||
| 2859 | } | |||
| 2860 | ||||
| 2861 | #define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF) | |||
| 2862 | #define GET_UMC_INST(m) (((m) >> 21) & 0x7) | |||
| 2863 | #define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4)) | |||
| 2864 | #define GPU_ID_OFFSET 8 | |||
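| | /* | |||
| | * Informal reading of the helpers above: the GPU id sits in MCA_IPID bits | |||
| | * [47:44] (biased by GPU_ID_OFFSET), the UMC instance in bits [23:21], and | |||
| | * the channel index is built from bits [13:12] (low two bits) plus bit [20] | |||
| | * (as bit 2). For example, IPID bits [47:44] == 0x9 gives gpu_id = 9 - 8 = 1. | |||
| | */ | |||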
| 2865 | ||||
| 2866 | static int amdgpu_bad_page_notifier(struct notifier_block *nb, | |||
| 2867 | unsigned long val, void *data) | |||
| 2868 | { | |||
| 2869 | struct mce *m = (struct mce *)data; | |||
| 2870 | struct amdgpu_device *adev = NULL; | |||
| 2871 | uint32_t gpu_id = 0; | |||
| 2872 | uint32_t umc_inst = 0, ch_inst = 0; | |||
| 2873 | struct ras_err_data err_data = {0, 0, 0, NULL}; | |||
| 2874 | ||||
| 2875 | /* | |||
| 2876 | * If the error was generated in UMC_V2, which belongs to GPU UMCs, | |||
| 2877 | * and the error occurred in DramECC (Extended error code = 0), only then | |||
| 2878 | * process the error; otherwise bail out. | |||
| 2879 | */ | |||
| 2880 | if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) && | |||
| 2881 | (XEC(m->status, 0x3f) == 0x0))) | |||
| 2882 | return NOTIFY_DONE; | |||
| 2883 | ||||
| 2884 | /* | |||
| 2885 | * If it is a correctable error, return. | |||
| 2886 | */ | |||
| 2887 | if (mce_is_correctable(m)) | |||
| 2888 | return NOTIFY_OK; | |||
| 2889 | ||||
| 2890 | /* | |||
| 2891 | * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register. | |||
| 2892 | */ | |||
| 2893 | gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET; | |||
| 2894 | ||||
| 2895 | adev = find_adev(gpu_id); | |||
| 2896 | if (!adev) { | |||
| 2897 | DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__, | |||
| 2898 | gpu_id); | |||
| 2899 | return NOTIFY_DONE; | |||
| 2900 | } | |||
| 2901 | ||||
| 2902 | /* | |||
| 2903 | * If it is an uncorrectable error, find out the UMC instance and | |||
| 2904 | * channel index. | |||
| 2905 | */ | |||
| 2906 | umc_inst = GET_UMC_INST(m->ipid); | |||
| 2907 | ch_inst = GET_CHAN_INDEX(m->ipid); | |||
| 2908 | ||||
| 2909 | dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d", | |||
| 2910 | umc_inst, ch_inst); | |||
| 2911 | ||||
| 2912 | err_data.err_addr = | |||
| 2913 | kcalloc(adev->umc.max_ras_err_cnt_per_query, | |||
| 2914 | sizeof(struct eeprom_table_record), GFP_KERNEL); | |||
| 2915 | if (!err_data.err_addr) { | |||
| 2916 | dev_warn(adev->dev, | |||
| 2917 | "Failed to alloc memory for umc error record in mca notifier!\n"); | |||
| 2918 | return NOTIFY_DONE; | |||
| 2919 | } | |||
| 2920 | ||||
| 2921 | /* | |||
| 2922 | * Translate UMC channel address to Physical address | |||
| 2923 | */ | |||
| 2924 | if (adev->umc.ras && | |||
| 2925 | adev->umc.ras->convert_ras_error_address) | |||
| 2926 | adev->umc.ras->convert_ras_error_address(adev, | |||
| 2927 | &err_data, m->addr, ch_inst, umc_inst); | |||
| 2928 | ||||
| 2929 | if (amdgpu_bad_page_threshold != 0) { | |||
| 2930 | amdgpu_ras_add_bad_pages(adev, err_data.err_addr, | |||
| 2931 | err_data.err_addr_cnt); | |||
| 2932 | amdgpu_ras_save_bad_pages(adev); | |||
| 2933 | } | |||
| 2934 | ||||
| 2935 | kfree(err_data.err_addr); | |||
| 2936 | return NOTIFY_OK; | |||
| 2937 | } | |||
| 2938 | ||||
| 2939 | static struct notifier_block amdgpu_bad_page_nb = { | |||
| 2940 | .notifier_call = amdgpu_bad_page_notifier, | |||
| 2941 | .priority = MCE_PRIO_UC, | |||
| 2942 | }; | |||
| 2943 | ||||
| 2944 | static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev) | |||
| 2945 | { | |||
| 2946 | /* | |||
| 2947 | * Add the adev to the mce_adev_list. | |||
| 2948 | * During mode2 reset, amdgpu device is temporarily | |||
| 2949 | * removed from the mgpu_info list which can cause | |||
| 2950 | * page retirement to fail. | |||
| 2951 | * Use this list instead of mgpu_info to find the amdgpu | |||
| 2952 | * device on which the UMC error was reported. | |||
| 2953 | */ | |||
| 2954 | mce_adev_list.devs[mce_adev_list.num_gpu++] = adev; | |||
| 2955 | ||||
| 2956 | /* | |||
| 2957 | * Register the x86 notifier only once | |||
| 2958 | * with MCE subsystem. | |||
| 2959 | */ | |||
| 2960 | if (notifier_registered == false) { | |||
| 2961 | mce_register_decode_chain(&amdgpu_bad_page_nb); | |||
| 2962 | notifier_registered = true; | |||
| 2963 | } | |||
| 2964 | } | |||
| 2965 | #endif | |||
| 2966 | ||||
| 2967 | struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev) | |||
| 2968 | { | |||
| 2969 | if (!adev) | |||
| 2970 | return NULL; | |||
| 2971 | ||||
| 2972 | return adev->psp.ras_context.ras; | |||
| 2973 | } | |||
| 2974 | ||||
| 2975 | int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con) | |||
| 2976 | { | |||
| 2977 | if (!adev) | |||
| 2978 | return -EINVAL; | |||
| 2979 | ||||
| 2980 | adev->psp.ras_context.ras = ras_con; | |||
| 2981 | return 0; | |||
| 2982 | } | |||
| 2983 | ||||
| 2984 | /* check if ras is supported on block, say, sdma, gfx */ | |||
| 2985 | int amdgpu_ras_is_supported(struct amdgpu_device *adev, | |||
| 2986 | unsigned int block) | |||
| 2987 | { | |||
| 2988 | struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); | |||
| 2989 | ||||
| 2990 | if (block >= AMDGPU_RAS_BLOCK_COUNT) | |||
| 2991 | return 0; | |||
| 2992 | return ras && (adev->ras_enabled & (1 << block)); | |||
| 2993 | } | |||
| 2994 | ||||
| 2995 | int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) | |||
| 2996 | { | |||
| 2997 | struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); | |||
| 2998 | ||||
| 2999 | if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) | |||
| 3000 | amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work); | |||
| 3001 | return 0; | |||
| 3002 | } | |||
| 3003 | ||||
| 3004 | ||||
| 3005 | /* Register each ip ras block into amdgpu ras */ | |||
| 3006 | int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, | |||
| 3007 | struct amdgpu_ras_block_object *ras_block_obj) | |||
| 3008 | { | |||
| 3009 | struct amdgpu_ras_block_list *ras_node; | |||
| 3010 | if (!adev || !ras_block_obj) | |||
| 3011 | return -EINVAL; | |||
| 3012 | ||||
| 3013 | if (!amdgpu_ras_asic_supported(adev)) | |||
| 3014 | return 0; | |||
| 3015 | ||||
| 3016 | ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL); | |||
| 3017 | if (!ras_node) | |||
| 3018 | return -ENOMEM; | |||
| 3019 | ||||
| 3020 | INIT_LIST_HEAD(&ras_node->node); | |||
| 3021 | ras_node->ras_obj = ras_block_obj; | |||
| 3022 | list_add_tail(&ras_node->node, &adev->ras_list); | |||
| 3023 | ||||
| 3024 | return 0; | |||
| 3025 | } |
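| | /* | |||
| | * Typical use, mirroring the nbio registration in amdgpu_ras_init() above: | |||
| | * an IP block owns a static amdgpu_ras_block_object, registers it during | |||
| | * early init, and keeps a pointer to the embedded ras_comm as its ras_if: | |||
| | * | |||
| | *     adev->nbio.ras = &nbio_v7_4_ras; | |||
| | *     amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block); | |||
| | *     adev->nbio.ras_if = &adev->nbio.ras->ras_block.ras_comm; | |||
| | */ | |||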