File:    dev/pci/drm/amd/amdgpu/amdgpu_reset.c
Warning: line 137, column 3: Potential leak of memory pointed to by 'reset_domain'
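
Note on the warning: the flagged statement is the "return NULL;" at listing line 137 below, in the path where create_singlethread_workqueue() fails. The allocation is not freed directly there; instead, amdgpu_reset_put_reset_domain() on line 136 drops the reference taken by kref_init() on line 131, which invokes the destructor amdgpu_reset_destroy_reset_domain() (line 108) and thus kvfree(). The analyzer most likely cannot see through the destructor function pointer passed to kref_put(), so the report is plausibly a false positive. A minimal sketch of the put helper, assuming the conventional kref wrapper from amdgpu_reset.h (that header is not part of this report; verify against the actual source):

    static inline void
    amdgpu_reset_put_reset_domain(struct amdgpu_reset_domain *domain)
    {
            /* kref_init() started the count at 1, so this single put
             * drops it to 0 and kref_put() calls
             * amdgpu_reset_destroy_reset_domain(), which kvfree()s the
             * domain; no leak on the line-137 path if this holds. */
            kref_put(&domain->refcount, amdgpu_reset_destroy_reset_domain);
    }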
  1  /*
  2   * Copyright 2021 Advanced Micro Devices, Inc.
  3   *
  4   * Permission is hereby granted, free of charge, to any person obtaining a
  5   * copy of this software and associated documentation files (the "Software"),
  6   * to deal in the Software without restriction, including without limitation
  7   * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8   * and/or sell copies of the Software, and to permit persons to whom the
  9   * Software is furnished to do so, subject to the following conditions:
 10   *
 11   * The above copyright notice and this permission notice shall be included in
 12   * all copies or substantial portions of the Software.
 13   *
 14   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 17   * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18   * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19   * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20   * OTHER DEALINGS IN THE SOFTWARE.
 21   *
 22   */
 23
 24  #include "amdgpu_reset.h"
 25  #include "aldebaran.h"
 26  #include "sienna_cichlid.h"
 27
 28  int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
 29                               struct amdgpu_reset_handler *handler)
 30  {
 31          /* TODO: Check if handler exists? */
 32          list_add_tail(&handler->handler_list, &reset_ctl->reset_handlers);
 33          return 0;
 34  }
 35
 36  int amdgpu_reset_init(struct amdgpu_device *adev)
 37  {
 38          int ret = 0;
 39
 40          switch (adev->ip_versions[MP1_HWIP][0]) {
 41          case IP_VERSION(13, 0, 2):
 42                  ret = aldebaran_reset_init(adev);
 43                  break;
 44          case IP_VERSION(11, 0, 7):
 45                  ret = sienna_cichlid_reset_init(adev);
 46                  break;
 47          default:
 48                  break;
 49          }
 50
 51          return ret;
 52  }
 53
 54  int amdgpu_reset_fini(struct amdgpu_device *adev)
 55  {
 56          int ret = 0;
 57
 58          switch (adev->ip_versions[MP1_HWIP][0]) {
 59          case IP_VERSION(13, 0, 2):
 60                  ret = aldebaran_reset_fini(adev);
 61                  break;
 62          case IP_VERSION(11, 0, 7):
 63                  ret = sienna_cichlid_reset_fini(adev);
 64                  break;
 65          default:
 66                  break;
 67          }
 68
 69          return ret;
 70  }
 71
 72  int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
 73                                     struct amdgpu_reset_context *reset_context)
 74  {
 75          struct amdgpu_reset_handler *reset_handler = NULL;
 76
 77          if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
 78                  reset_handler = adev->reset_cntl->get_reset_handler(
 79                          adev->reset_cntl, reset_context);
 80          if (!reset_handler)
 81                  return -ENOSYS;
 82
 83          return reset_handler->prepare_hwcontext(adev->reset_cntl,
 84                                                  reset_context);
 85  }
 86
 87  int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
 88                                 struct amdgpu_reset_context *reset_context)
 89  {
 90          int ret;
 91          struct amdgpu_reset_handler *reset_handler = NULL;
 92
 93          if (adev->reset_cntl)
 94                  reset_handler = adev->reset_cntl->get_reset_handler(
 95                          adev->reset_cntl, reset_context);
 96          if (!reset_handler)
 97                  return -ENOSYS;
 98
 99          ret = reset_handler->perform_reset(adev->reset_cntl, reset_context);
100          if (ret)
101                  return ret;
102
103          return reset_handler->restore_hwcontext(adev->reset_cntl,
104                                                  reset_context);
105  }
106
107
108  void amdgpu_reset_destroy_reset_domain(struct kref *ref)
109  {
110          struct amdgpu_reset_domain *reset_domain = container_of(ref,
111                                                  struct amdgpu_reset_domain,
112                                                  refcount);
113          if (reset_domain->wq)
114                  destroy_workqueue(reset_domain->wq);
115
116          kvfree(reset_domain);
117  }
118
119  struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type,
120                                                               char *wq_name)
121  {
122          struct amdgpu_reset_domain *reset_domain;
123
124          reset_domain = kvzalloc(sizeof(struct amdgpu_reset_domain), GFP_KERNEL);
125          if (!reset_domain) {
126                  DRM_ERROR("Failed to allocate amdgpu_reset_domain!");
127                  return NULL;
128          }
129
130          reset_domain->type = type;
131          kref_init(&reset_domain->refcount);
132
133          reset_domain->wq = create_singlethread_workqueue(wq_name);
134          if (!reset_domain->wq) {
135                  DRM_ERROR("Failed to allocate wq for amdgpu_reset_domain!");
136                  amdgpu_reset_put_reset_domain(reset_domain);
137                  return NULL;
138
139          }
140
141          atomic_set(&reset_domain->in_gpu_reset, 0);
142          atomic_set(&reset_domain->reset_res, 0);
143          rw_init(&reset_domain->sem, "agrs");
144
145          return reset_domain;
146  }
147
148  void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain)
149  {
150          atomic_set(&reset_domain->in_gpu_reset, 1);
151          down_write(&reset_domain->sem);
152  }
153
154
155  void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain)
156  {
157          atomic_set(&reset_domain->in_gpu_reset, 0);
158          up_write(&reset_domain->sem);
159  }
160
161
162
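For context, a minimal usage sketch of the reset-domain API above. The SINGLE_DEVICE domain type and the queue name follow the upstream amdgpu callers and are assumptions here, not taken from this report:

    struct amdgpu_reset_domain *domain;

    /* The constructor's kref_init() leaves one reference held. */
    domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
    if (!domain)
            return -ENOMEM;

    /* ... queue reset work on domain->wq; amdgpu_device_lock_reset_domain()
     * and amdgpu_device_unlock_reset_domain() bracket the actual reset ... */

    /* The final put frees the domain via amdgpu_reset_destroy_reset_domain(). */
    amdgpu_reset_put_reset_domain(domain);

The report also inlines the OpenBSD linux/mm.h compatibility header, which supplies the kvzalloc() and kvfree() used by the code above: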
/* Public domain. */

#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <sys/types.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/stdint.h>
#include <sys/atomic.h>
#include <machine/cpu.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_glue.h>
#include <lib/libkern/libkern.h>        /* for flsl */
#include <linux/shrinker.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>

#define PageHighMem(x)          0

#define page_to_phys(page)      (VM_PAGE_TO_PHYS(page))
#define page_to_pfn(pp)         (VM_PAGE_TO_PHYS(pp) / PAGE_SIZE)
#define pfn_to_page(pfn)        (PHYS_TO_VM_PAGE(ptoa(pfn)))
#define nth_page(page, n)       (&(page)[(n)])
#define offset_in_page(off)     ((vaddr_t)(off) & PAGE_MASK)
#define set_page_dirty(page)    atomic_clearbits_int(&page->pg_flags, PG_CLEAN)

#define PAGE_ALIGN(addr)        (((addr) + PAGE_MASK) & ~PAGE_MASK)

#define PFN_UP(x)       (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)     ((x) << PAGE_SHIFT)

bool is_vmalloc_addr(const void *);

static inline void *
kvmalloc(size_t size, gfp_t flags)
{
        return malloc(size, M_DRM, flags);
}

static inline void *
kvmalloc_array(size_t n, size_t size, int flags)
{
        if (n != 0 && SIZE_MAX / n < size)
                return NULL;
        return malloc(n * size, M_DRM, flags);
}

static inline struct vm_page *
vmalloc_to_page(const void *va)
{
        return uvm_atopg((vaddr_t)va);
}

static inline struct vm_page *
virt_to_page(const void *va)
{
        return uvm_atopg((vaddr_t)va);
}

static inline void *
kvcalloc(size_t n, size_t size, int flags)
{
        return kvmalloc_array(n, size, flags | M_ZERO);
}

static inline void *
kvzalloc(size_t size, int flags)
{
        return malloc(size, M_DRM, flags | M_ZERO);
}

static inline void
kvfree(const void *objp)
{
        free((void *)objp, M_DRM, 0);
}

static inline long
si_mem_available(void)
{
        return uvmexp.free;
}

static inline unsigned int
get_order(size_t size)
{
        return flsl((size - 1) >> PAGE_SHIFT);
}

static inline int
totalram_pages(void)
{
        return uvmexp.npages;
}

#endif
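
Two of these shims benefit from a worked example; this is a sketch assuming 4 KiB pages (PAGE_SHIFT == 12):

    /*
     * get_order() maps a byte count to a power-of-two page order using
     * flsl() (find last set bit):
     *
     *   get_order(4096)  == flsl(4095 >> 12)  == flsl(0) == 0   (1 page)
     *   get_order(8192)  == flsl(8191 >> 12)  == flsl(1) == 1   (2 pages)
     *   get_order(16384) == flsl(16383 >> 12) == flsl(3) == 2   (4 pages)
     *
     * kvmalloc_array() refuses products that would wrap past SIZE_MAX
     * rather than allocating a short buffer:
     *
     *   kvmalloc_array(SIZE_MAX / 2, 4, M_WAITOK) == NULL,
     *   because SIZE_MAX / (SIZE_MAX / 2) == 2, which is < 4.
     */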