File:    dev/pci/drm/radeon/radeon_uvd.c
Warning: line 184, column 12: Access to field 'size' results in a dereference of a null pointer (loaded from field 'uvd_fw')
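The flagged statement is line 184 of the first listing below, where radeon_uvd_init() computes bo_size from rdev->uvd_fw->size. In the OpenBSD linux/firmware.h shim (last listing in this report), request_firmware() sets *fw to NULL and returns a negative errno when loadfirmware() fails, and the analyzer reports a path on which rdev->uvd_fw is still NULL when line 184 is reached. A minimal defensive sketch, not part of the source (the guard placement, message text, and -ENOENT choice are assumptions), would make the intended invariant explicit just before the computation:

	/* Sketch (hypothetical): if every request_firmware() failure above
	 * really returns early, this guard is unreachable; it documents the
	 * invariant the analyzer cannot prove. */
	if (rdev->uvd_fw == NULL) {
		dev_err(rdev->dev, "radeon_uvd: no UVD firmware loaded\n");
		return -ENOENT;
	}
	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
		  RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles;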
   1:	/*
   2:	 * Copyright 2011 Advanced Micro Devices, Inc.
   3:	 * All Rights Reserved.
   4:	 *
   5:	 * Permission is hereby granted, free of charge, to any person obtaining a
   6:	 * copy of this software and associated documentation files (the
   7:	 * "Software"), to deal in the Software without restriction, including
   8:	 * without limitation the rights to use, copy, modify, merge, publish,
   9:	 * distribute, sub license, and/or sell copies of the Software, and to
  10:	 * permit persons to whom the Software is furnished to do so, subject to
  11:	 * the following conditions:
  12:	 *
  13:	 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14:	 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15:	 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16:	 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17:	 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18:	 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19:	 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20:	 *
  21:	 * The above copyright notice and this permission notice (including the
  22:	 * next paragraph) shall be included in all copies or substantial portions
  23:	 * of the Software.
  24:	 *
  25:	 */
  26:	/*
  27:	 * Authors:
  28:	 *    Christian König <deathsimple@vodafone.de>
  29:	 */
  30:
  31:	#include <linux/firmware.h>
  32:	#include <linux/module.h>
  33:
  34:	#include <drm/drm.h>
  35:
  36:	#include "radeon.h"
  37:	#include "radeon_ucode.h"
  38:	#include "r600d.h"
  39:
  40:	/* 1 second timeout */
  41:	#define UVD_IDLE_TIMEOUT_MS	1000
  42:
  43:	/* Firmware Names */
  44:	#define FIRMWARE_R600		"radeon/R600_uvd.bin"
  45:	#define FIRMWARE_RS780		"radeon/RS780_uvd.bin"
  46:	#define FIRMWARE_RV770		"radeon/RV770_uvd.bin"
  47:	#define FIRMWARE_RV710		"radeon/RV710_uvd.bin"
  48:	#define FIRMWARE_CYPRESS	"radeon/CYPRESS_uvd.bin"
  49:	#define FIRMWARE_SUMO		"radeon/SUMO_uvd.bin"
  50:	#define FIRMWARE_TAHITI		"radeon/TAHITI_uvd.bin"
  51:	#define FIRMWARE_BONAIRE_LEGACY	"radeon/BONAIRE_uvd.bin"
  52:	#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
  53:
  54:	MODULE_FIRMWARE(FIRMWARE_R600);
  55:	MODULE_FIRMWARE(FIRMWARE_RS780);
  56:	MODULE_FIRMWARE(FIRMWARE_RV770);
  57:	MODULE_FIRMWARE(FIRMWARE_RV710);
  58:	MODULE_FIRMWARE(FIRMWARE_CYPRESS);
  59:	MODULE_FIRMWARE(FIRMWARE_SUMO);
  60:	MODULE_FIRMWARE(FIRMWARE_TAHITI);
  61:	MODULE_FIRMWARE(FIRMWARE_BONAIRE_LEGACY);
  62:	MODULE_FIRMWARE(FIRMWARE_BONAIRE);
  63:
  64:	static void radeon_uvd_idle_work_handler(struct work_struct *work);
  65:
  66:	int radeon_uvd_init(struct radeon_device *rdev)
  67:	{
  68:		unsigned long bo_size;
  69:		const char *fw_name = NULL, *legacy_fw_name = NULL;
  70:		int i, r;
  71:
  72:		INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
  73:
  74:		switch (rdev->family) {
  75:		case CHIP_RV610:
  76:		case CHIP_RV630:
  77:		case CHIP_RV670:
  78:		case CHIP_RV620:
  79:		case CHIP_RV635:
  80:			legacy_fw_name = FIRMWARE_R600;
  81:			break;
  82:
  83:		case CHIP_RS780:
  84:		case CHIP_RS880:
  85:			legacy_fw_name = FIRMWARE_RS780;
  86:			break;
  87:
  88:		case CHIP_RV770:
  89:			legacy_fw_name = FIRMWARE_RV770;
  90:			break;
  91:
  92:		case CHIP_RV710:
  93:		case CHIP_RV730:
  94:		case CHIP_RV740:
  95:			legacy_fw_name = FIRMWARE_RV710;
  96:			break;
  97:
  98:		case CHIP_CYPRESS:
  99:		case CHIP_HEMLOCK:
 100:		case CHIP_JUNIPER:
 101:		case CHIP_REDWOOD:
 102:		case CHIP_CEDAR:
 103:			legacy_fw_name = FIRMWARE_CYPRESS;
 104:			break;
 105:
 106:		case CHIP_SUMO:
 107:		case CHIP_SUMO2:
 108:		case CHIP_PALM:
 109:		case CHIP_CAYMAN:
 110:		case CHIP_BARTS:
 111:		case CHIP_TURKS:
 112:		case CHIP_CAICOS:
 113:			legacy_fw_name = FIRMWARE_SUMO;
 114:			break;
 115:
 116:		case CHIP_TAHITI:
 117:		case CHIP_VERDE:
 118:		case CHIP_PITCAIRN:
 119:		case CHIP_ARUBA:
 120:		case CHIP_OLAND:
 121:			legacy_fw_name = FIRMWARE_TAHITI;
 122:			break;
 123:
 124:		case CHIP_BONAIRE:
 125:		case CHIP_KABINI:
 126:		case CHIP_KAVERI:
 127:		case CHIP_HAWAII:
 128:		case CHIP_MULLINS:
 129:			legacy_fw_name = FIRMWARE_BONAIRE_LEGACY;
 130:			fw_name = FIRMWARE_BONAIRE;
 131:			break;
 132:
 133:		default:
 134:			return -EINVAL;
 135:		}
 136:
 137:		rdev->uvd.fw_header_present = false;
 138:		rdev->uvd.max_handles = RADEON_DEFAULT_UVD_HANDLES;
 139:		if (fw_name) {
 140:			/* Let's try to load the newer firmware first */
 141:			r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
 142:			if (r) {
 143:				dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
 144:					fw_name);
 145:			} else {
 146:				struct common_firmware_header *hdr = (void *)rdev->uvd_fw->data;
 147:				unsigned version_major, version_minor, family_id;
 148:
 149:				r = radeon_ucode_validate(rdev->uvd_fw);
 150:				if (r)
 151:					return r;
 152:
 153:				rdev->uvd.fw_header_present = true;
 154:
 155:				family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
 156:				version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
 157:				version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
 158:				DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
 159:					 version_major, version_minor, family_id);
 160:
 161:				/*
 162:				 * Limit the number of UVD handles depending on
 163:				 * microcode major and minor versions.
 164:				 */
 165:				if ((version_major >= 0x01) && (version_minor >= 0x37))
 166:					rdev->uvd.max_handles = RADEON_MAX_UVD_HANDLES;
 167:			}
 168:		}
 169:
 170:		/*
 171:	 	 * In case there is only legacy firmware, or we encounter an error
 172:	 	 * while loading the new firmware, we fall back to loading the legacy
 173:	 	 * firmware now.
 174:	 	 */
 175:		if (!fw_name || r) {
 176:			r = request_firmware(&rdev->uvd_fw, legacy_fw_name, rdev->dev);
 177:			if (r) {
 178:				dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
 179:					legacy_fw_name);
 180:				return r;
 181:			}
 182:		}
 183:
 184:		bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
 185:			  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
 186:			  RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles;
 187:		r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
 188:				     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
 189:				     NULL, &rdev->uvd.vcpu_bo);
 190:		if (r) {
 191:			dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
 192:			return r;
 193:		}
 194:
 195:		r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
 196:		if (r) {
 197:			radeon_bo_unref(&rdev->uvd.vcpu_bo);
 198:			dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
 199:			return r;
 200:		}
 201:
 202:		r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
 203:				  &rdev->uvd.gpu_addr);
 204:		if (r) {
 205:			radeon_bo_unreserve(rdev->uvd.vcpu_bo);
 206:			radeon_bo_unref(&rdev->uvd.vcpu_bo);
 207:			dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
 208:			return r;
 209:		}
 210:
 211:		r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
 212:		if (r) {
 213:			dev_err(rdev->dev, "(%d) UVD map failed\n", r);
 214:			return r;
 215:		}
 216:
 217:		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
 218:
 219:		for (i = 0; i < rdev->uvd.max_handles; ++i) {
 220:			atomic_set(&rdev->uvd.handles[i], 0);
 221:			rdev->uvd.filp[i] = NULL;
 222:			rdev->uvd.img_size[i] = 0;
 223:		}
 224:
 225:		return 0;
 226:	}
 227:
 228:	void radeon_uvd_fini(struct radeon_device *rdev)
 229:	{
 230:		int r;
 231:
 232:		if (rdev->uvd.vcpu_bo == NULL)
 233:			return;
 234:
 235:		r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
 236:		if (!r) {
 237:			radeon_bo_kunmap(rdev->uvd.vcpu_bo);
 238:			radeon_bo_unpin(rdev->uvd.vcpu_bo);
 239:			radeon_bo_unreserve(rdev->uvd.vcpu_bo);
 240:		}
 241:
 242:		radeon_bo_unref(&rdev->uvd.vcpu_bo);
 243:
 244:		radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);
 245:
 246:		release_firmware(rdev->uvd_fw);
 247:	}
 248:
 249:	int radeon_uvd_suspend(struct radeon_device *rdev)
 250:	{
 251:		int i, r;
 252:
 253:		if (rdev->uvd.vcpu_bo == NULL)
 254:			return 0;
 255:
 256:		for (i = 0; i < rdev->uvd.max_handles; ++i) {
 257:			uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
 258:			if (handle != 0) {
 259:				struct radeon_fence *fence;
 260:
 261:				radeon_uvd_note_usage(rdev);
 262:
 263:				r = radeon_uvd_get_destroy_msg(rdev,
 264:					R600_RING_TYPE_UVD_INDEX, handle, &fence);
 265:				if (r) {
 266:					DRM_ERROR("Error destroying UVD (%d)!\n", r);
 267:					continue;
 268:				}
 269:
 270:				radeon_fence_wait(fence, false);
 271:				radeon_fence_unref(&fence);
 272:
 273:				rdev->uvd.filp[i] = NULL;
 274:				atomic_set(&rdev->uvd.handles[i], 0);
 275:			}
 276:		}
 277:
 278:		return 0;
 279:	}
 280:
 281:	int radeon_uvd_resume(struct radeon_device *rdev)
 282:	{
 283:		unsigned size;
 284:		void *ptr;
 285:
 286:		if (rdev->uvd.vcpu_bo == NULL)
 287:			return -EINVAL;
 288:
 289:		memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
 290:
 291:		size = radeon_bo_size(rdev->uvd.vcpu_bo);
 292:		size -= rdev->uvd_fw->size;
 293:
 294:		ptr = rdev->uvd.cpu_addr;
 295:		ptr += rdev->uvd_fw->size;
 296:
 297:		memset_io((void __iomem *)ptr, 0, size);
 298:
 299:		return 0;
 300:	}
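	/*
	 * Note (added, assumption): the resume path re-uploads the firmware
	 * image and re-zeroes the rest of the VCPU buffer, presumably because
	 * VRAM contents cannot be relied upon to survive a suspend cycle.
	 */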
 301:
 302:	void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
 303:					       uint32_t allowed_domains)
 304:	{
 305:		int i;
 306:
 307:		for (i = 0; i < rbo->placement.num_placement; ++i) {
 308:			rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
 309:			rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
 310:		}
 311:
 312:		/* If it must be in VRAM it must be in the first segment as well */
 313:		if (allowed_domains == RADEON_GEM_DOMAIN_VRAM)
 314:			return;
 315:
 316:		/* abort if we already have more than one placement */
 317:		if (rbo->placement.num_placement > 1)
 318:			return;
 319:
 320:		/* add another 256MB segment */
 321:		rbo->placements[1] = rbo->placements[0];
 322:		rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
 323:		rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
 324:		rbo->placement.num_placement++;
 325:		rbo->placement.num_busy_placement++;
 326:	}
 327:
 328:	void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
 329:	{
 330:		int i, r;
 331:		for (i = 0; i < rdev->uvd.max_handles; ++i) {
 332:			uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
 333:			if (handle != 0 && rdev->uvd.filp[i] == filp) {
 334:				struct radeon_fence *fence;
 335:
 336:				radeon_uvd_note_usage(rdev);
 337:
 338:				r = radeon_uvd_get_destroy_msg(rdev,
 339:					R600_RING_TYPE_UVD_INDEX, handle, &fence);
 340:				if (r) {
 341:					DRM_ERROR("Error destroying UVD (%d)!\n", r);
 342:					continue;
 343:				}
 344:
 345:				radeon_fence_wait(fence, false);
 346:				radeon_fence_unref(&fence);
 347:
 348:				rdev->uvd.filp[i] = NULL;
 349:				atomic_set(&rdev->uvd.handles[i], 0);
 350:			}
 351:		}
 352:	}
 353:
 354:	static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
 355:	{
 356:		unsigned stream_type = msg[4];
 357:		unsigned width = msg[6];
 358:		unsigned height = msg[7];
 359:		unsigned dpb_size = msg[9];
 360:		unsigned pitch = msg[28];
 361:
 362:		unsigned width_in_mb = width / 16;
 363:		unsigned height_in_mb = roundup2(height / 16, 2);
 364:
 365:		unsigned image_size, tmp, min_dpb_size;
 366:
 367:		image_size = width * height;
 368:		image_size += image_size / 2;
 369:		image_size = roundup2(image_size, 1024);
 370:
 371:		switch (stream_type) {
 372:		case 0: /* H264 */
 373:
 374:			/* reference picture buffer */
 375:			min_dpb_size = image_size * 17;
 376:
 377:			/* macroblock context buffer */
 378:			min_dpb_size += width_in_mb * height_in_mb * 17 * 192;
 379:
 380:			/* IT surface buffer */
 381:			min_dpb_size += width_in_mb * height_in_mb * 32;
 382:			break;
 383:
 384:		case 1: /* VC1 */
 385:
 386:			/* reference picture buffer */
 387:			min_dpb_size = image_size * 3;
 388:
 389:			/* CONTEXT_BUFFER */
 390:			min_dpb_size += width_in_mb * height_in_mb * 128;
 391:
 392:			/* IT surface buffer */
 393:			min_dpb_size += width_in_mb * 64;
 394:
 395:			/* DB surface buffer */
 396:			min_dpb_size += width_in_mb * 128;
 397:
 398:			/* BP */
 399:			tmp = max(width_in_mb, height_in_mb);
 400:			min_dpb_size += roundup2(tmp * 7 * 16, 64);
 401:			break;
 402:
 403:		case 3: /* MPEG2 */
 404:
 405:			/* reference picture buffer */
 406:			min_dpb_size = image_size * 3;
 407:			break;
 408:
 409:		case 4: /* MPEG4 */
 410:
 411:			/* reference picture buffer */
 412:			min_dpb_size = image_size * 3;
 413:
 414:			/* CM */
 415:			min_dpb_size += width_in_mb * height_in_mb * 64;
 416:
 417:			/* IT surface buffer */
 418:			min_dpb_size += roundup2(width_in_mb * height_in_mb * 32, 64);
 419:			break;
 420:
 421:		default:
 422:			DRM_ERROR("UVD codec not handled %d!\n", stream_type);
 423:			return -EINVAL;
 424:		}
 425:
 426:		if (width > pitch) {
 427:			DRM_ERROR("Invalid UVD decoding target pitch!\n");
 428:			return -EINVAL;
 429:		}
 430:
 431:		if (dpb_size < min_dpb_size) {
 432:			DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
 433:				  dpb_size, min_dpb_size);
 434:			return -EINVAL;
 435:		}
 436:
 437:		buf_sizes[0x1] = dpb_size;
 438:		buf_sizes[0x2] = image_size;
 439:		return 0;
 440:	}
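	/*
	 * Worked example (added, values hypothetical): a 1920x1088 H264 stream
	 * gives width_in_mb = 120 and height_in_mb = 68 (1088/16, already even).
	 * image_size = 1920*1088*3/2 = 3133440, which is already 1024-aligned,
	 * so min_dpb_size = 3133440*17      (reference pictures)
	 *                 + 120*68*17*192   (macroblock context)
	 *                 + 120*68*32       (IT surface)
	 *                 = 80163840 bytes, roughly 76 MiB.
	 */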
 441:
 442:	static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
 443:					     unsigned stream_type)
 444:	{
 445:		switch (stream_type) {
 446:		case 0: /* H264 */
 447:		case 1: /* VC1 */
 448:			/* always supported */
 449:			return 0;
 450:
 451:		case 3: /* MPEG2 */
 452:		case 4: /* MPEG4 */
 453:			/* only since UVD 3 */
 454:			if (p->rdev->family >= CHIP_PALM)
 455:				return 0;
 456:
 457:			fallthrough;
 458:		default:
 459:			DRM_ERROR("UVD codec not supported by hardware %d!\n",
 460:				  stream_type);
 461:			return -EINVAL;
 462:		}
 463:	}
 464:
 465:	static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 466:				     unsigned offset, unsigned buf_sizes[])
 467:	{
 468:		int32_t *msg, msg_type, handle;
 469:		unsigned img_size = 0;
 470:		struct dma_fence *f;
 471:		void *ptr;
 472:
 473:		int i, r;
 474:
 475:		if (offset & 0x3F) {
 476:			DRM_ERROR("UVD messages must be 64 byte aligned!\n");
 477:			return -EINVAL;
 478:		}
 479:
 480:		f = dma_resv_get_excl(bo->tbo.base.resv);
 481:		if (f) {
 482:			r = radeon_fence_wait((struct radeon_fence *)f, false);
 483:			if (r) {
 484:				DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
 485:				return r;
 486:			}
 487:		}
 488:
 489:		r = radeon_bo_kmap(bo, &ptr);
 490:		if (r) {
 491:			DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
 492:			return r;
 493:		}
 494:
 495:		msg = ptr + offset;
 496:
 497:		msg_type = msg[1];
 498:		handle = msg[2];
 499:
 500:		if (handle == 0) {
 501:			DRM_ERROR("Invalid UVD handle!\n");
 502:			return -EINVAL;
 503:		}
 504:
 505:		switch (msg_type) {
 506:		case 0:
 507:			/* it's a create msg, calc image size (width * height) */
 508:			img_size = msg[7] * msg[8];
 509:
 510:			r = radeon_uvd_validate_codec(p, msg[4]);
 511:			radeon_bo_kunmap(bo);
 512:			if (r)
 513:				return r;
 514:
 515:			/* try to alloc a new handle */
 516:			for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
 517:				if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
 518:					DRM_ERROR("Handle 0x%x already in use!\n", handle);
 519:					return -EINVAL;
 520:				}
 521:
 522:				if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
 523:					p->rdev->uvd.filp[i] = p->filp;
 524:					p->rdev->uvd.img_size[i] = img_size;
 525:					return 0;
 526:				}
 527:			}
 528:
 529:			DRM_ERROR("No more free UVD handles!\n");
 530:			return -EINVAL;
 531:
 532:		case 1:
 533:			/* it's a decode msg, validate codec and calc buffer sizes */
 534:			r = radeon_uvd_validate_codec(p, msg[4]);
 535:			if (!r)
 536:				r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
 537:			radeon_bo_kunmap(bo);
 538:			if (r)
 539:				return r;
 540:
 541:			/* validate the handle */
 542:			for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
 543:				if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
 544:					if (p->rdev->uvd.filp[i] != p->filp) {
 545:						DRM_ERROR("UVD handle collision detected!\n");
 546:						return -EINVAL;
 547:					}
 548:					return 0;
 549:				}
 550:			}
 551:
 552:			DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
 553:			return -ENOENT;
 554:
 555:		case 2:
 556:			/* it's a destroy msg, free the handle */
 557:			for (i = 0; i < p->rdev->uvd.max_handles; ++i)
 558:				atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
 559:			radeon_bo_kunmap(bo);
 560:			return 0;
 561:
 562:		default:
 563:
 564:			DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
 565:			return -EINVAL;
 566:		}
 567:
 568:		BUG();
 569:		return -EINVAL;
 570:	}
 571:
 572:	static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 573:				       int data0, int data1,
 574:				       unsigned buf_sizes[], bool *has_msg_cmd)
 575:	{
 576:		struct radeon_cs_chunk *relocs_chunk;
 577:		struct radeon_bo_list *reloc;
 578:		unsigned idx, cmd, offset;
 579:		uint64_t start, end;
 580:		int r;
 581:
 582:		relocs_chunk = p->chunk_relocs;
 583:		offset = radeon_get_ib_value(p, data0);
 584:		idx = radeon_get_ib_value(p, data1);
 585:		if (idx >= relocs_chunk->length_dw) {
 586:			DRM_ERROR("Relocs at %d after relocations chunk end %d!\n",
 587:				  idx, relocs_chunk->length_dw);
 588:			return -EINVAL;
 589:		}
 590:
 591:		reloc = &p->relocs[(idx / 4)];
 592:		start = reloc->gpu_offset;
 593:		end = start + radeon_bo_size(reloc->robj);
 594:		start += offset;
 595:
 596:		p->ib.ptr[data0] = start & 0xFFFFFFFF;
 597:		p->ib.ptr[data1] = start >> 32;
 598:
 599:		cmd = radeon_get_ib_value(p, p->idx) >> 1;
 600:
 601:		if (cmd < 0x4) {
 602:			if (end <= start) {
 603:				DRM_ERROR("invalid reloc offset %X!\n", offset);
 604:				return -EINVAL;
 605:			}
 606:			if ((end - start) < buf_sizes[cmd]) {
 607:				DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
 608:					  (unsigned)(end - start), buf_sizes[cmd]);
 609:				return -EINVAL;
 610:			}
 611:
 612:		} else if (cmd != 0x100) {
 613:			DRM_ERROR("invalid UVD command %X!\n", cmd);
 614:			return -EINVAL;
 615:		}
 616:
 617:		if ((start >> 28) != ((end - 1) >> 28)) {
 618:			DRM_ERROR("reloc %llX-%llX crossing 256MB boundary!\n",
 619:				  start, end);
 620:			return -EINVAL;
 621:		}
 622:
 623:		/* TODO: is this still necessary on NI+ ? */
 624:		if ((cmd == 0 || cmd == 0x3) &&
 625:		    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
 626:			DRM_ERROR("msg/fb buffer %llX-%llX out of 256MB segment!\n",
 627:				  start, end);
 628:			return -EINVAL;
 629:		}
 630:
 631:		if (cmd == 0) {
 632:			if (*has_msg_cmd) {
 633:				DRM_ERROR("More than one message in a UVD-IB!\n");
 634:				return -EINVAL;
 635:			}
 636:			*has_msg_cmd = true;
 637:			r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
 638:			if (r)
 639:				return r;
 640:		} else if (!*has_msg_cmd) {
 641:			DRM_ERROR("Message needed before other commands are sent!\n");
 642:			return -EINVAL;
 643:		}
 644:
 645:		return 0;
 646:	}
 647:
 648:	static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
 649:				     struct radeon_cs_packet *pkt,
 650:				     int *data0, int *data1,
 651:				     unsigned buf_sizes[],
 652:				     bool *has_msg_cmd)
 653:	{
 654:		int i, r;
 655:
 656:		p->idx++;
 657:		for (i = 0; i <= pkt->count; ++i) {
 658:			switch (pkt->reg + i*4) {
 659:			case UVD_GPCOM_VCPU_DATA0:
 660:				*data0 = p->idx;
 661:				break;
 662:			case UVD_GPCOM_VCPU_DATA1:
 663:				*data1 = p->idx;
 664:				break;
 665:			case UVD_GPCOM_VCPU_CMD:
 666:				r = radeon_uvd_cs_reloc(p, *data0, *data1,
 667:							buf_sizes, has_msg_cmd);
 668:				if (r)
 669:					return r;
 670:				break;
 671:			case UVD_ENGINE_CNTL:
 672:			case UVD_NO_OP:
 673:				break;
 674:			default:
 675:				DRM_ERROR("Invalid reg 0x%X!\n",
 676:					  pkt->reg + i*4);
 677:				return -EINVAL;
 678:			}
 679:			p->idx++;
 680:		}
 681:		return 0;
 682:	}
 683:
 684:	int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
 685:	{
 686:		struct radeon_cs_packet pkt;
 687:		int r, data0 = 0, data1 = 0;
 688:
 689:		/* does the IB have a msg command */
 690:		bool has_msg_cmd = false;
 691:
 692:		/* minimum buffer sizes */
 693:		unsigned buf_sizes[] = {
 694:			[0x00000000] = 2048,
 695:			[0x00000001] = 32 * 1024 * 1024,
 696:			[0x00000002] = 2048 * 1152 * 3,
 697:			[0x00000003] = 2048,
 698:		};
 699:
 700:		if (p->chunk_ib->length_dw % 16) {
 701:			DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
 702:				  p->chunk_ib->length_dw);
 703:			return -EINVAL;
 704:		}
 705:
 706:		if (p->chunk_relocs == NULL) {
 707:			DRM_ERROR("No relocation chunk!\n");
 708:			return -EINVAL;
 709:		}
 710:
 711:
 712:		do {
 713:			r = radeon_cs_packet_parse(p, &pkt, p->idx);
 714:			if (r)
 715:				return r;
 716:			switch (pkt.type) {
 717:			case RADEON_PACKET_TYPE0:
 718:				r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
 719:						      buf_sizes, &has_msg_cmd);
 720:				if (r)
 721:					return r;
 722:				break;
 723:			case RADEON_PACKET_TYPE2:
 724:				p->idx += pkt.count + 2;
 725:				break;
 726:			default:
 727:				DRM_ERROR("Unknown packet type %d!\n", pkt.type);
 728:				return -EINVAL;
 729:			}
 730:		} while (p->idx < p->chunk_ib->length_dw);
 731:
 732:		if (!has_msg_cmd) {
 733:			DRM_ERROR("UVD-IBs need a msg command!\n");
 734:			return -EINVAL;
 735:		}
 736:
 737:		return 0;
 738:	}
 739:
 740:	static int radeon_uvd_send_msg(struct radeon_device *rdev,
 741:				       int ring, uint64_t addr,
 742:				       struct radeon_fence **fence)
 743:	{
 744:		struct radeon_ib ib;
 745:		int i, r;
 746:
 747:		r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
 748:		if (r)
 749:			return r;
 750:
 751:		ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
 752:		ib.ptr[1] = addr;
 753:		ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
 754:		ib.ptr[3] = addr >> 32;
 755:		ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
 756:		ib.ptr[5] = 0;
 757:		for (i = 6; i < 16; i += 2) {
 758:			ib.ptr[i] = PACKET0(UVD_NO_OP, 0);
 759:			ib.ptr[i+1] = 0;
 760:		}
 761:		ib.length_dw = 16;
 762:
 763:		r = radeon_ib_schedule(rdev, &ib, NULL, false);
 764:
 765:		if (fence)
 766:			*fence = radeon_fence_ref(ib.fence);
 767:
 768:		radeon_ib_free(rdev, &ib);
 769:		return r;
 770:	}
 771:
 772:	/*
 773:	 * multiple fence commands without any stream commands in between can
 774:	 * crash the vcpu so just try to emit a dummy create/destroy msg to
 775:	 * avoid this
 776:	 */
 777:	int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
 778:				      uint32_t handle, struct radeon_fence **fence)
 779:	{
 780:		/* we use the last page of the vcpu bo for the UVD message */
 781:		uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
 782:			RADEON_GPU_PAGE_SIZE;
 783:
 784:		uint32_t *msg = rdev->uvd.cpu_addr + offs;
 785:		uint64_t addr = rdev->uvd.gpu_addr + offs;
 786:
 787:		int r, i;
 788:
 789:		r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
 790:		if (r)
 791:			return r;
 792:
 793:		/* stitch together an UVD create msg */
 794:		msg[0] = cpu_to_le32(0x00000de4);
 795:		msg[1] = cpu_to_le32(0x00000000);
 796:		msg[2] = cpu_to_le32(handle);
 797:		msg[3] = cpu_to_le32(0x00000000);
 798:		msg[4] = cpu_to_le32(0x00000000);
 799:		msg[5] = cpu_to_le32(0x00000000);
 800:		msg[6] = cpu_to_le32(0x00000000);
 801:		msg[7] = cpu_to_le32(0x00000780);
 802:		msg[8] = cpu_to_le32(0x00000440);
 803:		msg[9] = cpu_to_le32(0x00000000);
 804:		msg[10] = cpu_to_le32(0x01b37000);
 805:		for (i = 11; i < 1024; ++i)
 806:			msg[i] = cpu_to_le32(0x0);
 807:
 808:		r = radeon_uvd_send_msg(rdev, ring, addr, fence);
 809:		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
 810:		return r;
 811:	}
 812:
 813:	int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
 814:				       uint32_t handle, struct radeon_fence **fence)
 815:	{
 816:		/* we use the last page of the vcpu bo for the UVD message */
 817:		uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
 818:			RADEON_GPU_PAGE_SIZE;
 819:
 820:		uint32_t *msg = rdev->uvd.cpu_addr + offs;
 821:		uint64_t addr = rdev->uvd.gpu_addr + offs;
 822:
 823:		int r, i;
 824:
 825:		r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
 826:		if (r)
 827:			return r;
 828:
 829:		/* stitch together an UVD destroy msg */
 830:		msg[0] = cpu_to_le32(0x00000de4);
 831:		msg[1] = cpu_to_le32(0x00000002);
 832:		msg[2] = cpu_to_le32(handle);
 833:		msg[3] = cpu_to_le32(0x00000000);
 834:		for (i = 4; i < 1024; ++i)
 835:			msg[i] = cpu_to_le32(0x0);
 836:
 837:		r = radeon_uvd_send_msg(rdev, ring, addr, fence);
 838:		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
 839:		return r;
 840:	}
 841:
 842:	/**
 843:	 * radeon_uvd_count_handles - count number of open streams
 844:	 *
 845:	 * @rdev: radeon_device pointer
 846:	 * @sd: number of SD streams
 847:	 * @hd: number of HD streams
 848:	 *
 849:	 * Count the number of open SD/HD streams as a hint for power management
 850:	 */
 851:	static void radeon_uvd_count_handles(struct radeon_device *rdev,
 852:					     unsigned *sd, unsigned *hd)
 853:	{
 854:		unsigned i;
 855:
 856:		*sd = 0;
 857:		*hd = 0;
 858:
 859:		for (i = 0; i < rdev->uvd.max_handles; ++i) {
 860:			if (!atomic_read(&rdev->uvd.handles[i]))
 861:				continue;
 862:
 863:			if (rdev->uvd.img_size[i] >= 720*576)
 864:				++(*hd);
 865:			else
 866:				++(*sd);
 867:		}
 868:	}
 869:
 870:	static void radeon_uvd_idle_work_handler(struct work_struct *work)
 871:	{
 872:		struct radeon_device *rdev =
 873:			container_of(work, struct radeon_device, uvd.idle_work.work);
 874:
 875:		if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
 876:			if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
 877:				radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd,
 878:							 &rdev->pm.dpm.hd);
 879:				radeon_dpm_enable_uvd(rdev, false);
 880:			} else {
 881:				radeon_set_uvd_clocks(rdev, 0, 0);
 882:			}
 883:		} else {
 884:			schedule_delayed_work(&rdev->uvd.idle_work,
 885:					      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
 886:		}
 887:	}
 888:
 889:	void radeon_uvd_note_usage(struct radeon_device *rdev)
 890:	{
 891:		bool streams_changed = false;
 892:		bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
 893:		set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
 894:						    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
 895:
 896:		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
 897:			unsigned hd = 0, sd = 0;
 898:			radeon_uvd_count_handles(rdev, &sd, &hd);
 899:			if ((rdev->pm.dpm.sd != sd) ||
 900:			    (rdev->pm.dpm.hd != hd)) {
 901:				rdev->pm.dpm.sd = sd;
 902:				rdev->pm.dpm.hd = hd;
 903:				/* disable this for now */
 904:				/*streams_changed = true;*/
 905:			}
 906:		}
 907:
 908:		if (set_clocks || streams_changed) {
 909:			if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
 910:				radeon_dpm_enable_uvd(rdev, true);
 911:			} else {
 912:				radeon_set_uvd_clocks(rdev, 53300, 40000);
 913:			}
 914:		}
 915:	}
 916:
 917:	static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
 918:						      unsigned target_freq,
 919:						      unsigned pd_min,
 920:						      unsigned pd_even)
 921:	{
 922:		unsigned post_div = vco_freq / target_freq;
 923:
 924:		/* adjust to post divider minimum value */
 925:		if (post_div < pd_min)
 926:			post_div = pd_min;
 927:
 928:		/* we always need a frequency less than or equal the target */
 929:		if ((vco_freq / post_div) > target_freq)
 930:			post_div += 1;
 931:
 932:		/* post dividers above a certain value must be even */
 933:		if (post_div > pd_even && post_div % 2)
 934:			post_div += 1;
 935:
 936:		return post_div;
 937:	}
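	/*
	 * Worked example (added, values hypothetical): vco_freq = 144000,
	 * target_freq = 40000, pd_min = 2, pd_even = 4. The initial quotient is
	 * 144000/40000 = 3, but 144000/3 = 48000 exceeds the target, so
	 * post_div is bumped to 4; 4 is not above pd_even (and is even anyway),
	 * giving 144000/4 = 36000 <= 40000 as required.
	 */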
 938:
 939:	/**
 940:	 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
 941:	 *
 942:	 * @rdev: radeon_device pointer
 943:	 * @vclk: wanted VCLK
 944:	 * @dclk: wanted DCLK
 945:	 * @vco_min: minimum VCO frequency
 946:	 * @vco_max: maximum VCO frequency
 947:	 * @fb_factor: factor to multiply vco freq with
 948:	 * @fb_mask: limit and bitmask for feedback divider
 949:	 * @pd_min: post divider minimum
 950:	 * @pd_max: post divider maximum
 951:	 * @pd_even: post divider must be even above this value
 952:	 * @optimal_fb_div: resulting feedback divider
 953:	 * @optimal_vclk_div: resulting vclk post divider
 954:	 * @optimal_dclk_div: resulting dclk post divider
 955:	 *
 956:	 * Calculate dividers for UVDs UPLL (R6xx-SI, except APUs).
 957:	 * Returns zero on success, -EINVAL on error.
 958:	 */
 959:	int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
 960:					  unsigned vclk, unsigned dclk,
 961:					  unsigned vco_min, unsigned vco_max,
 962:					  unsigned fb_factor, unsigned fb_mask,
 963:					  unsigned pd_min, unsigned pd_max,
 964:					  unsigned pd_even,
 965:					  unsigned *optimal_fb_div,
 966:					  unsigned *optimal_vclk_div,
 967:					  unsigned *optimal_dclk_div)
 968:	{
 969:		unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;
 970:
 971:		/* start off with something large */
 972:		unsigned optimal_score = ~0;
 973:
 974:		/* loop through vco from low to high */
 975:		vco_min = max(max(vco_min, vclk), dclk);
 976:		for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {
 977:
 978:			uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
 979:			unsigned vclk_div, dclk_div, score;
 980:
 981:			do_div(fb_div, ref_freq);
 982:
 983:			/* fb div out of range ? */
 984:			if (fb_div > fb_mask)
 985:				break; /* it can only get worse */
 986:
 987:			fb_div &= fb_mask;
 988:
 989:			/* calc vclk divider with current vco freq */
 990:			vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
 991:								 pd_min, pd_even);
 992:			if (vclk_div > pd_max)
 993:				break; /* vco is too big, it has to stop */
 994:
 995:			/* calc dclk divider with current vco freq */
 996:			dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
 997:								 pd_min, pd_even);
 998:			if (dclk_div > pd_max)
 999:				break; /* vco is too big, it has to stop */
1000:
1001:			/* calc score with current vco freq */
1002:			score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);
1003:
1004:			/* determine if this vco setting is better than current optimal settings */
1005:			if (score < optimal_score) {
1006:				*optimal_fb_div = fb_div;
1007:				*optimal_vclk_div = vclk_div;
1008:				*optimal_dclk_div = dclk_div;
1009:				optimal_score = score;
1010:				if (optimal_score == 0)
1011:					break; /* it can't get better than this */
1012:			}
1013:		}
1014:
1015:		/* did we find a valid setup ? */
1016:		if (optimal_score == ~0)
1017:			return -EINVAL;
1018:
1019:		return 0;
1020:	}
1021:
1022:	int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
1023:					unsigned cg_upll_func_cntl)
1024:	{
1025:		unsigned i;
1026:
1027:		/* make sure UPLL_CTLREQ is deasserted */
1028:		WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
1029:
1030:		mdelay(10);
1031:
1032:		/* assert UPLL_CTLREQ */
1033:		WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
1034:
1035:		/* wait for CTLACK and CTLACK2 to get asserted */
1036:		for (i = 0; i < 100; ++i) {
1037:			uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
1038:			if ((RREG32(cg_upll_func_cntl) & mask) == mask)
1039:				break;
1040:			mdelay(10);
1041:		}
1042:
1043:		/* deassert UPLL_CTLREQ */
1044:		WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
1045:
1046:		if (i == 100) {
1047:			DRM_ERROR("Timeout setting UVD clocks!\n");
1048:			return -ETIMEDOUT;
1049:		}
1050:
1051:		return 0;
1052:	}
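(The two listings that follow are the OpenBSD Linux-compatibility shims pulled into this report: linux/workqueue.h, which supplies INIT_DELAYED_WORK() and the delayed-work helpers used above, and linux/firmware.h, whose request_firmware() is where uvd_fw can be left NULL. Each restarts its own line numbering at 1.)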
   1:	/*	$OpenBSD: workqueue.h,v 1.6 2021/08/14 03:12:51 jsg Exp $	*/
   2:	/*
   3:	 * Copyright (c) 2015 Mark Kettenis
   4:	 *
   5:	 * Permission to use, copy, modify, and distribute this software for any
   6:	 * purpose with or without fee is hereby granted, provided that the above
   7:	 * copyright notice and this permission notice appear in all copies.
   8:	 *
   9:	 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10:	 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11:	 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12:	 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13:	 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14:	 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15:	 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16:	 */
  17:
  18:	#ifndef _LINUX_WORKQUEUE_H
  19:	#define _LINUX_WORKQUEUE_H
  20:
  21:	#include <sys/param.h>
  22:	#include <sys/systm.h>
  23:	#include <sys/task.h>
  24:	#include <sys/timeout.h>
  25:	#include <linux/bitops.h>
  26:	#include <linux/atomic.h>
  27:	#include <linux/rcupdate.h>
  28:	#include <linux/kernel.h>
  29:	#include <linux/lockdep.h>
  30:	#include <linux/timer.h>
  31:
  32:	struct workqueue_struct;
  33:
  34:	extern struct workqueue_struct *system_wq;
  35:	extern struct workqueue_struct *system_highpri_wq;
  36:	extern struct workqueue_struct *system_unbound_wq;
  37:	extern struct workqueue_struct *system_long_wq;
  38:
  39:	#define WQ_HIGHPRI	1
  40:	#define WQ_FREEZABLE	2
  41:	#define WQ_UNBOUND	4
  42:
  43:	#define WQ_UNBOUND_MAX_ACTIVE	4	/* matches nthreads in drm_linux.c */
  44:
  45:	static inline struct workqueue_struct *
  46:	alloc_workqueue(const char *name, int flags, int max_active)
  47:	{
  48:		struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
  49:		return (struct workqueue_struct *)tq;
  50:	}
  51:
  52:	static inline struct workqueue_struct *
  53:	alloc_ordered_workqueue(const char *name, int flags)
  54:	{
  55:		struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
  56:		return (struct workqueue_struct *)tq;
  57:	}
  58:
  59:	static inline struct workqueue_struct *
  60:	create_singlethread_workqueue(const char *name)
  61:	{
  62:		struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
  63:		return (struct workqueue_struct *)tq;
  64:	}
  65:
  66:	static inline void
  67:	destroy_workqueue(struct workqueue_struct *wq)
  68:	{
  69:		taskq_destroy((struct taskq *)wq);
  70:	}
  71:
  72:	struct work_struct {
  73:		struct task task;
  74:		struct taskq *tq;
  75:	};
  76:
  77:	typedef void (*work_func_t)(struct work_struct *);
  78:
  79:	static inline void
  80:	INIT_WORK(struct work_struct *work, work_func_t func)
  81:	{
  82:		work->tq = NULL;
  83:		task_set(&work->task, (void (*)(void *))func, work);
  84:	}
  85:
  86:	#define INIT_WORK_ONSTACK(x, y)	INIT_WORK((x), (y))
  87:
  88:	static inline bool
  89:	queue_work(struct workqueue_struct *wq, struct work_struct *work)
  90:	{
  91:		work->tq = (struct taskq *)wq;
  92:		return task_add(work->tq, &work->task);
  93:	}
  94:
  95:	static inline void
  96:	cancel_work_sync(struct work_struct *work)
  97:	{
  98:		if (work->tq != NULL)
  99:			task_del(work->tq, &work->task);
 100:	}
 101:
 102:	#define work_pending(work)	task_pending(&(work)->task)
 103:
 104:	struct delayed_work {
 105:		struct work_struct work;
 106:		struct timeout to;
 107:		struct taskq *tq;
 108:	};
 109:
 110:	#define system_power_efficient_wq ((struct workqueue_struct *)systq)
 111:
 112:	static inline struct delayed_work *
 113:	to_delayed_work(struct work_struct *work)
 114:	{
 115:		return container_of(work, struct delayed_work, work);
 116:	}
 117:
 118:	static void
 119:	__delayed_work_tick(void *arg)
 120:	{
 121:		struct delayed_work *dwork = arg;
 122:
 123:		task_add(dwork->tq, &dwork->work.task);
 124:	}
 125:
 126:	static inline void
 127:	INIT_DELAYED_WORK(struct delayed_work *dwork, work_func_t func)
 128:	{
 129:		INIT_WORK(&dwork->work, func);
 130:		timeout_set(&dwork->to, __delayed_work_tick, &dwork->work);
 131:	}
 132:
 133:	static inline void
 134:	INIT_DELAYED_WORK_ONSTACK(struct delayed_work *dwork, work_func_t func)
 135:	{
 136:		INIT_WORK(&dwork->work, func);
 137:		timeout_set(&dwork->to, __delayed_work_tick, &dwork->work);
 138:	}
 139:
 140:	static inline bool
 141:	schedule_work(struct work_struct *work)
 142:	{
 143:		work->tq = (struct taskq *)system_wq;
 144:		return task_add(work->tq, &work->task);
 145:	}
 146:
 147:	static inline bool
 148:	schedule_delayed_work(struct delayed_work *dwork, int jiffies)
 149:	{
 150:		dwork->tq = (struct taskq *)system_wq;
 151:		return timeout_add(&dwork->to, jiffies);
 152:	}
 153:
 154:	static inline bool
 155:	queue_delayed_work(struct workqueue_struct *wq,
 156:	    struct delayed_work *dwork, int jiffies)
 157:	{
 158:		dwork->tq = (struct taskq *)wq;
 159:		return timeout_add(&dwork->to, jiffies);
 160:	}
 161:
 162:	static inline bool
 163:	mod_delayed_work(struct workqueue_struct *wq,
 164:	    struct delayed_work *dwork, int jiffies)
 165:	{
 166:		dwork->tq = (struct taskq *)wq;
 167:		return (timeout_add(&dwork->to, jiffies) == 0);
 168:	}
 169:
 170:	static inline bool
 171:	cancel_delayed_work(struct delayed_work *dwork)
 172:	{
 173:		if (dwork->tq == NULL)
 174:			return false;
 175:		if (timeout_del(&dwork->to))
 176:			return true;
 177:		return task_del(dwork->tq, &dwork->work.task);
 178:	}
 179:
 180:	static inline bool
 181:	cancel_delayed_work_sync(struct delayed_work *dwork)
 182:	{
 183:		if (dwork->tq == NULL)
 184:			return false;
 185:		if (timeout_del(&dwork->to))
 186:			return true;
 187:		return task_del(dwork->tq, &dwork->work.task);
 188:	}
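	/*
	 * Note (added): unlike the Linux primitive, this shim only deletes a
	 * pending timeout or queued task; task_del(9) does not wait for a
	 * handler that is already executing, so the "sync" in the name is
	 * approximate. Callers such as radeon_uvd_note_usage() above rely
	 * only on the boolean result, which is preserved.
	 */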
 189:
 190:	static inline bool
 191:	delayed_work_pending(struct delayed_work *dwork)
 192:	{
 193:		if (timeout_pending(&dwork->to))
 194:			return true;
 195:		return task_pending(&dwork->work.task);
 196:	}
 197:
 198:	void flush_workqueue(struct workqueue_struct *);
 199:	bool flush_work(struct work_struct *);
 200:	bool flush_delayed_work(struct delayed_work *);
 201:	#define flush_scheduled_work()	flush_workqueue(system_wq)
 202:	#define drain_workqueue(x)	flush_workqueue(x)
 203:
 204:	static inline void
 205:	destroy_work_on_stack(struct work_struct *work)
 206:	{
 207:		if (work->tq)
 208:			task_del(work->tq, &work->task);
 209:	}
 210:
 211:	#define destroy_delayed_work_on_stack(x)
 212:
 213:	struct rcu_work {
 214:		struct work_struct work;
 215:		struct rcu_head rcu;
 216:	};
 217:
 218:	static inline void
 219:	INIT_RCU_WORK(struct rcu_work *work, work_func_t func)
 220:	{
 221:		INIT_WORK(&work->work, func);
 222:	}
 223:
 224:	static inline bool
 225:	queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *work)
 226:	{
 227:		return queue_work(wq, &work->work);
 228:	}
 229:
 230:	#endif
   1:	/* Public domain. */
   2:
   3:	#ifndef _LINUX_FIRMWARE_H
   4:	#define _LINUX_FIRMWARE_H
   5:
   6:	#include <sys/types.h>
   7:	#include <sys/malloc.h>
   8:	#include <sys/device.h>
   9:	#include <linux/types.h>
  10:	#include <linux/gfp.h>
  11:
  12:	#ifndef __DECONST
  13:	#define __DECONST(type, var)	((type)(__uintptr_t)(const void *)(var))
  14:	#endif
  15:
  16:	struct firmware {
  17:		size_t size;
  18:		const u8 *data;
  19:	};
  20:
  21:	static inline int
  22:	request_firmware(const struct firmware **fw, const char *name,
  23:	    struct device *device)
  24:	{
  25:		int r;
  26:		struct firmware *f = malloc(sizeof(struct firmware), M_DRM,
  27:		    M_WAITOK | M_ZERO);
  28:		r = loadfirmware(name, __DECONST(u_char **, &f->data), &f->size);
  29:		if (r != 0) {
  30:			free(f, M_DRM, sizeof(struct firmware));
  31:			*fw = NULL;
  32:			return -r;
  33:		} else {
  34:			*fw = f;
  35:			return 0;
  36:		}
  37:	}
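	/*
	 * Note (added): on failure this sets *fw to NULL and returns -r. This
	 * is the NULL value the analyzer tracks into rdev->uvd_fw and then
	 * into the rdev->uvd_fw->size dereference at radeon_uvd.c:184.
	 */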
  38:
  39:	static inline int
  40:	request_firmware_direct(const struct firmware **fw, const char *name,
  41:	    struct device *device)
  42:	{
  43:		return request_firmware(fw, name, device);
  44:	}
  45:
  46:	#define request_firmware_nowait(a, b, c, d, e, f, g) -EINVAL
  47:
  48:	static inline void
  49:	release_firmware(const struct firmware *fw)
  50:	{
  51:		if (fw)
  52:			free(__DECONST(u_char *, fw->data), M_DEVBUF, fw->size);
  53:		free(__DECONST(struct firmware *, fw), M_DRM, sizeof(*fw));
  54:	}
  55:
  56:	#endif