File: dev/pci/drm/amd/amdgpu/sdma_v5_0.c
Warning: line 240, column 51: Access to field 'data' results in a dereference of a null pointer (loaded from field 'fw')
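The flagged dereference is at line 240: after request_firmware() and amdgpu_ucode_validate() succeed, the code reads adev->sdma.instance[i].fw->data. A minimal sketch of a defensive guard, assuming the analyzer simply cannot see the invariant that request_firmware() leaves 'fw' non-NULL when it returns 0; the check below is illustrative, not the upstream fix:

	err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
	if (err)
		goto out;
	/* Hypothetical guard: makes the request_firmware() success invariant
	 * (fw != NULL) explicit, so the analyzer can no longer assume a NULL
	 * 'fw' reaches the dereference below. */
	if (adev->sdma.instance[i].fw == NULL) {
		err = -EINVAL;
		goto out;
	}
	hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;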
1 | /*
2 |  * Copyright 2019 Advanced Micro Devices, Inc.
3 |  *
4 |  * Permission is hereby granted, free of charge, to any person obtaining a
5 |  * copy of this software and associated documentation files (the "Software"),
6 |  * to deal in the Software without restriction, including without limitation
7 |  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 |  * and/or sell copies of the Software, and to permit persons to whom the
9 |  * Software is furnished to do so, subject to the following conditions:
10 |  *
11 |  * The above copyright notice and this permission notice shall be included in
12 |  * all copies or substantial portions of the Software.
13 |  *
14 |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 |  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 |  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 |  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 |  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 |  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 |  * OTHER DEALINGS IN THE SOFTWARE.
21 |  *
22 |  */
23 |
24 | #include <linux/delay.h>
25 | #include <linux/firmware.h>
26 | #include <linux/module.h>
27 | #include <linux/pci.h>
28 |
29 | #include "amdgpu.h"
30 | #include "amdgpu_ucode.h"
31 | #include "amdgpu_trace.h"
32 |
33 | #include "gc/gc_10_1_0_offset.h"
34 | #include "gc/gc_10_1_0_sh_mask.h"
35 | #include "hdp/hdp_5_0_0_offset.h"
36 | #include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
37 | #include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
38 |
39 | #include "soc15_common.h"
40 | #include "soc15.h"
41 | #include "navi10_sdma_pkt_open.h"
42 | #include "nbio_v2_3.h"
43 | #include "sdma_common.h"
44 | #include "sdma_v5_0.h"
45 |
46 | MODULE_FIRMWARE("amdgpu/navi10_sdma.bin");
47 | MODULE_FIRMWARE("amdgpu/navi10_sdma1.bin");
48 |
49 | MODULE_FIRMWARE("amdgpu/navi14_sdma.bin");
50 | MODULE_FIRMWARE("amdgpu/navi14_sdma1.bin");
51 |
52 | MODULE_FIRMWARE("amdgpu/navi12_sdma.bin");
53 | MODULE_FIRMWARE("amdgpu/navi12_sdma1.bin");
54 |
55 | #define SDMA1_REG_OFFSET 0x600
56 | #define SDMA0_HYP_DEC_REG_START 0x5880
57 | #define SDMA0_HYP_DEC_REG_END 0x5893
58 | #define SDMA1_HYP_DEC_REG_OFFSET 0x20
59 |
60 | static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
61 | static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
62 | static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
63 | static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
64 |
65 | static const struct soc15_reg_golden golden_settings_sdma_5[] = {
66 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
67 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
68 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
69 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
70 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
71 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
72 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
73 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
74 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
75 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
76 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
77 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x00ffffff, 0x000c5c00),
78 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
79 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
80 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
81 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
82 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
83 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
84 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
85 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
86 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
87 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
88 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
89 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
90 | };
91 |
92 | static const struct soc15_reg_golden golden_settings_sdma_5_sriov[] = {
93 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
94 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
95 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
96 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
97 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
98 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
99 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
100 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
101 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
102 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
103 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
104 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
105 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
106 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
107 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
108 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
109 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
110 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
111 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
112 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
113 | };
114 |
115 | static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
116 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
117 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
118 | };
119 |
120 | static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
121 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
122 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
123 | };
124 |
125 | static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
126 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
127 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
128 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
129 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
130 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
131 | 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
132 | };
133 |
134 | static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
135 | {
136 | 	u32 base;
137 |
138 | 	if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
139 | 	    internal_offset <= SDMA0_HYP_DEC_REG_END) {
140 | 		base = adev->reg_offset[GC_HWIP][0][1];
141 | 		if (instance == 1)
142 | 			internal_offset += SDMA1_HYP_DEC_REG_OFFSET;
143 | 	} else {
144 | 		base = adev->reg_offset[GC_HWIP][0][0];
145 | 		if (instance == 1)
146 | 			internal_offset += SDMA1_REG_OFFSET;
147 | 	}
148 |
149 | 	return base + internal_offset;
150 | }
151 |
152 | static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
153 | {
154 | 	switch (adev->asic_type) {
155 | 	case CHIP_NAVI10:
156 | 		soc15_program_register_sequence(adev,
157 | 						golden_settings_sdma_5,
158 | 						(const u32)ARRAY_SIZE(golden_settings_sdma_5));
159 | 		soc15_program_register_sequence(adev,
160 | 						golden_settings_sdma_nv10,
161 | 						(const u32)ARRAY_SIZE(golden_settings_sdma_nv10));
162 | 		break;
163 | 	case CHIP_NAVI14:
164 | 		soc15_program_register_sequence(adev,
165 | 						golden_settings_sdma_5,
166 | 						(const u32)ARRAY_SIZE(golden_settings_sdma_5));
167 | 		soc15_program_register_sequence(adev,
168 | 						golden_settings_sdma_nv14,
169 | 						(const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
170 | 		break;
171 | 	case CHIP_NAVI12:
172 | 		if (amdgpu_sriov_vf(adev))
173 | 			soc15_program_register_sequence(adev,
174 | 							golden_settings_sdma_5_sriov,
175 | 							(const u32)ARRAY_SIZE(golden_settings_sdma_5_sriov));
176 | 		else
177 | 			soc15_program_register_sequence(adev,
178 | 							golden_settings_sdma_5,
179 | 							(const u32)ARRAY_SIZE(golden_settings_sdma_5));
180 | 		soc15_program_register_sequence(adev,
181 | 						golden_settings_sdma_nv12,
182 | 						(const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
183 | 		break;
184 | 	default:
185 | 		break;
186 | 	}
187 | }
188 |
189 | /**
190 |  * sdma_v5_0_init_microcode - load ucode images from disk
191 |  *
192 |  * @adev: amdgpu_device pointer
193 |  *
194 |  * Use the firmware interface to load the ucode images into
195 |  * the driver (not loaded into hw).
196 |  * Returns 0 on success, error on failure.
197 |  */
198 |
199 | // emulation only, won't work on real chip
200 | // navi10 real chip need to use PSP to load firmware
201 | static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
202 | {
203 | 	const char *chip_name;
204 | 	char fw_name[30];
205 | 	int err = 0, i;
206 | 	struct amdgpu_firmware_info *info = NULL;
207 | 	const struct common_firmware_header *header = NULL;
208 | 	const struct sdma_firmware_header_v1_0 *hdr;
209 |
210 | 	if (amdgpu_sriov_vf(adev))
211 | 		return 0;
212 |
213 | 	DRM_DEBUG("\n");
214 |
215 | 	switch (adev->asic_type) {
216 | 	case CHIP_NAVI10:
217 | 		chip_name = "navi10";
218 | 		break;
219 | 	case CHIP_NAVI14:
220 | 		chip_name = "navi14";
221 | 		break;
222 | 	case CHIP_NAVI12:
223 | 		chip_name = "navi12";
224 | 		break;
225 | 	default:
226 | 		BUG();
227 | 	}
228 |
229 | 	for (i = 0; i < adev->sdma.num_instances; i++) {
230 | 		if (i == 0)
231 | 			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
232 | 		else
233 | 			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
234 | 		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
235 | 		if (err)
236 | 			goto out;
237 | 		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
238 | 		if (err)
239 | 			goto out;
240 | 		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
241 | 		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
242 | 		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
243 | 		if (adev->sdma.instance[i].feature_version >= 20)
244 | 			adev->sdma.instance[i].burst_nop = true;
245 | 		DRM_DEBUG("psp_load == '%s'\n",
246 | 			  adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
247 |
248 | 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
249 | 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
250 | 			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
251 | 			info->fw = adev->sdma.instance[i].fw;
252 | 			header = (const struct common_firmware_header *)info->fw->data;
253 | 			adev->firmware.fw_size +=
254 | 				roundup2(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
255 | 		}
256 | 	}
257 | out:
258 | 	if (err) {
259 | 		DRM_ERROR("sdma_v5_0: Failed to load firmware \"%s\"\n", fw_name);
260 | 		for (i = 0; i < adev->sdma.num_instances; i++) {
261 | 			release_firmware(adev->sdma.instance[i].fw);
262 | 			adev->sdma.instance[i].fw = NULL;
263 | 		}
264 | 	}
265 | 	return err;
266 | }
267 |
268 | static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring)
269 | {
270 | 	unsigned ret;
271 |
272 | 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
273 | 	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
274 | 	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
275 | 	amdgpu_ring_write(ring, 1);
276 | 	ret = ring->wptr & ring->buf_mask; /* this is the offset we need patch later */
277 | 	amdgpu_ring_write(ring, 0x55aa55aa); /* insert dummy here and patch it later */
278 |
279 | 	return ret;
280 | }
281 |
282 | static void sdma_v5_0_ring_patch_cond_exec(struct amdgpu_ring *ring,
283 | 					   unsigned offset)
284 | {
285 | 	unsigned cur;
286 |
287 | 	BUG_ON(offset > ring->buf_mask);
288 | 	BUG_ON(ring->ring[offset] != 0x55aa55aa);
289 |
290 | 	cur = (ring->wptr - 1) & ring->buf_mask;
291 | 	if (cur > offset)
292 | 		ring->ring[offset] = cur - offset;
293 | 	else
294 | 		ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
295 | }
296 |
297 | /**
298 |  * sdma_v5_0_ring_get_rptr - get the current read pointer
299 |  *
300 |  * @ring: amdgpu ring pointer
301 |  *
302 |  * Get the current rptr from the hardware (NAVI10+).
303 |  */
304 | static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
305 | {
306 | 	u64 *rptr;
307 |
308 | 	/* XXX check if swapping is necessary on BE */
309 | 	rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);
310 |
311 | 	DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
312 | 	return ((*rptr) >> 2);
313 | }
314 |
315 | /**
316 |  * sdma_v5_0_ring_get_wptr - get the current write pointer
317 |  *
318 |  * @ring: amdgpu ring pointer
319 |  *
320 |  * Get the current wptr from the hardware (NAVI10+).
321 |  */
322 | static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
323 | {
324 | 	struct amdgpu_device *adev = ring->adev;
325 | 	u64 wptr;
326 |
327 | 	if (ring->use_doorbell) {
328 | 		/* XXX check if swapping is necessary on BE */
329 | 		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
330 | 		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
331 | 	} else {
332 | 		wptr = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
333 | 		wptr = wptr << 32;
334 | 		wptr |= RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
335 | 		DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr);
336 | 	}
337 |
338 | 	return wptr >> 2;
339 | }
340 |
341 | /**
342 |  * sdma_v5_0_ring_set_wptr - commit the write pointer
343 |  *
344 |  * @ring: amdgpu ring pointer
345 |  *
346 |  * Write the wptr back to the hardware (NAVI10+).
347 |  */
348 | static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
349 | {
350 | 	struct amdgpu_device *adev = ring->adev;
351 |
352 | 	DRM_DEBUG("Setting write pointer\n");
353 | 	if (ring->use_doorbell) {
354 | 		DRM_DEBUG("Using doorbell -- "
355 | 			  "wptr_offs == 0x%08x "
356 | 			  "lower_32_bits(ring->wptr) << 2 == 0x%08x "
357 | 			  "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
358 | 			  ring->wptr_offs,
359 | 			  lower_32_bits(ring->wptr << 2),
360 | 			  upper_32_bits(ring->wptr << 2));
361 | 		/* XXX check if swapping is necessary on BE */
362 | 		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2);
363 | 		adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2);
364 | 		DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
365 | 			  ring->doorbell_index, ring->wptr << 2);
366 | 		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
367 | 	} else {
368 | 		DRM_DEBUG("Not using doorbell -- "
369 | 			  "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
370 | 			  "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
371 | 			  ring->me,
372 | 			  lower_32_bits(ring->wptr << 2),
373 | 			  ring->me,
374 | 			  upper_32_bits(ring->wptr << 2));
375 | 		WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
376 | 		       lower_32_bits(ring->wptr << 2));
377 | 		WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
378 | 		       upper_32_bits(ring->wptr << 2));
379 | 	}
380 | }
381 |
382 | static void sdma_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
383 | {
384 | 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
385 | 	int i;
386 |
387 | 	for (i = 0; i < count; i++)
388 | 		if (sdma && sdma->burst_nop && (i == 0))
389 | 			amdgpu_ring_write(ring, ring->funcs->nop |
390 | 				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
391 | 		else
392 | 			amdgpu_ring_write(ring, ring->funcs->nop);
393 | }
394 |
395 | /**
396 |  * sdma_v5_0_ring_emit_ib - Schedule an IB on the DMA engine
397 |  *
398 |  * @ring: amdgpu ring pointer
399 |  * @ib: IB object to schedule
400 |  *
401 |  * Schedule an IB in the DMA ring (NAVI10).
402 |  */
403 | static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
404 | 				   struct amdgpu_job *job,
405 | 				   struct amdgpu_ib *ib,
406 | 				   uint32_t flags)
407 | {
408 | 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
409 | 	uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
410 |
411 | 	/* Invalidate L2, because if we don't do it, we might get stale cache
412 | 	 * lines from previous IBs.
413 | 	 */
414 | 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
415 | 	amdgpu_ring_write(ring, 0);
416 | 	amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV |
417 | 				 SDMA_GCR_GL2_WB |
418 | 				 SDMA_GCR_GLM_INV |
419 | 				 SDMA_GCR_GLM_WB) << 16);
420 | 	amdgpu_ring_write(ring, 0xffffff80);
421 | 	amdgpu_ring_write(ring, 0xffff);
422 |
423 | 	/* An IB packet must end on an 8 DW boundary--the next dword
424 | 	 * must be on an 8-dword boundary. Our IB packet below is 6
425 | 	 * dwords long, thus add x number of NOPs, such that, in
426 | 	 * modular arithmetic,
427 | 	 * wptr + 6 + x = 8k, k >= 0, which in C is,
428 | 	 * (wptr + 6 + x) % 8 = 0.
429 | 	 * The expression below is a solution for x.
430 | 	 */
431 | 	sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
432 |
433 | 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
434 | 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
435 | 	/* base must be 32 byte aligned */
436 | 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
437 | 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
438 | 	amdgpu_ring_write(ring, ib->length_dw);
439 | 	amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
440 | 	amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
441 | }
442 |
443 | /**
444 |  * sdma_v5_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
445 |  *
446 |  * @ring: amdgpu ring pointer
447 |  *
448 |  * Emit an hdp flush packet on the requested DMA ring.
449 |  */
450 | static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
451 | {
452 | 	struct amdgpu_device *adev = ring->adev;
453 | 	u32 ref_and_mask = 0;
454 | 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
455 |
456 | 	if (ring->me == 0)
457 | 		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
458 | 	else
459 | 		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
460 |
461 | 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
462 | 			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
463 | 			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
464 | 	amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
465 | 	amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
466 | 	amdgpu_ring_write(ring, ref_and_mask); /* reference */
467 | 	amdgpu_ring_write(ring, ref_and_mask); /* mask */
468 | 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
469 | 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
470 | }
471 |
472 | /**
473 |  * sdma_v5_0_ring_emit_fence - emit a fence on the DMA ring
474 |  *
475 |  * @ring: amdgpu ring pointer
476 |  * @fence: amdgpu fence object
477 |  *
478 |  * Add a DMA fence packet to the ring to write
479 |  * the fence seq number and DMA trap packet to generate
480 |  * an interrupt if needed (NAVI10).
481 |  */
482 | static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
483 | 				      unsigned flags)
484 | {
485 | 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
486 | 	/* write the fence */
487 | 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
488 | 			  SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */
489 | 	/* zero in first two bits */
490 | 	BUG_ON(addr & 0x3);
491 | 	amdgpu_ring_write(ring, lower_32_bits(addr));
492 | 	amdgpu_ring_write(ring, upper_32_bits(addr));
493 | 	amdgpu_ring_write(ring, lower_32_bits(seq));
494 |
495 | 	/* optionally write high bits as well */
496 | 	if (write64bit) {
497 | 		addr += 4;
498 | 		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
499 | 				  SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
500 | 		/* zero in first two bits */
501 | 		BUG_ON(addr & 0x3);
502 | 		amdgpu_ring_write(ring, lower_32_bits(addr));
503 | 		amdgpu_ring_write(ring, upper_32_bits(addr));
504 | 		amdgpu_ring_write(ring, upper_32_bits(seq));
505 | 	}
506 |
507 | 	if (flags & AMDGPU_FENCE_FLAG_INT) {
508 | 		/* generate an interrupt */
509 | 		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
510 | 		amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
511 | 	}
512 | }
513 |
514 |
515 | /**
516 |  * sdma_v5_0_gfx_stop - stop the gfx async dma engines
517 |  *
518 |  * @adev: amdgpu_device pointer
519 |  *
520 |  * Stop the gfx async dma ring buffers (NAVI10).
521 |  */
522 | static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
523 | {
524 | 	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
525 | 	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
526 | 	u32 rb_cntl, ib_cntl;
527 | 	int i;
528 |
529 | 	if ((adev->mman.buffer_funcs_ring == sdma0) ||
530 | 	    (adev->mman.buffer_funcs_ring == sdma1))
531 | 		amdgpu_ttm_set_buffer_funcs_status(adev, false);
532 |
533 | 	for (i = 0; i < adev->sdma.num_instances; i++) {
534 | 		rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
535 | 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
536 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
537 | 		ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
538 | 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
539 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
540 | 	}
541 | }
542 |
543 | /**
544 |  * sdma_v5_0_rlc_stop - stop the compute async dma engines
545 |  *
546 |  * @adev: amdgpu_device pointer
547 |  *
548 |  * Stop the compute async dma queues (NAVI10).
549 |  */
550 | static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
551 | {
552 | 	/* XXX todo */
553 | }
554 |
555 | /**
556 |  * sdma_v5_0_ctx_switch_enable - stop the async dma engines context switch
557 |  *
558 |  * @adev: amdgpu_device pointer
559 |  * @enable: enable/disable the DMA MEs context switch.
560 |  *
561 |  * Halt or unhalt the async dma engines context switch (NAVI10).
562 |  */
563 | static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
564 | {
565 | 	u32 f32_cntl = 0, phase_quantum = 0;
566 | 	int i;
567 |
568 | 	if (amdgpu_sdma_phase_quantum) {
569 | 		unsigned value = amdgpu_sdma_phase_quantum;
570 | 		unsigned unit = 0;
571 |
572 | 		while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
573 | 				SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
574 | 			value = (value + 1) >> 1;
575 | 			unit++;
576 | 		}
577 | 		if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
578 | 			    SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
579 | 			value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
580 | 				 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
581 | 			unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
582 | 				SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
583 | 			WARN_ONCE(1,
584 | 				  "clamping sdma_phase_quantum to %uK clock cycles\n",
585 | 				  value << unit);
586 | 		}
587 | 		phase_quantum =
588 | 			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
589 | 			unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
590 | 	}
591 |
592 | 	for (i = 0; i < adev->sdma.num_instances; i++) {
593 | 		if (!amdgpu_sriov_vf(adev)) {
594 | 			f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
595 | 			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
596 | 					AUTO_CTXSW_ENABLE, enable ? 1 : 0);
597 | 		}
598 |
599 | 		if (enable && amdgpu_sdma_phase_quantum) {
600 | 			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
601 | 			       phase_quantum);
602 | 			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
603 | 			       phase_quantum);
604 | 			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
605 | 			       phase_quantum);
606 | 		}
607 | 		if (!amdgpu_sriov_vf(adev))
608 | 			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
609 | 	}
610 |
611 | }
612 |
613 | /**
614 |  * sdma_v5_0_enable - stop the async dma engines
615 |  *
616 |  * @adev: amdgpu_device pointer
617 |  * @enable: enable/disable the DMA MEs.
618 |  *
619 |  * Halt or unhalt the async dma engines (NAVI10).
620 |  */
621 | static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
622 | {
623 | 	u32 f32_cntl;
624 | 	int i;
625 |
626 | 	if (!enable) {
627 | 		sdma_v5_0_gfx_stop(adev);
628 | 		sdma_v5_0_rlc_stop(adev);
629 | 	}
630 |
631 | 	if (amdgpu_sriov_vf(adev))
632 | 		return;
633 |
634 | 	for (i = 0; i < adev->sdma.num_instances; i++) {
635 | 		f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
636 | 		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
637 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
638 | 	}
639 | }
640 |
641 | /**
642 |  * sdma_v5_0_gfx_resume - setup and start the async dma engines
643 |  *
644 |  * @adev: amdgpu_device pointer
645 |  *
646 |  * Set up the gfx DMA ring buffers and enable them (NAVI10).
647 |  * Returns 0 for success, error for failure.
648 |  */
649 | static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
650 | {
651 | 	struct amdgpu_ring *ring;
652 | 	u32 rb_cntl, ib_cntl;
653 | 	u32 rb_bufsz;
654 | 	u32 wb_offset;
655 | 	u32 doorbell;
656 | 	u32 doorbell_offset;
657 | 	u32 temp;
658 | 	u32 wptr_poll_cntl;
659 | 	u64 wptr_gpu_addr;
660 | 	int i, r;
661 |
662 | 	for (i = 0; i < adev->sdma.num_instances; i++) {
663 | 		ring = &adev->sdma.instance[i].ring;
664 | 		wb_offset = (ring->rptr_offs * 4);
665 |
666 | 		if (!amdgpu_sriov_vf(adev))
667 | 			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
668 |
669 | 		/* Set ring buffer size in dwords */
670 | 		rb_bufsz = order_base_2(ring->ring_size / 4);
671 | 		rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
672 | 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
673 | #ifdef __BIG_ENDIAN
674 | 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
675 | 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
676 | 					RPTR_WRITEBACK_SWAP_ENABLE, 1);
677 | #endif
678 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
679 |
680 | 		/* Initialize the ring buffer's read and write pointers */
681 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
682 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
683 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
684 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
685 |
686 | 		/* setup the wptr shadow polling */
687 | 		wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
688 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
689 | 		       lower_32_bits(wptr_gpu_addr));
690 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
691 | 		       upper_32_bits(wptr_gpu_addr));
692 | 		wptr_poll_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i,
693 | 							 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
694 | 		wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
695 | 					       SDMA0_GFX_RB_WPTR_POLL_CNTL,
696 | 					       F32_POLL_ENABLE, 1);
697 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
698 | 		       wptr_poll_cntl);
699 |
700 | 		/* set the wb address whether it's enabled or not */
701 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
702 | 		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
703 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
704 | 		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
705 |
706 | 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
707 |
708 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
709 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
710 |
711 | 		ring->wptr = 0;
712 |
713 | 		/* before programming wptr to a smaller value, need to set minor_ptr_update first */
714 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
715 |
716 | 		if (!amdgpu_sriov_vf(adev)) { /* only bare-metal uses register writes for wptr */
717 | 			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
718 | 			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
719 | 		}
720 |
721 | 		doorbell = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
722 | 		doorbell_offset = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
723 |
724 | 		if (ring->use_doorbell) {
725 | 			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
726 | 			doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
727 | 					OFFSET, ring->doorbell_index);
728 | 		} else {
729 | 			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
730 | 		}
731 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
732 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
733 |
734 | 		adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
735 | 						      ring->doorbell_index, 20);
736 |
737 | 		if (amdgpu_sriov_vf(adev))
738 | 			sdma_v5_0_ring_set_wptr(ring);
739 |
740 | 		/* set minor_ptr_update to 0 after wptr programmed */
741 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
742 |
743 | 		if (!amdgpu_sriov_vf(adev)) {
744 | 			/* set utc l1 enable flag always to 1 */
745 | 			temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
746 | 			temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
747 |
748 | 			/* enable MCBP */
749 | 			temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
750 | 			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
751 |
752 | 			/* Set up RESP_MODE to non-copy addresses */
753 | 			temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
754 | 			temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
755 | 			temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
756 | 			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
757 |
758 | 			/* program default cache read and write policy */
759 | 			temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
760 | 			/* clean read policy and write policy bits */
761 | 			temp &= 0xFF0FFF;
762 | 			temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
763 | 			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
764 | 		}
765 |
766 | 		if (!amdgpu_sriov_vf(adev)) {
767 | 			/* unhalt engine */
768 | 			temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
769 | 			temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
770 | 			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
771 | 		}
772 |
773 | 		/* enable DMA RB */
774 | 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
775 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
776 |
777 | 		ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
778 | 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
779 | #ifdef __BIG_ENDIAN
780 | 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
781 | #endif
782 | 		/* enable DMA IBs */
783 | 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
784 |
785 | 		ring->sched.ready = true;
786 |
787 | 		if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need the two lines below */
788 | 			sdma_v5_0_ctx_switch_enable(adev, true);
789 | 			sdma_v5_0_enable(adev, true);
790 | 		}
791 |
792 | 		r = amdgpu_ring_test_helper(ring);
793 | 		if (r)
794 | 			return r;
795 |
796 | 		if (adev->mman.buffer_funcs_ring == ring)
797 | 			amdgpu_ttm_set_buffer_funcs_status(adev, true);
798 | 	}
799 |
800 | 	return 0;
801 | }
802 |
803 | /**
804 |  * sdma_v5_0_rlc_resume - setup and start the async dma engines
805 |  *
806 |  * @adev: amdgpu_device pointer
807 |  *
808 |  * Set up the compute DMA queues and enable them (NAVI10).
809 |  * Returns 0 for success, error for failure.
810 |  */
811 | static int sdma_v5_0_rlc_resume(struct amdgpu_device *adev)
812 | {
813 | 	return 0;
814 | }
815 |
/**
 * sdma_v5_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v5_0_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	sdma_v5_0_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;

		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data +
			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);

		for (j = 0; j < fw_size; j++) {
			if (amdgpu_emu_mode == 1 && j % 500 == 0)
				drm_msleep(1);
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
		}

		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
	}

	return 0;
}

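/*
 * Bring-up ordering: under SR-IOV the host side is expected to own microcode
 * load and engine halt/unhalt, so the guest path below only disables ring
 * preemption, halts, and programs the ring-buffer registers via
 * sdma_v5_0_gfx_resume(). On bare metal the full sequence is: optional
 * direct ucode load, unhalt, enable preemption, then resume the gfx rings
 * (and the currently stubbed-out RLC queues).
 */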
/**
 * sdma_v5_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (NAVI10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_0_start(struct amdgpu_device *adev)
{
	int r = 0;

	if (amdgpu_sriov_vf(adev)) {
		sdma_v5_0_ctx_switch_enable(adev, false);
		sdma_v5_0_enable(adev, false);

		/* set RB registers */
		r = sdma_v5_0_gfx_resume(adev);
		return r;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		r = sdma_v5_0_load_microcode(adev);
		if (r)
			return r;
	}

	/* unhalt the MEs */
	sdma_v5_0_enable(adev, true);
	/* enable sdma ring preemption */
	sdma_v5_0_ctx_switch_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v5_0_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v5_0_rlc_resume(adev);

	return r;
}

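/*
 * Both tests below follow the same writeback pattern: reserve a 4-byte slot
 * in the device writeback page, seed it with 0xCAFEDEAD, ask the engine to
 * overwrite it with 0xDEADBEEF via a WRITE_LINEAR packet, and poll the
 * CPU-visible copy until it changes or adev->usec_timeout is exhausted.
 */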
/**
 * sdma_v5_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (NAVI10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_device_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			drm_msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	amdgpu_device_wb_free(adev, index);

	return r;
}

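/*
 * The IB variant exercises the whole submission path rather than raw ring
 * writes: the same 5-dword WRITE_LINEAR packet is placed in an indirect
 * buffer, padded with NOPs to 8 dwords, scheduled through
 * amdgpu_ib_schedule(), and completion is observed via the returned fence
 * before the writeback slot is checked.
 */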
/**
 * sdma_v5_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies for the fence wait
 *
 * Test a simple IB in the DMA ring (NAVI10).
 * Returns 0 on success, error on failure.
 */
static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	long r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256,
			  AMDGPU_IB_POOL_DIRECT, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}

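/*
 * GPUVM page-table helpers. Each PTE/PDE is 8 bytes, which is where the
 * "count * 8" byte count and the paired lo/hi dword stores below come from.
 * The 7-dword COPY_LINEAR packet emitted here is what .copy_pte_num_dw = 7
 * in sdma_v5_0_vm_pte_funcs accounts for.
 */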
/**
 * sdma_v5_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (NAVI10).
 */
static void sdma_v5_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

/**
 * sdma_v5_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: value to write into the page entries
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (NAVI10).
 */
static void sdma_v5_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw - 1;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

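/*
 * PTEPDE generates the entries on the engine instead of transferring them:
 * per the operand comments below, the packet carries a destination (pe), a
 * 64-bit mask (flags), an initial 64-bit value (addr), a per-entry increment
 * and an entry count, so a linear run of PTEs costs a fixed 10 dwords
 * regardless of count.
 */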
/**
 * sdma_v5_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (NAVI10).
 */
static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
}

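/*
 * Worked example for the padding rule below: pad_count = (-length_dw) & 7,
 * so an IB of 13 dwords gets (-13) & 7 == 3 NOP dwords to reach the next
 * multiple of 8. When the instance supports burst NOPs, the first NOP
 * carries a COUNT field covering the remaining pad dwords in one packet.
 */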
/**
 * sdma_v5_0_ring_pad_ib - pad the IB
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a boundary multiple of 8.
 */
static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	u32 pad_count;
	int i;

	pad_count = (-ib->length_dw) & 0x7;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

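/*
 * The POLL_REGMEM packet used below blocks the engine until a memory or
 * register location satisfies a compare: FUNC(3) selects an equal compare,
 * MEM_POLL(1) selects memory (here the ring's fence address), and DW5 packs
 * the retry count and the poll interval. Pipeline sync thus simply waits
 * until the ring's own fence value reaches sync_seq.
 */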
/**
 * sdma_v5_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (NAVI10).
 */
static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

/**
 * sdma_v5_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (NAVI10).
 */
static void sdma_v5_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}

static void sdma_v5_0_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}

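/*
 * Register waits reuse POLL_REGMEM in register mode (MEM_POLL left at 0);
 * the offset is shifted left by 2 since the packet, by all appearances,
 * takes a byte address rather than the dword offset used by RREG32/WREG32.
 */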
static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val); /* reference */
	amdgpu_ring_write(ring, mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}

static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						   uint32_t reg0, uint32_t reg1,
						   uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	/* wait for a cycle to reset vm_inv_eng*_ack */
	amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}

static int sdma_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = 2;

	sdma_v5_0_set_ring_funcs(adev);
	sdma_v5_0_set_buffer_funcs(adev);
	sdma_v5_0_set_vm_pte_funcs(adev);
	sdma_v5_0_set_irq_funcs(adev);

	return 0;
}

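/*
 * Note that both trap sources (IH clients SDMA0 and SDMA1) are registered
 * against the same amdgpu_irq_src; sdma_v5_0_process_trap_irq() later
 * demultiplexes on entry->client_id to pick the instance whose fences to
 * process.
 */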
static int sdma_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0,
			      SDMA0_5_0__SRCID__SDMA_TRAP,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1,
			      SDMA1_5_0__SRCID__SDMA_TRAP,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	r = sdma_v5_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = true;

		DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
			  ring->use_doorbell?"true":"false");

		ring->doorbell_index = (i == 0) ?
			(adev->doorbell_index.sdma_engine[0] << 1) //get DWORD offset
			: (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset

		snprintf(ring->name, sizeof(ring->name), "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_INSTANCE0 :
				     AMDGPU_SDMA_IRQ_INSTANCE1,
				     AMDGPU_RING_PRIO_DEFAULT);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v5_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		adev->sdma.instance[i].fw = NULL;

		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
	}

	return 0;
}

static int sdma_v5_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v5_0_init_golden_registers(adev);

	r = sdma_v5_0_start(adev);

	return r;
}

static int sdma_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	sdma_v5_0_ctx_switch_enable(adev, false);
	sdma_v5_0_enable(adev, false);

	return 0;
}

static int sdma_v5_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v5_0_hw_fini(adev);
}

static int sdma_v5_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v5_0_hw_init(adev);
}

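/*
 * Idle checks read the IDLE bit of each instance's STATUS_REG; the
 * "sdma0 & sdma1 & IDLE_MASK" form in wait_for_idle only returns 0 once
 * both engines report idle in the same poll iteration.
 */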
static bool sdma_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		u32 tmp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));

		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
			return false;
	}

	return true;
}

static int sdma_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 sdma0, sdma1;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		sdma0 = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
		sdma1 = RREG32(sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));

		if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int sdma_v5_0_soft_reset(void *handle)
{
	/* todo */

	return 0;
}

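/*
 * Mid-ring preemption handshake, step by step: assert the preemption
 * condition via the ring's COND_EXEC flag, emit one trailing fence, write
 * SDMA*_GFX_PREEMPT to request preemption, then poll until the trailing
 * fence value appears in memory, which signals that the engine has drained
 * up to the preemption point; finally deassert both again.
 */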
static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
	int i, r = 0;
	struct amdgpu_device *adev = ring->adev;
	u32 index = 0;
	u64 sdma_gfx_preempt;

	amdgpu_sdma_get_index_from_ring(ring, &index);
	if (index == 0)
		sdma_gfx_preempt = mmSDMA0_GFX_PREEMPT;
	else
		sdma_gfx_preempt = mmSDMA1_GFX_PREEMPT;

	/* assert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, false);

	/* emit the trailing fence */
	ring->trail_seq += 1;
	amdgpu_ring_alloc(ring, 10);
	sdma_v5_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
				  ring->trail_seq, 0);
	amdgpu_ring_commit(ring);

	/* assert IB preemption */
	WREG32(sdma_gfx_preempt, 1);

	/* poll the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		r = -EINVAL;
		DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
	}

	/* deassert IB preemption */
	WREG32(sdma_gfx_preempt, 0);

	/* deassert the preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, true);
	return r;
}

static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	if (!amdgpu_sriov_vf(adev)) {
		u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
			sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
			sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);

		sdma_cntl = RREG32(reg_offset);
		sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
					  state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		WREG32(reg_offset, sdma_cntl);
	}

	return 0;
}

static int sdma_v5_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: SDMA trap\n");
	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_SDMA0:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	case SOC15_IH_CLIENTID_SDMA1:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v5_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	return 0;
}

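/*
 * The SOFT_OVERRIDE bits in SDMA*_CLK_CTRL appear to force individual clocks
 * on; enabling medium-grain clock gating therefore means clearing them (let
 * the hardware gate), and disabling it means setting them all. Light sleep
 * below works the other way around: setting MEM_POWER_OVERRIDE enables
 * memory light sleep.
 */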
static void sdma_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t data, def;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
			/* Enable sdma clock gating */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
			data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
		} else {
			/* Disable sdma clock gating */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
			data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
		}
	}
}

static void sdma_v5_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t data, def;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
			/* Enable sdma mem light sleep */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
			data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
		} else {
			/* Disable sdma mem light sleep */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
			data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
		}
	}
}

static int sdma_v5_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		sdma_v5_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		sdma_v5_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

static int sdma_v5_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void sdma_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_SDMA_MGCG */
	data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CLK_CTRL));
	if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;

	/* AMD_CG_SUPPORT_SDMA_LS */
	data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_POWER_CNTL));
	if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
		*flags |= AMD_CG_SUPPORT_SDMA_LS;
}

const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
	.name = "sdma_v5_0",
	.early_init = sdma_v5_0_early_init,
	.late_init = NULL,
	.sw_init = sdma_v5_0_sw_init,
	.sw_fini = sdma_v5_0_sw_fini,
	.hw_init = sdma_v5_0_hw_init,
	.hw_fini = sdma_v5_0_hw_fini,
	.suspend = sdma_v5_0_suspend,
	.resume = sdma_v5_0_resume,
	.is_idle = sdma_v5_0_is_idle,
	.wait_for_idle = sdma_v5_0_wait_for_idle,
	.soft_reset = sdma_v5_0_soft_reset,
	.set_clockgating_state = sdma_v5_0_set_clockgating_state,
	.set_powergating_state = sdma_v5_0_set_powergating_state,
	.get_clockgating_state = sdma_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = sdma_v5_0_ring_get_rptr,
	.get_wptr = sdma_v5_0_ring_get_wptr,
	.set_wptr = sdma_v5_0_ring_set_wptr,
	.emit_frame_size =
		5 + /* sdma_v5_0_ring_init_cond_exec */
		6 + /* sdma_v5_0_ring_emit_hdp_flush */
		3 + /* hdp_invalidate */
		6 + /* sdma_v5_0_ring_emit_pipeline_sync */
		/* sdma_v5_0_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
		10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
	.emit_ib = sdma_v5_0_ring_emit_ib,
	.emit_fence = sdma_v5_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v5_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v5_0_ring_emit_hdp_flush,
	.test_ring = sdma_v5_0_ring_test_ring,
	.test_ib = sdma_v5_0_ring_test_ib,
	.insert_nop = sdma_v5_0_ring_insert_nop,
	.pad_ib = sdma_v5_0_ring_pad_ib,
	.emit_wreg = sdma_v5_0_ring_emit_wreg,
	.emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
	.init_cond_exec = sdma_v5_0_ring_init_cond_exec,
	.patch_cond_exec = sdma_v5_0_ring_patch_cond_exec,
	.preempt_ib = sdma_v5_0_ring_preempt_ib,
};

static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &sdma_v5_0_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
	}
}

static const struct amdgpu_irq_src_funcs sdma_v5_0_trap_irq_funcs = {
	.set = sdma_v5_0_set_trap_irq_state,
	.process = sdma_v5_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v5_0_illegal_inst_irq_funcs = {
	.process = sdma_v5_0_process_illegal_inst_irq,
};

static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
					adev->sdma.num_instances;
	adev->sdma.trap_irq.funcs = &sdma_v5_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v5_0_illegal_inst_irq_funcs;
}

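/*
 * TTM buffer-move callbacks. The dword budgets declared in
 * sdma_v5_0_buffer_funcs below (copy_num_dw = 7, fill_num_dw = 5) match the
 * number of ib->ptr[] stores each emit helper performs, and the 0x400000
 * max_bytes values keep byte_count - 1 within a single packet's count field.
 */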
/**
 * sdma_v5_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @tmz: if a secure copy should be used
 *
 * Copy GPU buffers using the DMA engine (NAVI10).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count,
				       bool tmz)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
		SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
	ib->ptr[ib->length_dw++] = byte_count - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v5_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (NAVI10).
 */
static void sdma_v5_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count - 1;
}

static const struct amdgpu_buffer_funcs sdma_v5_0_buffer_funcs = {
	.copy_max_bytes = 0x400000,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v5_0_emit_copy_buffer,

	.fill_max_bytes = 0x400000,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v5_0_emit_fill_buffer,
};

static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v5_0_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v5_0_vm_copy_pte,
	.write_pte = sdma_v5_0_vm_write_pte,
	.set_pte_pde = sdma_v5_0_vm_set_pte_pde,
};

static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++) {
			adev->vm_manager.vm_pte_scheds[i] =
				&adev->sdma.instance[i].ring.sched;
		}
		adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version sdma_v5_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &sdma_v5_0_ip_funcs,
};
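
/*
 * The listing below is a second file pulled into the same report: the
 * OpenBSD drm compat shim for <linux/firmware.h>, which supplies the
 * struct firmware and the request/release helpers used by the code above.
 */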
/* Public domain. */

#ifndef _LINUX_FIRMWARE_H
#define _LINUX_FIRMWARE_H

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <linux/types.h>
#include <linux/gfp.h>

#ifndef __DECONST
#define __DECONST(type, var)	((type)(__uintptr_t)(const void *)(var))
#endif

struct firmware {
	size_t size;
	const u8 *data;
};

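/*
 * A thin mapping onto OpenBSD's loadfirmware(9): loadfirmware() returns a
 * positive errno on failure, hence the "return -r" below to match the
 * negative-errno convention Linux callers expect. On success it hands back
 * a buffer (recorded in f->data/f->size) that release_firmware() later
 * frees.
 */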
static inline int
request_firmware(const struct firmware **fw, const char *name,
    struct device *device)
{
	int r;
	struct firmware *f = malloc(sizeof(struct firmware), M_DRM,
	    M_WAITOK | M_ZERO);
	r = loadfirmware(name, __DECONST(u_char **, &f->data), &f->size);
	if (r != 0) {
		free(f, M_DRM, sizeof(struct firmware));
		*fw = NULL;
		return -r;
	} else {
		*fw = f;
		return 0;
	}
}

static inline int
request_firmware_direct(const struct firmware **fw, const char *name,
    struct device *device)
{
	return request_firmware(fw, name, device);
}

#define request_firmware_nowait(a, b, c, d, e, f, g)	-EINVAL

49 | release_firmware(const struct firmware *fw) |
50 | { |
51 | if (fw) |
52 | free(__DECONST(u_char *, fw->data)((u_char *)(__uintptr_t)(const void *)(fw->data)), M_DEVBUF2, fw->size); |
53 | free(__DECONST(struct firmware *, fw)((struct firmware *)(__uintptr_t)(const void *)(fw)), M_DRM145, sizeof(*fw)); |
54 | } |
55 | |
56 | #endif |