File: | dev/acpi/acpidmar.c |
Warning: | line 967, column 9 The left expression of the compound assignment is an uninitialized value. The computed value will also be garbage |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | /* | |||
2 | * Copyright (c) 2015 Jordan Hargrave <jordan_hargrave@hotmail.com> | |||
3 | * | |||
4 | * Permission to use, copy, modify, and distribute this software for any | |||
5 | * purpose with or without fee is hereby granted, provided that the above | |||
6 | * copyright notice and this permission notice appear in all copies. | |||
7 | * | |||
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |||
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |||
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |||
11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |||
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |||
13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |||
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |||
15 | */ | |||
16 | ||||
17 | #include <sys/param.h> | |||
18 | #include <sys/systm.h> | |||
19 | #include <sys/kernel.h> | |||
20 | #include <sys/device.h> | |||
21 | #include <sys/malloc.h> | |||
22 | #include <sys/queue.h> | |||
23 | #include <sys/types.h> | |||
24 | #include <sys/mbuf.h> | |||
25 | #include <sys/proc.h> | |||
26 | ||||
27 | #include <uvm/uvm_extern.h> | |||
28 | ||||
29 | #include <machine/apicvar.h> | |||
30 | #include <machine/biosvar.h> | |||
31 | #include <machine/cpuvar.h> | |||
32 | #include <machine/bus.h> | |||
33 | ||||
34 | #include <dev/acpi/acpireg.h> | |||
35 | #include <dev/acpi/acpivar.h> | |||
36 | #include <dev/acpi/acpidev.h> | |||
37 | #include <dev/acpi/amltypes.h> | |||
38 | #include <dev/acpi/dsdt.h> | |||
39 | ||||
40 | #include <uvm/uvm_extern.h> | |||
41 | ||||
42 | #include <machine/i8259.h> | |||
43 | #include <machine/i82093reg.h> | |||
44 | #include <machine/i82093var.h> | |||
45 | #include <machine/i82489reg.h> | |||
46 | #include <machine/i82489var.h> | |||
47 | ||||
48 | #include <machine/mpbiosvar.h> | |||
49 | ||||
50 | #include <dev/pci/pcireg.h> | |||
51 | #include <dev/pci/pcivar.h> | |||
52 | #include <dev/pci/pcidevs.h> | |||
53 | #include <dev/pci/ppbreg.h> | |||
54 | ||||
55 | #include "ioapic.h" | |||
56 | ||||
57 | #include "acpidmar.h" | |||
58 | #include "amd_iommu.h" | |||
59 | ||||
60 | /* We don't want IOMMU to remap MSI */ | |||
61 | #define MSI_BASE_ADDRESS0xFEE00000L 0xFEE00000L | |||
62 | #define MSI_BASE_SIZE0x00100000L 0x00100000L | |||
63 | #define MAX_DEVFN65536 65536 | |||
64 | ||||
65 | #ifdef IOMMU_DEBUG | |||
66 | int acpidmar_dbg_lvl = 0; | |||
67 | #define DPRINTF(lvl,x...) if (acpidmar_dbg_lvl >= lvl) { printf(x); } | |||
68 | #else | |||
69 | #define DPRINTF(lvl,x...) | |||
70 | #endif | |||
71 | ||||
72 | #ifdef DDB1 | |||
73 | int acpidmar_ddb = 0; | |||
74 | #endif | |||
75 | ||||
76 | int acpidmar_force_cm = 1; | |||
77 | ||||
78 | /* Page Table Entry per domain */ | |||
79 | struct iommu_softc; | |||
80 | ||||
/* Compose a PCI source-id from bus, device and function numbers. */
static inline int
mksid(int b, int d, int f)
{
	int sid;

	sid = f;
	sid += d << 3;
	sid += b << 8;
	return sid;
}
86 | ||||
/* Extract the combined device/function byte from a source-id. */
static inline int
sid_devfn(int sid)
{
	const int devfn_mask = 0xff;

	return (sid & devfn_mask);
}
92 | ||||
/* Extract the bus number (bits 15:8) from a source-id. */
static inline int
sid_bus(int sid)
{
	int bus;

	bus = sid >> 8;
	return (bus & 0xff);
}
98 | ||||
/* Extract the device number (bits 7:3) from a source-id. */
static inline int
sid_dev(int sid)
{
	int dev;

	dev = sid >> 3;
	return (dev & 0x1f);
}
104 | ||||
/* Extract the function number (bits 2:0) from a source-id. */
static inline int
sid_fun(int sid)
{
	int fun;

	fun = sid;
	return (fun & 0x7);
}
110 | ||||
111 | /* Alias mapping */ | |||
112 | #define SID_INVALID0x80000000L 0x80000000L | |||
113 | static uint32_t sid_flag[MAX_DEVFN65536]; | |||
114 | ||||
/* One device attached to a translation domain. */
struct domain_dev {
	int sid;	/* PCI source-id (bus:dev.fun) */
	int sec;	/* NOTE(review): presumably secondary bus if a bridge -- confirm */
	int sub;	/* NOTE(review): presumably subordinate bus if a bridge -- confirm */
	TAILQ_ENTRY(domain_dev) link;	/* entry on domain's device list */
};
121 | ||||
/*
 * A DMA translation domain: one page-table hierarchy plus the set of
 * devices sharing it, and an extent allocator handing out IOVA space.
 */
struct domain {
	struct iommu_softc *iommu;	/* owning IOMMU unit */
	int did;			/* domain id (DID_UNITY == 1:1 map) */
	int gaw;			/* guest address width in bits */
	struct pte_entry *pte;		/* root of the page-table tree */
	paddr_t ptep;			/* physical address of the root */
	struct bus_dma_tag dmat;	/* per-domain busdma tag (dmar_* ops) */
	int flag;			/* DOM_DEBUG | DOM_NOMAP */

	struct mutex exlck;		/* protects iovamap */
	char exname[32];		/* name buffer for the extent */
	struct extent *iovamap;		/* allocator for device VA space */
	TAILQ_HEAD(,domain_dev) devices;	/* devices using this domain */
	TAILQ_ENTRY(domain) link;	/* entry on iommu's domain list */
};
137 | ||||
138 | #define DOM_DEBUG0x1 0x1 | |||
139 | #define DOM_NOMAP0x2 0x2 | |||
140 | ||||
/* One DMAR device-scope entry (path from a root bus to a device). */
struct dmar_devlist {
	int type;			/* device scope type from the table */
	int bus;			/* starting bus number */
	int ndp;			/* number of path entries */
	struct acpidmar_devpath *dp;	/* dev/fun path array */
	TAILQ_ENTRY(dmar_devlist) link;
};
148 | ||||
149 | TAILQ_HEAD(devlist_head, dmar_devlist)struct devlist_head { struct dmar_devlist *tqh_first; struct dmar_devlist **tqh_last; }; | |||
150 | ||||
/* AMD IVHD device range with its configuration byte. */
struct ivhd_devlist {
	int start_id;	/* first device id in the range */
	int end_id;	/* last device id in the range */
	int cfg;	/* IVHD data/config setting for the range */
	TAILQ_ENTRY(ivhd_devlist) link;
};
157 | ||||
/* Reserved Memory Region Reporting (RMRR) entry: a physical range
 * that must stay identity-mapped for the listed devices. */
struct rmrr_softc {
	TAILQ_ENTRY(rmrr_softc) link;
	struct devlist_head devices;	/* devices the range applies to */
	int segment;			/* PCI segment */
	uint64_t start;			/* range start (physical) */
	uint64_t end;			/* range end (physical) */
};
165 | ||||
/* Root Port ATS Capability (ATSR) entry from the DMAR table. */
struct atsr_softc {
	TAILQ_ENTRY(atsr_softc) link;
	struct devlist_head devices;	/* root ports in scope */
	int segment;			/* PCI segment */
	int flags;			/* table flags (e.g. all-ports) */
};
172 | ||||
/* Interrupt controller glue: embeds a pic plus a backpointer so the
 * handler can recover its IOMMU unit. */
struct iommu_pic {
	struct pic pic;
	struct iommu_softc *iommu;
};
177 | ||||
178 | #define IOMMU_FLAGS_CATCHALL0x1 0x1 | |||
179 | #define IOMMU_FLAGS_BAD0x2 0x2 | |||
180 | #define IOMMU_FLAGS_SUSPEND0x4 0x4 | |||
181 | ||||
/*
 * Per-hardware-unit state, shared between the Intel (DRHD) and AMD
 * (IVHD) backends; the AMD-only fields are grouped at the end, and a
 * non-NULL 'dte' marks the unit as AMD.
 */
struct iommu_softc {
	TAILQ_ENTRY(iommu_softc)link;
	struct devlist_head devices;	/* explicit device scope */
	int id;				/* unit number */
	int flags;			/* IOMMU_FLAGS_* */
	int segment;			/* PCI segment (domain) */

	struct mutex reg_lock;		/* serializes register access */

	bus_space_tag_t iot;
	bus_space_handle_t ioh;

	uint64_t cap;			/* Intel capability register */
	uint64_t ecap;			/* Intel extended capability reg */
	uint32_t gcmd;			/* cached global command bits */

	int mgaw;			/* max guest address width */
	int agaw;			/* adjusted guest address width */
	int ndoms;			/* number of domain ids */

	struct root_entry *root;	/* Intel root table */
	struct context_entry *ctx[256];	/* per-bus context tables */

	void *intr;			/* interrupt establish handle */
	struct iommu_pic pic;
	int fedata;			/* NOTE(review): presumably fault event intr data -- confirm */
	uint64_t feaddr;		/* NOTE(review): presumably fault event intr address -- confirm */
	uint64_t rtaddr;		/* programmed root table address */

	/* Queued Invalidation */
	int qi_head;
	int qi_tail;
	paddr_t qip;
	struct qi_entry *qi;

	struct domain *unity;		/* shared 1:1 passthrough domain */
	TAILQ_HEAD(,domain) domains;	/* domains owned by this unit */

	/* AMD iommu */
	struct ivhd_dte *dte;		/* device table; non-NULL => AMD */
	void *cmd_tbl;			/* command buffer */
	void *evt_tbl;			/* event log */
	paddr_t cmd_tblp;
	paddr_t evt_tblp;
};
227 | ||||
/* Nonzero when this unit was marked unusable during setup. */
static inline int iommu_bad(struct iommu_softc *sc)
{
	return (sc->flags & IOMMU_FLAGS_BAD);
}
232 | ||||
/*
 * Nonzero when translation is active.  An AMD unit (non-NULL device
 * table) is always treated as enabled; Intel checks the TE bit in
 * the cached global command value.
 */
static inline int iommu_enabled(struct iommu_softc *sc)
{
	if (sc->dte) {
		return 1;
	}
	return (sc->gcmd & GCMD_TE);
}
240 | ||||
/* Driver softc: one acpidmar device manages every IOMMU unit. */
struct acpidmar_softc {
	struct device sc_dev;

	pci_chipset_tag_t sc_pc;
	bus_space_tag_t sc_memt;
	int sc_haw;		/* NOTE(review): presumably host address width -- confirm */
	int sc_flags;
	bus_dma_tag_t sc_dmat;

	struct ivhd_dte *sc_hwdte;	/* AMD: shared hardware device table */
	paddr_t sc_hwdtep;		/* ...and its physical address */

	TAILQ_HEAD(,iommu_softc)sc_drhds;	/* hardware units */
	TAILQ_HEAD(,rmrr_softc) sc_rmrrs;	/* reserved memory regions */
	TAILQ_HEAD(,atsr_softc) sc_atsrs;	/* ATS root port entries */
};
257 | ||||
258 | int acpidmar_activate(struct device *, int); | |||
259 | int acpidmar_match(struct device *, void *, void *); | |||
260 | void acpidmar_attach(struct device *, struct device *, void *); | |||
261 | struct domain *acpidmar_pci_attach(struct acpidmar_softc *, int, int, int); | |||
262 | ||||
/* autoconf(9) attach glue for the acpidmar device. */
struct cfattach acpidmar_ca = {
	sizeof(struct acpidmar_softc), acpidmar_match, acpidmar_attach, NULL,
	acpidmar_activate
};
267 | ||||
/* autoconf(9) driver definition. */
struct cfdriver acpidmar_cd = {
	NULL, "acpidmar", DV_DULL
};
271 | ||||
272 | struct acpidmar_softc *acpidmar_sc; | |||
273 | int acpidmar_intr(void *); | |||
274 | int acpiivhd_intr(void *); | |||
275 | ||||
276 | #define DID_UNITY0x1 0x1 | |||
277 | ||||
278 | void _dumppte(struct pte_entry *, int, vaddr_t); | |||
279 | ||||
280 | struct domain *domain_create(struct iommu_softc *, int); | |||
281 | struct domain *domain_lookup(struct acpidmar_softc *, int, int); | |||
282 | ||||
283 | void domain_unload_map(struct domain *, bus_dmamap_t); | |||
284 | void domain_load_map(struct domain *, bus_dmamap_t, int, int, const char *); | |||
285 | ||||
286 | void (*domain_map_page)(struct domain *, vaddr_t, paddr_t, uint64_t); | |||
287 | void domain_map_page_amd(struct domain *, vaddr_t, paddr_t, uint64_t); | |||
288 | void domain_map_page_intel(struct domain *, vaddr_t, paddr_t, uint64_t); | |||
289 | void domain_map_pthru(struct domain *, paddr_t, paddr_t); | |||
290 | ||||
291 | void acpidmar_pci_hook(pci_chipset_tag_t, struct pci_attach_args *); | |||
292 | void acpidmar_parse_devscope(union acpidmar_entry *, int, int, | |||
293 | struct devlist_head *); | |||
294 | int acpidmar_match_devscope(struct devlist_head *, pci_chipset_tag_t, int); | |||
295 | ||||
296 | void acpidmar_init(struct acpidmar_softc *, struct acpi_dmar *); | |||
297 | void acpidmar_drhd(struct acpidmar_softc *, union acpidmar_entry *); | |||
298 | void acpidmar_rmrr(struct acpidmar_softc *, union acpidmar_entry *); | |||
299 | void acpidmar_atsr(struct acpidmar_softc *, union acpidmar_entry *); | |||
300 | void acpiivrs_init(struct acpidmar_softc *, struct acpi_ivrs *); | |||
301 | ||||
302 | void *acpidmar_intr_establish(void *, int, int (*)(void *), void *, | |||
303 | const char *); | |||
304 | ||||
305 | void iommu_write_4(struct iommu_softc *, int, uint32_t); | |||
306 | uint32_t iommu_read_4(struct iommu_softc *, int); | |||
307 | void iommu_write_8(struct iommu_softc *, int, uint64_t); | |||
308 | uint64_t iommu_read_8(struct iommu_softc *, int); | |||
309 | void iommu_showfault(struct iommu_softc *, int, | |||
310 | struct fault_entry *); | |||
311 | void iommu_showcfg(struct iommu_softc *, int); | |||
312 | ||||
313 | int iommu_init(struct acpidmar_softc *, struct iommu_softc *, | |||
314 | struct acpidmar_drhd *); | |||
315 | int iommu_enable_translation(struct iommu_softc *, int); | |||
316 | void iommu_enable_qi(struct iommu_softc *, int); | |||
317 | void iommu_flush_cache(struct iommu_softc *, void *, size_t); | |||
318 | void *iommu_alloc_page(struct iommu_softc *, paddr_t *); | |||
319 | void iommu_flush_write_buffer(struct iommu_softc *); | |||
320 | void iommu_issue_qi(struct iommu_softc *, struct qi_entry *); | |||
321 | ||||
322 | void iommu_flush_ctx(struct iommu_softc *, int, int, int, int); | |||
323 | void iommu_flush_ctx_qi(struct iommu_softc *, int, int, int, int); | |||
324 | void iommu_flush_tlb(struct iommu_softc *, int, int); | |||
325 | void iommu_flush_tlb_qi(struct iommu_softc *, int, int); | |||
326 | ||||
327 | void iommu_set_rtaddr(struct iommu_softc *, paddr_t); | |||
328 | ||||
329 | void *iommu_alloc_hwdte(struct acpidmar_softc *, size_t, paddr_t *); | |||
330 | ||||
331 | const char *dmar_bdf(int); | |||
332 | ||||
/*
 * Format a source-id as "seg:bus:dev.fun" for diagnostics.  The
 * segment is hard-coded to 0 here.  Returns a static buffer, so the
 * result is only valid until the next call (not re-entrant).
 */
const char *
dmar_bdf(int sid)
{
	static char bdf[32];

	snprintf(bdf, sizeof(bdf), "%.4x:%.2x:%.2x.%x", 0,
	    sid_bus(sid), sid_dev(sid), sid_fun(sid));

	return (bdf);
}
343 | ||||
344 | /* busdma */ | |||
345 | static int dmar_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t, | |||
346 | bus_size_t, int, bus_dmamap_t *); | |||
347 | static void dmar_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t); | |||
348 | static int dmar_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t, | |||
349 | struct proc *, int); | |||
350 | static int dmar_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t, struct mbuf *, | |||
351 | int); | |||
352 | static int dmar_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t, struct uio *, int); | |||
353 | static int dmar_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t, | |||
354 | bus_dma_segment_t *, int, bus_size_t, int); | |||
355 | static void dmar_dmamap_unload(bus_dma_tag_t, bus_dmamap_t); | |||
356 | static void dmar_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t, | |||
357 | bus_size_t, int); | |||
358 | static int dmar_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t, bus_size_t, | |||
359 | bus_dma_segment_t *, int, int *, int); | |||
360 | static void dmar_dmamem_free(bus_dma_tag_t, bus_dma_segment_t *, int); | |||
361 | static int dmar_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *, int, size_t, | |||
362 | caddr_t *, int); | |||
363 | static void dmar_dmamem_unmap(bus_dma_tag_t, caddr_t, size_t); | |||
364 | static paddr_t dmar_dmamem_mmap(bus_dma_tag_t, bus_dma_segment_t *, int, off_t, | |||
365 | int, int); | |||
366 | ||||
367 | static void dmar_dumpseg(bus_dma_tag_t, int, bus_dma_segment_t *, const char *); | |||
368 | const char *dom_bdf(struct domain *); | |||
369 | void domain_map_check(struct domain *); | |||
370 | ||||
371 | struct pte_entry *pte_lvl(struct iommu_softc *, struct pte_entry *, vaddr_t, int, uint64_t); | |||
372 | int ivhd_poll_events(struct iommu_softc *); | |||
373 | void ivhd_showreg(struct iommu_softc *); | |||
374 | void ivhd_showdte(struct iommu_softc *); | |||
375 | void ivhd_showcmd(struct iommu_softc *); | |||
376 | ||||
/*
 * Per-domain debug predicate.  The unconditional "return 0" keeps
 * debug output compiled out; delete it to honor the DOM_DEBUG flag.
 */
static inline int
debugme(struct domain *dom)
{
	return 0;
	return (dom->flag & DOM_DEBUG);
}
383 | ||||
/*
 * Ensure every device of the domain is (still) attached to its IOMMU.
 * On Intel units (no AMD device table) also mark each context entry
 * with a user tag (0xA) the first time it is seen, logging the
 * mapping once per device.
 */
void
domain_map_check(struct domain *dom)
{
	struct iommu_softc *iommu;
	struct domain_dev *dd;
	struct context_entry *ctx;
	int v;

	iommu = dom->iommu;
	TAILQ_FOREACH(dd, &dom->devices, link) {
		acpidmar_pci_attach(acpidmar_sc, iommu->segment, dd->sid, 1);

		/* AMD units have no per-bus context tables to tag. */
		if (iommu->dte)
			continue;

		/* Check if this is the first time we are mapped */
		ctx = &iommu->ctx[sid_bus(dd->sid)][sid_devfn(dd->sid)];
		v = context_user(ctx);
		if (v != 0xA) {
			printf(" map: %.4x:%.2x:%.2x.%x iommu:%d did:%.4x\n",
			    iommu->segment,
			    sid_bus(dd->sid),
			    sid_dev(dd->sid),
			    sid_fun(dd->sid),
			    iommu->id,
			    dom->did);
			context_set_user(ctx, 0xA);
		}
	}
}
414 | ||||
/*
 * Map a single page as passthrough (va == pa) - used for DRM.
 * No-op until the driver has attached (acpidmar_sc set).
 */
void
dmar_ptmap(bus_dma_tag_t tag, bus_addr_t addr)
{
	struct domain *dom = tag->_cookie;

	if (!acpidmar_sc)
		return;
	domain_map_check(dom);
	/* Identity-map one page, present + readable + writable. */
	domain_map_page(dom, addr, addr, PTE_P | PTE_R | PTE_W);
}
426 | ||||
/*
 * Map a range of pages 1:1 (identity), one 4K page at a time.
 * 'start' is assumed page-aligned; 'end' is exclusive.
 */
void
domain_map_pthru(struct domain *dom, paddr_t start, paddr_t end)
{
	domain_map_check(dom);
	while (start < end) {
		domain_map_page(dom, start, start, PTE_P | PTE_R | PTE_W);
		start += VTD_PAGE_SIZE;
	}
}
437 | ||||
/*
 * Map a single paddr to IOMMU paddr (Intel).  Walks the page-table
 * tree from the top level down to the 4K leaf, allocating missing
 * intermediate tables on the way and flushing each modified entry.
 * A zero 'flags' effectively clears the leaf entry.
 */
void
domain_map_page_intel(struct domain *dom, vaddr_t va, paddr_t pa, uint64_t flags)
{
	paddr_t paddr;
	struct pte_entry *pte, *npte;
	int lvl, idx;
	struct iommu_softc *iommu;

	iommu = dom->iommu;
	/* Insert physical address into virtual address map
	 * XXX: could we use private pmap here?
	 * essentially doing a pmap_enter(map, va, pa, prot);
	 */

	/* Only handle 4k pages for now */
	npte = dom->pte;
	/* lvl is the bit position of this level's index field in va. */
	for (lvl = iommu->agaw - VTD_STRIDE_SIZE; lvl>= VTD_LEVEL0;
	    lvl -= VTD_STRIDE_SIZE) {
		idx = (va >> lvl) & VTD_STRIDE_MASK;
		pte = &npte[idx];
		if (lvl == VTD_LEVEL0) {
			/* Level 1: Page Table - add physical address */
			pte->val = pa | flags;
			iommu_flush_cache(iommu, pte, sizeof(*pte));
			break;
		} else if (!(pte->val & PTE_P)) {
			/* Level N: Point to lower level table */
			iommu_alloc_page(iommu, &paddr);
			pte->val = paddr | PTE_P | PTE_R | PTE_W;
			iommu_flush_cache(iommu, pte, sizeof(*pte));
		}
		/* Descend into the next-level table. */
		npte = (void *)PMAP_DIRECT_MAP((pte->val & VTD_PTE_MASK));
	}
}
473 | ||||
474 | /* Map a single paddr to IOMMU paddr: AMD | |||
475 | * physical address breakdown into levels: | |||
476 | * xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx | |||
477 | * 5.55555555.44444444.43333333,33222222.22211111.1111----.-------- | |||
478 | * mode: | |||
479 | * 000 = none shift | |||
480 | * 001 = 1 [21].12 | |||
481 | * 010 = 2 [30].21 | |||
482 | * 011 = 3 [39].30 | |||
483 | * 100 = 4 [48].39 | |||
484 | * 101 = 5 [57] | |||
485 | * 110 = 6 | |||
486 | * 111 = reserved | |||
487 | */ | |||
/*
 * Descend one level of the AMD page-table tree: return the table
 * that pte[idx] points to, allocating (and flushing) a fresh page
 * table when the entry is not yet present.  'shift' selects which
 * 9-bit field of va indexes this level.
 */
struct pte_entry *
pte_lvl(struct iommu_softc *iommu, struct pte_entry *pte, vaddr_t va,
    int shift, uint64_t flags)
{
	paddr_t paddr;
	int idx;

	idx = (va >> shift) & VTD_STRIDE_MASK;
	if (!(pte[idx].val & PTE_P)) {
		/* Page Table entry is not present... create a new page entry */
		iommu_alloc_page(iommu, &paddr);
		pte[idx].val = paddr | flags;
		iommu_flush_cache(iommu, &pte[idx], sizeof(pte[idx]));
	}
	return (void *)PMAP_DIRECT_MAP((pte[idx].val & PTE_PADDR_MASK));
}
504 | ||||
505 | void | |||
506 | domain_map_page_amd(struct domain *dom, vaddr_t va, paddr_t pa, uint64_t flags) | |||
507 | { | |||
508 | struct pte_entry *pte; | |||
509 | struct iommu_softc *iommu; | |||
510 | int idx; | |||
511 | ||||
512 | iommu = dom->iommu; | |||
513 | /* Insert physical address into virtual address map | |||
514 | * XXX: could we use private pmap here? | |||
515 | * essentially doing a pmap_enter(map, va, pa, prot); | |||
516 | */ | |||
517 | ||||
518 | /* Always assume AMD levels=4 */ | |||
519 | /* 39 30 21 12 */ | |||
520 | /* ---------|---------|---------|---------|------------ */ | |||
521 | pte = dom->pte; | |||
522 | pte = pte_lvl(iommu, pte, va, 30, PTE_NXTLVL(2)(((2) & 0x7) << 9) | PTE_IR(1LL << 61) | PTE_IW(1LL << 62) | PTE_P(1L << 0)); | |||
523 | pte = pte_lvl(iommu, pte, va, 21, PTE_NXTLVL(1)(((1) & 0x7) << 9) | PTE_IR(1LL << 61) | PTE_IW(1LL << 62) | PTE_P(1L << 0)); | |||
524 | if (flags) | |||
525 | flags = PTE_P(1L << 0) | PTE_R0x00 | PTE_W(1L << 1) | PTE_IW(1LL << 62) | PTE_IR(1LL << 61) | PTE_NXTLVL(0)(((0) & 0x7) << 9); | |||
526 | ||||
527 | /* Level 1: Page Table - add physical address */ | |||
528 | idx = (va >> 12) & 0x1FF; | |||
529 | pte[idx].val = pa | flags; | |||
530 | ||||
531 | iommu_flush_cache(iommu, pte, sizeof(*pte)); | |||
532 | } | |||
533 | ||||
/*
 * Debug helper: print a dmamap's segments.  The unconditional
 * "return" keeps the dump disabled; remove it (and the debugme
 * stub's early return) to re-enable.
 */
static void
dmar_dumpseg(bus_dma_tag_t tag, int nseg, bus_dma_segment_t *segs,
    const char *lbl)
{
	struct domain *dom = tag->_cookie;
	int i;

	return;
	if (!debugme(dom))
		return;
	printf("%s: %s\n", lbl, dom_bdf(dom));
	for (i = 0; i < nseg; i++) {
		printf(" %.16llx %.8x\n",
		    (uint64_t)segs[i].ds_addr,
		    (uint32_t)segs[i].ds_len);
	}
}
551 | ||||
/*
 * Unload mapping: clear the PTEs backing every segment of a loaded
 * dmamap and return the IOVA ranges to the domain's extent map
 * (unless the domain is DOM_NOMAP, i.e. identity-mapped).
 */
void
domain_unload_map(struct domain *dom, bus_dmamap_t dmam)
{
	bus_dma_segment_t *seg;
	paddr_t base, end, idx;
	psize_t alen;
	int i;

	if (iommu_bad(dom->iommu)) {
		printf("unload map no iommu\n");
		return;
	}

	for (i = 0; i < dmam->dm_nsegs; i++) {
		seg = &dmam->dm_segs[i];

		/* Page-align the segment's address range. */
		base = trunc_page(seg->ds_addr);
		end = roundup(seg->ds_addr + seg->ds_len, VTD_PAGE_SIZE);
		alen = end - base;

		if (debugme(dom)) {
			printf(" va:%.16llx len:%x\n",
			    (uint64_t)base, (uint32_t)alen);
		}

		/* Clear PTE */
		for (idx = 0; idx < alen; idx += VTD_PAGE_SIZE)
			domain_map_page(dom, base + idx, 0, 0);

		if (dom->flag & DOM_NOMAP) {
			printf("%s: nomap %.16llx\n", dom_bdf(dom), (uint64_t)base);
			continue;
		}

		mtx_enter(&dom->exlck);
		if (extent_free(dom->iovamap, base, alen, EX_NOWAIT)) {
			panic("domain_unload_map: extent_free");
		}
		mtx_leave(&dom->exlck);
	}
}
594 | ||||
/*
 * map.segs[x].ds_addr is modified to IOMMU virtual PA.
 *
 * For each segment: allocate an IOVA range from the domain's extent
 * (unless DOM_NOMAP, which keeps the identity address), rewrite the
 * segment address, and install PTEs for the whole range.  Finishes
 * with an IOTLB flush (caching mode or forced) or a write-buffer
 * drain.  Lazily enables translation on first use.
 */
void
domain_load_map(struct domain *dom, bus_dmamap_t map, int flags, int pteflag, const char *fn)
{
	bus_dma_segment_t *seg;
	struct iommu_softc *iommu;
	paddr_t base, end, idx;
	psize_t alen;
	u_long res;
	int i;

	iommu = dom->iommu;
	if (!iommu_enabled(iommu)) {
		/* Lazy enable translation when required */
		if (iommu_enable_translation(iommu, 1)) {
			return;
		}
	}
	domain_map_check(dom);
	for (i = 0; i < map->dm_nsegs; i++) {
		seg = &map->dm_segs[i];

		/* Page-align the segment's physical range. */
		base = trunc_page(seg->ds_addr);
		end = roundup(seg->ds_addr + seg->ds_len, VTD_PAGE_SIZE);
		alen = end - base;
		res = base;	/* identity address unless an IOVA is allocated */

		if (dom->flag & DOM_NOMAP) {
			goto nomap;
		}

		/* Allocate DMA Virtual Address */
		mtx_enter(&dom->exlck);
		if (extent_alloc(dom->iovamap, alen, VTD_PAGE_SIZE, 0,
		    map->_dm_boundary, EX_NOWAIT, &res)) {
			panic("domain_load_map: extent_alloc");
		}
		if (res == -1) {
			panic("got -1 address");
		}
		mtx_leave(&dom->exlck);

		/* Reassign DMA address */
		seg->ds_addr = res | (seg->ds_addr & VTD_PAGE_MASK);
nomap:
		if (debugme(dom)) {
			printf(" LOADMAP: %.16llx %x => %.16llx\n",
			    (uint64_t)seg->ds_addr, (uint32_t)seg->ds_len,
			    (uint64_t)res);
		}
		for (idx = 0; idx < alen; idx += VTD_PAGE_SIZE) {
			domain_map_page(dom, res + idx, base + idx,
			    PTE_P | pteflag);
		}
	}
	/* Caching mode (or forced) needs an IOTLB flush; else drain writes. */
	if ((iommu->cap & CAP_CM) || acpidmar_force_cm) {
		iommu_flush_tlb(iommu, IOTLB_DOMAIN, dom->did);
	} else {
		iommu_flush_write_buffer(iommu);
	}
}
656 | ||||
657 | const char * | |||
658 | dom_bdf(struct domain *dom) | |||
659 | { | |||
660 | struct domain_dev *dd; | |||
661 | static char mmm[48]; | |||
662 | ||||
663 | dd = TAILQ_FIRST(&dom->devices)((&dom->devices)->tqh_first); | |||
664 | snprintf(mmm, sizeof(mmm), "%s iommu:%d did:%.4x%s", | |||
665 | dmar_bdf(dd->sid), dom->iommu->id, dom->did, | |||
666 | dom->did == DID_UNITY0x1 ? " [unity]" : ""); | |||
667 | return (mmm); | |||
668 | } | |||
669 | ||||
/* Bus DMA Map functions */

/*
 * bus_dmamap_create(9) wrapper: create via the system implementation,
 * then dump the (empty) segment list when debugging is enabled.
 */
static int
dmar_dmamap_create(bus_dma_tag_t tag, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	int rc;

	rc = _bus_dmamap_create(tag, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (!rc) {
		dmar_dumpseg(tag, (*dmamp)->dm_nsegs, (*dmamp)->dm_segs,
		    __FUNCTION__);
	}
	return (rc);
}
685 | ||||
/* bus_dmamap_destroy(9) wrapper: dump segments, then destroy. */
static void
dmar_dmamap_destroy(bus_dma_tag_t tag, bus_dmamap_t dmam)
{
	dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs, __FUNCTION__);
	_bus_dmamap_destroy(tag, dmam);
}
692 | ||||
/*
 * bus_dmamap_load(9) wrapper: load via the system implementation,
 * then rewrite segment addresses through the domain's IOMMU page
 * table (read + write permission).
 */
static int
dmar_dmamap_load(bus_dma_tag_t tag, bus_dmamap_t dmam, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct domain *dom = tag->_cookie;
	int rc;

	rc = _bus_dmamap_load(tag, dmam, buf, buflen, p, flags);
	if (!rc) {
		dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs,
		    __FUNCTION__);
		domain_load_map(dom, dmam, flags, PTE_R|PTE_W, __FUNCTION__);
		dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs,
		    __FUNCTION__);
	}
	return (rc);
}
710 | ||||
/* bus_dmamap_load_mbuf(9) wrapper: load, then remap through the IOMMU. */
static int
dmar_dmamap_load_mbuf(bus_dma_tag_t tag, bus_dmamap_t dmam, struct mbuf *chain,
    int flags)
{
	struct domain *dom = tag->_cookie;
	int rc;

	rc = _bus_dmamap_load_mbuf(tag, dmam, chain, flags);
	if (!rc) {
		dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs,
		    __FUNCTION__);
		domain_load_map(dom, dmam, flags, PTE_R|PTE_W,__FUNCTION__);
		dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs,
		    __FUNCTION__);
	}
	return (rc);
}
728 | ||||
/* bus_dmamap_load_uio(9) wrapper: load, then remap through the IOMMU. */
static int
dmar_dmamap_load_uio(bus_dma_tag_t tag, bus_dmamap_t dmam, struct uio *uio,
    int flags)
{
	struct domain *dom = tag->_cookie;
	int rc;

	rc = _bus_dmamap_load_uio(tag, dmam, uio, flags);
	if (!rc) {
		dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs,
		    __FUNCTION__);
		domain_load_map(dom, dmam, flags, PTE_R|PTE_W, __FUNCTION__);
		dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs,
		    __FUNCTION__);
	}
	return (rc);
}
746 | ||||
/* bus_dmamap_load_raw(9) wrapper: load, then remap through the IOMMU. */
static int
dmar_dmamap_load_raw(bus_dma_tag_t tag, bus_dmamap_t dmam,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	struct domain *dom = tag->_cookie;
	int rc;

	rc = _bus_dmamap_load_raw(tag, dmam, segs, nsegs, size, flags);
	if (!rc) {
		dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs,
		    __FUNCTION__);
		domain_load_map(dom, dmam, flags, PTE_R|PTE_W, __FUNCTION__);
		dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs,
		    __FUNCTION__);
	}
	return (rc);
}
764 | ||||
/*
 * bus_dmamap_unload(9) wrapper: tear down the IOMMU mappings before
 * handing the map back to the system implementation.
 */
static void
dmar_dmamap_unload(bus_dma_tag_t tag, bus_dmamap_t dmam)
{
	struct domain *dom = tag->_cookie;

	dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs, __FUNCTION__);
	domain_unload_map(dom, dmam);
	_bus_dmamap_unload(tag, dmam);
}
774 | ||||
/*
 * bus_dmamap_sync(9) wrapper.  The #if 0 block sketches adjusting
 * PTE permissions per sync direction but is disabled; only the
 * system sync is performed.
 */
static void
dmar_dmamap_sync(bus_dma_tag_t tag, bus_dmamap_t dmam, bus_addr_t offset,
    bus_size_t len, int ops)
{
#if 0
	struct domain *dom = tag->_cookie;
	int flag;

	flag = PTE_P;
	if (ops == BUS_DMASYNC_PREREAD) {
		/* make readable */
		flag |= PTE_R;
	}
	else if (ops == BUS_DMASYNC_PREWRITE) {
		/* make writeable */
		flag |= PTE_W;
	}
	dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs, __FUNCTION__);
#endif
	_bus_dmamap_sync(tag, dmam, offset, len, ops);
}
796 | ||||
/* bus_dmamem_alloc(9) wrapper: allocate, then dump the segments. */
static int
dmar_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	int rc;

	rc = _bus_dmamem_alloc(tag, size, alignment, boundary, segs, nsegs,
	    rsegs, flags);
	if (!rc) {
		dmar_dumpseg(tag, *rsegs, segs, __FUNCTION__);
	}
	return (rc);
}
811 | ||||
/* bus_dmamem_free(9) wrapper: dump the segments, then free. */
static void
dmar_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs)
{
	dmar_dumpseg(tag, nsegs, segs, __FUNCTION__);
	_bus_dmamem_free(tag, segs, nsegs);
}
818 | ||||
/* bus_dmamem_map(9) wrapper: dump the segments, then map. */
static int
dmar_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	dmar_dumpseg(tag, nsegs, segs, __FUNCTION__);
	return (_bus_dmamem_map(tag, segs, nsegs, size, kvap, flags));
}
826 | ||||
/* bus_dmamem_unmap(9) wrapper with optional debug logging. */
static void
dmar_dmamem_unmap(bus_dma_tag_t tag, caddr_t kva, size_t size)
{
	struct domain *dom = tag->_cookie;

	if (debugme(dom)) {
		printf("dmamap_unmap: %s\n", dom_bdf(dom));
	}
	_bus_dmamem_unmap(tag, kva, size);
}
837 | ||||
838 | static paddr_t | |||
839 | dmar_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs, | |||
840 | off_t off, int prot, int flags) | |||
841 | { | |||
842 | dmar_dumpseg(tag, nsegs, segs, __FUNCTION__); | |||
843 | return (_bus_dmamem_mmap(tag, segs, nsegs, off, prot, flags)); | |||
844 | } | |||
845 | ||||
846 | /*=================================== | |||
847 | * IOMMU code | |||
848 | *===================================*/ | |||
849 | ||||
850 | /* Intel: Set Context Root Address */ | |||
851 | void | |||
852 | iommu_set_rtaddr(struct iommu_softc *iommu, paddr_t paddr) | |||
853 | { | |||
854 | int i, sts; | |||
855 | ||||
856 | mtx_enter(&iommu->reg_lock); | |||
857 | iommu_write_8(iommu, DMAR_RTADDR_REG0x20, paddr); | |||
858 | iommu_write_4(iommu, DMAR_GCMD_REG0x18, iommu->gcmd | GCMD_SRTP(1LL << 30)); | |||
859 | for (i = 0; i < 5; i++) { | |||
860 | sts = iommu_read_4(iommu, DMAR_GSTS_REG0x1c); | |||
861 | if (sts & GSTS_RTPS(1LL << 30)) | |||
862 | break; | |||
863 | } | |||
864 | mtx_leave(&iommu->reg_lock); | |||
865 | ||||
866 | if (i == 5) { | |||
867 | printf("set_rtaddr fails\n"); | |||
868 | } | |||
869 | } | |||
870 | ||||
871 | /* Allocate contiguous memory (1Mb) for the Device Table Entries */ | |||
872 | void * | |||
873 | iommu_alloc_hwdte(struct acpidmar_softc *sc, size_t size, paddr_t *paddr) | |||
874 | { | |||
875 | caddr_t vaddr; | |||
876 | bus_dmamap_t map; | |||
877 | bus_dma_segment_t seg; | |||
878 | bus_dma_tag_t dmat = sc->sc_dmat; | |||
879 | int rc, nsegs; | |||
880 | ||||
881 | rc = _bus_dmamap_create(dmat, size, 1, size, 0, | |||
882 | BUS_DMA_NOWAIT0x0001, &map); | |||
883 | if (rc != 0) { | |||
884 | printf("hwdte_create fails\n"); | |||
885 | return NULL((void *)0); | |||
886 | } | |||
887 | rc = _bus_dmamem_alloc(dmat, size, 4, 0, &seg, 1, | |||
888 | &nsegs, BUS_DMA_NOWAIT0x0001 | BUS_DMA_ZERO0x1000); | |||
889 | if (rc != 0) { | |||
890 | printf("hwdte alloc fails\n"); | |||
891 | return NULL((void *)0); | |||
892 | } | |||
893 | rc = _bus_dmamem_map(dmat, &seg, 1, size, &vaddr, | |||
894 | BUS_DMA_NOWAIT0x0001 | BUS_DMA_COHERENT0x0004); | |||
895 | if (rc != 0) { | |||
896 | printf("hwdte map fails\n"); | |||
897 | return NULL((void *)0); | |||
898 | } | |||
899 | rc = _bus_dmamap_load_raw(dmat, map, &seg, 1, size, BUS_DMA_NOWAIT0x0001); | |||
900 | if (rc != 0) { | |||
901 | printf("hwdte load raw fails\n"); | |||
902 | return NULL((void *)0); | |||
903 | } | |||
904 | *paddr = map->dm_segs[0].ds_addr; | |||
905 | return vaddr; | |||
906 | } | |||
907 | ||||
/* COMMON: Allocate a new memory page */
/*
 * Allocate one zeroed kernel page for IOMMU structures, returning its
 * VA and storing the physical address in *paddr.  Panics if no page is
 * available, so callers may rely on a non-NULL return.
 * (The iommu argument is currently unused.)
 */
void *
iommu_alloc_page(struct iommu_softc *iommu, paddr_t *paddr)
{
	void *va;

	*paddr = 0;
	va = km_alloc(VTD_PAGE_SIZE, &kv_page, &kp_zero, &kd_nowait);
	if (va == NULL) {
		panic("can't allocate page");
	}
	/* Resolve the kernel VA to the physical address the unit needs. */
	pmap_extract(pmap_kernel(), (vaddr_t)va, paddr);
	return (va);
}
922 | ||||
923 | ||||
/* Intel: Issue command via queued invalidation */
/*
 * XXX: queued-invalidation submission is not implemented yet; the
 * sketch below is disabled with #if 0, so this is currently a no-op
 * and callers building qi descriptors get no hardware effect.
 */
void
iommu_issue_qi(struct iommu_softc *iommu, struct qi_entry *qi)
{
#if 0
	struct qi_entry *pi, *pw;

	idx = iommu->qi_head;
	pi = &iommu->qi[idx];
	pw = &iommu->qi[(idx+1) % MAXQ];
	iommu->qi_head = (idx+2) % MAXQ;

	memcpy(pw, &qi, sizeof(qi));
	issue command;
	while (pw->xxx)
		;
#endif
}
942 | ||||
943 | /* Intel: Flush TLB entries, Queued Invalidation mode */ | |||
944 | void | |||
945 | iommu_flush_tlb_qi(struct iommu_softc *iommu, int mode, int did) | |||
946 | { | |||
947 | struct qi_entry qi; | |||
948 | ||||
949 | /* Use queued invalidation */ | |||
950 | qi.hi = 0; | |||
951 | switch (mode) { | |||
| ||||
952 | case IOTLB_GLOBAL: | |||
953 | qi.lo = QI_IOTLB0x2 | QI_IOTLB_IG_GLOBAL(1 << 4); | |||
954 | break; | |||
955 | case IOTLB_DOMAIN: | |||
956 | qi.lo = QI_IOTLB0x2 | QI_IOTLB_IG_DOMAIN(2 << 4) | | |||
957 | QI_IOTLB_DID(did)(((uint64_t)(did) << 16)); | |||
958 | break; | |||
959 | case IOTLB_PAGE: | |||
960 | qi.lo = QI_IOTLB0x2 | QI_IOTLB_IG_PAGE(3 << 4) | QI_IOTLB_DID(did)(((uint64_t)(did) << 16)); | |||
961 | qi.hi = 0; | |||
962 | break; | |||
963 | } | |||
964 | if (iommu->cap & CAP_DRD(1LL << 55)) | |||
965 | qi.lo |= QI_IOTLB_DR(1LL << 6); | |||
966 | if (iommu->cap & CAP_DWD(1LL << 54)) | |||
967 | qi.lo |= QI_IOTLB_DW(1LL << 5); | |||
| ||||
968 | iommu_issue_qi(iommu, &qi); | |||
969 | } | |||
970 | ||||
971 | /* Intel: Flush Context entries, Queued Invalidation mode */ | |||
972 | void | |||
973 | iommu_flush_ctx_qi(struct iommu_softc *iommu, int mode, int did, | |||
974 | int sid, int fm) | |||
975 | { | |||
976 | struct qi_entry qi; | |||
977 | ||||
978 | /* Use queued invalidation */ | |||
979 | qi.hi = 0; | |||
980 | switch (mode) { | |||
981 | case CTX_GLOBAL: | |||
982 | qi.lo = QI_CTX0x1 | QI_CTX_IG_GLOBAL(CTX_GLOBAL << 4); | |||
983 | break; | |||
984 | case CTX_DOMAIN: | |||
985 | qi.lo = QI_CTX0x1 | QI_CTX_IG_DOMAIN(CTX_DOMAIN << 4) | QI_CTX_DID(did)(((uint64_t)(did) << 16)); | |||
986 | break; | |||
987 | case CTX_DEVICE: | |||
988 | qi.lo = QI_CTX0x1 | QI_CTX_IG_DEVICE(CTX_DEVICE << 4) | QI_CTX_DID(did)(((uint64_t)(did) << 16)) | | |||
989 | QI_CTX_SID(sid)(((uint64_t)(sid) << 32)) | QI_CTX_FM(fm)(((uint64_t)(fm) << 48)); | |||
990 | break; | |||
991 | } | |||
992 | iommu_issue_qi(iommu, &qi); | |||
993 | } | |||
994 | ||||
/* Intel: Flush write buffers */
/*
 * Drain the IOMMU's internal write buffer.  No-op for AMD (dte) units
 * and for Intel units that do not advertise CAP_RWBF.  Triggers the
 * flush via GCMD and polls GSTS up to 5 times, 10ms apart, logging on
 * timeout.
 */
void
iommu_flush_write_buffer(struct iommu_softc *iommu)
{
	int i, sts;

	if (iommu->dte)		/* AMD unit: not applicable */
		return;
	if (!(iommu->cap & CAP_RWBF))	/* hardware does not need it */
		return;
	DPRINTF(1,"writebuf\n");
	iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd | GCMD_WBF);
	for (i = 0; i < 5; i++) {
		sts = iommu_read_4(iommu, DMAR_GSTS_REG);
		if (sts & GSTS_WBFS)
			break;
		delay(10000);
	}
	/* i only reaches 5 when the status bit never appeared. */
	if (i == 5) {
		printf("write buffer flush fails\n");
	}
}
1017 | ||||
1018 | void | |||
1019 | iommu_flush_cache(struct iommu_softc *iommu, void *addr, size_t size) | |||
1020 | { | |||
1021 | if (iommu->dte) { | |||
1022 | pmap_flush_cache((vaddr_t)addr, size); | |||
1023 | return; | |||
1024 | } | |||
1025 | if (!(iommu->ecap & ECAP_C(1LL << 0))) | |||
1026 | pmap_flush_cache((vaddr_t)addr, size); | |||
1027 | } | |||
1028 | ||||
/*
 * Intel: Flush IOMMU TLB Entries
 * Flushing can occur globally, per domain or per page
 */
void
iommu_flush_tlb(struct iommu_softc *iommu, int mode, int did)
{
	int n;
	uint64_t val;

	/* Call AMD */
	if (iommu->dte) {
		ivhd_invalidate_domain(iommu, did);
		return;
	}
	/* Build the IOTLB invalidate command for the requested scope. */
	val = IOTLB_IVT;
	switch (mode) {
	case IOTLB_GLOBAL:
		val |= IIG_GLOBAL;
		break;
	case IOTLB_DOMAIN:
		val |= IIG_DOMAIN | IOTLB_DID(did);
		break;
	case IOTLB_PAGE:
		val |= IIG_PAGE | IOTLB_DID(did);
		break;
	}

	/* Check for Read/Write Drain */
	if (iommu->cap & CAP_DRD)
		val |= IOTLB_DR;
	if (iommu->cap & CAP_DWD)
		val |= IOTLB_DW;

	mtx_enter(&iommu->reg_lock);

	/* Issue the command, then poll (bounded) until the IVT bit clears. */
	iommu_write_8(iommu, DMAR_IOTLB_REG(iommu), val);
	n = 0;
	do {
		val = iommu_read_8(iommu, DMAR_IOTLB_REG(iommu));
	} while (n++ < 5 && val & IOTLB_IVT);

	mtx_leave(&iommu->reg_lock);
}
1073 | ||||
/* Intel: Flush IOMMU settings
 * Flushes can occur globally, per domain, or per device
 */
/*
 * Invalidate context-cache entries via the CCMD register.  No-op for
 * AMD (dte) units.  The command is issued and completion is polled
 * (bounded) under the register lock.
 */
void
iommu_flush_ctx(struct iommu_softc *iommu, int mode, int did, int sid, int fm)
{
	uint64_t val;
	int n;

	if (iommu->dte)
		return;
	/* Build the context invalidate command for the requested scope. */
	val = CCMD_ICC;
	switch (mode) {
	case CTX_GLOBAL:
		val |= CIG_GLOBAL;
		break;
	case CTX_DOMAIN:
		val |= CIG_DOMAIN | CCMD_DID(did);
		break;
	case CTX_DEVICE:
		val |= CIG_DEVICE | CCMD_DID(did) |
		    CCMD_SID(sid) | CCMD_FM(fm);
		break;
	}

	mtx_enter(&iommu->reg_lock);

	/* Issue, then poll until the in-progress (ICC) bit clears. */
	n = 0;
	iommu_write_8(iommu, DMAR_CCMD_REG, val);
	do {
		val = iommu_read_8(iommu, DMAR_CCMD_REG);
	} while (n++ < 5 && val & CCMD_ICC);

	mtx_leave(&iommu->reg_lock);
}
1109 | ||||
/* Intel: Enable Queued Invalidation */
/*
 * Turn the queued-invalidation engine on or off via GCMD, polling the
 * GSTS status bit (bounded) for the transition.  Silently returns on
 * units without ECAP_QI support.
 */
void
iommu_enable_qi(struct iommu_softc *iommu, int enable)
{
	int n = 0;
	int sts;

	if (!(iommu->ecap & ECAP_QI))
		return;

	if (enable) {
		iommu->gcmd |= GCMD_QIE;

		mtx_enter(&iommu->reg_lock);

		/* Wait (bounded) for QIES to become set. */
		iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd);
		do {
			sts = iommu_read_4(iommu, DMAR_GSTS_REG);
		} while (n++ < 5 && !(sts & GSTS_QIES));

		mtx_leave(&iommu->reg_lock);

		DPRINTF(1,"set.qie: %d\n", n);
	} else {
		iommu->gcmd &= ~GCMD_QIE;

		mtx_enter(&iommu->reg_lock);

		/* Wait (bounded) for QIES to clear. */
		iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd);
		do {
			sts = iommu_read_4(iommu, DMAR_GSTS_REG);
		} while (n++ < 5 && sts & GSTS_QIES);

		mtx_leave(&iommu->reg_lock);

		DPRINTF(1,"clr.qie: %d\n", n);
	}
}
1148 | ||||
/* Intel: Enable IOMMU translation */
/*
 * Enable or disable DMA remapping via the GCMD translation-enable bit.
 * On enable, a failed (bounded) poll for GSTS_TES marks the unit BAD,
 * re-disables it and returns 1; on success the context cache and TLB
 * are flushed globally.  Returns 0 otherwise.  No-op (0) for AMD
 * (dte) units.
 *
 * NOTE(review): the "xxx"/"yyy" printfs below look like leftover
 * debug output — candidates for removal.
 */
int
iommu_enable_translation(struct iommu_softc *iommu, int enable)
{
	uint32_t sts;
	uint64_t reg;
	int n = 0;

	if (iommu->dte)
		return (0);
	reg = 0;
	if (enable) {
		DPRINTF(0,"enable iommu %d\n", iommu->id);
		iommu_showcfg(iommu, -1);

		iommu->gcmd |= GCMD_TE;

		/* Enable translation */
		printf(" pre tes: ");

		/* Issue the command and poll (bounded, with backoff). */
		mtx_enter(&iommu->reg_lock);
		iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd);
		printf("xxx");
		do {
			printf("yyy");
			sts = iommu_read_4(iommu, DMAR_GSTS_REG);
			delay(n * 10000);
		} while (n++ < 5 && !(sts & GSTS_TES));
		mtx_leave(&iommu->reg_lock);

		printf(" set.tes: %d\n", n);

		if (n >= 5) {
			printf("error.. unable to initialize iommu %d\n",
			    iommu->id);
			iommu->flags |= IOMMU_FLAGS_BAD;

			/* Disable IOMMU */
			iommu->gcmd &= ~GCMD_TE;
			mtx_enter(&iommu->reg_lock);
			iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd);
			mtx_leave(&iommu->reg_lock);

			return (1);
		}

		/* Start from a clean slate: flush everything. */
		iommu_flush_ctx(iommu, CTX_GLOBAL, 0, 0, 0);
		iommu_flush_tlb(iommu, IOTLB_GLOBAL, 0);
	} else {
		iommu->gcmd &= ~GCMD_TE;

		mtx_enter(&iommu->reg_lock);

		/* Wait (bounded) for the enable bit to clear. */
		iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd);
		do {
			sts = iommu_read_4(iommu, DMAR_GSTS_REG);
		} while (n++ < 5 && sts & GSTS_TES);
		mtx_leave(&iommu->reg_lock);

		printf(" clr.tes: %d\n", n);
	}

	return (0);
}
1213 | ||||
/* Intel: Initialize IOMMU */
/*
 * Map a DRHD unit's register window and bring the softc up: read and
 * report capabilities, hook the fault interrupt, allocate and program
 * the root table, compute supported guest address widths, and cache
 * the unit's current enable bits.  Returns 0 on success, -1 if the
 * register window cannot be mapped.
 */
int
iommu_init(struct acpidmar_softc *sc, struct iommu_softc *iommu,
    struct acpidmar_drhd *dh)
{
	static int niommu;	/* running unit counter for iommu->id */
	int len = VTD_PAGE_SIZE;
	int i, gaw;
	uint32_t sts;
	paddr_t paddr;

	if (_bus_space_map(sc->sc_memt, dh->address, len, 0, &iommu->ioh) != 0) {
		return (-1);
	}

	TAILQ_INIT(&iommu->domains);
	iommu->id = ++niommu;
	iommu->flags = dh->flags;
	iommu->segment = dh->segment;
	iommu->iot = sc->sc_memt;

	iommu->cap = iommu_read_8(iommu, DMAR_CAP_REG);
	iommu->ecap = iommu_read_8(iommu, DMAR_ECAP_REG);
	iommu->ndoms = cap_nd(iommu->cap);

	/* Print Capabilities & Extended Capabilities */
	DPRINTF(0, "  caps: %s%s%s%s%s%s%s%s%s%s%s\n",
	    iommu->cap & CAP_AFL ? "afl " : "",		/* adv fault */
	    iommu->cap & CAP_RWBF ? "rwbf " : "",	/* write-buffer flush */
	    iommu->cap & CAP_PLMR ? "plmr " : "",	/* protected lo region */
	    iommu->cap & CAP_PHMR ? "phmr " : "",	/* protected hi region */
	    iommu->cap & CAP_CM ? "cm " : "",		/* caching mode */
	    iommu->cap & CAP_ZLR ? "zlr " : "",		/* zero-length read */
	    iommu->cap & CAP_PSI ? "psi " : "",		/* page invalidate */
	    iommu->cap & CAP_DWD ? "dwd " : "",		/* write drain */
	    iommu->cap & CAP_DRD ? "drd " : "",		/* read drain */
	    iommu->cap & CAP_FL1GP ? "Gb " : "",	/* 1Gb pages */
	    iommu->cap & CAP_PI ? "pi " : "");		/* posted interrupts */
	DPRINTF(0, "  ecap: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
	    iommu->ecap & ECAP_C ? "c " : "",		/* coherent */
	    iommu->ecap & ECAP_QI ? "qi " : "",		/* queued invalidate */
	    iommu->ecap & ECAP_DT ? "dt " : "",		/* device iotlb */
	    iommu->ecap & ECAP_IR ? "ir " : "",		/* intr remap */
	    iommu->ecap & ECAP_EIM ? "eim " : "",	/* x2apic */
	    iommu->ecap & ECAP_PT ? "pt " : "",		/* passthrough */
	    iommu->ecap & ECAP_SC ? "sc " : "",		/* snoop control */
	    iommu->ecap & ECAP_ECS ? "ecs " : "",	/* extended context */
	    iommu->ecap & ECAP_MTS ? "mts " : "",	/* memory type */
	    iommu->ecap & ECAP_NEST ? "nest " : "",	/* nested translations */
	    iommu->ecap & ECAP_DIS ? "dis " : "",	/* deferred invalidation */
	    iommu->ecap & ECAP_PASID ? "pas " : "",	/* pasid */
	    iommu->ecap & ECAP_PRS ? "prs " : "",	/* page request */
	    iommu->ecap & ECAP_ERS ? "ers " : "",	/* execute request */
	    iommu->ecap & ECAP_SRS ? "srs " : "",	/* supervisor request */
	    iommu->ecap & ECAP_NWFS ? "nwfs " : "",	/* no write flag */
	    iommu->ecap & ECAP_EAFS ? "eafs " : "");	/* extended accessed flag */

	mtx_init(&iommu->reg_lock, IPL_HIGH);

	/* Clear Interrupt Masking */
	iommu_write_4(iommu, DMAR_FSTS_REG, FSTS_PFO | FSTS_PPF);

	iommu->intr = acpidmar_intr_establish(iommu, IPL_HIGH,
	    acpidmar_intr, iommu, "dmarintr");

	/* Enable interrupts */
	sts = iommu_read_4(iommu, DMAR_FECTL_REG);
	iommu_write_4(iommu, DMAR_FECTL_REG, sts & ~FECTL_IM);

	/* Allocate root pointer */
	iommu->root = iommu_alloc_page(iommu, &paddr);
	DPRINTF(0, "Allocated root pointer: pa:%.16llx va:%p\n",
	    (uint64_t)paddr, iommu->root);
	iommu->rtaddr = paddr;
	iommu_flush_write_buffer(iommu);
	iommu_set_rtaddr(iommu, paddr);

#if 0
	if (iommu->ecap & ECAP_QI) {
		/* Queued Invalidation support */
		iommu->qi = iommu_alloc_page(iommu, &iommu->qip);
		iommu_write_8(iommu, DMAR_IQT_REG, 0);
		iommu_write_8(iommu, DMAR_IQA_REG, iommu->qip | IQA_QS_256);
	}
	if (iommu->ecap & ECAP_IR) {
		/* Interrupt remapping support */
		iommu_write_8(iommu, DMAR_IRTA_REG, 0);
	}
#endif

	/* Calculate guest address width and supported guest widths */
	gaw = -1;
	iommu->mgaw = cap_mgaw(iommu->cap);
	DPRINTF(0, "gaw: %d { ", iommu->mgaw);
	/* iommu->agaw ends up as the widest supported AGAW. */
	for (i = 0; i < 5; i++) {
		if (cap_sagaw(iommu->cap) & (1L << i)) {
			gaw = VTD_LEVELTOAW(i);
			DPRINTF(0, "%d ", gaw);
			iommu->agaw = gaw;
		}
	}
	DPRINTF(0, "}\n");

	/* Cache current status register bits */
	sts = iommu_read_4(iommu, DMAR_GSTS_REG);
	if (sts & GSTS_TES)
		iommu->gcmd |= GCMD_TE;
	if (sts & GSTS_QIES)
		iommu->gcmd |= GCMD_QIE;
	if (sts & GSTS_IRES)
		iommu->gcmd |= GCMD_IRE;
	DPRINTF(0, "gcmd: %x preset\n", iommu->gcmd);
	/* Drain any pending fault records before we start. */
	acpidmar_intr(iommu);
	return (0);
}
1329 | ||||
1330 | /* Read/Write IOMMU register */ | |||
1331 | uint32_t | |||
1332 | iommu_read_4(struct iommu_softc *iommu, int reg) | |||
1333 | { | |||
1334 | uint32_t v; | |||
1335 | ||||
1336 | v = bus_space_read_4(iommu->iot, iommu->ioh, reg)((iommu->iot)->read_4((iommu->ioh), (reg))); | |||
1337 | return (v); | |||
1338 | } | |||
1339 | ||||
1340 | ||||
1341 | void | |||
1342 | iommu_write_4(struct iommu_softc *iommu, int reg, uint32_t v) | |||
1343 | { | |||
1344 | bus_space_write_4(iommu->iot, iommu->ioh, reg, (uint32_t)v)((iommu->iot)->write_4((iommu->ioh), (reg), ((uint32_t )v))); | |||
1345 | } | |||
1346 | ||||
1347 | uint64_t | |||
1348 | iommu_read_8(struct iommu_softc *iommu, int reg) | |||
1349 | { | |||
1350 | uint64_t v; | |||
1351 | ||||
1352 | v = bus_space_read_8(iommu->iot, iommu->ioh, reg)((iommu->iot)->read_8((iommu->ioh), (reg))); | |||
1353 | return (v); | |||
1354 | } | |||
1355 | ||||
1356 | void | |||
1357 | iommu_write_8(struct iommu_softc *iommu, int reg, uint64_t v) | |||
1358 | { | |||
1359 | bus_space_write_8(iommu->iot, iommu->ioh, reg, v)((iommu->iot)->write_8((iommu->ioh), (reg), (v))); | |||
1360 | } | |||
1361 | ||||
/* Check if a device is within a device scope */
/*
 * Walk a DRHD/RMRR device-scope list looking for the device named by
 * sid.  Returns DMAR_ENDPOINT on an exact bus/dev/fun match,
 * DMAR_BRIDGE when sid's bus sits behind a listed PCI bridge, or 0
 * when the list does not cover the device.
 */
int
acpidmar_match_devscope(struct devlist_head *devlist, pci_chipset_tag_t pc,
    int sid)
{
	struct dmar_devlist *ds;
	int sub, sec, i;
	int bus, dev, fun, sbus;
	pcireg_t reg;
	pcitag_t tag;

	sbus = sid_bus(sid);
	TAILQ_FOREACH(ds, devlist, link) {
		bus = ds->bus;
		dev = ds->dp[0].device;
		fun = ds->dp[0].function;
		/* Walk PCI bridges in path */
		/* Each path element descends one bridge; read the bridge's
		 * secondary bus number to follow the chain. */
		for (i = 1; i < ds->ndp; i++) {
			tag = pci_make_tag(pc, bus, dev, fun);
			reg = pci_conf_read(pc, tag, PPB_REG_BUSINFO);
			bus = PPB_BUSINFO_SECONDARY(reg);
			dev = ds->dp[i].device;
			fun = ds->dp[i].function;
		}

		/* Check for device exact match */
		if (sid == mksid(bus, dev, fun)) {
			return DMAR_ENDPOINT;
		}

		/* Check for device subtree match */
		/* A bridge entry covers every bus in its secondary..
		 * subordinate range. */
		if (ds->type == DMAR_BRIDGE) {
			tag = pci_make_tag(pc, bus, dev, fun);
			reg = pci_conf_read(pc, tag, PPB_REG_BUSINFO);
			sec = PPB_BUSINFO_SECONDARY(reg);
			sub = PPB_BUSINFO_SUBORDINATE(reg);
			if (sec <= sbus && sbus <= sub) {
				return DMAR_BRIDGE;
			}
		}
	}

	return (0);
}
1406 | ||||
/*
 * Create a new translation domain with id 'did' on the given unit:
 * allocates the top-level page table, installs the dmar_* bus_dma
 * wrappers, and sets up the IOVA extent map (first 16M and the MSI
 * window reserved).  Always returns a valid domain (allocations wait).
 */
struct domain *
domain_create(struct iommu_softc *iommu, int did)
{
	struct domain *dom;
	int gaw;

	DPRINTF(0, "iommu%d: create domain: %.4x\n", iommu->id, did);
	dom = malloc(sizeof(*dom), M_DEVBUF, M_ZERO | M_WAITOK);
	dom->did = did;
	dom->iommu = iommu;
	dom->pte = iommu_alloc_page(iommu, &dom->ptep);
	TAILQ_INIT(&dom->devices);

	/* Setup DMA */
	dom->dmat._cookie = dom;
	dom->dmat._dmamap_create = dmar_dmamap_create;		/* nop */
	dom->dmat._dmamap_destroy = dmar_dmamap_destroy;	/* nop */
	dom->dmat._dmamap_load = dmar_dmamap_load;		/* lm */
	dom->dmat._dmamap_load_mbuf = dmar_dmamap_load_mbuf;	/* lm */
	dom->dmat._dmamap_load_uio = dmar_dmamap_load_uio;	/* lm */
	dom->dmat._dmamap_load_raw = dmar_dmamap_load_raw;	/* lm */
	dom->dmat._dmamap_unload = dmar_dmamap_unload;		/* um */
	dom->dmat._dmamap_sync = dmar_dmamap_sync;		/* lm */
	dom->dmat._dmamem_alloc = dmar_dmamem_alloc;		/* nop */
	dom->dmat._dmamem_free = dmar_dmamem_free;		/* nop */
	dom->dmat._dmamem_map = dmar_dmamem_map;		/* nop */
	dom->dmat._dmamem_unmap = dmar_dmamem_unmap;		/* nop */
	dom->dmat._dmamem_mmap = dmar_dmamem_mmap;

	snprintf(dom->exname, sizeof(dom->exname), "did:%x.%.4x",
	    iommu->id, dom->did);

	/* Setup IOMMU address map */
	/* IOVA space is bounded by the smaller of the unit's adjusted
	 * and maximum guest address widths. */
	gaw = min(iommu->agaw, iommu->mgaw);
	dom->iovamap = extent_create(dom->exname, 0, (1LL << gaw)-1,
	    M_DEVBUF, NULL, 0, EX_WAITOK | EX_NOCOALESCE);

	/* Reserve the first 16M */
	extent_alloc_region(dom->iovamap, 0, 16*1024*1024, EX_WAITOK);

	/* Zero out MSI Interrupt region */
	extent_alloc_region(dom->iovamap, MSI_BASE_ADDRESS, MSI_BASE_SIZE,
	    EX_WAITOK);
	mtx_init(&dom->exlck, IPL_HIGH);

	TAILQ_INSERT_TAIL(&iommu->domains, dom, link);

	return dom;
}
1456 | ||||
1457 | void | |||
1458 | domain_add_device(struct domain *dom, int sid) | |||
1459 | { | |||
1460 | struct domain_dev *ddev; | |||
1461 | ||||
1462 | DPRINTF(0, "add %s to iommu%d.%.4x\n", dmar_bdf(sid), dom->iommu->id, dom->did); | |||
1463 | ddev = malloc(sizeof(*ddev), M_DEVBUF2, M_ZERO0x0008 | M_WAITOK0x0001); | |||
1464 | ddev->sid = sid; | |||
1465 | TAILQ_INSERT_TAIL(&dom->devices, ddev, link)do { (ddev)->link.tqe_next = ((void *)0); (ddev)->link. tqe_prev = (&dom->devices)->tqh_last; *(&dom-> devices)->tqh_last = (ddev); (&dom->devices)->tqh_last = &(ddev)->link.tqe_next; } while (0); | |||
1466 | ||||
1467 | /* Should set context entry here?? */ | |||
1468 | } | |||
1469 | ||||
1470 | void | |||
1471 | domain_remove_device(struct domain *dom, int sid) | |||
1472 | { | |||
1473 | struct domain_dev *ddev, *tmp; | |||
1474 | ||||
1475 | TAILQ_FOREACH_SAFE(ddev, &dom->devices, link, tmp)for ((ddev) = ((&dom->devices)->tqh_first); (ddev) != ((void *)0) && ((tmp) = ((ddev)->link.tqe_next), 1 ); (ddev) = (tmp)) { | |||
1476 | if (ddev->sid == sid) { | |||
1477 | TAILQ_REMOVE(&dom->devices, ddev, link)do { if (((ddev)->link.tqe_next) != ((void *)0)) (ddev)-> link.tqe_next->link.tqe_prev = (ddev)->link.tqe_prev; else (&dom->devices)->tqh_last = (ddev)->link.tqe_prev ; *(ddev)->link.tqe_prev = (ddev)->link.tqe_next; ((ddev )->link.tqe_prev) = ((void *)-1); ((ddev)->link.tqe_next ) = ((void *)-1); } while (0); | |||
1478 | free(ddev, sizeof(*ddev), M_DEVBUF2); | |||
1479 | } | |||
1480 | } | |||
1481 | } | |||
1482 | ||||
/* Lookup domain by segment & source id (bus.device.function) */
/*
 * Find (or create) the translation domain for a device: pick the
 * IOMMU whose device scope covers sid (or the segment's catch-all
 * unit), return an existing domain that already lists sid, otherwise
 * create one — falling back to a shared "unity" domain when the unit
 * is nearly out of domain ids — and register the device in it.
 * Returns NULL when no softc, no matching IOMMU, or no domain.
 */
struct domain *
domain_lookup(struct acpidmar_softc *sc, int segment, int sid)
{
	struct iommu_softc *iommu;
	struct domain_dev *ddev;
	struct domain *dom;
	int rc;

	if (sc == NULL) {
		return NULL;
	}

	/* Lookup IOMMU for this device */
	TAILQ_FOREACH(iommu, &sc->sc_drhds, link) {
		if (iommu->segment != segment)
			continue;
		/* Check for devscope match or catchall iommu */
		rc = acpidmar_match_devscope(&iommu->devices, sc->sc_pc, sid);
		if (rc != 0 || iommu->flags) {
			break;
		}
	}
	/* iommu is NULL here iff the loop ran to completion. */
	if (!iommu) {
		printf("%s: no iommu found\n", dmar_bdf(sid));
		return NULL;
	}

	/* Search domain devices */
	TAILQ_FOREACH(dom, &iommu->domains, link) {
		TAILQ_FOREACH(ddev, &dom->devices, link) {
			/* XXX: match all functions? */
			if (ddev->sid == sid) {
				return dom;
			}
		}
	}
	if (iommu->ndoms <= 2) {
		/* Running out of domains.. create catchall domain */
		if (!iommu->unity) {
			iommu->unity = domain_create(iommu, 1);
		}
		dom = iommu->unity;
	} else {
		/* Allocate ids from the top down. */
		dom = domain_create(iommu, --iommu->ndoms);
	}
	if (!dom) {
		printf("no domain here\n");
		return NULL;
	}

	/* Add device to domain */
	domain_add_device(dom, sid);

	return dom;
}
1539 | ||||
/* Map Guest Pages into IOMMU */
/*
 * Install RW IOMMU mappings for [gpa, gpa+len): each page of the
 * current process's VA range starting at va is resolved to its host
 * physical address and mapped at the corresponding guest-physical
 * address.  No-op when dom is NULL.
 */
void
_iommu_map(void *dom, vaddr_t va, bus_addr_t gpa, bus_size_t len)
{
	bus_size_t i;
	paddr_t hpa;

	if (dom == NULL) {
		return;
	}
	DPRINTF(1, "Mapping dma: %lx = %lx/%lx\n", va, gpa, len);
	for (i = 0; i < len; i += PAGE_SIZE) {
		hpa = 0;
		/* Translate through the calling process's pmap. */
		pmap_extract(curproc->p_vmspace->vm_map.pmap, va, &hpa);
		domain_map_page(dom, gpa, hpa, PTE_P | PTE_R | PTE_W);
		gpa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
}
1559 | ||||
1560 | /* Find IOMMU for a given PCI device */ | |||
1561 | void | |||
1562 | *_iommu_domain(int segment, int bus, int dev, int func, int *id) | |||
1563 | { | |||
1564 | struct domain *dom; | |||
1565 | ||||
1566 | dom = domain_lookup(acpidmar_sc, segment, mksid(bus, dev, func)); | |||
1567 | if (dom) { | |||
1568 | *id = dom->did; | |||
1569 | } | |||
1570 | return dom; | |||
1571 | } | |||
1572 | ||||
1573 | void | |||
1574 | domain_map_device(struct domain *dom, int sid); | |||
1575 | ||||
/*
 * Program the hardware so the device named by sid translates through
 * this domain.  AMD path: fill in the device-table entry (3-level PTE,
 * domain id, valid) if it isn't already.  Intel path: allocate the
 * per-bus context table on first use, then fill in the per-devfn
 * context entry and flush as the caching mode requires.  Idempotent —
 * existing valid entries are left untouched.
 */
void
domain_map_device(struct domain *dom, int sid)
{
	struct iommu_softc *iommu;
	struct context_entry *ctx;
	paddr_t paddr;
	int bus, devfn;
	int tt, lvl;

	iommu = dom->iommu;

	bus = sid_bus(sid);
	devfn = sid_devfn(sid);
	/* AMD attach device */
	if (iommu->dte) {
		struct ivhd_dte *dte = &iommu->dte[sid];
		/* dw0 non-zero means the entry was already programmed. */
		if (!dte->dw0) {
			/* Setup Device Table Entry: bus.devfn */
			DPRINTF(1, "@@@ PCI Attach: %.4x[%s] %.4x\n", sid, dmar_bdf(sid), dom->did);
			dte_set_host_page_table_root_ptr(dte, dom->ptep);
			dte_set_domain(dte, dom->did);
			dte_set_mode(dte, 3);  /* Set 3 level PTE */
			dte_set_tv(dte);
			dte_set_valid(dte);
			ivhd_flush_devtab(iommu, dom->did);
#ifdef IOMMU_DEBUG
			//ivhd_showreg(iommu);
			ivhd_showdte(iommu);
#endif
		}
		return;
	}

	/* Create Bus mapping */
	/* First device on this bus: allocate its context table and hook
	 * it into the root table. */
	if (!root_entry_is_valid(&iommu->root[bus])) {
		iommu->ctx[bus] = iommu_alloc_page(iommu, &paddr);
		iommu->root[bus].lo = paddr | ROOT_P;
		iommu_flush_cache(iommu, &iommu->root[bus],
		    sizeof(struct root_entry));
		DPRINTF(0, "iommu%d: Allocate context for bus: %.2x pa:%.16llx va:%p\n",
		    iommu->id, bus, (uint64_t)paddr,
		    iommu->ctx[bus]);
	}

	/* Create DevFn mapping */
	ctx = iommu->ctx[bus] + devfn;
	if (!context_entry_is_valid(ctx)) {
		tt = CTX_T_MULTI;
		lvl = VTD_AWTOLEVEL(iommu->agaw);

		/* Initialize context */
		context_set_slpte(ctx, dom->ptep);
		context_set_translation_type(ctx, tt);
		context_set_domain_id(ctx, dom->did);
		context_set_address_width(ctx, lvl);
		context_set_present(ctx);

		/* Flush it */
		/* Caching-mode units need explicit ctx/TLB invalidation;
		 * otherwise a write-buffer flush suffices. */
		iommu_flush_cache(iommu, ctx, sizeof(struct context_entry));
		if ((iommu->cap & CAP_CM) || acpidmar_force_cm) {
			iommu_flush_ctx(iommu, CTX_DEVICE, dom->did, sid, 0);
			iommu_flush_tlb(iommu, IOTLB_GLOBAL, 0);
		} else {
			iommu_flush_write_buffer(iommu);
		}
		DPRINTF(0, "iommu%d: %s set context ptep:%.16llx lvl:%d did:%.4x tt:%d\n",
		    iommu->id, dmar_bdf(sid), (uint64_t)dom->ptep, lvl,
		    dom->did, tt);
	}
}
1646 | ||||
1647 | struct domain * | |||
1648 | acpidmar_pci_attach(struct acpidmar_softc *sc, int segment, int sid, int mapctx) | |||
1649 | { | |||
1650 | static struct domain *dom; | |||
1651 | ||||
1652 | dom = domain_lookup(sc, segment, sid); | |||
1653 | if (!dom) { | |||
1654 | printf("no domain: %s\n", dmar_bdf(sid)); | |||
1655 | return NULL((void *)0); | |||
1656 | } | |||
1657 | ||||
1658 | if (mapctx) { | |||
1659 | domain_map_device(dom, sid); | |||
1660 | } | |||
1661 | ||||
1662 | return dom; | |||
1663 | } | |||
1664 | ||||
/*
 * Per-device PCI attach hook.  Called for every PCI device found
 * during bus enumeration; places the device into an IOMMU domain and
 * swaps its bus_dma tag for the domain's remapping tag.
 *
 * Special cases (classified via the PCI class register):
 *  - VGA display devices: mark the domain DOM_NOMAP.
 *  - ISA bridges: identity-map the first 16MB for legacy DMA.
 */
void
acpidmar_pci_hook(pci_chipset_tag_t pc, struct pci_attach_args *pa)
{
	int bus, dev, fun, sid;
	struct domain *dom;
	pcireg_t reg;

	if (!acpidmar_sc) {
		/* No DMAR, ignore */
		return;
	}

	/* Add device to our list if valid */
	pci_decompose_tag(pc, pa->pa_tag, &bus, &dev, &fun);
	sid = mksid(bus, dev, fun);
	/* Skip devices flagged invalid (e.g. the IOMMU itself) */
	if (sid_flag[sid] & SID_INVALID)
		return;

	reg = pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG);

	/* Add device to domain (context programmed later, mapctx=0) */
	dom = acpidmar_pci_attach(acpidmar_sc, pa->pa_domain, sid, 0);
	if (dom == NULL)
		return;

	if (PCI_CLASS(reg) == PCI_CLASS_DISPLAY &&
	    PCI_SUBCLASS(reg) == PCI_SUBCLASS_DISPLAY_VGA) {
		dom->flag = DOM_NOMAP;
	}
	if (PCI_CLASS(reg) == PCI_CLASS_BRIDGE &&
	    PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_ISA) {
		/* For ISA Bridges, map 0-16Mb as 1:1 */
		printf("dmar: %.4x:%.2x:%.2x.%x mapping ISA\n",
		    pa->pa_domain, bus, dev, fun);
		domain_map_pthru(dom, 0x00, 16*1024*1024);
	}

	/* Change DMA tag so all DMA maps go through the IOMMU */
	pa->pa_dmat = &dom->dmat;
}
1705 | ||||
1706 | /* Create list of device scope entries from ACPI table */ | |||
/*
 * Create list of device scope entries from ACPI table.
 *
 * Walks the variable-length device-scope records that follow a DRHD/
 * RMRR/ATSR header (starting at byte offset `off' within `de') and
 * appends one malloc'd dmar_devlist node per bridge/endpoint record
 * to `devlist'.  Other scope types (IOAPIC, HPET, ...) are skipped.
 */
void
acpidmar_parse_devscope(union acpidmar_entry *de, int off, int segment,
    struct devlist_head *devlist)
{
	struct acpidmar_devscope *ds;
	struct dmar_devlist *d;
	int dplen, i;

	TAILQ_INIT(devlist);
	while (off < de->length) {
		ds = (struct acpidmar_devscope *)((unsigned char *)de + off);
		off += ds->length;

		/* We only care about bridges and endpoints */
		if (ds->type != DMAR_ENDPOINT && ds->type != DMAR_BRIDGE)
			continue;

		/* Path entries (2 bytes each) trail the fixed header */
		dplen = ds->length - sizeof(*ds);
		d = malloc(sizeof(*d) + dplen, M_DEVBUF, M_ZERO | M_WAITOK);
		d->bus = ds->bus;
		d->type = ds->type;
		d->ndp = dplen / 2;
		d->dp = (void *)&d[1];	/* path array lives in the same allocation */
		memcpy(d->dp, &ds[1], dplen);
		TAILQ_INSERT_TAIL(devlist, d, link);

		DPRINTF(1, "  %8s  %.4x:%.2x.%.2x.%x {",
		    ds->type == DMAR_BRIDGE ? "bridge" : "endpoint",
		    segment, ds->bus,
		    d->dp[0].device,
		    d->dp[0].function);

		for (i = 1; i < d->ndp; i++) {
			DPRINTF(1, " %2x.%x ",
			    d->dp[i].device,
			    d->dp[i].function);
		}
		DPRINTF(1, "}\n");
	}
}
1747 | ||||
1748 | /* DMA Remapping Hardware Unit */ | |||
/*
 * DMA Remapping Hardware Unit (DRHD) table entry.
 *
 * Allocates and initializes an iommu_softc for the unit and queues it
 * on sc_drhds.  A DRHD with flags set is the "include all" catch-all
 * unit and must be searched last, so it goes at the tail; specific
 * units are inserted at the head.
 */
void
acpidmar_drhd(struct acpidmar_softc *sc, union acpidmar_entry *de)
{
	struct iommu_softc *iommu;

	printf("DRHD: segment:%.4x base:%.16llx flags:%.2x\n",
	    de->drhd.segment,
	    de->drhd.address,
	    de->drhd.flags);
	iommu = malloc(sizeof(*iommu), M_DEVBUF, M_ZERO | M_WAITOK);
	acpidmar_parse_devscope(de, sizeof(de->drhd), de->drhd.segment,
	    &iommu->devices);
	iommu_init(sc, iommu, &de->drhd);

	if (de->drhd.flags) {
		/* Catchall IOMMU goes at end of list */
		TAILQ_INSERT_TAIL(&sc->sc_drhds, iommu, link);
	} else {
		TAILQ_INSERT_HEAD(&sc->sc_drhds, iommu, link);
	}
}
1770 | ||||
1771 | /* Reserved Memory Region Reporting */ | |||
/*
 * Reserved Memory Region Reporting (RMRR) table entry.
 *
 * Records a BIOS-declared region that must stay identity-mapped for
 * the listed devices.  Bogus entries (limit <= base) are rejected.
 * If the region falls inside an E820 reserved area the region is
 * widened to cover the whole (possibly coalesced) reserved range,
 * because some BIOSes DMA outside the RMRR they declared.
 */
void
acpidmar_rmrr(struct acpidmar_softc *sc, union acpidmar_entry *de)
{
	struct rmrr_softc *rmrr;
	bios_memmap_t *im, *jm;
	uint64_t start, end;

	printf("RMRR: segment:%.4x range:%.16llx-%.16llx\n",
	    de->rmrr.segment, de->rmrr.base, de->rmrr.limit);
	if (de->rmrr.limit <= de->rmrr.base) {
		printf("  buggy BIOS\n");
		return;
	}

	rmrr = malloc(sizeof(*rmrr), M_DEVBUF, M_ZERO | M_WAITOK);
	rmrr->start = trunc_page(de->rmrr.base);
	rmrr->end = round_page(de->rmrr.limit);
	rmrr->segment = de->rmrr.segment;
	acpidmar_parse_devscope(de, sizeof(de->rmrr), de->rmrr.segment,
	    &rmrr->devices);

	for (im = bios_memmap; im->type != BIOS_MAP_END; im++) {
		if (im->type != BIOS_MAP_RES)
			continue;
		/* Search for adjacent reserved regions and coalesce them */
		start = im->addr;
		end = im->addr+im->size;
		for (jm = im+1; jm->type == BIOS_MAP_RES && end == jm->addr;
		    jm++) {
			end = jm->addr+jm->size;
		}
		printf("e820: %.16llx - %.16llx\n", start, end);
		if (start <= rmrr->start && rmrr->end <= end) {
			/* Bah.. some buggy BIOS stomp outside RMRR */
			printf("   ** inside E820 Reserved %.16llx %.16llx\n",
			    start, end);
			rmrr->start = trunc_page(start);
			rmrr->end = round_page(end);
			break;
		}
	}
	TAILQ_INSERT_TAIL(&sc->sc_rmrrs, rmrr, link);
}
1815 | ||||
1816 | /* Root Port ATS Reporting */ | |||
/*
 * Root Port ATS Reporting (ATSR) table entry.
 *
 * Records the segment/flags and the device scope of root ports that
 * support Address Translation Services, queued on sc_atsrs.
 */
void
acpidmar_atsr(struct acpidmar_softc *sc, union acpidmar_entry *de)
{
	struct atsr_softc *atsr;

	printf("ATSR: segment:%.4x flags:%x\n",
	    de->atsr.segment,
	    de->atsr.flags);

	atsr = malloc(sizeof(*atsr), M_DEVBUF, M_ZERO | M_WAITOK);
	atsr->flags = de->atsr.flags;
	atsr->segment = de->atsr.segment;
	acpidmar_parse_devscope(de, sizeof(de->atsr), de->atsr.segment,
	    &atsr->devices);

	TAILQ_INSERT_TAIL(&sc->sc_atsrs, atsr, link);
}
1834 | ||||
/*
 * Parse the ACPI DMAR table (Intel VT-d) and bring up all reported
 * hardware: walk the variable-length sub-tables (DRHD/RMRR/ATSR),
 * pre-create domains for devices listed in each DRHD's scope, and
 * identity-map every RMRR region into its devices' domains.
 */
void
acpidmar_init(struct acpidmar_softc *sc, struct acpi_dmar *dmar)
{
	struct rmrr_softc *rmrr;
	struct iommu_softc *iommu;
	struct domain *dom;
	struct dmar_devlist *dl;
	union acpidmar_entry *de;
	int off, sid, rc;

	/* Intel page-table mapper for this platform */
	domain_map_page = domain_map_page_intel;
	printf(": hardware width: %d, intr_remap:%d x2apic_opt_out:%d\n",
	    dmar->haw+1,
	    !!(dmar->flags & 0x1),
	    !!(dmar->flags & 0x2));
	sc->sc_haw = dmar->haw+1;
	sc->sc_flags = dmar->flags;

	TAILQ_INIT(&sc->sc_drhds);
	TAILQ_INIT(&sc->sc_rmrrs);
	TAILQ_INIT(&sc->sc_atsrs);

	/* Walk the sub-tables that follow the fixed DMAR header */
	off = sizeof(*dmar);
	while (off < dmar->hdr.length) {
		de = (union acpidmar_entry *)((unsigned char *)dmar + off);
		switch (de->type) {
		case DMAR_DRHD:
			acpidmar_drhd(sc, de);
			break;
		case DMAR_RMRR:
			acpidmar_rmrr(sc, de);
			break;
		case DMAR_ATSR:
			acpidmar_atsr(sc, de);
			break;
		default:
			printf("DMAR: unknown %x\n", de->type);
			break;
		}
		off += de->length;
	}

	/* Pre-create domains for iommu devices */
	TAILQ_FOREACH(iommu, &sc->sc_drhds, link) {
		TAILQ_FOREACH(dl, &iommu->devices, link) {
			sid = mksid(dl->bus, dl->dp[0].device,
			    dl->dp[0].function);
			dom = acpidmar_pci_attach(sc, iommu->segment, sid, 0);
			if (dom != NULL) {
				printf("%.4x:%.2x:%.2x.%x iommu:%d did:%.4x\n",
				    iommu->segment, dl->bus, dl->dp[0].device, dl->dp[0].function,
				    iommu->id, dom->did);
			}
		}
	}
	/* Map passthrough pages for RMRR */
	TAILQ_FOREACH(rmrr, &sc->sc_rmrrs, link) {
		TAILQ_FOREACH(dl, &rmrr->devices, link) {
			sid = mksid(dl->bus, dl->dp[0].device,
			    dl->dp[0].function);
			dom = acpidmar_pci_attach(sc, rmrr->segment, sid, 0);
			if (dom != NULL) {
				printf("%s map ident: %.16llx %.16llx\n",
				    dom_bdf(dom), rmrr->start, rmrr->end);
				domain_map_pthru(dom, rmrr->start, rmrr->end);
				/* Reserve the IOVA range so the allocator
				 * never hands it out.
				 * NOTE(review): rc is assigned but never
				 * checked — allocation failure is silently
				 * ignored. */
				rc = extent_alloc_region(dom->iovamap,
				    rmrr->start, rmrr->end,
				    EX_WAITOK | EX_CONFLICTOK);
			}
		}
	}
}
1907 | ||||
1908 | ||||
1909 | /*===================================================== | |||
1910 | * AMD Vi | |||
1911 | *=====================================================*/ | |||
1912 | void acpiivrs_ivhd(struct acpidmar_softc *, struct acpi_ivhd *); | |||
1913 | int ivhd_iommu_init(struct acpidmar_softc *, struct iommu_softc *, | |||
1914 | struct acpi_ivhd *); | |||
1915 | int _ivhd_issue_command(struct iommu_softc *, const struct ivhd_command *); | |||
1916 | void ivhd_show_event(struct iommu_softc *, struct ivhd_event *evt, int); | |||
1917 | int ivhd_issue_command(struct iommu_softc *, const struct ivhd_command *, int); | |||
1918 | int ivhd_invalidate_domain(struct iommu_softc *, int); | |||
1919 | void ivhd_intr_map(struct iommu_softc *, int); | |||
1920 | void ivhd_checkerr(struct iommu_softc *iommu); | |||
1921 | int acpiivhd_intr(void *); | |||
1922 | ||||
1923 | int | |||
1924 | acpiivhd_intr(void *ctx) | |||
1925 | { | |||
1926 | struct iommu_softc *iommu = ctx; | |||
1927 | ||||
1928 | if (!iommu->dte) | |||
1929 | return (0); | |||
1930 | ivhd_poll_events(iommu); | |||
1931 | return (1); | |||
1932 | } | |||
1933 | ||||
1934 | /* Setup interrupt for AMD */ | |||
/*
 * Setup interrupt for AMD IOMMU.
 *
 * Establishes an MSI interrupt for the IOMMU's own PCI function
 * (`devid' is the source-id of the IOMMU device).  No-op when an
 * interrupt is already established.
 */
void
ivhd_intr_map(struct iommu_softc *iommu, int devid) {
	pci_intr_handle_t ih;

	if (iommu->intr)
		return;
	/* Hand-build an MSI interrupt handle for the IOMMU's own BDF */
	ih.tag = pci_make_tag(NULL, sid_bus(devid), sid_dev(devid), sid_fun(devid));
	ih.line = APIC_INT_VIA_MSG;
	ih.pin = 0;
	iommu->intr = pci_intr_establish(NULL, ih, IPL_NET | IPL_MPSAFE,
	    acpiivhd_intr, iommu, "amd_iommu");
	printf("amd iommu intr: %p\n", iommu->intr);
}
1948 | ||||
/*
 * Debug: recursively dump a page-table level.
 *
 * `lvl' is the current level (leaves at lvl 1); `va' accumulates the
 * virtual address bits decoded so far.  Non-leaf entries are followed
 * through the direct map; leaf entries print the final translation.
 */
void
_dumppte(struct pte_entry *pte, int lvl, vaddr_t va)
{
	char *pfx[] = { "    ", "   ", "  ", " ", "" };
	uint64_t i, sh;
	struct pte_entry *npte;

	for (i = 0; i < 512; i++) {
		/* VA bits contributed by index i at this level */
		sh = (i << (((lvl-1) * 9) + 12));
		if (pte[i].val & PTE_P) {
			if (lvl > 1) {
				npte = (void *)PMAP_DIRECT_MAP((pte[i].val & PTE_PADDR_MASK));
				printf("%slvl%d: %.16llx nxt:%llu\n", pfx[lvl], lvl,
				    pte[i].val, (pte[i].val >> 9) & 7);
				_dumppte(npte, lvl-1, va | sh);
			} else {
				printf("%slvl%d: %.16llx <- %.16llx \n", pfx[lvl], lvl,
				    pte[i].val, va | sh);
			}
		}
	}
}
1971 | ||||
/*
 * Debug: dump the Device Table Entry and page tables for `sid'.
 * Rate-limited to the first 10 calls so a fault storm doesn't flood
 * the console.
 *
 * NOTE(review): the `paddr' parameter is currently unused.
 */
void
ivhd_showpage(struct iommu_softc *iommu, int sid, paddr_t paddr)
{
	struct domain *dom;
	static int show = 0;

	if (show > 10)
		return;
	show++;
	dom = acpidmar_pci_attach(acpidmar_sc, 0, sid, 0);
	if (!dom)
		return;
	printf("DTE: %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
	    iommu->dte[sid].dw0,
	    iommu->dte[sid].dw1,
	    iommu->dte[sid].dw2,
	    iommu->dte[sid].dw3,
	    iommu->dte[sid].dw4,
	    iommu->dte[sid].dw5,
	    iommu->dte[sid].dw6,
	    iommu->dte[sid].dw7);
	_dumppte(dom->pte, 3, 0);
}
1995 | ||||
1996 | /* Display AMD IOMMU Error */ | |||
/*
 * Display AMD IOMMU Error.
 *
 * Decodes one event-log entry (`evt' at log offset `head'), prints a
 * human-readable description per event type, and clears the entry so
 * stale data is never re-reported.
 */
void
ivhd_show_event(struct iommu_softc *iommu, struct ivhd_event *evt, int head)
{
	int type, sid, did, flag;
	uint64_t address;

	/* Get Device, Domain, Address and Type of event */
	sid = __EXTRACT(evt->dw0, EVT_SID);
	type = __EXTRACT(evt->dw1, EVT_TYPE);
	did = __EXTRACT(evt->dw1, EVT_DID);
	flag = __EXTRACT(evt->dw1, EVT_FLAG);
	address = _get64(&evt->dw2);

	printf("=== IOMMU Error[%.4x]: ", head);
	switch (type) {
	case ILLEGAL_DEV_TABLE_ENTRY:
		printf("illegal dev table entry dev=%s addr=0x%.16llx %s, %s, %s, %s\n",
		    dmar_bdf(sid), address,
		    evt->dw1 & EVT_TR ? "translation" : "transaction",
		    evt->dw1 & EVT_RZ ? "reserved bit" : "invalid level",
		    evt->dw1 & EVT_RW ? "write" : "read",
		    evt->dw1 & EVT_I ? "interrupt" : "memory");
		ivhd_showdte(iommu);
		break;
	case IO_PAGE_FAULT:
		printf("io page fault dev=%s did=0x%.4x addr=0x%.16llx\n%s, %s, %s, %s, %s, %s\n",
		    dmar_bdf(sid), did, address,
		    evt->dw1 & EVT_TR ? "translation" : "transaction",
		    evt->dw1 & EVT_RZ ? "reserved bit" : "invalid level",
		    evt->dw1 & EVT_PE ? "no perm" : "perm",
		    evt->dw1 & EVT_RW ? "write" : "read",
		    evt->dw1 & EVT_PR ? "present" : "not present",
		    evt->dw1 & EVT_I ? "interrupt" : "memory");
		ivhd_showdte(iommu);
		ivhd_showpage(iommu, sid, address);
		break;
	case DEV_TAB_HARDWARE_ERROR:
		printf("device table hardware error dev=%s addr=0x%.16llx %s, %s, %s\n",
		    dmar_bdf(sid), address,
		    evt->dw1 & EVT_TR ? "translation" : "transaction",
		    evt->dw1 & EVT_RW ? "write" : "read",
		    evt->dw1 & EVT_I ? "interrupt" : "memory");
		ivhd_showdte(iommu);
		break;
	case PAGE_TAB_HARDWARE_ERROR:
		printf("page table hardware error dev=%s addr=0x%.16llx %s, %s, %s\n",
		    dmar_bdf(sid), address,
		    evt->dw1 & EVT_TR ? "translation" : "transaction",
		    evt->dw1 & EVT_RW ? "write" : "read",
		    evt->dw1 & EVT_I ? "interrupt" : "memory");
		ivhd_showdte(iommu);
		break;
	case ILLEGAL_COMMAND_ERROR:
		printf("illegal command addr=0x%.16llx\n", address);
		ivhd_showcmd(iommu);
		break;
	case COMMAND_HARDWARE_ERROR:
		printf("command hardware error addr=0x%.16llx flag=0x%.4x\n",
		    address, flag);
		ivhd_showcmd(iommu);
		break;
	case IOTLB_INV_TIMEOUT:
		printf("iotlb invalidation timeout dev=%s address=0x%.16llx\n",
		    dmar_bdf(sid), address);
		break;
	case INVALID_DEVICE_REQUEST:
		printf("invalid device request dev=%s addr=0x%.16llx flag=0x%.4x\n",
		    dmar_bdf(sid), address, flag);
		break;
	default:
		printf("unknown type=0x%.2x\n", type);
		break;
	}
	/* Clear old event */
	evt->dw0 = 0;
	evt->dw1 = 0;
	evt->dw2 = 0;
	evt->dw3 = 0;
}
2076 | ||||
2077 | /* AMD: Process IOMMU error from hardware */ | |||
/*
 * AMD: Process IOMMU errors from the hardware event log.
 *
 * Consumes entries between the hardware head and tail pointers,
 * printing each one, then writes the new head back so the hardware
 * knows the log has been drained.  Always returns 0.
 */
int
ivhd_poll_events(struct iommu_softc *iommu)
{
	uint32_t head, tail;
	int sz;

	sz = sizeof(struct ivhd_event);
	head = iommu_read_4(iommu, EVT_HEAD_REG);
	tail = iommu_read_4(iommu, EVT_TAIL_REG);
	if (head == tail) {
		/* No pending events */
		return (0);
	}
	while (head != tail) {
		ivhd_show_event(iommu, iommu->evt_tbl + head, head);
		/* Event log is a ring buffer; wrap at EVT_TBL_SIZE */
		head = (head + sz) % EVT_TBL_SIZE;
	}
	iommu_write_4(iommu, EVT_HEAD_REG, head);
	return (0);
}
2098 | ||||
2099 | /* AMD: Issue command to IOMMU queue */ | |||
/*
 * AMD: Issue command to IOMMU queue (low-level enqueue, no wait).
 *
 * Copies `cmd' into the ring at the current tail and advances the
 * hardware tail pointer, with interrupts disabled around the
 * read-modify-write of the tail.  Returns the entry's ring index on
 * success or -EBUSY when the ring is full.
 */
int
_ivhd_issue_command(struct iommu_softc *iommu, const struct ivhd_command *cmd)
{
	u_long rf;
	uint32_t head, tail, next;
	int sz;

	head = iommu_read_4(iommu, CMD_HEAD_REG);
	sz = sizeof(*cmd);
	rf = intr_disable();
	tail = iommu_read_4(iommu, CMD_TAIL_REG);
	next = (tail + sz) % CMD_TBL_SIZE;
	if (next == head) {
		printf("FULL\n");
		/* Queue is full */
		intr_restore(rf);
		return -EBUSY;
	}
	memcpy(iommu->cmd_tbl + tail, cmd, sz);
	/* Publish: hardware starts fetching once the tail moves */
	iommu_write_4(iommu, CMD_TAIL_REG, next);
	intr_restore(rf);
	return (tail / sz);
}
2123 | ||||
2124 | #define IVHD_MAXDELAY8 8 | |||
2125 | ||||
/*
 * AMD: Issue a command, optionally waiting for completion.
 *
 * When `wait' is set, a COMPLETION_WAIT command is queued after `cmd'
 * whose store-address points at the on-stack variable `wv'; the
 * hardware writes dw2/dw3 there when everything before it has
 * finished.  We poll `wv' with exponential backoff (IVHD_MAXDELAY
 * rounds) and log a timeout if it never changes.
 *
 * Returns the ring index of the last command issued, or a negative
 * error from _ivhd_issue_command().
 */
int
ivhd_issue_command(struct iommu_softc *iommu, const struct ivhd_command *cmd, int wait)
{
	struct ivhd_command wq = { 0 };
	volatile uint64_t wv __aligned(16) = 0LL;
	paddr_t paddr;
	int rc, i;

	rc = _ivhd_issue_command(iommu, cmd);
	if (rc >= 0 && wait) {
		/* Wait for previous commands to complete.
		 * Store address of completion variable to command */
		pmap_extract(pmap_kernel(), (vaddr_t)&wv, &paddr);
		wq.dw0 = (paddr & ~0xF) | 0x1;
		wq.dw1 = (COMPLETION_WAIT << CMD_SHIFT) | ((paddr >> 32) & 0xFFFFF);
		wq.dw2 = 0xDEADBEEF;	/* value hardware stores to wv (low) */
		wq.dw3 = 0xFEEDC0DE;	/* value hardware stores to wv (high) */

		rc = _ivhd_issue_command(iommu, &wq);
		/* wv will change to value in dw2/dw3 when command is complete */
		for (i = 0; i < IVHD_MAXDELAY && !wv; i++) {
			DELAY(10 << i);
		}
		if (i == IVHD_MAXDELAY) {
			printf("ivhd command timeout: %.8x %.8x %.8x %.8x wv:%llx idx:%x\n",
			    cmd->dw0, cmd->dw1, cmd->dw2, cmd->dw3, wv, rc);
		}
	}
	return rc;

}
2157 | ||||
2158 | /* AMD: Flush changes to Device Table Entry for a specific domain */ | |||
2159 | int | |||
2160 | ivhd_flush_devtab(struct iommu_softc *iommu, int did) | |||
2161 | { | |||
2162 | struct ivhd_command cmd = { | |||
2163 | .dw0 = did, | |||
2164 | .dw1 = INVALIDATE_DEVTAB_ENTRY << CMD_SHIFT28 | |||
2165 | }; | |||
2166 | ||||
2167 | return ivhd_issue_command(iommu, &cmd, 1); | |||
2168 | } | |||
2169 | ||||
2170 | /* AMD: Invalidate all IOMMU device and page tables */ | |||
/*
 * AMD: Invalidate all IOMMU device and page tables.
 * Fire-and-forget: does not wait for completion (wait=0).
 */
int
ivhd_invalidate_iommu_all(struct iommu_softc *iommu)
{
	struct ivhd_command cmd = {
		.dw1 = INVALIDATE_IOMMU_ALL << CMD_SHIFT
	};

	return ivhd_issue_command(iommu, &cmd, 0);
}
2180 | ||||
2181 | /* AMD: Invalidate interrupt remapping */ | |||
/*
 * AMD: Invalidate interrupt remapping table for domain `did'.
 * Fire-and-forget: does not wait for completion (wait=0).
 */
int
ivhd_invalidate_interrupt_table(struct iommu_softc *iommu, int did)
{
	struct ivhd_command cmd = {
		.dw0 = did,
		.dw1 = INVALIDATE_INTERRUPT_TABLE << CMD_SHIFT
	};

	return ivhd_issue_command(iommu, &cmd, 0);
}
2192 | ||||
2193 | /* AMD: Invalidate all page tables in a domain */ | |||
/*
 * AMD: Invalidate all page tables in domain `did'.
 *
 * dw2/dw3 encode the address/size fields of INVALIDATE_IOMMU_PAGES;
 * the values used here select the whole address range ("all pages").
 * Waits for completion.
 */
int
ivhd_invalidate_domain(struct iommu_softc *iommu, int did)
{
	struct ivhd_command cmd = { .dw1 = did | (INVALIDATE_IOMMU_PAGES << CMD_SHIFT) };

	cmd.dw2 = 0xFFFFF000 | 0x3;
	cmd.dw3 = 0x7FFFFFFF;
	return ivhd_issue_command(iommu, &cmd, 1);
}
2203 | ||||
2204 | /* AMD: Display Registers */ | |||
/*
 * AMD: Display Registers (debug).
 * Dumps the base/control/status registers and both ring pointers.
 */
void
ivhd_showreg(struct iommu_softc *iommu)
{
	printf("---- dt:%.16llx cmd:%.16llx evt:%.16llx ctl:%.16llx sts:%.16llx\n",
	    iommu_read_8(iommu, DEV_TAB_BASE_REG),
	    iommu_read_8(iommu, CMD_BASE_REG),
	    iommu_read_8(iommu, EVT_BASE_REG),
	    iommu_read_8(iommu, IOMMUCTL_REG),
	    iommu_read_8(iommu, IOMMUSTS_REG));
	printf("---- cmd queue:%.16llx %.16llx evt queue:%.16llx %.16llx\n",
	    iommu_read_8(iommu, CMD_HEAD_REG),
	    iommu_read_8(iommu, CMD_TAIL_REG),
	    iommu_read_8(iommu, EVT_HEAD_REG),
	    iommu_read_8(iommu, EVT_TAIL_REG));
}
2220 | ||||
2221 | /* AMD: Generate Errors to test event handler */ | |||
/*
 * AMD: Generate Errors to test event handler (debug only).
 *
 * Deliberately corrupts one DTE and issues an all-ones command so
 * the event log fires, then drains the log.  The sid 0x2303 and
 * domain 0x1234 are arbitrary test values.
 */
void
ivhd_checkerr(struct iommu_softc *iommu)
{
	struct ivhd_command cmd = { -1, -1, -1, -1 };

	/* Generate ILLEGAL DEV TAB entry? */
	iommu->dte[0x2303].dw0 = -1;		/* invalid */
	iommu->dte[0x2303].dw2 = 0x1234;	/* domain */
	iommu->dte[0x2303].dw7 = -1;		/* reserved */
	ivhd_flush_devtab(iommu, 0x1234);
	ivhd_poll_events(iommu);

	/* Generate ILLEGAL_COMMAND_ERROR : ok */
	ivhd_issue_command(iommu, &cmd, 0);
	ivhd_poll_events(iommu);

	/* Generate page hardware error */
}
2240 | ||||
2241 | /* AMD: Show Device Table Entry */ | |||
/*
 * AMD: Show Device Table Entries (debug).
 * Scans all 64K source-ids and prints any entry whose first word is
 * non-zero, decoding the sid into bus:dev.fn.
 */
void
ivhd_showdte(struct iommu_softc *iommu)
{
	int i;

	for (i = 0; i < 65536; i++) {
		if (iommu->dte[i].dw0) {
			printf("%.2x:%.2x.%x: %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
			    i >> 8, (i >> 3) & 0x1F, i & 0x7,
			    iommu->dte[i].dw0, iommu->dte[i].dw1,
			    iommu->dte[i].dw2, iommu->dte[i].dw3,
			    iommu->dte[i].dw4, iommu->dte[i].dw5,
			    iommu->dte[i].dw6, iommu->dte[i].dw7);
		}
	}
}
2258 | ||||
2259 | /* AMD: Show command entries */ | |||
/*
 * AMD: Show command entries (debug).
 * Prints part of the command ring with the physical address of each
 * slot.  NOTE(review): the loop bound 4096/128 = 32 iterations does
 * not obviously match sizeof(struct ivhd_command) — confirm whether a
 * partial dump is intended.
 */
void
ivhd_showcmd(struct iommu_softc *iommu)
{
	struct ivhd_command *ihd;
	paddr_t phd;
	int i;

	ihd = iommu->cmd_tbl;
	phd = iommu_read_8(iommu, CMD_BASE_REG) & CMD_BASE_MASK;
	for (i = 0; i < 4096 / 128; i++) {
		printf("%.2x: %.16llx %.8x %.8x %.8x %.8x\n", i,
		    (uint64_t)phd + i * sizeof(*ihd),
		    ihd[i].dw0,ihd[i].dw1,ihd[i].dw2,ihd[i].dw3);
	}
}
2275 | ||||
2276 | #define _c(x)(int)((iommu->ecap >> x_SHIFT) & x_MASK) (int)((iommu->ecap >> x ##_SHIFT) & x ## _MASK) | |||
2277 | ||||
2278 | /* AMD: Initialize IOMMU */ | |||
/*
 * AMD: Initialize IOMMU.
 *
 * Maps the unit's MMIO registers, reads its extended features,
 * disables it, sets up the command ring, event log and device table,
 * then re-enables it with the flags requested by the IVHD entry and
 * invalidates all cached state.  Returns 0 on success, -1 on bad
 * arguments or a failed bus-space mapping.
 */
int
ivhd_iommu_init(struct acpidmar_softc *sc, struct iommu_softc *iommu,
    struct acpi_ivhd *ivhd)
{
	static int niommu;
	paddr_t paddr;
	uint64_t ov;

	if (sc == NULL || iommu == NULL || ivhd == NULL) {
		printf("Bad pointer to iommu_init!\n");
		return -1;
	}
	if (_bus_space_map(sc->sc_memt, ivhd->address, 0x80000, 0, &iommu->ioh) != 0) {
		printf("Bus Space Map fails\n");
		return -1;
	}
	TAILQ_INIT(&iommu->domains);
	TAILQ_INIT(&iommu->devices);

	/* Setup address width and number of domains */
	iommu->id = ++niommu;
	iommu->iot = sc->sc_memt;
	iommu->mgaw = 48;
	iommu->agaw = 48;
	iommu->flags = 1;
	iommu->segment = 0;
	iommu->ndoms = 256;

	printf(": AMD iommu%d at 0x%.8llx\n", iommu->id, ivhd->address);

	/* Read and report extended feature bits */
	iommu->ecap = iommu_read_8(iommu, EXTFEAT_REG);
	DPRINTF(0,"iommu%d: ecap:%.16llx ", iommu->id, iommu->ecap);
	DPRINTF(0,"%s%s%s%s%s%s%s%s\n",
	    iommu->ecap & EFR_PREFSUP ? "pref " : "",
	    iommu->ecap & EFR_PPRSUP  ? "ppr " : "",
	    iommu->ecap & EFR_NXSUP   ? "nx " : "",
	    iommu->ecap & EFR_GTSUP   ? "gt " : "",
	    iommu->ecap & EFR_IASUP   ? "ia " : "",
	    iommu->ecap & EFR_GASUP   ? "ga " : "",
	    iommu->ecap & EFR_HESUP   ? "he " : "",
	    iommu->ecap & EFR_PCSUP   ? "pc " : "");
	DPRINTF(0,"hats:%x gats:%x glxsup:%x smif:%x smifrc:%x gam:%x\n",
	    _c(EFR_HATS), _c(EFR_GATS), _c(EFR_GLXSUP), _c(EFR_SMIFSUP),
	    _c(EFR_SMIFRC), _c(EFR_GAMSUP));

	/* Turn off iommu while the rings/tables are reprogrammed */
	ov = iommu_read_8(iommu, IOMMUCTL_REG);
	iommu_write_8(iommu, IOMMUCTL_REG, ov & ~(CTL_IOMMUEN | CTL_COHERENT |
	    CTL_HTTUNEN | CTL_RESPASSPW | CTL_PASSPW | CTL_ISOC));

	/* Enable intr, mark IOMMU device as invalid for remap */
	sid_flag[ivhd->devid] |= SID_INVALID;
	ivhd_intr_map(iommu, ivhd->devid);

	/* Setup command buffer with 4k buffer (128 entries) */
	iommu->cmd_tbl = iommu_alloc_page(iommu, &paddr);
	iommu_write_8(iommu, CMD_BASE_REG, (paddr & CMD_BASE_MASK) | CMD_TBL_LEN_4K);
	iommu_write_4(iommu, CMD_HEAD_REG, 0x00);
	iommu_write_4(iommu, CMD_TAIL_REG, 0x00);
	iommu->cmd_tblp = paddr;

	/* Setup event log with 4k buffer (128 entries) */
	iommu->evt_tbl = iommu_alloc_page(iommu, &paddr);
	iommu_write_8(iommu, EVT_BASE_REG, (paddr & EVT_BASE_MASK) | EVT_TBL_LEN_4K);
	iommu_write_4(iommu, EVT_HEAD_REG, 0x00);
	iommu_write_4(iommu, EVT_TAIL_REG, 0x00);
	iommu->evt_tblp = paddr;

	/* Setup device table
	 *  1 entry per source ID (bus:device:function - 64k entries)
	 */
	iommu->dte = sc->sc_hwdte;
	iommu_write_8(iommu, DEV_TAB_BASE_REG, (sc->sc_hwdtep & DEV_TAB_MASK) | DEV_TAB_LEN);

	/* Enable IOMMU, carrying over IVHD-requested tuning flags */
	ov |= (CTL_IOMMUEN | CTL_EVENTLOGEN | CTL_CMDBUFEN | CTL_EVENTINTEN);
	if (ivhd->flags & IVHD_COHERENT)
		ov |= CTL_COHERENT;
	if (ivhd->flags & IVHD_HTTUNEN)
		ov |= CTL_HTTUNEN;
	if (ivhd->flags & IVHD_RESPASSPW)
		ov |= CTL_RESPASSPW;
	if (ivhd->flags & IVHD_PASSPW)
		ov |= CTL_PASSPW;
	if (ivhd->flags & IVHD_ISOC)
		ov |= CTL_ISOC;
	/* Set invalidation timeout to 10ms */
	ov &= ~(CTL_INVTIMEOUT_MASK << CTL_INVTIMEOUT_SHIFT);
	ov |= (CTL_INVTIMEOUT_10MS << CTL_INVTIMEOUT_SHIFT);
	iommu_write_8(iommu, IOMMUCTL_REG, ov);

	/* Start from a clean slate: drop all cached translations */
	ivhd_invalidate_iommu_all(iommu);

	TAILQ_INSERT_TAIL(&sc->sc_drhds, iommu, link);
	return 0;
}
2374 | ||||
2375 | void | |||
2376 | acpiivrs_ivhd(struct acpidmar_softc *sc, struct acpi_ivhd *ivhd) | |||
2377 | { | |||
2378 | struct iommu_softc *iommu; | |||
2379 | struct acpi_ivhd_ext *ext; | |||
2380 | union acpi_ivhd_entry *ie; | |||
2381 | int start, off, dte, all_dte = 0; | |||
2382 | ||||
2383 | if (ivhd->type == IVRS_IVHD_EXT0x11) { | |||
2384 | ext = (struct acpi_ivhd_ext *)ivhd; | |||
2385 | DPRINTF(0,"ivhd: %.2x %.2x %.4x %.4x:%s %.4x %.16llx %.4x %.8x %.16llx\n", | |||
2386 | ext->type, ext->flags, ext->length, | |||
2387 | ext->segment, dmar_bdf(ext->devid), ext->cap, | |||
2388 | ext->address, ext->info, | |||
2389 | ext->attrib, ext->efr); | |||
2390 | if (ext->flags & IVHD_PPRSUP(1L << 7)) | |||
2391 | DPRINTF(0," PPRSup"); | |||
2392 | if (ext->flags & IVHD_PREFSUP(1L << 6)) | |||
2393 | DPRINTF(0," PreFSup"); | |||
2394 | if (ext->flags & IVHD_COHERENT(1L << 5)) | |||
2395 | DPRINTF(0," Coherent"); | |||
2396 | if (ext->flags & IVHD_IOTLB(1L << 4)) | |||
2397 | DPRINTF(0," Iotlb"); | |||
2398 | if (ext->flags & IVHD_ISOC(1L << 3)) | |||
2399 | DPRINTF(0," ISoc"); | |||
2400 | if (ext->flags & IVHD_RESPASSPW(1L << 2)) | |||
2401 | DPRINTF(0," ResPassPW"); | |||
2402 | if (ext->flags & IVHD_PASSPW(1L << 1)) | |||
2403 | DPRINTF(0," PassPW"); | |||
2404 | if (ext->flags & IVHD_HTTUNEN(1L << 0)) | |||
2405 | DPRINTF(0, " HtTunEn"); | |||
2406 | if (ext->flags) | |||
2407 | DPRINTF(0,"\n"); | |||
2408 | off = sizeof(*ext); | |||
2409 | iommu = malloc(sizeof(*iommu), M_DEVBUF2, M_ZERO0x0008|M_WAITOK0x0001); | |||
2410 | ivhd_iommu_init(sc, iommu, ivhd); | |||
2411 | } else { | |||
2412 | DPRINTF(0,"ivhd: %.2x %.2x %.4x %.4x:%s %.4x %.16llx %.4x %.8x\n", | |||
2413 | ivhd->type, ivhd->flags, ivhd->length, | |||
2414 | ivhd->segment, dmar_bdf(ivhd->devid), ivhd->cap, | |||
2415 | ivhd->address, ivhd->info, | |||
2416 | ivhd->feature); | |||
2417 | if (ivhd->flags & IVHD_PPRSUP(1L << 7)) | |||
2418 | DPRINTF(0," PPRSup"); | |||
2419 | if (ivhd->flags & IVHD_PREFSUP(1L << 6)) | |||
2420 | DPRINTF(0," PreFSup"); | |||
2421 | if (ivhd->flags & IVHD_COHERENT(1L << 5)) | |||
2422 | DPRINTF(0," Coherent"); | |||
2423 | if (ivhd->flags & IVHD_IOTLB(1L << 4)) | |||
2424 | DPRINTF(0," Iotlb"); | |||
2425 | if (ivhd->flags & IVHD_ISOC(1L << 3)) | |||
2426 | DPRINTF(0," ISoc"); | |||
2427 | if (ivhd->flags & IVHD_RESPASSPW(1L << 2)) | |||
2428 | DPRINTF(0," ResPassPW"); | |||
2429 | if (ivhd->flags & IVHD_PASSPW(1L << 1)) | |||
2430 | DPRINTF(0," PassPW"); | |||
2431 | if (ivhd->flags & IVHD_HTTUNEN(1L << 0)) | |||
2432 | DPRINTF(0, " HtTunEn"); | |||
2433 | if (ivhd->flags) | |||
2434 | DPRINTF(0,"\n"); | |||
2435 | off = sizeof(*ivhd); | |||
2436 | } | |||
2437 | while (off < ivhd->length) { | |||
2438 | ie = (void *)ivhd + off; | |||
2439 | switch (ie->type) { | |||
2440 | case IVHD_ALL1: | |||
2441 | all_dte = ie->all.data; | |||
2442 | DPRINTF(0," ALL %.4x\n", dte); | |||
2443 | off += sizeof(ie->all); | |||
2444 | break; | |||
2445 | case IVHD_SEL2: | |||
2446 | dte = ie->sel.data; | |||
2447 | DPRINTF(0," SELECT: %s %.4x\n", dmar_bdf(ie->sel.devid), dte); | |||
2448 | off += sizeof(ie->sel); | |||
2449 | break; | |||
2450 | case IVHD_SOR3: | |||
2451 | dte = ie->sor.data; | |||
2452 | start = ie->sor.devid; | |||
2453 | DPRINTF(0," SOR: %s %.4x\n", dmar_bdf(start), dte); | |||
2454 | off += sizeof(ie->sor); | |||
2455 | break; | |||
2456 | case IVHD_EOR4: | |||
2457 | DPRINTF(0," EOR: %s\n", dmar_bdf(ie->eor.devid)); | |||
2458 | off += sizeof(ie->eor); | |||
2459 | break; | |||
2460 | case IVHD_ALIAS_SEL66: | |||
2461 | dte = ie->alias.data; | |||
2462 | DPRINTF(0," ALIAS: src=%s: ", dmar_bdf(ie->alias.srcid)); | |||
2463 | DPRINTF(0," %s %.4x\n", dmar_bdf(ie->alias.devid), dte); | |||
2464 | off += sizeof(ie->alias); | |||
2465 | break; | |||
2466 | case IVHD_ALIAS_SOR67: | |||
2467 | dte = ie->alias.data; | |||
2468 | DPRINTF(0," ALIAS_SOR: %s %.4x ", dmar_bdf(ie->alias.devid), dte); | |||
2469 | DPRINTF(0," src=%s\n", dmar_bdf(ie->alias.srcid)); | |||
2470 | off += sizeof(ie->alias); | |||
2471 | break; | |||
2472 | case IVHD_EXT_SEL70: | |||
2473 | dte = ie->ext.data; | |||
2474 | DPRINTF(0," EXT SEL: %s %.4x %.8x\n", dmar_bdf(ie->ext.devid), | |||
2475 | dte, ie->ext.extdata); | |||
2476 | off += sizeof(ie->ext); | |||
2477 | break; | |||
2478 | case IVHD_EXT_SOR71: | |||
2479 | dte = ie->ext.data; | |||
2480 | DPRINTF(0," EXT SOR: %s %.4x %.8x\n", dmar_bdf(ie->ext.devid), | |||
2481 | dte, ie->ext.extdata); | |||
2482 | off += sizeof(ie->ext); | |||
2483 | break; | |||
2484 | case IVHD_SPECIAL72: | |||
2485 | DPRINTF(0," SPECIAL\n"); | |||
2486 | off += sizeof(ie->special); | |||
2487 | break; | |||
2488 | default: | |||
2489 | DPRINTF(0," 2:unknown %x\n", ie->type); | |||
2490 | off = ivhd->length; | |||
2491 | break; | |||
2492 | } | |||
2493 | } | |||
2494 | } | |||
2495 | ||||
/*
 * Top-level parser for the AMD IVRS ACPI table: allocate the hardware
 * device table shared by all IOMMUs, then walk each IVHD/IVMD subtable.
 */
void
acpiivrs_init(struct acpidmar_softc *sc, struct acpi_ivrs *ivrs)
{
	union acpi_ivrs_entry *ie;
	int off;

	/* One 64k-entry hardware device table (DTE) is shared by all units */
	if (!sc->sc_hwdte) {
		sc->sc_hwdte = iommu_alloc_hwdte(sc, HWDTE_SIZE(65536 * sizeof(struct ivhd_dte)), &sc->sc_hwdtep);
		if (sc->sc_hwdte == NULL((void *)0))
			panic("Can't allocate HWDTE!");
	}

	/* AMD IOMMUs use their own page-table mapping routine */
	domain_map_page = domain_map_page_amd;
	DPRINTF(0,"IVRS Version: %d\n", ivrs->hdr.revision);
	DPRINTF(0," VA Size: %d\n",
	    (ivrs->ivinfo >> IVRS_VASIZE_SHIFT) & IVRS_VASIZE_MASK);
	DPRINTF(0," PA Size: %d\n",
	    (ivrs->ivinfo >> IVRS_PASIZE_SHIFT) & IVRS_PASIZE_MASK);

	TAILQ_INIT(&sc->sc_drhds)do { (&sc->sc_drhds)->tqh_first = ((void *)0); (&sc->sc_drhds)->tqh_last = &(&sc->sc_drhds)->tqh_first; } while (0);
	TAILQ_INIT(&sc->sc_rmrrs)do { (&sc->sc_rmrrs)->tqh_first = ((void *)0); (&sc->sc_rmrrs)->tqh_last = &(&sc->sc_rmrrs)->tqh_first; } while (0);
	TAILQ_INIT(&sc->sc_atsrs)do { (&sc->sc_atsrs)->tqh_first = ((void *)0); (&sc->sc_atsrs)->tqh_last = &(&sc->sc_atsrs)->tqh_first; } while (0);

	DPRINTF(0,"======== IVRS\n");
	off = sizeof(*ivrs);
	/* Walk subtables; each entry carries its own length */
	while (off < ivrs->hdr.length) {
		ie = (void *)ivrs + off;
		switch (ie->type) {
		case IVRS_IVHD0x10:
		case IVRS_IVHD_EXT0x11:
			acpiivrs_ivhd(sc, &ie->ivhd);
			break;
		case IVRS_IVMD_ALL0x20:
		case IVRS_IVMD_SPECIFIED0x21:
		case IVRS_IVMD_RANGE0x22:
			/* Memory definition blocks: not handled, only logged */
			DPRINTF(0,"ivmd\n");
			break;
		default:
			DPRINTF(0,"1:unknown: %x\n", ie->type);
			break;
		}
		off += ie->length;
	}
	DPRINTF(0,"======== End IVRS\n");
}
2541 | ||||
2542 | static int | |||
2543 | acpiivhd_activate(struct iommu_softc *iommu, int act) | |||
2544 | { | |||
2545 | switch (act) { | |||
2546 | case DVACT_SUSPEND3: | |||
2547 | iommu->flags |= IOMMU_FLAGS_SUSPEND0x4; | |||
2548 | break; | |||
2549 | case DVACT_RESUME4: | |||
2550 | iommu->flags &= ~IOMMU_FLAGS_SUSPEND0x4; | |||
2551 | break; | |||
2552 | } | |||
2553 | return (0); | |||
2554 | } | |||
2555 | ||||
/*
 * Power-state hook for the acpidmar device.  On resume, reprogram each
 * Intel IOMMU's root table and fault-event MSI registers and re-enable
 * translation where it was active; on suspend, disable translation.
 * AMD units (iommu->dte != NULL) only need their suspend flag tracked.
 */
int
acpidmar_activate(struct device *self, int act)
{
	struct acpidmar_softc *sc = (struct acpidmar_softc *)self;
	struct iommu_softc *iommu;

	printf("called acpidmar_activate %d %p\n", act, sc);

	if (sc == NULL((void *)0)) {
		return (0);
	}

	switch (act) {
	case DVACT_RESUME4:
		TAILQ_FOREACH(iommu, &sc->sc_drhds, link)for((iommu) = ((&sc->sc_drhds)->tqh_first); (iommu) != ((void *)0); (iommu) = ((iommu)->link.tqe_next)) {
			printf("iommu%d resume\n", iommu->id);
			/* AMD unit: flag bookkeeping only */
			if (iommu->dte) {
				acpiivhd_activate(iommu, act);
				continue;
			}
			/* Intel: restore root table + fault-event MSI route */
			iommu_flush_write_buffer(iommu);
			iommu_set_rtaddr(iommu, iommu->rtaddr);
			iommu_write_4(iommu, DMAR_FEDATA_REG0x3c, iommu->fedata);
			iommu_write_4(iommu, DMAR_FEADDR_REG0x40, iommu->feaddr);
			iommu_write_4(iommu, DMAR_FEUADDR_REG0x44,
			    iommu->feaddr >> 32);
			/* Re-enable translation only if suspended and not bad */
			if ((iommu->flags & (IOMMU_FLAGS_BAD0x2|IOMMU_FLAGS_SUSPEND0x4)) ==
			    IOMMU_FLAGS_SUSPEND0x4) {
				printf("enable wakeup translation\n");
				iommu_enable_translation(iommu, 1);
			}
			iommu_showcfg(iommu, -1);
		}
		break;
	case DVACT_SUSPEND3:
		TAILQ_FOREACH(iommu, &sc->sc_drhds, link)for((iommu) = ((&sc->sc_drhds)->tqh_first); (iommu) != ((void *)0); (iommu) = ((iommu)->link.tqe_next)) {
			printf("iommu%d suspend\n", iommu->id);
			if (iommu->flags & IOMMU_FLAGS_BAD0x2)
				continue;
			/* AMD unit: flag bookkeeping only */
			if (iommu->dte) {
				acpiivhd_activate(iommu, act);
				continue;
			}
			iommu->flags |= IOMMU_FLAGS_SUSPEND0x4;
			iommu_enable_translation(iommu, 0);
			iommu_showcfg(iommu, -1);
		}
		break;
	}
	return (0);
}
2607 | ||||
/*
 * Autoconf match: claim the device when the ACPI table handed to us is
 * a DMAR (Intel VT-d) or IVRS (AMD-Vi) table.
 */
int
acpidmar_match(struct device *parent, void *match, void *aux)
{
	struct acpi_attach_args *aaa = aux;
	struct acpi_table_header *hdr;

	/* If we do not have a table, it is not us */
	if (aaa->aaa_table == NULL((void *)0))
		return (0);

	/* If it is an DMAR table, we can attach */
	hdr = (struct acpi_table_header *)aaa->aaa_table;
	if (memcmp(hdr->signature, DMAR_SIG, sizeof(DMAR_SIG) - 1)__builtin_memcmp((hdr->signature), ("DMAR"), (sizeof("DMAR") - 1)) == 0)
		return (1);
	if (memcmp(hdr->signature, IVRS_SIG, sizeof(IVRS_SIG) - 1)__builtin_memcmp((hdr->signature), ("IVRS"), (sizeof("IVRS") - 1)) == 0)
		return (1);

	return (0);
}
2627 | ||||
/*
 * Autoconf attach: record bus/dma tags and dispatch to the Intel
 * (DMAR) or AMD (IVRS) table parser based on the table signature.
 */
void
acpidmar_attach(struct device *parent, struct device *self, void *aux)
{
	struct acpidmar_softc *sc = (void *)self;
	struct acpi_attach_args *aaa = aux;
	struct acpi_dmar *dmar = (struct acpi_dmar *)aaa->aaa_table;
	struct acpi_ivrs *ivrs = (struct acpi_ivrs *)aaa->aaa_table;
	struct acpi_table_header *hdr;

	hdr = (struct acpi_table_header *)aaa->aaa_table;
	sc->sc_memt = aaa->aaa_memt;
	sc->sc_dmat = aaa->aaa_dmat;
	if (memcmp(hdr->signature, DMAR_SIG, sizeof(DMAR_SIG) - 1)__builtin_memcmp((hdr->signature), ("DMAR"), (sizeof("DMAR") - 1)) == 0) {
		/* Intel VT-d */
		acpidmar_sc = sc;
		acpidmar_init(sc, dmar);
	}
	if (memcmp(hdr->signature, IVRS_SIG, sizeof(IVRS_SIG) - 1)__builtin_memcmp((hdr->signature), ("IVRS"), (sizeof("IVRS") - 1)) == 0) {
		/* AMD-Vi */
		acpidmar_sc = sc;
		acpiivrs_init(sc, ivrs);
	}
}
2649 | ||||
/* Fault-event interrupt handling (MSI soft-PIC glue) */
2651 | void acpidmar_msi_hwmask(struct pic *, int); | |||
2652 | void acpidmar_msi_hwunmask(struct pic *, int); | |||
2653 | void acpidmar_msi_addroute(struct pic *, struct cpu_info *, int, int, int); | |||
2654 | void acpidmar_msi_delroute(struct pic *, struct cpu_info *, int, int, int); | |||
2655 | ||||
/*
 * Mask the IOMMU fault-event interrupt by setting the FECTL interrupt
 * mask bit; the read-back forces the register write to post.
 */
void
acpidmar_msi_hwmask(struct pic *pic, int pin)
{
	struct iommu_pic *ip = (void *)pic;
	struct iommu_softc *iommu = ip->iommu;

	printf("msi_hwmask\n");

	mtx_enter(&iommu->reg_lock);

	iommu_write_4(iommu, DMAR_FECTL_REG0x38, FECTL_IM(1LL << 31));
	iommu_read_4(iommu, DMAR_FECTL_REG0x38);	/* posted-write flush */

	mtx_leave(&iommu->reg_lock);
}
2671 | ||||
/*
 * Unmask the IOMMU fault-event interrupt by clearing FECTL; the
 * read-back forces the register write to post.
 */
void
acpidmar_msi_hwunmask(struct pic *pic, int pin)
{
	struct iommu_pic *ip = (void *)pic;
	struct iommu_softc *iommu = ip->iommu;

	printf("msi_hwunmask\n");

	mtx_enter(&iommu->reg_lock);

	iommu_write_4(iommu, DMAR_FECTL_REG0x38, 0);
	iommu_read_4(iommu, DMAR_FECTL_REG0x38);	/* posted-write flush */

	mtx_leave(&iommu->reg_lock);
}
2687 | ||||
/*
 * Route the fault-event MSI to the given CPU/vector and remember the
 * programmed values (fedata/feaddr) so they can be restored on resume.
 */
void
acpidmar_msi_addroute(struct pic *pic, struct cpu_info *ci, int pin, int vec,
    int type)
{
	struct iommu_pic *ip = (void *)pic;
	struct iommu_softc *iommu = ip->iommu;

	mtx_enter(&iommu->reg_lock);

	iommu->fedata = vec;
	/* MSI address: 0xfee00000 with the target APIC id in bits 12-19 */
	iommu->feaddr = 0xfee00000L | (ci->ci_apicid << 12);
	iommu_write_4(iommu, DMAR_FEDATA_REG0x3c, vec);
	iommu_write_4(iommu, DMAR_FEADDR_REG0x40, iommu->feaddr);
	iommu_write_4(iommu, DMAR_FEUADDR_REG0x44, iommu->feaddr >> 32);

	mtx_leave(&iommu->reg_lock);
}
2705 | ||||
/* Fault-event MSI route removal: no hardware work yet, just log. */
void
acpidmar_msi_delroute(struct pic *pic, struct cpu_info *ci, int pin, int vec,
    int type)
{
	printf("msi_delroute\n");
}
2712 | ||||
/*
 * Establish the fault-event interrupt for an IOMMU: initialize the
 * per-iommu soft PIC with our MSI mask/route operations and register
 * 'func' as the handler at the given level via intr_establish().
 */
void *
acpidmar_intr_establish(void *ctx, int level, int (*func)(void *),
    void *arg, const char *what)
{
	struct iommu_softc *iommu = ctx;
	struct pic *pic;

	pic = &iommu->pic.pic;
	iommu->pic.iommu = iommu;

	strlcpy(pic->pic_dev.dv_xname, "dmarpic",
	    sizeof(pic->pic_dev.dv_xname));
	pic->pic_type = PIC_MSI3;
	pic->pic_hwmask = acpidmar_msi_hwmask;
	pic->pic_hwunmask = acpidmar_msi_hwunmask;
	pic->pic_addroute = acpidmar_msi_addroute;
	pic->pic_delroute = acpidmar_msi_delroute;
	pic->pic_edge_stubs = ioapic_edge_stubs;
#ifdef MULTIPROCESSOR1
	mtx_init(&pic->pic_mutex, level)do { (void)(((void *)0)); (void)(0); __mtx_init((&pic->pic_mutex), ((((level)) > 0x0 && ((level)) < 0x9) ? 0x9 : ((level)))); } while (0);
#endif

	return intr_establish(-1, pic, 0, IST_PULSE1, level, NULL((void *)0), func, arg, what);
}
2737 | ||||
2738 | /* Intel: Handle DMAR Interrupt */ | |||
/*
 * Fault-event interrupt handler (Intel VT-d): scan the fault recording
 * registers, print any new fault records, then acknowledge by clearing
 * the overflow and primary-pending-fault status bits.
 */
int
acpidmar_intr(void *ctx)
{
	struct iommu_softc *iommu = ctx;
	struct fault_entry fe;
	static struct fault_entry ofe;	/* last record shown, to de-dup */
	int fro, nfr, fri, i;
	uint32_t sts;

	/*splassert(IPL_HIGH);*/

	/* Nothing to do if translation is not enabled */
	if (!(iommu->gcmd & GCMD_TE(1LL << 31))) {
		return (1);
	}
	mtx_enter(&iommu->reg_lock);
	/* NOTE(review): the FECTL read result is immediately overwritten by
	 * the FSTS read -- presumably just a register touch; confirm intent. */
	sts = iommu_read_4(iommu, DMAR_FECTL_REG0x38);
	sts = iommu_read_4(iommu, DMAR_FSTS_REG0x34);

	/* No primary pending fault */
	if (!(sts & FSTS_PPF(1LL << 1))) {
		mtx_leave(&iommu->reg_lock);
		return (1);
	}

	/* nfr = number of fault registers, fro = their offset, fri = index */
	nfr = cap_nfr(iommu->cap)((uint32_t)(((iommu->cap)>> 40LL) & 0xFF) + 1);
	fro = cap_fro(iommu->cap)((uint32_t)(((iommu->cap)>> 24LL) & 0x3FF) * 16);
	fri = (sts >> FSTS_FRI_SHIFT8) & FSTS_FRI_MASK0xFF;
	for (i = 0; i < nfr; i++) {
		fe.hi = iommu_read_8(iommu, fro + (fri*16) + 8);
		/* Stop at the first record without the Fault bit set */
		if (!(fe.hi & FRCD_HI_F(1LL << (127-64))))
			break;

		fe.lo = iommu_read_8(iommu, fro + (fri*16));
		/* Only report records that differ from the previous one */
		if (ofe.hi != fe.hi || ofe.lo != fe.lo) {
			iommu_showfault(iommu, fri, &fe);
			ofe.hi = fe.hi;
			ofe.lo = fe.lo;
		}
		fri = (fri + 1) % nfr;
	}

	/* Ack: clear fault overflow + primary pending fault */
	iommu_write_4(iommu, DMAR_FSTS_REG0x34, FSTS_PFO(1LL << 0) | FSTS_PPF(1LL << 1));

	mtx_leave(&iommu->reg_lock);

	return (1);
}
2785 | ||||
/*
 * Human-readable VT-d fault reason strings, indexed by the fault
 * record FR field (consumed by iommu_showfault()).
 */
const char *vtd_faults[] = {
	"Software",
	"Root Entry Not Present",	/* ok (rtaddr + 4096) */
	"Context Entry Not Present",	/* ok (no CTX_P) */
	"Context Entry Invalid",	/* ok (tt = 3) */
	"Address Beyond MGAW",
	"Write",			/* ok */
	"Read",				/* ok */
	"Paging Entry Invalid",		/* ok */
	"Root Table Invalid",
	"Context Table Invalid",
	"Root Entry Reserved",		/* ok (root.lo |= 0x4) */
	"Context Entry Reserved",
	"Paging Entry Reserved",
	"Context Entry TT",
	"Reserved",
};
2803 | ||||
2804 | void iommu_showpte(uint64_t, int, uint64_t); | |||
2805 | ||||
2806 | /* Intel: Show IOMMU page table entry */ | |||
/*
 * Recursively dump one level of a VT-d page table.  'ptep' is the
 * physical address of the table page, 'lvl' the bit shift of this
 * level, 'base' the virtual address covered so far.  Stops descending
 * at an identity mapping on the leaf level.
 */
void
iommu_showpte(uint64_t ptep, int lvl, uint64_t base)
{
	uint64_t nb, pb, i;
	struct pte_entry *pte;

	pte = (void *)PMAP_DIRECT_MAP(ptep)((vaddr_t)(((((511 - 4) * (1ULL << 39))) | 0xffff000000000000)) + (ptep));
	for (i = 0; i < 512; i++) {
		if (!(pte[i].val & PTE_P(1L << 0)))
			continue;
		nb = base + (i << lvl);
		pb = pte[i].val & ~VTD_PAGE_MASK0xFFF;
		if(lvl == VTD_LEVEL012) {
			/* Leaf level: print the mapping itself */
			printf(" %3llx %.16llx = %.16llx %c%c %s\n",
			    i, nb, pb,
			    pte[i].val == PTE_R0x00 ? 'r' : ' ',
			    pte[i].val & PTE_W(1L << 1) ? 'w' : ' ',
			    (nb == pb) ? " ident" : "");
			if (nb == pb)
				return;
		} else {
			iommu_showpte(pb, lvl - VTD_STRIDE_SIZE9, nb);
		}
	}
}
2832 | ||||
2833 | /* Intel: Show IOMMU configuration */ | |||
/*
 * Dump an IOMMU's global command/status plus every valid root/context
 * entry (per bus/devfn).  The 'sid' argument is accepted but not used
 * for filtering (callers currently pass -1).
 */
void
iommu_showcfg(struct iommu_softc *iommu, int sid)
{
	int i, j, sts, cmd;
	struct context_entry *ctx;
	pcitag_t tag;
	pcireg_t clc;

	cmd = iommu_read_4(iommu, DMAR_GCMD_REG0x18);
	sts = iommu_read_4(iommu, DMAR_GSTS_REG0x1c);
	printf("iommu%d: flags:%d root pa:%.16llx %s %s %s %.8x %.8x\n",
	    iommu->id, iommu->flags, iommu_read_8(iommu, DMAR_RTADDR_REG0x20),
	    sts & GSTS_TES(1LL << 31) ? "enabled" : "disabled",
	    sts & GSTS_QIES(1LL << 26) ? "qi" : "ccmd",
	    sts & GSTS_IRES(1LL << 25) ? "ir" : "",
	    cmd, sts);
	/* i = bus, j = devfn */
	for (i = 0; i < 256; i++) {
		if (!root_entry_is_valid(&iommu->root[i])) {
			continue;
		}
		for (j = 0; j < 256; j++) {
			ctx = iommu->ctx[i] + j;
			if (!context_entry_is_valid(ctx)) {
				continue;
			}
			tag = pci_make_tag(NULL((void *)0), i, (j >> 3), j & 0x7);
			/* class code from PCI config space dword 0x08 */
			clc = pci_conf_read(NULL((void *)0), tag, 0x08) >> 8;
			printf(" %.2x:%.2x.%x lvl:%d did:%.4x tt:%d ptep:%.16llx flag:%x cc:%.6x\n",
			    i, (j >> 3), j & 7,
			    context_address_width(ctx),
			    context_domain_id(ctx),
			    context_translation_type(ctx),
			    context_pte(ctx),
			    context_user(ctx),
			    clc);
#if 0
			/* dump pagetables */
			iommu_showpte(ctx->lo & ~VTD_PAGE_MASK0xFFF, iommu->agaw -
			    VTD_STRIDE_SIZE9, 0);
#endif
		}
	}
}
2877 | ||||
2878 | /* Intel: Show IOMMU fault */ | |||
/*
 * Decode and print one fault record: the faulting device (bus:dev.fn),
 * access type, fault reason, and whether the device's context entry is
 * mapped.  Also flags addresses that fall into an e820-reserved region
 * and optionally drops into ddb.
 */
void
iommu_showfault(struct iommu_softc *iommu, int fri, struct fault_entry *fe)
{
	int bus, dev, fun, type, fr, df;
	bios_memmap_t *im;
	const char *mapped;

	/* Ignore records without the Fault bit set */
	if (!(fe->hi & FRCD_HI_F(1LL << (127-64))))
		return;
	type = (fe->hi & FRCD_HI_T(1LL << (126-64))) ? 'r' : 'w';
	fr = (fe->hi >> FRCD_HI_FR_SHIFT(96-64)) & FRCD_HI_FR_MASK0xFF;
	bus = (fe->hi >> FRCD_HI_BUS_SHIFT8) & FRCD_HI_BUS_MASK0xFF;
	dev = (fe->hi >> FRCD_HI_DEV_SHIFT3) & FRCD_HI_DEV_MASK0x1F;
	fun = (fe->hi >> FRCD_HI_FUN_SHIFT0) & FRCD_HI_FUN_MASK0x7;
	df = (fe->hi >> FRCD_HI_FUN_SHIFT0) & 0xFF;	/* combined devfn */
	iommu_showcfg(iommu, mksid(bus,dev,fun));
	if (!iommu->ctx[bus]) {
		/* Bus is not initialized */
		mapped = "nobus";
	} else if (!context_entry_is_valid(&iommu->ctx[bus][df])) {
		/* DevFn not initialized */
		mapped = "nodevfn";
	} else if (context_user(&iommu->ctx[bus][df]) != 0xA) {
		/* no bus_space_map */
		mapped = "nomap";
	} else {
		/* bus_space_map */
		mapped = "mapped";
	}
	printf("fri%d: dmar: %.2x:%.2x.%x %s error at %llx fr:%d [%s] iommu:%d [%s]\n",
	    fri, bus, dev, fun,
	    type == 'r' ? "read" : "write",
	    fe->lo,
	    fr, fr <= 13 ? vtd_faults[fr] : "unknown",
	    iommu->id,
	    mapped);
	/* Note if the faulting address lies in an e820-reserved range */
	for (im = bios_memmap; im->type != BIOS_MAP_END0x00; im++) {
		if ((im->type == BIOS_MAP_RES0x02) &&
		    (im->addr <= fe->lo) &&
		    (fe->lo <= im->addr+im->size)) {
			printf("mem in e820.reserved\n");
		}
	}
#ifdef DDB1
	if (acpidmar_ddb)
		db_enter();
#endif
}
2927 |