File: dev/usb/xhci.c
Warning: line 3222, column 18: Access to field 'trb_flags' results in a dereference of a null pointer (loaded from variable 'trb0')
/*	$OpenBSD: xhci.c,v 1.124 2022/01/09 05:43:02 jsg Exp $ */

/*
 * Copyright (c) 2014-2015 Martin Pieuchot
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/pool.h>
#include <sys/endian.h>
#include <sys/rwlock.h>

#include <machine/bus.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <dev/usb/xhcireg.h>
#include <dev/usb/xhcivar.h>

struct cfdriver xhci_cd = {
	NULL, "xhci", DV_DULL, CD_SKIPHIBERNATE
};

#ifdef XHCI_DEBUG
#define DPRINTF(x)	do { if (xhcidebug) printf x; } while(0)
#define DPRINTFN(n,x)	do { if (xhcidebug>(n)) printf x; } while (0)
int xhcidebug = 3;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define DEVNAME(sc)	((sc)->sc_bus.bdev.dv_xname)

#define TRBOFF(r, trb)	((char *)(trb) - (char *)((r)->trbs))
#define DEQPTR(r)	((r).dma.paddr + (sizeof(struct xhci_trb) * (r).index))
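
/*
 * TRBOFF() converts a TRB pointer into its byte offset within a ring, and
 * DEQPTR() yields the bus address of the ring's current dequeue slot.
 * Each TRB is 16 bytes (a 64-bit parameter plus 32-bit status and flags
 * words), so, for example, a ring whose dequeue index is 3 dequeues at
 * dma.paddr + 48.
 */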

struct pool *xhcixfer;

struct xhci_pipe {
	struct usbd_pipe	pipe;

	uint8_t			dci;
	uint8_t			slot;	/* Device slot ID */
	struct xhci_ring	ring;

	/*
	 * XXX used to pass the xfer pointer back to the
	 * interrupt routine, better way?
	 */
	struct usbd_xfer	*pending_xfers[XHCI_MAX_XFER];
	struct usbd_xfer	*aborted_xfer;
	int			halted;
	size_t			free_trbs;
	int			skip;
#define TRB_PROCESSED_NO	0
#define TRB_PROCESSED_YES	1
#define TRB_PROCESSED_SHORT	2
	uint8_t			trb_processed[XHCI_MAX_XFER];
};

int	xhci_reset(struct xhci_softc *);
int	xhci_intr1(struct xhci_softc *);
void	xhci_event_dequeue(struct xhci_softc *);
void	xhci_event_xfer(struct xhci_softc *, uint64_t, uint32_t, uint32_t);
int	xhci_event_xfer_generic(struct xhci_softc *, struct usbd_xfer *,
	    struct xhci_pipe *, uint32_t, int, uint8_t, uint8_t, uint8_t);
int	xhci_event_xfer_isoc(struct usbd_xfer *, struct xhci_pipe *,
	    uint32_t, int, uint8_t);
void	xhci_event_command(struct xhci_softc *, uint64_t);
void	xhci_event_port_change(struct xhci_softc *, uint64_t, uint32_t);
int	xhci_pipe_init(struct xhci_softc *, struct usbd_pipe *);
int	xhci_context_setup(struct xhci_softc *, struct usbd_pipe *);
int	xhci_scratchpad_alloc(struct xhci_softc *, int);
void	xhci_scratchpad_free(struct xhci_softc *);
int	xhci_softdev_alloc(struct xhci_softc *, uint8_t);
void	xhci_softdev_free(struct xhci_softc *, uint8_t);
int	xhci_ring_alloc(struct xhci_softc *, struct xhci_ring *, size_t,
	    size_t);
void	xhci_ring_free(struct xhci_softc *, struct xhci_ring *);
void	xhci_ring_reset(struct xhci_softc *, struct xhci_ring *);
struct xhci_trb *xhci_ring_consume(struct xhci_softc *, struct xhci_ring *);
struct xhci_trb *xhci_ring_produce(struct xhci_softc *, struct xhci_ring *);

struct xhci_trb *xhci_xfer_get_trb(struct xhci_softc *, struct usbd_xfer*,
	    uint8_t *, int);
void	xhci_xfer_done(struct usbd_xfer *xfer);
/* xHCI command helpers. */
int	xhci_command_submit(struct xhci_softc *, struct xhci_trb *, int);
int	xhci_command_abort(struct xhci_softc *);

void	xhci_cmd_reset_ep_async(struct xhci_softc *, uint8_t, uint8_t);
void	xhci_cmd_set_tr_deq_async(struct xhci_softc *, uint8_t, uint8_t, uint64_t);
int	xhci_cmd_configure_ep(struct xhci_softc *, uint8_t, uint64_t);
int	xhci_cmd_stop_ep(struct xhci_softc *, uint8_t, uint8_t);
int	xhci_cmd_slot_control(struct xhci_softc *, uint8_t *, int);
int	xhci_cmd_set_address(struct xhci_softc *, uint8_t, uint64_t, uint32_t);
#ifdef XHCI_DEBUG
int	xhci_cmd_noop(struct xhci_softc *);
#endif

/* XXX should be part of the Bus interface. */
void	xhci_abort_xfer(struct usbd_xfer *, usbd_status);
void	xhci_pipe_close(struct usbd_pipe *);
void	xhci_noop(struct usbd_xfer *);

void	xhci_timeout(void *);
void	xhci_timeout_task(void *);

/* USBD Bus Interface. */
usbd_status	xhci_pipe_open(struct usbd_pipe *);
int		xhci_setaddr(struct usbd_device *, int);
void		xhci_softintr(void *);
void		xhci_poll(struct usbd_bus *);
struct usbd_xfer *xhci_allocx(struct usbd_bus *);
void		xhci_freex(struct usbd_bus *, struct usbd_xfer *);

usbd_status	xhci_root_ctrl_transfer(struct usbd_xfer *);
usbd_status	xhci_root_ctrl_start(struct usbd_xfer *);

usbd_status	xhci_root_intr_transfer(struct usbd_xfer *);
usbd_status	xhci_root_intr_start(struct usbd_xfer *);
void		xhci_root_intr_abort(struct usbd_xfer *);
void		xhci_root_intr_done(struct usbd_xfer *);

usbd_status	xhci_device_ctrl_transfer(struct usbd_xfer *);
usbd_status	xhci_device_ctrl_start(struct usbd_xfer *);
void		xhci_device_ctrl_abort(struct usbd_xfer *);

usbd_status	xhci_device_generic_transfer(struct usbd_xfer *);
usbd_status	xhci_device_generic_start(struct usbd_xfer *);
void		xhci_device_generic_abort(struct usbd_xfer *);
void		xhci_device_generic_done(struct usbd_xfer *);

usbd_status	xhci_device_isoc_transfer(struct usbd_xfer *);
usbd_status	xhci_device_isoc_start(struct usbd_xfer *);

#define XHCI_INTR_ENDPT 1

struct usbd_bus_methods xhci_bus_methods = {
	.open_pipe = xhci_pipe_open,
	.dev_setaddr = xhci_setaddr,
	.soft_intr = xhci_softintr,
	.do_poll = xhci_poll,
	.allocx = xhci_allocx,
	.freex = xhci_freex,
};

struct usbd_pipe_methods xhci_root_ctrl_methods = {
	.transfer = xhci_root_ctrl_transfer,
	.start = xhci_root_ctrl_start,
	.abort = xhci_noop,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

struct usbd_pipe_methods xhci_root_intr_methods = {
	.transfer = xhci_root_intr_transfer,
	.start = xhci_root_intr_start,
	.abort = xhci_root_intr_abort,
	.close = xhci_pipe_close,
	.done = xhci_root_intr_done,
};

struct usbd_pipe_methods xhci_device_ctrl_methods = {
	.transfer = xhci_device_ctrl_transfer,
	.start = xhci_device_ctrl_start,
	.abort = xhci_device_ctrl_abort,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

struct usbd_pipe_methods xhci_device_intr_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};

struct usbd_pipe_methods xhci_device_bulk_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};

struct usbd_pipe_methods xhci_device_isoc_methods = {
	.transfer = xhci_device_isoc_transfer,
	.start = xhci_device_isoc_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

#ifdef XHCI_DEBUG
static void
xhci_dump_trb(struct xhci_trb *trb)
{
	printf("trb=%p (0x%016llx 0x%08x 0x%b)\n", trb,
	    (long long)letoh64(trb->trb_paddr), letoh32(trb->trb_status),
	    (int)letoh32(trb->trb_flags), XHCI_TRB_FLAGS_BITMASK);
}
#endif

int	usbd_dma_contig_alloc(struct usbd_bus *, struct usbd_dma_info *,
	    void **, bus_size_t, bus_size_t, bus_size_t);
void	usbd_dma_contig_free(struct usbd_bus *, struct usbd_dma_info *);

int
usbd_dma_contig_alloc(struct usbd_bus *bus, struct usbd_dma_info *dma,
    void **kvap, bus_size_t size, bus_size_t alignment, bus_size_t boundary)
{
	int error;

	dma->tag = bus->dmatag;
	dma->size = size;

	error = bus_dmamap_create(dma->tag, size, 1, size, boundary,
	    BUS_DMA_NOWAIT, &dma->map);
	if (error != 0)
		return (error);

	error = bus_dmamem_alloc(dma->tag, size, alignment, boundary, &dma->seg,
	    1, &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0)
		goto destroy;

	error = bus_dmamem_map(dma->tag, &dma->seg, 1, size, &dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0)
		goto free;

	error = bus_dmamap_load_raw(dma->tag, dma->map, &dma->seg, 1, size,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto unmap;

	bus_dmamap_sync(dma->tag, dma->map, 0, size, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	dma->paddr = dma->map->dm_segs[0].ds_addr;
	if (kvap != NULL)
		*kvap = dma->vaddr;

	return (0);

unmap:
	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
free:
	bus_dmamem_free(dma->tag, &dma->seg, 1);
destroy:
	bus_dmamap_destroy(dma->tag, dma->map);
	return (error);
}

void
usbd_dma_contig_free(struct usbd_bus *bus, struct usbd_dma_info *dma)
{
	if (dma->map != NULL) {
		bus_dmamap_sync(bus->dmatag, dma->map, 0, dma->size,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(bus->dmatag, dma->map);
		bus_dmamem_unmap(bus->dmatag, dma->vaddr, dma->size);
		bus_dmamem_free(bus->dmatag, &dma->seg, 1);
		bus_dmamap_destroy(bus->dmatag, dma->map);
		dma->map = NULL;
	}
}
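
/*
 * The helpers above follow the usual bus_dma(9) sequence: create a map,
 * allocate and map physically contiguous memory, load the map, then sync
 * it once for both directions.  Any failure unwinds the steps already
 * taken through the unmap/free/destroy labels, so callers only ever see a
 * fully set up usbd_dma_info or an error with nothing left allocated.
 */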

int
xhci_init(struct xhci_softc *sc)
{
	uint32_t hcr;
	int npage, error;

	sc->sc_bus.usbrev = USBREV_3_0;
	sc->sc_bus.methods = &xhci_bus_methods;
	sc->sc_bus.pipe_size = sizeof(struct xhci_pipe);

	sc->sc_oper_off = XREAD1(sc, XHCI_CAPLENGTH);
	sc->sc_door_off = XREAD4(sc, XHCI_DBOFF);
	sc->sc_runt_off = XREAD4(sc, XHCI_RTSOFF);

	sc->sc_version = XREAD2(sc, XHCI_HCIVERSION);
	printf(", xHCI %x.%x\n", sc->sc_version >> 8, sc->sc_version & 0xff);

#ifdef XHCI_DEBUG
	printf("%s: CAPLENGTH=%#lx\n", DEVNAME(sc), sc->sc_oper_off);
	printf("%s: DOORBELL=%#lx\n", DEVNAME(sc), sc->sc_door_off);
	printf("%s: RUNTIME=%#lx\n", DEVNAME(sc), sc->sc_runt_off);
#endif

	error = xhci_reset(sc);
	if (error)
		return (error);

	if (xhcixfer == NULL) {
		xhcixfer = malloc(sizeof(struct pool), M_USBHC, M_NOWAIT);
		if (xhcixfer == NULL) {
			printf("%s: unable to allocate pool descriptor\n",
			    DEVNAME(sc));
			return (ENOMEM);
		}
		pool_init(xhcixfer, sizeof(struct xhci_xfer), 0, IPL_SOFTUSB,
		    0, "xhcixfer", NULL);
	}

	hcr = XREAD4(sc, XHCI_HCCPARAMS);
	sc->sc_ctxsize = XHCI_HCC_CSZ(hcr) ? 64 : 32;
	DPRINTF(("%s: %d bytes context\n", DEVNAME(sc), sc->sc_ctxsize));

#ifdef XHCI_DEBUG
	hcr = XOREAD4(sc, XHCI_PAGESIZE);
	printf("%s: supported page size 0x%08x\n", DEVNAME(sc), hcr);
#endif
	/* Use 4K for the moment since it's easier. */
	sc->sc_pagesize = 4096;

	/* Get port and device slot numbers. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS1);
	sc->sc_noport = XHCI_HCS1_N_PORTS(hcr);
	sc->sc_noslot = XHCI_HCS1_DEVSLOT_MAX(hcr);
	DPRINTF(("%s: %d ports and %d slots\n", DEVNAME(sc), sc->sc_noport,
	    sc->sc_noslot));

	/* Setup Device Context Base Address Array. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_dcbaa.dma,
	    (void **)&sc->sc_dcbaa.segs, (sc->sc_noslot + 1) * sizeof(uint64_t),
	    XHCI_DCBAA_ALIGN, sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Setup command ring. */
	rw_init(&sc->sc_cmd_lock, "xhcicmd");
	error = xhci_ring_alloc(sc, &sc->sc_cmd_ring, XHCI_MAX_CMDS,
	    XHCI_CMDS_RING_ALIGN);
	if (error) {
		printf("%s: could not allocate command ring.\n", DEVNAME(sc));
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Setup one event ring and its segment table (ERST). */
	error = xhci_ring_alloc(sc, &sc->sc_evt_ring, XHCI_MAX_EVTS,
	    XHCI_EVTS_RING_ALIGN);
	if (error) {
		printf("%s: could not allocate event ring.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Allocate the required entry for the segment table. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_erst.dma,
	    (void **)&sc->sc_erst.segs, sizeof(struct xhci_erseg),
	    XHCI_ERST_ALIGN, XHCI_ERST_BOUNDARY);
	if (error) {
		printf("%s: could not allocate segment table.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}

	/* Set our ring address and size in its corresponding segment. */
	sc->sc_erst.segs[0].er_addr = htole64(sc->sc_evt_ring.dma.paddr);
	sc->sc_erst.segs[0].er_size = htole32(XHCI_MAX_EVTS);
	sc->sc_erst.segs[0].er_rsvd = 0;
	bus_dmamap_sync(sc->sc_erst.dma.tag, sc->sc_erst.dma.map, 0,
	    sc->sc_erst.dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Get the number of scratch pages and configure them if necessary. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS2);
	npage = XHCI_HCS2_SPB_MAX(hcr);
	DPRINTF(("%s: %u scratch pages, ETE=%u, IST=0x%x\n", DEVNAME(sc), npage,
	    XHCI_HCS2_ETE(hcr), XHCI_HCS2_IST(hcr)));

	if (npage > 0 && xhci_scratchpad_alloc(sc, npage)) {
		printf("%s: could not allocate scratchpad.\n", DEVNAME(sc));
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}

	return (0);
}
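
/*
 * Note that every error path in xhci_init() releases resources in reverse
 * allocation order (scratchpad, ERST, event ring, command ring, DCBAA),
 * mirroring what xhci_detach() does once attachment has succeeded.
 */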

void
xhci_config(struct xhci_softc *sc)
{
	uint64_t paddr;
	uint32_t hcr;

	/* Make sure to program a number of device slots we can handle. */
	if (sc->sc_noslot > USB_MAX_DEVICES)
		sc->sc_noslot = USB_MAX_DEVICES;
	hcr = XOREAD4(sc, XHCI_CONFIG) & ~XHCI_CONFIG_SLOTS_MASK;
	XOWRITE4(sc, XHCI_CONFIG, hcr | sc->sc_noslot);

	/* Set the device context base array address. */
	paddr = (uint64_t)sc->sc_dcbaa.dma.paddr;
	XOWRITE4(sc, XHCI_DCBAAP_LO, (uint32_t)paddr);
	XOWRITE4(sc, XHCI_DCBAAP_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: DCBAAP=%#x%#x\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_DCBAAP_HI), XOREAD4(sc, XHCI_DCBAAP_LO)));

	/* Set the command ring address. */
	paddr = (uint64_t)sc->sc_cmd_ring.dma.paddr;
	XOWRITE4(sc, XHCI_CRCR_LO, ((uint32_t)paddr) | XHCI_CRCR_LO_RCS);
	XOWRITE4(sc, XHCI_CRCR_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: CRCR=%#x%#x (%016llx)\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_CRCR_HI), XOREAD4(sc, XHCI_CRCR_LO), paddr));

	/* Set the ERST count number to 1, since we use only one event ring. */
	XRWRITE4(sc, XHCI_ERSTSZ(0), XHCI_ERSTS_SET(1));

	/* Set the segment table address. */
	paddr = (uint64_t)sc->sc_erst.dma.paddr;
	XRWRITE4(sc, XHCI_ERSTBA_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERSTBA=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERSTBA_HI(0)), XRREAD4(sc, XHCI_ERSTBA_LO(0))));

	/* Set the ring dequeue address. */
	paddr = (uint64_t)sc->sc_evt_ring.dma.paddr;
	XRWRITE4(sc, XHCI_ERDP_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERDP=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERDP_HI(0)), XRREAD4(sc, XHCI_ERDP_LO(0))));

	/* Enable interrupts. */
	hcr = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), hcr | XHCI_IMAN_INTR_ENA);

	/* Set default interrupt moderation. */
	XRWRITE4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT);

	/* Allow event interrupt and start the controller. */
	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);

	DPRINTF(("%s: USBCMD=%#x\n", DEVNAME(sc), XOREAD4(sc, XHCI_USBCMD)));
	DPRINTF(("%s: IMAN=%#x\n", DEVNAME(sc), XRREAD4(sc, XHCI_IMAN(0))));
}

int
xhci_detach(struct device *self, int flags)
{
	struct xhci_softc *sc = (struct xhci_softc *)self;
	int rv;

	rv = config_detach_children(self, flags);
	if (rv != 0) {
		printf("%s: error while detaching %d\n", DEVNAME(sc), rv);
		return (rv);
	}

	/* Since the hardware might already be gone, ignore the errors. */
	xhci_command_abort(sc);

	xhci_reset(sc);

	/* Disable interrupts. */
	XRWRITE4(sc, XHCI_IMOD(0), 0);
	XRWRITE4(sc, XHCI_IMAN(0), 0);

	/* Clear the event ring address. */
	XRWRITE4(sc, XHCI_ERDP_LO(0), 0);
	XRWRITE4(sc, XHCI_ERDP_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTBA_LO(0), 0);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTSZ(0), 0);

	/* Clear the command ring address. */
	XOWRITE4(sc, XHCI_CRCR_LO, 0);
	XOWRITE4(sc, XHCI_CRCR_HI, 0);

	XOWRITE4(sc, XHCI_DCBAAP_LO, 0);
	XOWRITE4(sc, XHCI_DCBAAP_HI, 0);

	if (sc->sc_spad.npage > 0)
		xhci_scratchpad_free(sc);

	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
	xhci_ring_free(sc, &sc->sc_evt_ring);
	xhci_ring_free(sc, &sc->sc_cmd_ring);
	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);

	return (0);
}

int
xhci_activate(struct device *self, int act)
{
	struct xhci_softc *sc = (struct xhci_softc *)self;
	int rv = 0;

	switch (act) {
	case DVACT_RESUME:
		sc->sc_bus.use_polling++;

		xhci_reset(sc);
		xhci_ring_reset(sc, &sc->sc_cmd_ring);
		xhci_ring_reset(sc, &sc->sc_evt_ring);

		/* Renesas controllers, at least, need more time to resume. */
		usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);

		xhci_config(sc);

		sc->sc_bus.use_polling--;
		rv = config_activate_children(self, act);
		break;
	case DVACT_POWERDOWN:
		rv = config_activate_children(self, act);
		xhci_reset(sc);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}

	return (rv);
}

int
xhci_reset(struct xhci_softc *sc)
{
	uint32_t hcr;
	int i;

	XOWRITE4(sc, XHCI_USBCMD, 0);	/* Halt controller */
	for (i = 0; i < 100; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_HCH;
		if (hcr)
			break;
	}

	if (!hcr)
		printf("%s: halt timeout\n", DEVNAME(sc));

	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_HCRST);
	for (i = 0; i < 100; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		hcr = (XOREAD4(sc, XHCI_USBCMD) & XHCI_CMD_HCRST) |
		    (XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_CNR);
		if (!hcr)
			break;
	}

	if (hcr) {
		printf("%s: reset timeout\n", DEVNAME(sc));
		return (EIO);
	}

	return (0);
}
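
/*
 * The reset sequence halts the controller by clearing Run/Stop and polling
 * USBSTS.HCH, then sets USBCMD.HCRST and waits for both HCRST and
 * USBSTS.CNR (Controller Not Ready) to clear.  Each loop polls in 1ms
 * steps for at most 100ms before reporting a timeout.
 */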

int
xhci_intr(void *v)
{
	struct xhci_softc *sc = v;

	if (sc == NULL || sc->sc_bus.dying)
		return (0);

	/* If we get an interrupt while polling, then just ignore it. */
	if (sc->sc_bus.use_polling) {
		DPRINTFN(16, ("xhci_intr: ignored interrupt while polling\n"));
		return (0);
	}

	return (xhci_intr1(sc));
}

int
xhci_intr1(struct xhci_softc *sc)
{
	uint32_t intrs;

	intrs = XOREAD4(sc, XHCI_USBSTS);
	if (intrs == 0xffffffff) {
		sc->sc_bus.dying = 1;
		return (0);
	}

	if ((intrs & XHCI_STS_EINT) == 0)
		return (0);

	sc->sc_bus.no_intrs++;

	if (intrs & XHCI_STS_HSE) {
		printf("%s: host system error\n", DEVNAME(sc));
		sc->sc_bus.dying = 1;
		return (1);
	}

	/* Acknowledge interrupts */
	XOWRITE4(sc, XHCI_USBSTS, intrs);
	intrs = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), intrs | XHCI_IMAN_INTR_PEND);

	usb_schedsoftintr(&sc->sc_bus);

	return (1);
}
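
/*
 * A USBSTS read of 0xffffffff means register accesses are returning all
 * ones, i.e. the controller has gone away, so the bus is marked as dying.
 * Otherwise the status bits are acknowledged by writing them back and the
 * interrupter's pending bit (write-1-to-clear) is cleared before handing
 * the actual event processing to the soft interrupt.
 */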

void
xhci_poll(struct usbd_bus *bus)
{
	struct xhci_softc *sc = (struct xhci_softc *)bus;

	if (XOREAD4(sc, XHCI_USBSTS))
		xhci_intr1(sc);
}

void
xhci_softintr(void *v)
{
	struct xhci_softc *sc = v;

	if (sc->sc_bus.dying)
		return;

	sc->sc_bus.intr_context++;
	xhci_event_dequeue(sc);
	sc->sc_bus.intr_context--;
}

void
xhci_event_dequeue(struct xhci_softc *sc)
{
	struct xhci_trb *trb;
	uint64_t paddr;
	uint32_t status, flags;

	while ((trb = xhci_ring_consume(sc, &sc->sc_evt_ring)) != NULL) {
		paddr = letoh64(trb->trb_paddr);
		status = letoh32(trb->trb_status);
		flags = letoh32(trb->trb_flags);

		switch (flags & XHCI_TRB_TYPE_MASK) {
		case XHCI_EVT_XFER:
			xhci_event_xfer(sc, paddr, status, flags);
			break;
		case XHCI_EVT_CMD_COMPLETE:
			memcpy(&sc->sc_result_trb, trb, sizeof(*trb));
			xhci_event_command(sc, paddr);
			break;
		case XHCI_EVT_PORT_CHANGE:
			xhci_event_port_change(sc, paddr, status);
			break;
		case XHCI_EVT_HOST_CTRL:
			/* TODO */
			break;
		default:
#ifdef XHCI_DEBUG
			printf("event (%d): ", XHCI_TRB_TYPE(flags));
			xhci_dump_trb(trb);
#endif
			break;
		}

	}

	paddr = (uint64_t)DEQPTR(sc->sc_evt_ring);
	XRWRITE4(sc, XHCI_ERDP_LO(0), ((uint32_t)paddr) | XHCI_ERDP_LO_BUSY);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));
}
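
/*
 * After draining the event ring the new dequeue position is written back
 * to ERDP with the busy flag set, which clears the Event Handler Busy bit
 * and tells the controller how far software has consumed events.
 */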

void
xhci_skip_all(struct xhci_pipe *xp)
{
	struct usbd_xfer *xfer, *last;

	if (xp->skip) {
		/*
		 * Find the last transfer to skip, this is necessary
		 * as xhci_xfer_done() posts new transfers which we
		 * don't want to skip
		 */
		last = SIMPLEQ_FIRST(&xp->pipe.queue);
		if (last == NULL)
			goto done;
		while ((xfer = SIMPLEQ_NEXT(last, next)) != NULL)
			last = xfer;

		do {
			xfer = SIMPLEQ_FIRST(&xp->pipe.queue);
			if (xfer == NULL)
				goto done;
			DPRINTF(("%s: skipping %p\n", __func__, xfer));
			xfer->status = USBD_NORMAL_COMPLETION;
			xhci_xfer_done(xfer);
		} while (xfer != last);
	done:
		xp->skip = 0;
	}
}

void
xhci_event_xfer(struct xhci_softc *sc, uint64_t paddr, uint32_t status,
    uint32_t flags)
{
	struct xhci_pipe *xp;
	struct usbd_xfer *xfer;
	uint8_t dci, slot, code, xfertype;
	uint32_t remain;
	int trb_idx;

	slot = XHCI_TRB_GET_SLOT(flags);
	dci = XHCI_TRB_GET_EP(flags);
	if (slot > sc->sc_noslot) {
		DPRINTF(("%s: incorrect slot (%u)\n", DEVNAME(sc), slot));
		return;
	}

	xp = sc->sc_sdevs[slot].pipes[dci - 1];
	if (xp == NULL) {
		DPRINTF(("%s: incorrect dci (%u)\n", DEVNAME(sc), dci));
		return;
	}

	code = XHCI_TRB_GET_CODE(status);
	remain = XHCI_TRB_REMAIN(status);

	switch (code) {
	case XHCI_CODE_RING_UNDERRUN:
		DPRINTF(("%s: slot %u underrun with %zu TRB\n", DEVNAME(sc),
		    slot, xp->ring.ntrb - xp->free_trbs));
		xhci_skip_all(xp);
		return;
	case XHCI_CODE_RING_OVERRUN:
		DPRINTF(("%s: slot %u overrun with %zu TRB\n", DEVNAME(sc),
		    slot, xp->ring.ntrb - xp->free_trbs));
		xhci_skip_all(xp);
		return;
	case XHCI_CODE_MISSED_SRV:
		DPRINTF(("%s: slot %u missed srv with %zu TRB\n", DEVNAME(sc),
		    slot, xp->ring.ntrb - xp->free_trbs));
		xp->skip = 1;
		return;
	default:
		break;
	}

	trb_idx = (paddr - xp->ring.dma.paddr) / sizeof(struct xhci_trb);
	if (trb_idx < 0 || trb_idx >= xp->ring.ntrb) {
		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
		    trb_idx, xp->ring.ntrb - 1);
		return;
	}

	xfer = xp->pending_xfers[trb_idx];
	if (xfer == NULL) {
		DPRINTF(("%s: NULL xfer pointer\n", DEVNAME(sc)));
		return;
	}

	if (remain > xfer->length)
		remain = xfer->length;

	xfertype = UE_GET_XFERTYPE(xfer->pipe->endpoint->edesc->bmAttributes);

	switch (xfertype) {
	case UE_BULK:
	case UE_INTERRUPT:
	case UE_CONTROL:
		if (xhci_event_xfer_generic(sc, xfer, xp, remain, trb_idx,
		    code, slot, dci))
			return;
		break;
	case UE_ISOCHRONOUS:
		if (xhci_event_xfer_isoc(xfer, xp, remain, trb_idx, code))
			return;
		break;
	default:
		panic("xhci_event_xfer: unknown xfer type %u", xfertype);
	}

	xhci_xfer_done(xfer);
}

uint32_t
xhci_xfer_length_generic(struct xhci_xfer *xx, struct xhci_pipe *xp,
    int trb_idx)
{
	int trb0_idx;
	uint32_t len = 0, type;

	trb0_idx =
	    ((xx->index + xp->ring.ntrb) - xx->ntrb) % (xp->ring.ntrb - 1);

	while (1) {
		type = letoh32(xp->ring.trbs[trb0_idx].trb_flags) &
		    XHCI_TRB_TYPE_MASK;
		if (type == XHCI_TRB_TYPE_NORMAL || type == XHCI_TRB_TYPE_DATA)
			len += XHCI_TRB_LEN(letoh32(
			    xp->ring.trbs[trb0_idx].trb_status));
		if (trb0_idx == trb_idx)
			break;
		if (++trb0_idx == xp->ring.ntrb)
			trb0_idx = 0;
	}
	return len;
}
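
/*
 * The starting index above walks back over the TRBs belonging to this
 * transfer; the modulo is (ntrb - 1) because the last slot of a transfer
 * ring holds the link TRB.  For example, on a 256-TRB ring a 3-TRB TD
 * that completes at index 2 starts at ((2 + 256) - 3) % 255 = 0, and the
 * loop then sums the lengths of the Normal/Data TRBs from there up to the
 * completed one.
 */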

int
xhci_event_xfer_generic(struct xhci_softc *sc, struct usbd_xfer *xfer,
    struct xhci_pipe *xp, uint32_t remain, int trb_idx,
    uint8_t code, uint8_t slot, uint8_t dci)
{
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;

	switch (code) {
	case XHCI_CODE_SUCCESS:
		if (xfer->actlen == 0) {
			if (remain)
				xfer->actlen =
				    xhci_xfer_length_generic(xx, xp, trb_idx) -
				    remain;
			else
				xfer->actlen = xfer->length;
		}
		if (xfer->actlen)
			usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
			    usbd_xfer_isread(xfer) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_SHORT_XFER:
		/*
		 * Use values from the transfer TRB instead of the status TRB.
		 */
		if (xfer->actlen == 0)
			xfer->actlen =
			    xhci_xfer_length_generic(xx, xp, trb_idx) - remain;
		/*
		 * If this is not the last TRB of a transfer, we should
		 * theoretically clear the IOC at the end of the chain
		 * but the HC might have already processed it before we
		 * had a chance to schedule the softinterrupt.
		 */
		if (xx->index != trb_idx) {
			DPRINTF(("%s: short xfer %p for %u\n",
			    DEVNAME(sc), xfer, xx->index));
			return (1);
		}
		if (xfer->actlen)
			usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
			    usbd_xfer_isread(xfer) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_TXERR:
	case XHCI_CODE_SPLITERR:
		DPRINTF(("%s: txerr? code %d\n", DEVNAME(sc), code));
		xfer->status = USBD_IOERROR;
		break;
	case XHCI_CODE_STALL:
	case XHCI_CODE_BABBLE:
		DPRINTF(("%s: babble code %d\n", DEVNAME(sc), code));
		/* Prevent any timeout to kick in. */
		timeout_del(&xfer->timeout_handle);
		usb_rem_task(xfer->device, &xfer->abort_task);

		/* We need to report this condition for umass(4). */
		if (code == XHCI_CODE_STALL)
			xp->halted = USBD_STALLED;
		else
			xp->halted = USBD_IOERROR;
		/*
		 * Since the stack might try to start a new transfer as
		 * soon as a pending one finishes, make sure the endpoint
		 * is fully reset before calling usb_transfer_complete().
		 */
		xp->aborted_xfer = xfer;
		xhci_cmd_reset_ep_async(sc, slot, dci);
		return (1);
	case XHCI_CODE_XFER_STOPPED:
	case XHCI_CODE_XFER_STOPINV:
		/* Endpoint stopped while processing a TD. */
		if (xfer == xp->aborted_xfer) {
			DPRINTF(("%s: stopped xfer=%p\n", __func__, xfer));
			return (1);
		}

		/* FALLTHROUGH */
	default:
		DPRINTF(("%s: unhandled code %d\n", DEVNAME(sc), code));
		xfer->status = USBD_IOERROR;
		xp->halted = 1;
		break;
	}

	return (0);
}

int
xhci_event_xfer_isoc(struct usbd_xfer *xfer, struct xhci_pipe *xp,
    uint32_t remain, int trb_idx, uint8_t code)
{
	struct usbd_xfer *skipxfer;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	int trb0_idx, frame_idx = 0, skip_trb = 0;

	KASSERT(xx->index >= 0);

	switch (code) {
	case XHCI_CODE_SHORT_XFER:
		xp->trb_processed[trb_idx] = TRB_PROCESSED_SHORT;
		break;
	default:
		xp->trb_processed[trb_idx] = TRB_PROCESSED_YES;
		break;
	}

	trb0_idx =
	    ((xx->index + xp->ring.ntrb) - xx->ntrb) % (xp->ring.ntrb - 1);

	/* Find the according frame index for this TRB. */
	while (trb0_idx != trb_idx) {
		if ((letoh32(xp->ring.trbs[trb0_idx].trb_flags) &
		    XHCI_TRB_TYPE_MASK) == XHCI_TRB_TYPE_ISOCH)
			frame_idx++;
		if (trb0_idx++ == (xp->ring.ntrb - 1))
			trb0_idx = 0;
	}

	/*
	 * If we queued two TRBs for a frame and this is the second TRB,
	 * check if the first TRB needs accounting since it might not have
	 * raised an interrupt in case of full data received.
	 */
	if ((letoh32(xp->ring.trbs[trb_idx].trb_flags) & XHCI_TRB_TYPE_MASK) ==
	    XHCI_TRB_TYPE_NORMAL) {
		frame_idx--;
		if (trb_idx == 0)
			trb0_idx = xp->ring.ntrb - 2;
		else
			trb0_idx = trb_idx - 1;
		if (xp->trb_processed[trb0_idx] == TRB_PROCESSED_NO) {
			xfer->frlengths[frame_idx] = XHCI_TRB_LEN(letoh32(
			    xp->ring.trbs[trb0_idx].trb_status));
		} else if (xp->trb_processed[trb0_idx] == TRB_PROCESSED_SHORT) {
			skip_trb = 1;
		}
	}

	if (!skip_trb) {
		xfer->frlengths[frame_idx] +=
		    XHCI_TRB_LEN(letoh32(xp->ring.trbs[trb_idx].trb_status)) -
		    remain;
		xfer->actlen += xfer->frlengths[frame_idx];
	}

	if (xx->index != trb_idx)
		return (1);

	if (xp->skip) {
		while (1) {
			skipxfer = SIMPLEQ_FIRST(&xp->pipe.queue);
			if (skipxfer == xfer || skipxfer == NULL)
				break;
			DPRINTF(("%s: skipping %p\n", __func__, skipxfer));
			skipxfer->status = USBD_NORMAL_COMPLETION;
			xhci_xfer_done(skipxfer);
		}
		xp->skip = 0;
	}

	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
	    usbd_xfer_isread(xfer) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	xfer->status = USBD_NORMAL_COMPLETION;

	return (0);
}
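
/*
 * For isochronous TDs the driver may queue an Isoch TRB followed by a
 * Normal TRB for the same frame.  The accounting above credits the first
 * TRB's length to the frame when the controller only raised an event for
 * the second one, and skips the second event entirely when the first
 * already completed short, so each frlengths[] entry should be counted
 * exactly once.
 */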

void
xhci_event_command(struct xhci_softc *sc, uint64_t paddr)
{
	struct xhci_trb *trb;
	struct xhci_pipe *xp;
	uint32_t flags;
	uint8_t dci, slot;
	int trb_idx, status;

	trb_idx = (paddr - sc->sc_cmd_ring.dma.paddr) / sizeof(*trb);
	if (trb_idx < 0 || trb_idx >= sc->sc_cmd_ring.ntrb) {
		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
		    trb_idx, sc->sc_cmd_ring.ntrb - 1);
		return;
	}

	trb = &sc->sc_cmd_ring.trbs[trb_idx];

	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	flags = letoh32(trb->trb_flags);

	slot = XHCI_TRB_GET_SLOT(flags);
	dci = XHCI_TRB_GET_EP(flags);

	switch (flags & XHCI_TRB_TYPE_MASK) {
	case XHCI_CMD_RESET_EP:
		xp = sc->sc_sdevs[slot].pipes[dci - 1];
		if (xp == NULL)
			break;

		/* Update the dequeue pointer past the last TRB. */
		xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
		    DEQPTR(xp->ring) | xp->ring.toggle);
		break;
	case XHCI_CMD_SET_TR_DEQ:
		xp = sc->sc_sdevs[slot].pipes[dci - 1];
		if (xp == NULL)
			break;

		status = xp->halted;
		xp->halted = 0;
		if (xp->aborted_xfer != NULL) {
			xp->aborted_xfer->status = status;
			xhci_xfer_done(xp->aborted_xfer);
			wakeup(xp);
		}
		break;
	case XHCI_CMD_CONFIG_EP:
	case XHCI_CMD_STOP_EP:
	case XHCI_CMD_DISABLE_SLOT:
	case XHCI_CMD_ENABLE_SLOT:
	case XHCI_CMD_ADDRESS_DEVICE:
	case XHCI_CMD_EVAL_CTX:
	case XHCI_CMD_NOOP:
		/*
		 * All these commands are synchronous.
		 *
		 * If TRBs differ, this could be a delayed result after we
		 * gave up waiting for the expected TRB due to timeout.
		 */
		if (sc->sc_cmd_trb == trb) {
			sc->sc_cmd_trb = NULL;
			wakeup(&sc->sc_cmd_trb);
		}
		break;
	default:
		DPRINTF(("%s: unexpected command %x\n", DEVNAME(sc), flags));
	}
}

void
xhci_event_port_change(struct xhci_softc *sc, uint64_t paddr, uint32_t status)
{
	struct usbd_xfer *xfer = sc->sc_intrxfer;
	uint32_t port = XHCI_TRB_PORTID(paddr);
	uint8_t *p;

	if (XHCI_TRB_GET_CODE(status) != XHCI_CODE_SUCCESS) {
		DPRINTF(("%s: failed port status event\n", DEVNAME(sc)));
		return;
	}

	if (xfer == NULL)
		return;

	p = KERNADDR(&xfer->dmabuf, 0);
	memset(p, 0, xfer->length);

	p[port/8] |= 1 << (port%8);
	DPRINTF(("%s: port=%d change=0x%02x\n", DEVNAME(sc), port, *p));

	xfer->actlen = xfer->length;
	xfer->status = USBD_NORMAL_COMPLETION;

	usb_transfer_complete(xfer);
}

void
xhci_xfer_done(struct usbd_xfer *xfer)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	int ntrb, i;

	splsoftassert(IPL_SOFTUSB);

#ifdef XHCI_DEBUG
	if (xx->index < 0 || xp->pending_xfers[xx->index] == NULL) {
		printf("%s: xfer=%p done (idx=%d, ntrb=%zd)\n", __func__,
		    xfer, xx->index, xx->ntrb);
	}
#endif

	if (xp->aborted_xfer == xfer)
		xp->aborted_xfer = NULL;

	for (ntrb = 0, i = xx->index; ntrb < xx->ntrb; ntrb++, i--) {
		xp->pending_xfers[i] = NULL;
		if (i == 0)
			i = (xp->ring.ntrb - 1);
	}
	xp->free_trbs += xx->ntrb;
	xp->free_trbs += xx->zerotd;
	xx->index = -1;
	xx->ntrb = 0;
	xx->zerotd = 0;

	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);
	usb_transfer_complete(xfer);
}
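
/*
 * xhci_xfer_done() walks backward from the transfer's last TRB index and
 * clears one pending_xfers[] slot per TRB, wrapping from 0 back to
 * ntrb - 1, then returns the TRBs (plus any extra zero-length TD that was
 * queued) to the ring's free count before completing the transfer.
 */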

/*
 * Calculate the Device Context Index (DCI) for endpoints as stated
 * in section 4.5.1 of xHCI specification r1.1.
 */
static inline uint8_t
xhci_ed2dci(usb_endpoint_descriptor_t *ed)
{
	uint8_t dir;

	if (UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL)
		return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + 1);

	if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)
		dir = 1;
	else
		dir = 0;

	return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + dir);
}
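
/*
 * Example: the default control endpoint (address 0) maps to DCI 1,
 * endpoint 0x02 (OUT 2) to 2 * 2 + 0 = 4 and endpoint 0x81 (IN 1) to
 * 1 * 2 + 1 = 3, matching the endpoint context layout of the device
 * context.
 */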
1166 | ||||
1167 | usbd_status | |||
1168 | xhci_pipe_open(struct usbd_pipe *pipe) | |||
1169 | { | |||
1170 | struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus; | |||
1171 | struct xhci_pipe *xp = (struct xhci_pipe *)pipe; | |||
1172 | usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc; | |||
1173 | uint8_t slot = 0, xfertype = UE_GET_XFERTYPE(ed->bmAttributes)((ed->bmAttributes) & 0x03); | |||
1174 | int error; | |||
1175 | ||||
1176 | KASSERT(xp->slot == 0)((xp->slot == 0) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/usb/xhci.c" , 1176, "xp->slot == 0")); | |||
1177 | ||||
1178 | if (sc->sc_bus.dying) | |||
1179 | return (USBD_IOERROR); | |||
1180 | ||||
1181 | /* Root Hub */ | |||
1182 | if (pipe->device->depth == 0) { | |||
1183 | switch (ed->bEndpointAddress) { | |||
1184 | case USB_CONTROL_ENDPOINT0: | |||
1185 | pipe->methods = &xhci_root_ctrl_methods; | |||
1186 | break; | |||
1187 | case UE_DIR_IN0x80 | XHCI_INTR_ENDPT1: | |||
1188 | pipe->methods = &xhci_root_intr_methods; | |||
1189 | break; | |||
1190 | default: | |||
1191 | pipe->methods = NULL((void *)0); | |||
1192 | return (USBD_INVAL); | |||
1193 | } | |||
1194 | return (USBD_NORMAL_COMPLETION); | |||
1195 | } | |||
1196 | ||||
1197 | #if 0 | |||
1198 | /* Issue a noop to check if the command ring is correctly configured. */ | |||
1199 | xhci_cmd_noop(sc); | |||
1200 | #endif | |||
1201 | ||||
1202 | switch (xfertype) { | |||
1203 | case UE_CONTROL0x00: | |||
1204 | pipe->methods = &xhci_device_ctrl_methods; | |||
1205 | ||||
1206 | /* | |||
1207 | * Get a slot and init the device's contexts. | |||
1208 | * | |||
1209 | * Since the control endpoint, represented as the default | |||
1210 | * pipe, is always opened first we are dealing with a | |||
1211 | * new device. Put a new slot in the ENABLED state. | |||
1212 | * | |||
1213 | */ | |||
1214 | error = xhci_cmd_slot_control(sc, &slot, 1); | |||
1215 | if (error || slot == 0 || slot > sc->sc_noslot) | |||
1216 | return (USBD_INVAL); | |||
1217 | ||||
1218 | if (xhci_softdev_alloc(sc, slot)) { | |||
1219 | xhci_cmd_slot_control(sc, &slot, 0); | |||
1220 | return (USBD_NOMEM); | |||
1221 | } | |||
1222 | ||||
1223 | break; | |||
1224 | case UE_ISOCHRONOUS0x01: | |||
1225 | pipe->methods = &xhci_device_isoc_methods; | |||
1226 | break; | |||
1227 | case UE_BULK0x02: | |||
1228 | pipe->methods = &xhci_device_bulk_methods; | |||
1229 | break; | |||
1230 | case UE_INTERRUPT0x03: | |||
1231 | pipe->methods = &xhci_device_intr_methods; | |||
1232 | break; | |||
1233 | default: | |||
1234 | return (USBD_INVAL); | |||
1235 | } | |||
1236 | ||||
1237 | /* | |||
1238 | * Our USBD Bus Interface is pipe-oriented but for most of the | |||
1239 | * operations we need to access a device context, so keep track | |||
1240 | * of the slot ID in every pipe. | |||
1241 | */ | |||
1242 | if (slot == 0) | |||
1243 | slot = ((struct xhci_pipe *)pipe->device->default_pipe)->slot; | |||
1244 | ||||
1245 | xp->slot = slot; | |||
1246 | xp->dci = xhci_ed2dci(ed); | |||
1247 | ||||
1248 | if (xhci_pipe_init(sc, pipe)) { | |||
1249 | xhci_cmd_slot_control(sc, &slot, 0); | |||
1250 | return (USBD_IOERROR); | |||
1251 | } | |||
1252 | ||||
1253 | return (USBD_NORMAL_COMPLETION); | |||
1254 | } | |||
1255 | ||||
1256 | /* | |||
1257 | * Set the maximum Endpoint Service Interface Time (ESIT) payload and | |||
1258 | * the average TRB buffer length for an endpoint. | |||
1259 | */ | |||
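/*
 * For example: a high-speed isochronous endpoint with wMaxPacketSize =
 * 0x1400 encodes 1024-byte packets in bits 10:0 and 2 additional
 * transactions per microframe in bits 12:11, so the function below
 * returns a Max ESIT Payload of (2 + 1) * 1024 = 3072 bytes and uses
 * the same value as Average TRB Length.  Control endpoints always get
 * an Average TRB Length of 8, matching the 8-byte SETUP packet.
 */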
1260 | static inline uint32_t | |||
1261 | xhci_get_txinfo(struct xhci_softc *sc, struct usbd_pipe *pipe) | |||
1262 | { | |||
1263 | usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc; | |||
1264 | uint32_t mep, atl, mps = UGETW(ed->wMaxPacketSize)(*(u_int16_t *)(ed->wMaxPacketSize)); | |||
1265 | ||||
1266 | switch (UE_GET_XFERTYPE(ed->bmAttributes)((ed->bmAttributes) & 0x03)) { | |||
1267 | case UE_CONTROL0x00: | |||
1268 | mep = 0; | |||
1269 | atl = 8; | |||
1270 | break; | |||
1271 | case UE_INTERRUPT0x03: | |||
1272 | case UE_ISOCHRONOUS0x01: | |||
1273 | if (pipe->device->speed == USB_SPEED_SUPER4) { | |||
1274 | /* XXX Read the companion descriptor */ | |||
1275 | } | |||
1276 | ||||
1277 | mep = (UE_GET_TRANS(mps)(((mps) >> 11) & 0x3) + 1) * UE_GET_SIZE(mps)((mps) & 0x7ff); | |||
1278 | atl = mep; | |||
1279 | break; | |||
1280 | case UE_BULK0x02: | |||
1281 | default: | |||
1282 | mep = 0; | |||
1283 | atl = 0; | |||
1284 | } | |||
1285 | ||||
1286 | return (XHCI_EPCTX_MAX_ESIT_PAYLOAD(mep)(((mep) & 0xffff) << 16) | XHCI_EPCTX_AVG_TRB_LEN(atl)((atl) & 0xffff)); | |||
1287 | } | |||
1288 | ||||
1289 | static inline uint32_t | |||
1290 | xhci_linear_interval(usb_endpoint_descriptor_t *ed) | |||
1291 | { | |||
1292 | uint32_t ival = min(max(1, ed->bInterval), 255); | |||
1293 | ||||
1294 | return (fls(ival) - 1); | |||
1295 | } | |||
1296 | ||||
1297 | static inline uint32_t | |||
1298 | xhci_exponential_interval(usb_endpoint_descriptor_t *ed) | |||
1299 | { | |||
1300 | uint32_t ival = min(max(1, ed->bInterval), 16); | |||
1301 | ||||
1302 | return (ival - 1); | |||
1303 | } | |||
1304 | /* | |||
1305 | * Return interval for endpoint expressed in 2^(ival) * 125us. | |||
1306 | * | |||
1307 | * See section 6.2.3.6 of xHCI r1.1 Specification for more details. | |||
1308 | */ | |||
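/*
 * For example: a high-speed interrupt endpoint with bInterval = 4 maps
 * to xhci_exponential_interval() = 4 - 1 = 3, i.e. a service interval
 * of 2^3 * 125us = 1ms, while a full-speed interrupt endpoint with
 * bInterval = 32 (ms) maps to xhci_linear_interval() + 3 =
 * (fls(32) - 1) + 3 = 8, i.e. 2^8 * 125us = 32ms.
 */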
1309 | uint32_t | |||
1310 | xhci_pipe_interval(struct usbd_pipe *pipe) | |||
1311 | { | |||
1312 | usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc; | |||
1313 | uint8_t speed = pipe->device->speed; | |||
1314 | uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes)((ed->bmAttributes) & 0x03); | |||
1315 | uint32_t ival; | |||
1316 | ||||
1317 | if (xfertype == UE_CONTROL0x00 || xfertype == UE_BULK0x02) { | |||
1318 | /* Control and Bulk endpoints never NAK. */ | |||
1319 | ival = 0; | |||
1320 | } else { | |||
1321 | switch (speed) { | |||
1322 | case USB_SPEED_FULL2: | |||
1323 | if (xfertype == UE_ISOCHRONOUS0x01) { | |||
1324 | /* Convert 1-2^(15)ms into 3-18 */ | |||
1325 | ival = xhci_exponential_interval(ed) + 3; | |||
1326 | break; | |||
1327 | } | |||
1328 | /* FALLTHROUGH */ | |||
1329 | case USB_SPEED_LOW1: | |||
1330 | /* Convert 1-255ms into 3-10 */ | |||
1331 | ival = xhci_linear_interval(ed) + 3; | |||
1332 | break; | |||
1333 | case USB_SPEED_HIGH3: | |||
1334 | case USB_SPEED_SUPER4: | |||
1335 | default: | |||
1336 | /* Convert 1-2^(15) * 125us into 0-15 */ | |||
1337 | ival = xhci_exponential_interval(ed); | |||
1338 | break; | |||
1339 | } | |||
1340 | } | |||
1341 | ||||
1342 | KASSERT(ival <= 15)((ival <= 15) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/usb/xhci.c" , 1342, "ival <= 15")); | |||
1343 | return (XHCI_EPCTX_SET_IVAL(ival)(((ival) & 0xff) << 16)); | |||
1344 | } | |||
1345 | ||||
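/*
 * Example: a high-speed isochronous endpoint with wMaxPacketSize =
 * 0x1400 advertises 2 additional transactions per microframe in bits
 * 12:11, so xhci_pipe_maxburst() returns 2 (up to 3 transactions per
 * microframe).  For SuperSpeed endpoints the burst size should come
 * from the endpoint companion descriptor, which is not parsed yet.
 */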
1346 | uint32_t | |||
1347 | xhci_pipe_maxburst(struct usbd_pipe *pipe) | |||
1348 | { | |||
1349 | usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc; | |||
1350 | uint32_t mps = UGETW(ed->wMaxPacketSize)(*(u_int16_t *)(ed->wMaxPacketSize)); | |||
1351 | uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes)((ed->bmAttributes) & 0x03); | |||
1352 | uint32_t maxb = 0; | |||
1353 | ||||
1354 | switch (pipe->device->speed) { | |||
1355 | case USB_SPEED_HIGH3: | |||
1356 | if (xfertype == UE_ISOCHRONOUS0x01 || xfertype == UE_INTERRUPT0x03) | |||
1357 | maxb = UE_GET_TRANS(mps)(((mps) >> 11) & 0x3); | |||
1358 | break; | |||
1359 | case USB_SPEED_SUPER4: | |||
1360 | /* XXX Read the companion descriptor */ | |||
1361 | default: | |||
1362 | break; | |||
1363 | } | |||
1364 | ||||
1365 | return (maxb); | |||
1366 | } | |||
1367 | ||||
1368 | static inline uint32_t | |||
1369 | xhci_last_valid_dci(struct xhci_pipe **pipes, struct xhci_pipe *ignore) | |||
1370 | { | |||
1371 | struct xhci_pipe *lxp; | |||
1372 | int i; | |||
1373 | ||||
1374 | /* Find the last valid Endpoint Context. */ | |||
1375 | for (i = 30; i >= 0; i--) { | |||
1376 | lxp = pipes[i]; | |||
1377 | if (lxp != NULL((void *)0) && lxp != ignore) | |||
1378 | return XHCI_SCTX_DCI(lxp->dci)(((lxp->dci) & 0x1f) << 27); | |||
1379 | } | |||
1380 | ||||
1381 | return 0; | |||
1382 | } | |||
1383 | ||||
1384 | int | |||
1385 | xhci_context_setup(struct xhci_softc *sc, struct usbd_pipe *pipe) | |||
1386 | { | |||
1387 | struct xhci_pipe *xp = (struct xhci_pipe *)pipe; | |||
1388 | struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot]; | |||
1389 | usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc; | |||
1390 | uint32_t mps = UGETW(ed->wMaxPacketSize)(*(u_int16_t *)(ed->wMaxPacketSize)); | |||
1391 | uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes)((ed->bmAttributes) & 0x03); | |||
1392 | uint8_t speed, cerr = 0; | |||
1393 | uint32_t route = 0, rhport = 0; | |||
1394 | struct usbd_device *hub; | |||
1395 | ||||
1396 | /* | |||
1397 | * Calculate the Route String. Assume that there is no hub with | |||
1398 | * more than 15 ports and that they all have a depth < 6. See | |||
1399 | * section 8.9 of USB 3.1 Specification for more details. | |||
1400 | */ | |||
1401 | for (hub = pipe->device; hub->myhub->depth; hub = hub->myhub) { | |||
1402 | uint32_t port = hub->powersrc->portno; | |||
1403 | uint32_t depth = hub->myhub->depth; | |||
1404 | ||||
1405 | route |= port << (4 * (depth - 1)); | |||
1406 | } | |||
1407 | ||||
1408 | /* Get Root Hub port */ | |||
1409 | rhport = hub->powersrc->portno; | |||
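	/*
	 * Example: a device plugged into port 3 of an external hub that
	 * is itself attached to root hub port 2 ends up with route = 0x3
	 * (port 3 in the depth-1 nibble) and rhport = 2.  Each additional
	 * hub tier adds its downstream port number in the next 4-bit
	 * field of the Route String.
	 */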
1410 | ||||
1411 | switch (pipe->device->speed) { | |||
1412 | case USB_SPEED_LOW1: | |||
1413 | speed = XHCI_SPEED_LOW2; | |||
1414 | break; | |||
1415 | case USB_SPEED_FULL2: | |||
1416 | speed = XHCI_SPEED_FULL1; | |||
1417 | break; | |||
1418 | case USB_SPEED_HIGH3: | |||
1419 | speed = XHCI_SPEED_HIGH3; | |||
1420 | break; | |||
1421 | case USB_SPEED_SUPER4: | |||
1422 | speed = XHCI_SPEED_SUPER4; | |||
1423 | break; | |||
1424 | default: | |||
1425 | return (USBD_INVAL); | |||
1426 | } | |||
1427 | ||||
1428 | /* Setup the endpoint context */ | |||
1429 | if (xfertype != UE_ISOCHRONOUS0x01) | |||
1430 | cerr = 3; | |||
1431 | ||||
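	/*
	 * The xHCI Endpoint Type field encodes the direction in bit 2:
	 * 1/2/3 = Isoch/Bulk/Interrupt OUT, 4 = Control (bidirectional),
	 * 5/6/7 = Isoch/Bulk/Interrupt IN.  Since the UE_* transfer types
	 * are 0-3, ORing in 0x4 for IN endpoints (and for control, which
	 * is type 0) yields the corresponding xHCI encoding.
	 */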
1432 | if ((ed->bEndpointAddress & UE_DIR_IN0x80) || (xfertype == UE_CONTROL0x00)) | |||
1433 | xfertype |= 0x4; | |||
1434 | ||||
1435 | sdev->ep_ctx[xp->dci-1]->info_lo = htole32(xhci_pipe_interval(pipe))((__uint32_t)(xhci_pipe_interval(pipe))); | |||
1436 | sdev->ep_ctx[xp->dci-1]->info_hi = htole32(((__uint32_t)((((((mps) & 0x7ff)) & 0xffff) << 16 ) | (((xhci_pipe_maxburst(pipe)) & 0xff) << 8) | (( (xfertype) & 0x7) << 3) | (((cerr) & 0x3) << 1))) | |||
1437 | XHCI_EPCTX_SET_MPS(UE_GET_SIZE(mps)) |((__uint32_t)((((((mps) & 0x7ff)) & 0xffff) << 16 ) | (((xhci_pipe_maxburst(pipe)) & 0xff) << 8) | (( (xfertype) & 0x7) << 3) | (((cerr) & 0x3) << 1))) | |||
1438 | XHCI_EPCTX_SET_MAXB(xhci_pipe_maxburst(pipe)) |((__uint32_t)((((((mps) & 0x7ff)) & 0xffff) << 16 ) | (((xhci_pipe_maxburst(pipe)) & 0xff) << 8) | (( (xfertype) & 0x7) << 3) | (((cerr) & 0x3) << 1))) | |||
1439 | XHCI_EPCTX_SET_EPTYPE(xfertype) | XHCI_EPCTX_SET_CERR(cerr)((__uint32_t)((((((mps) & 0x7ff)) & 0xffff) << 16 ) | (((xhci_pipe_maxburst(pipe)) & 0xff) << 8) | (( (xfertype) & 0x7) << 3) | (((cerr) & 0x3) << 1))) | |||
1440 | )((__uint32_t)((((((mps) & 0x7ff)) & 0xffff) << 16 ) | (((xhci_pipe_maxburst(pipe)) & 0xff) << 8) | (( (xfertype) & 0x7) << 3) | (((cerr) & 0x3) << 1))); | |||
1441 | sdev->ep_ctx[xp->dci-1]->txinfo = htole32(xhci_get_txinfo(sc, pipe))((__uint32_t)(xhci_get_txinfo(sc, pipe))); | |||
1442 | sdev->ep_ctx[xp->dci-1]->deqp = htole64(((__uint64_t)(((xp->ring).dma.paddr + (sizeof(struct xhci_trb ) * (xp->ring).index)) | xp->ring.toggle)) | |||
1443 | DEQPTR(xp->ring) | xp->ring.toggle((__uint64_t)(((xp->ring).dma.paddr + (sizeof(struct xhci_trb ) * (xp->ring).index)) | xp->ring.toggle)) | |||
1444 | )((__uint64_t)(((xp->ring).dma.paddr + (sizeof(struct xhci_trb ) * (xp->ring).index)) | xp->ring.toggle)); | |||
1445 | ||||
1446 | /* Unmask the new endpoint */ | |||
1447 | sdev->input_ctx->drop_flags = 0; | |||
1448 | sdev->input_ctx->add_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci))((__uint32_t)((0x1 << (xp->dci)))); | |||
1449 | ||||
1450 | /* Setup the slot context */ | |||
1451 | sdev->slot_ctx->info_lo = htole32(((__uint32_t)(xhci_last_valid_dci(sdev->pipes, ((void *)0) ) | (((speed) & 0xf) << 20) | ((route) & 0xfffff ))) | |||
1452 | xhci_last_valid_dci(sdev->pipes, NULL) | XHCI_SCTX_SPEED(speed) |((__uint32_t)(xhci_last_valid_dci(sdev->pipes, ((void *)0) ) | (((speed) & 0xf) << 20) | ((route) & 0xfffff ))) | |||
1453 | XHCI_SCTX_ROUTE(route)((__uint32_t)(xhci_last_valid_dci(sdev->pipes, ((void *)0) ) | (((speed) & 0xf) << 20) | ((route) & 0xfffff ))) | |||
1454 | )((__uint32_t)(xhci_last_valid_dci(sdev->pipes, ((void *)0) ) | (((speed) & 0xf) << 20) | ((route) & 0xfffff ))); | |||
1455 | sdev->slot_ctx->info_hi = htole32(XHCI_SCTX_RHPORT(rhport))((__uint32_t)((((rhport) & 0xff) << 16))); | |||
1456 | sdev->slot_ctx->tt = 0; | |||
1457 | sdev->slot_ctx->state = 0; | |||
1458 | ||||
1459 | /* XXX */ | |||
1460 | #define UHUB_IS_MTT(dev) (dev->ddesc.bDeviceProtocol == UDPROTO_HSHUBMTT0x02) | |||
1461 | /* | |||
1462 | * If we are opening the interrupt pipe of a hub, update its | |||
1463 | * context before putting it in the CONFIGURED state. | |||
1464 | */ | |||
1465 | if (pipe->device->hub != NULL((void *)0)) { | |||
1466 | int nports = pipe->device->hub->nports; | |||
1467 | ||||
1468 | sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_HUB(1))((__uint32_t)((((1) & 0x1) << 26))); | |||
1469 | sdev->slot_ctx->info_hi |= htole32(XHCI_SCTX_NPORTS(nports))((__uint32_t)((((nports) & 0xff) << 24))); | |||
1470 | ||||
1471 | if (UHUB_IS_MTT(pipe->device)) | |||
1472 | sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1))((__uint32_t)((((1) & 0x1) << 25))); | |||
1473 | ||||
1474 | sdev->slot_ctx->tt |= htole32(((__uint32_t)((((pipe->device->hub->ttthink) & 0x3 ) << 16))) | |||
1475 | XHCI_SCTX_TT_THINK_TIME(pipe->device->hub->ttthink)((__uint32_t)((((pipe->device->hub->ttthink) & 0x3 ) << 16))) | |||
1476 | )((__uint32_t)((((pipe->device->hub->ttthink) & 0x3 ) << 16))); | |||
1477 | } | |||
1478 | ||||
1479 | /* | |||
1480 | * If this is a Low or Full Speed device below an external High | |||
1481 | * Speed hub, it needs some TT love. | |||
1482 | */ | |||
1483 | if (speed < XHCI_SPEED_HIGH3 && pipe->device->myhsport != NULL((void *)0)) { | |||
1484 | struct usbd_device *hshub = pipe->device->myhsport->parent; | |||
1485 | uint8_t slot = ((struct xhci_pipe *)hshub->default_pipe)->slot; | |||
1486 | ||||
1487 | if (UHUB_IS_MTT(hshub)) | |||
1488 | sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1))((__uint32_t)((((1) & 0x1) << 25))); | |||
1489 | ||||
1490 | sdev->slot_ctx->tt |= htole32(((__uint32_t)(((slot) & 0xff) | (((pipe->device->myhsport ->portno) & 0xff) << 8))) | |||
1491 | XHCI_SCTX_TT_HUB_SID(slot) |((__uint32_t)(((slot) & 0xff) | (((pipe->device->myhsport ->portno) & 0xff) << 8))) | |||
1492 | XHCI_SCTX_TT_PORT_NUM(pipe->device->myhsport->portno)((__uint32_t)(((slot) & 0xff) | (((pipe->device->myhsport ->portno) & 0xff) << 8))) | |||
1493 | )((__uint32_t)(((slot) & 0xff) | (((pipe->device->myhsport ->portno) & 0xff) << 8))); | |||
1494 | } | |||
1495 | #undef UHUB_IS_MTT | |||
1496 | ||||
1497 | /* Unmask the slot context */ | |||
1498 | sdev->input_ctx->add_flags |= htole32(XHCI_INCTX_MASK_DCI(0))((__uint32_t)((0x1 << (0)))); | |||
1499 | ||||
1500 | bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,(*(sdev->ictx_dma.tag)->_dmamap_sync)((sdev->ictx_dma .tag), (sdev->ictx_dma.map), (0), (sc->sc_pagesize), (0x01 | 0x04)) | |||
1501 | sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(sdev->ictx_dma.tag)->_dmamap_sync)((sdev->ictx_dma .tag), (sdev->ictx_dma.map), (0), (sc->sc_pagesize), (0x01 | 0x04)); | |||
1502 | ||||
1503 | return (0); | |||
1504 | } | |||
1505 | ||||
1506 | int | |||
1507 | xhci_pipe_init(struct xhci_softc *sc, struct usbd_pipe *pipe) | |||
1508 | { | |||
1509 | struct xhci_pipe *xp = (struct xhci_pipe *)pipe; | |||
1510 | struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot]; | |||
1511 | int error; | |||
1512 | ||||
1513 | #ifdef XHCI_DEBUG | |||
1514 | struct usbd_device *dev = pipe->device; | |||
1515 | printf("%s: pipe=%p addr=%d depth=%d port=%d speed=%d dev %d dci %u" | |||
1516 | " (epAddr=0x%x)\n", __func__, pipe, dev->address, dev->depth, | |||
1517 | dev->powersrc->portno, dev->speed, xp->slot, xp->dci, | |||
1518 | pipe->endpoint->edesc->bEndpointAddress); | |||
1519 | #endif | |||
1520 | ||||
1521 | if (xhci_ring_alloc(sc, &xp->ring, XHCI_MAX_XFER(16 * 16), XHCI_XFER_RING_ALIGN16)) | |||
1522 | return (ENOMEM12); | |||
1523 | ||||
1524 | xp->free_trbs = xp->ring.ntrb; | |||
1525 | xp->halted = 0; | |||
1526 | ||||
1527 | sdev->pipes[xp->dci - 1] = xp; | |||
1528 | ||||
1529 | error = xhci_context_setup(sc, pipe); | |||
1530 | if (error) | |||
1531 | return (error); | |||
1532 | ||||
1533 | if (xp->dci == 1) { | |||
1534 | /* | |||
1535 | * If we are opening the default pipe, the Slot should | |||
1536 | * be in the ENABLED state. Issue an "Address Device" | |||
1537 | * with BSR=1 to put the device in the DEFAULT state. | |||
1538 | * We cannot jump directly to the ADDRESSED state with | |||
1539 | * BSR=0 because some Low/Full speed devices won't accept | |||
1540 | * a SET_ADDRESS command before we've read their device | |||
1541 | * descriptor. | |||
1542 | */ | |||
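		/*
		 * Slot state flow for a new device: ENABLED (slot just
		 * allocated in xhci_pipe_open()) -> DEFAULT (Address
		 * Device with BSR=1, here) -> ADDRESSED (Address Device
		 * with BSR=0, later via xhci_setaddr()) -> CONFIGURED
		 * (Configure Endpoint when additional pipes are opened).
		 */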
1543 | error = xhci_cmd_set_address(sc, xp->slot, | |||
1544 | sdev->ictx_dma.paddr, XHCI_TRB_BSR(1 << 9)); | |||
1545 | } else { | |||
1546 | error = xhci_cmd_configure_ep(sc, xp->slot, | |||
1547 | sdev->ictx_dma.paddr); | |||
1548 | } | |||
1549 | ||||
1550 | if (error) { | |||
1551 | xhci_ring_free(sc, &xp->ring); | |||
1552 | return (EIO5); | |||
1553 | } | |||
1554 | ||||
1555 | return (0); | |||
1556 | } | |||
1557 | ||||
1558 | void | |||
1559 | xhci_pipe_close(struct usbd_pipe *pipe) | |||
1560 | { | |||
1561 | struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus; | |||
1562 | struct xhci_pipe *xp = (struct xhci_pipe *)pipe; | |||
1563 | struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot]; | |||
1564 | ||||
1565 | /* Root Hub */ | |||
1566 | if (pipe->device->depth == 0) | |||
1567 | return; | |||
1568 | ||||
1569 | /* Mask the endpoint */ | |||
1570 | sdev->input_ctx->drop_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci))((__uint32_t)((0x1 << (xp->dci)))); | |||
1571 | sdev->input_ctx->add_flags = 0; | |||
1572 | ||||
1573 | /* Update last valid Endpoint Context */ | |||
1574 | sdev->slot_ctx->info_lo &= htole32(~XHCI_SCTX_DCI(31))((__uint32_t)(~(((31) & 0x1f) << 27))); | |||
1575 | sdev->slot_ctx->info_lo |= htole32(xhci_last_valid_dci(sdev->pipes, xp))((__uint32_t)(xhci_last_valid_dci(sdev->pipes, xp))); | |||
1576 | ||||
1577 | /* Clear the Endpoint Context */ | |||
1578 | memset(sdev->ep_ctx[xp->dci - 1], 0, sizeof(struct xhci_epctx))__builtin_memset((sdev->ep_ctx[xp->dci - 1]), (0), (sizeof (struct xhci_epctx))); | |||
1579 | ||||
1580 | bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,(*(sdev->ictx_dma.tag)->_dmamap_sync)((sdev->ictx_dma .tag), (sdev->ictx_dma.map), (0), (sc->sc_pagesize), (0x01 | 0x04)) | |||
1581 | sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(sdev->ictx_dma.tag)->_dmamap_sync)((sdev->ictx_dma .tag), (sdev->ictx_dma.map), (0), (sc->sc_pagesize), (0x01 | 0x04)); | |||
1582 | ||||
1583 | if (xhci_cmd_configure_ep(sc, xp->slot, sdev->ictx_dma.paddr)) | |||
1584 | DPRINTF(("%s: error clearing ep (%d)\n", DEVNAME(sc), xp->dci)); | |||
1585 | ||||
1586 | xhci_ring_free(sc, &xp->ring); | |||
1587 | sdev->pipes[xp->dci - 1] = NULL((void *)0); | |||
1588 | ||||
1589 | /* | |||
1590 | * If we are closing the default pipe, the device is probably | |||
1591 | * gone, so put its slot in the DISABLED state. | |||
1592 | */ | |||
1593 | if (xp->dci == 1) { | |||
1594 | xhci_cmd_slot_control(sc, &xp->slot, 0); | |||
1595 | xhci_softdev_free(sc, xp->slot); | |||
1596 | } | |||
1597 | } | |||
1598 | ||||
1599 | /* | |||
1600 | * Transition a device from DEFAULT to ADDRESSED Slot state; this hook | |||
1601 | * is needed for Low/Full speed devices. | |||
1602 | * | |||
1603 | * See section 4.5.3 of USB 3.1 Specification for more details. | |||
1604 | */ | |||
1605 | int | |||
1606 | xhci_setaddr(struct usbd_device *dev, int addr) | |||
1607 | { | |||
1608 | struct xhci_softc *sc = (struct xhci_softc *)dev->bus; | |||
1609 | struct xhci_pipe *xp = (struct xhci_pipe *)dev->default_pipe; | |||
1610 | struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot]; | |||
1611 | int error; | |||
1612 | ||||
1613 | /* Root Hub */ | |||
1614 | if (dev->depth == 0) | |||
1615 | return (0); | |||
1616 | ||||
1617 | KASSERT(xp->dci == 1)((xp->dci == 1) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/usb/xhci.c" , 1617, "xp->dci == 1")); | |||
1618 | ||||
1619 | error = xhci_context_setup(sc, dev->default_pipe); | |||
1620 | if (error) | |||
1621 | return (error); | |||
1622 | ||||
1623 | error = xhci_cmd_set_address(sc, xp->slot, sdev->ictx_dma.paddr, 0); | |||
1624 | ||||
1625 | #ifdef XHCI_DEBUG | |||
1626 | if (error == 0) { | |||
1627 | struct xhci_sctx *sctx; | |||
1628 | uint8_t addr; | |||
1629 | ||||
1630 | bus_dmamap_sync(sdev->octx_dma.tag, sdev->octx_dma.map, 0,(*(sdev->octx_dma.tag)->_dmamap_sync)((sdev->octx_dma .tag), (sdev->octx_dma.map), (0), (sc->sc_pagesize), (0x02 )) | |||
1631 | sc->sc_pagesize, BUS_DMASYNC_POSTREAD)(*(sdev->octx_dma.tag)->_dmamap_sync)((sdev->octx_dma .tag), (sdev->octx_dma.map), (0), (sc->sc_pagesize), (0x02 )); | |||
1632 | ||||
1633 | /* Get output slot context. */ | |||
1634 | sctx = (struct xhci_sctx *)sdev->octx_dma.vaddr; | |||
1635 | addr = XHCI_SCTX_DEV_ADDR(letoh32(sctx->state))((((__uint32_t)(sctx->state))) & 0xff); | |||
1636 | error = (addr == 0); | |||
1637 | ||||
1638 | printf("%s: dev %d addr %d\n", DEVNAME(sc)((sc)->sc_bus.bdev.dv_xname), xp->slot, addr); | |||
1639 | } | |||
1640 | #endif | |||
1641 | ||||
1642 | return (error); | |||
1643 | } | |||
1644 | ||||
1645 | struct usbd_xfer * | |||
1646 | xhci_allocx(struct usbd_bus *bus) | |||
1647 | { | |||
1648 | return (pool_get(xhcixfer, PR_NOWAIT0x0002 | PR_ZERO0x0008)); | |||
1649 | } | |||
1650 | ||||
1651 | void | |||
1652 | xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer) | |||
1653 | { | |||
1654 | pool_put(xhcixfer, xfer); | |||
1655 | } | |||
1656 | ||||
1657 | int | |||
1658 | xhci_scratchpad_alloc(struct xhci_softc *sc, int npage) | |||
1659 | { | |||
1660 | uint64_t *pte; | |||
1661 | int error, i; | |||
1662 | ||||
1663 | /* Allocate the required entries for the table. */ | |||
1664 | error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.table_dma, | |||
1665 | (void **)&pte, npage * sizeof(uint64_t), XHCI_SPAD_TABLE_ALIGN64, | |||
1666 | sc->sc_pagesize); | |||
1667 | if (error) | |||
1668 | return (ENOMEM12); | |||
1669 | ||||
1670 | /* Allocate pages. XXX does not need to be contiguous. */ | |||
1671 | error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.pages_dma, | |||
1672 | NULL((void *)0), npage * sc->sc_pagesize, sc->sc_pagesize, 0); | |||
1673 | if (error) { | |||
1674 | usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma); | |||
1675 | return (ENOMEM12); | |||
1676 | } | |||
1677 | ||||
1678 | for (i = 0; i < npage; i++) { | |||
1679 | pte[i] = htole64(((__uint64_t)(sc->sc_spad.pages_dma.paddr + (i * sc->sc_pagesize ))) | |||
1680 | sc->sc_spad.pages_dma.paddr + (i * sc->sc_pagesize)((__uint64_t)(sc->sc_spad.pages_dma.paddr + (i * sc->sc_pagesize ))) | |||
1681 | )((__uint64_t)(sc->sc_spad.pages_dma.paddr + (i * sc->sc_pagesize ))); | |||
1682 | } | |||
1683 | ||||
1684 | bus_dmamap_sync(sc->sc_spad.table_dma.tag, sc->sc_spad.table_dma.map, 0,(*(sc->sc_spad.table_dma.tag)->_dmamap_sync)((sc->sc_spad .table_dma.tag), (sc->sc_spad.table_dma.map), (0), (npage * sizeof(uint64_t)), (0x01 | 0x04)) | |||
1685 | npage * sizeof(uint64_t), BUS_DMASYNC_PREREAD |(*(sc->sc_spad.table_dma.tag)->_dmamap_sync)((sc->sc_spad .table_dma.tag), (sc->sc_spad.table_dma.map), (0), (npage * sizeof(uint64_t)), (0x01 | 0x04)) | |||
1686 | BUS_DMASYNC_PREWRITE)(*(sc->sc_spad.table_dma.tag)->_dmamap_sync)((sc->sc_spad .table_dma.tag), (sc->sc_spad.table_dma.map), (0), (npage * sizeof(uint64_t)), (0x01 | 0x04)); | |||
1687 | ||||
1688 | /* Entry 0 points to the table of scratchpad pointers. */ | |||
1689 | sc->sc_dcbaa.segs[0] = htole64(sc->sc_spad.table_dma.paddr)((__uint64_t)(sc->sc_spad.table_dma.paddr)); | |||
1690 | bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,(*(sc->sc_dcbaa.dma.tag)->_dmamap_sync)((sc->sc_dcbaa .dma.tag), (sc->sc_dcbaa.dma.map), (0), (sizeof(uint64_t)) , (0x01 | 0x04)) | |||
1691 | sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(sc->sc_dcbaa.dma.tag)->_dmamap_sync)((sc->sc_dcbaa .dma.tag), (sc->sc_dcbaa.dma.map), (0), (sizeof(uint64_t)) , (0x01 | 0x04)); | |||
1692 | ||||
1693 | sc->sc_spad.npage = npage; | |||
1694 | ||||
1695 | return (0); | |||
1696 | } | |||
1697 | ||||
1698 | void | |||
1699 | xhci_scratchpad_free(struct xhci_softc *sc) | |||
1700 | { | |||
1701 | sc->sc_dcbaa.segs[0] = 0; | |||
1702 | bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,(*(sc->sc_dcbaa.dma.tag)->_dmamap_sync)((sc->sc_dcbaa .dma.tag), (sc->sc_dcbaa.dma.map), (0), (sizeof(uint64_t)) , (0x01 | 0x04)) | |||
1703 | sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(sc->sc_dcbaa.dma.tag)->_dmamap_sync)((sc->sc_dcbaa .dma.tag), (sc->sc_dcbaa.dma.map), (0), (sizeof(uint64_t)) , (0x01 | 0x04)); | |||
1704 | ||||
1705 | usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.pages_dma); | |||
1706 | usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma); | |||
1707 | } | |||
1708 | ||||
1709 | int | |||
1710 | xhci_ring_alloc(struct xhci_softc *sc, struct xhci_ring *ring, size_t ntrb, | |||
1711 | size_t alignment) | |||
1712 | { | |||
1713 | size_t size; | |||
1714 | int error; | |||
1715 | ||||
1716 | size = ntrb * sizeof(struct xhci_trb); | |||
1717 | ||||
1718 | error = usbd_dma_contig_alloc(&sc->sc_bus, &ring->dma, | |||
1719 | (void **)&ring->trbs, size, alignment, XHCI_RING_BOUNDARY(64 * 1024)); | |||
1720 | if (error) | |||
1721 | return (error); | |||
1722 | ||||
1723 | ring->ntrb = ntrb; | |||
1724 | ||||
1725 | xhci_ring_reset(sc, ring); | |||
1726 | ||||
1727 | return (0); | |||
1728 | } | |||
1729 | ||||
1730 | void | |||
1731 | xhci_ring_free(struct xhci_softc *sc, struct xhci_ring *ring) | |||
1732 | { | |||
1733 | usbd_dma_contig_free(&sc->sc_bus, &ring->dma); | |||
1734 | } | |||
1735 | ||||
1736 | void | |||
1737 | xhci_ring_reset(struct xhci_softc *sc, struct xhci_ring *ring) | |||
1738 | { | |||
1739 | size_t size; | |||
1740 | ||||
1741 | size = ring->ntrb * sizeof(struct xhci_trb); | |||
1742 | ||||
1743 | memset(ring->trbs, 0, size)__builtin_memset((ring->trbs), (0), (size)); | |||
1744 | ||||
1745 | ring->index = 0; | |||
1746 | ring->toggle = XHCI_TRB_CYCLE(1 << 0); | |||
1747 | ||||
1748 | /* | |||
1749 | * Since all our rings use only one segment, at least for | |||
1750 | * the moment, link their tail to their head. | |||
1751 | */ | |||
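	/*
	 * For the command and transfer rings, the last slot holds a Link
	 * TRB pointing back to the ring's base address.  Its Toggle Cycle
	 * flag (XHCI_TRB_LINKSEG) makes the controller flip its Consumer
	 * Cycle State whenever it follows the link, mirroring the
	 * ring->toggle flip done on wrap in xhci_ring_produce().  The
	 * event ring is consumed by software and needs no Link TRB.
	 */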
1752 | if (ring != &sc->sc_evt_ring) { | |||
1753 | struct xhci_trb *trb = &ring->trbs[ring->ntrb - 1]; | |||
1754 | ||||
1755 | trb->trb_paddr = htole64(ring->dma.paddr)((__uint64_t)(ring->dma.paddr)); | |||
1756 | trb->trb_flags = htole32(XHCI_TRB_TYPE_LINK | XHCI_TRB_LINKSEG |((__uint32_t)((6 << 10) | (1 << 1) | (1 << 0 ))) | |||
1757 | XHCI_TRB_CYCLE)((__uint32_t)((6 << 10) | (1 << 1) | (1 << 0 ))); | |||
1758 | bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,(*(ring->dma.tag)->_dmamap_sync)((ring->dma.tag), (ring ->dma.map), (0), (size), (0x04)) | |||
1759 | BUS_DMASYNC_PREWRITE)(*(ring->dma.tag)->_dmamap_sync)((ring->dma.tag), (ring ->dma.map), (0), (size), (0x04)); | |||
1760 | } else | |||
1761 | bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,(*(ring->dma.tag)->_dmamap_sync)((ring->dma.tag), (ring ->dma.map), (0), (size), (0x01 | 0x04)) | |||
1762 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(ring->dma.tag)->_dmamap_sync)((ring->dma.tag), (ring ->dma.map), (0), (size), (0x01 | 0x04)); | |||
1763 | } | |||
1764 | ||||
1765 | struct xhci_trb* | |||
1766 | xhci_ring_consume(struct xhci_softc *sc, struct xhci_ring *ring) | |||
1767 | { | |||
1768 | struct xhci_trb *trb = &ring->trbs[ring->index]; | |||
1769 | ||||
1770 | KASSERT(ring->index < ring->ntrb)((ring->index < ring->ntrb) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/dev/usb/xhci.c", 1770, "ring->index < ring->ntrb" )); | |||
1771 | ||||
1772 | bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),(*(ring->dma.tag)->_dmamap_sync)((ring->dma.tag), (ring ->dma.map), (((char *)(trb) - (char *)((ring)->trbs))), (sizeof(struct xhci_trb)), (0x02)) | |||
1773 | sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD)(*(ring->dma.tag)->_dmamap_sync)((ring->dma.tag), (ring ->dma.map), (((char *)(trb) - (char *)((ring)->trbs))), (sizeof(struct xhci_trb)), (0x02)); | |||
1774 | ||||
1775 | /* Make sure this TRB can be consumed. */ | |||
1776 | if (ring->toggle != (letoh32(trb->trb_flags)((__uint32_t)(trb->trb_flags)) & XHCI_TRB_CYCLE(1 << 0))) | |||
1777 | return (NULL((void *)0)); | |||
1778 | ||||
1779 | ring->index++; | |||
1780 | ||||
1781 | if (ring->index == ring->ntrb) { | |||
1782 | ring->index = 0; | |||
1783 | ring->toggle ^= 1; | |||
1784 | } | |||
1785 | ||||
1786 | return (trb); | |||
1787 | } | |||
1788 | ||||
1789 | struct xhci_trb* | |||
1790 | xhci_ring_produce(struct xhci_softc *sc, struct xhci_ring *ring) | |||
1791 | { | |||
1792 | struct xhci_trb *lnk, *trb; | |||
1793 | ||||
1794 | KASSERT(ring->index < ring->ntrb)((ring->index < ring->ntrb) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/dev/usb/xhci.c", 1794, "ring->index < ring->ntrb" )); | |||
1795 | ||||
1796 | /* Setup the link TRB after the previous TRB is done. */ | |||
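	/*
	 * Three ordered steps: inherit the Chain bit from the TRB that
	 * precedes the link so a TD spanning the wrap stays chained, sync
	 * that write out, and only then flip the Link TRB's cycle bit, so
	 * the controller takes ownership of the Link TRB only once its
	 * Chain bit is final.
	 */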
1797 | if (ring->index == 0) { | |||
1798 | lnk = &ring->trbs[ring->ntrb - 1]; | |||
1799 | trb = &ring->trbs[ring->ntrb - 2]; | |||
1800 | ||||
1801 | bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),(*(ring->dma.tag)->_dmamap_sync)((ring->dma.tag), (ring ->dma.map), (((char *)(lnk) - (char *)((ring)->trbs))), (sizeof(struct xhci_trb)), (0x02 | 0x08)) | |||
1802 | sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |(*(ring->dma.tag)->_dmamap_sync)((ring->dma.tag), (ring ->dma.map), (((char *)(lnk) - (char *)((ring)->trbs))), (sizeof(struct xhci_trb)), (0x02 | 0x08)) | |||
1803 | BUS_DMASYNC_POSTWRITE)(*(ring->dma.tag)->_dmamap_sync)((ring->dma.tag), (ring ->dma.map), (((char *)(lnk) - (char *)((ring)->trbs))), (sizeof(struct xhci_trb)), (0x02 | 0x08)); | |||
1804 | ||||
1805 | lnk->trb_flags &= htole32(~XHCI_TRB_CHAIN)((__uint32_t)(~(1 << 4))); | |||
1806 | if (letoh32(trb->trb_flags)((__uint32_t)(trb->trb_flags)) & XHCI_TRB_CHAIN(1 << 4)) | |||
1807 | lnk->trb_flags |= htole32(XHCI_TRB_CHAIN)((__uint32_t)((1 << 4))); | |||
1808 | ||||
1809 | bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),(*(ring->dma.tag)->_dmamap_sync)((ring->dma.tag), (ring ->dma.map), (((char *)(lnk) - (char *)((ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
1810 | sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE)(*(ring->dma.tag)->_dmamap_sync)((ring->dma.tag), (ring ->dma.map), (((char *)(lnk) - (char *)((ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)); | |||
1811 | ||||
1812 | lnk->trb_flags ^= htole32(XHCI_TRB_CYCLE)((__uint32_t)((1 << 0))); | |||
1813 | ||||
1814 | bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),(*(ring->dma.tag)->_dmamap_sync)((ring->dma.tag), (ring ->dma.map), (((char *)(lnk) - (char *)((ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
1815 | sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE)(*(ring->dma.tag)->_dmamap_sync)((ring->dma.tag), (ring ->dma.map), (((char *)(lnk) - (char *)((ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)); | |||
1816 | } | |||
1817 | ||||
1818 | trb = &ring->trbs[ring->index++]; | |||
1819 | bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),(*(ring->dma.tag)->_dmamap_sync)((ring->dma.tag), (ring ->dma.map), (((char *)(trb) - (char *)((ring)->trbs))), (sizeof(struct xhci_trb)), (0x02 | 0x08)) | |||
1820 | sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |(*(ring->dma.tag)->_dmamap_sync)((ring->dma.tag), (ring ->dma.map), (((char *)(trb) - (char *)((ring)->trbs))), (sizeof(struct xhci_trb)), (0x02 | 0x08)) | |||
1821 | BUS_DMASYNC_POSTWRITE)(*(ring->dma.tag)->_dmamap_sync)((ring->dma.tag), (ring ->dma.map), (((char *)(trb) - (char *)((ring)->trbs))), (sizeof(struct xhci_trb)), (0x02 | 0x08)); | |||
1822 | ||||
1823 | /* Toggle cycle state of the link TRB and skip it. */ | |||
1824 | if (ring->index == (ring->ntrb - 1)) { | |||
1825 | ring->index = 0; | |||
1826 | ring->toggle ^= 1; | |||
1827 | } | |||
1828 | ||||
1829 | return (trb); | |||
1830 | } | |||
1831 | ||||
1832 | struct xhci_trb * | |||
1833 | xhci_xfer_get_trb(struct xhci_softc *sc, struct usbd_xfer *xfer, | |||
1834 | uint8_t *togglep, int last) | |||
1835 | { | |||
1836 | struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe; | |||
1837 | struct xhci_xfer *xx = (struct xhci_xfer *)xfer; | |||
1838 | ||||
1839 | KASSERT(xp->free_trbs >= 1)((xp->free_trbs >= 1) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/dev/usb/xhci.c", 1839, "xp->free_trbs >= 1" )); | |||
1840 | xp->free_trbs--; | |||
1841 | *togglep = xp->ring.toggle; | |||
1842 | ||||
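	/*
	 * Bookkeeping for the TRB being handed out: last == -1 marks a
	 * TRB belonging to a zero-length TD (no xfer is recorded for its
	 * slot, it is only counted in zerotd), last == 0 marks an
	 * intermediate TRB of a chained TD, and last == 1 marks the final
	 * TRB, whose ring index is remembered so the completion event can
	 * be matched back to the xfer.
	 */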
1843 | switch (last) { | |||
1844 | case -1: /* This will be a zero-length TD. */ | |||
1845 | xp->pending_xfers[xp->ring.index] = NULL((void *)0); | |||
1846 | xx->zerotd += 1; | |||
1847 | break; | |||
1848 | case 0: /* This will be in a chain. */ | |||
1849 | xp->pending_xfers[xp->ring.index] = xfer; | |||
1850 | xx->index = -2; | |||
1851 | xx->ntrb += 1; | |||
1852 | break; | |||
1853 | case 1: /* This will terminate a chain. */ | |||
1854 | xp->pending_xfers[xp->ring.index] = xfer; | |||
1855 | xx->index = xp->ring.index; | |||
1856 | xx->ntrb += 1; | |||
1857 | break; | |||
1858 | } | |||
1859 | ||||
1860 | xp->trb_processed[xp->ring.index] = TRB_PROCESSED_NO0; | |||
1861 | ||||
1862 | return (xhci_ring_produce(sc, &xp->ring)); | |||
1863 | } | |||
1864 | ||||
1865 | int | |||
1866 | xhci_command_submit(struct xhci_softc *sc, struct xhci_trb *trb0, int timeout) | |||
1867 | { | |||
1868 | struct xhci_trb *trb; | |||
1869 | int s, error = 0; | |||
1870 | ||||
1871 | KASSERT(timeout == 0 || sc->sc_cmd_trb == NULL)((timeout == 0 || sc->sc_cmd_trb == ((void *)0)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/usb/xhci.c", 1871 , "timeout == 0 || sc->sc_cmd_trb == NULL")); | |||
1872 | ||||
1873 | trb0->trb_flags |= htole32(sc->sc_cmd_ring.toggle)((__uint32_t)(sc->sc_cmd_ring.toggle)); | |||
1874 | ||||
1875 | trb = xhci_ring_produce(sc, &sc->sc_cmd_ring); | |||
1876 | if (trb == NULL((void *)0)) | |||
1877 | return (EAGAIN35); | |||
1878 | trb->trb_paddr = trb0->trb_paddr; | |||
1879 | trb->trb_status = trb0->trb_status; | |||
1880 | bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,(*(sc->sc_cmd_ring.dma.tag)->_dmamap_sync)((sc->sc_cmd_ring .dma.tag), (sc->sc_cmd_ring.dma.map), (((char *)(trb) - (char *)((&sc->sc_cmd_ring)->trbs))), (sizeof(struct xhci_trb )), (0x04)) | |||
1881 | TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),(*(sc->sc_cmd_ring.dma.tag)->_dmamap_sync)((sc->sc_cmd_ring .dma.tag), (sc->sc_cmd_ring.dma.map), (((char *)(trb) - (char *)((&sc->sc_cmd_ring)->trbs))), (sizeof(struct xhci_trb )), (0x04)) | |||
1882 | BUS_DMASYNC_PREWRITE)(*(sc->sc_cmd_ring.dma.tag)->_dmamap_sync)((sc->sc_cmd_ring .dma.tag), (sc->sc_cmd_ring.dma.map), (((char *)(trb) - (char *)((&sc->sc_cmd_ring)->trbs))), (sizeof(struct xhci_trb )), (0x04)); | |||
1883 | ||||
1884 | trb->trb_flags = trb0->trb_flags; | |||
1885 | bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,(*(sc->sc_cmd_ring.dma.tag)->_dmamap_sync)((sc->sc_cmd_ring .dma.tag), (sc->sc_cmd_ring.dma.map), (((char *)(trb) - (char *)((&sc->sc_cmd_ring)->trbs))), (sizeof(struct xhci_trb )), (0x04)) | |||
1886 | TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),(*(sc->sc_cmd_ring.dma.tag)->_dmamap_sync)((sc->sc_cmd_ring .dma.tag), (sc->sc_cmd_ring.dma.map), (((char *)(trb) - (char *)((&sc->sc_cmd_ring)->trbs))), (sizeof(struct xhci_trb )), (0x04)) | |||
1887 | BUS_DMASYNC_PREWRITE)(*(sc->sc_cmd_ring.dma.tag)->_dmamap_sync)((sc->sc_cmd_ring .dma.tag), (sc->sc_cmd_ring.dma.map), (((char *)(trb) - (char *)((&sc->sc_cmd_ring)->trbs))), (sizeof(struct xhci_trb )), (0x04)); | |||
1888 | ||||
1889 | if (timeout == 0) { | |||
1890 | XDWRITE4(sc, XHCI_DOORBELL(0), 0)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_door_off + ((0x0000 + (4 * (0))))), ((0)))); | |||
1891 | return (0); | |||
1892 | } | |||
1893 | ||||
1894 | rw_assert_wrlock(&sc->sc_cmd_lock); | |||
1895 | ||||
1896 | s = splusb()splraise(0x5); | |||
1897 | sc->sc_cmd_trb = trb; | |||
1898 | XDWRITE4(sc, XHCI_DOORBELL(0), 0)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_door_off + ((0x0000 + (4 * (0))))), ((0)))); | |||
1899 | error = tsleep_nsec(&sc->sc_cmd_trb, PZERO22, "xhcicmd", timeout); | |||
1900 | if (error) { | |||
1901 | #ifdef XHCI_DEBUG | |||
1902 | printf("%s: tsleep() = %d\n", __func__, error); | |||
1903 | printf("cmd = %d ", XHCI_TRB_TYPE(letoh32(trb->trb_flags))(((((__uint32_t)(trb->trb_flags))) & 0xfc00) >> 10 )); | |||
1904 | xhci_dump_trb(trb); | |||
1905 | #endif | |||
1906 | KASSERT(sc->sc_cmd_trb == trb || sc->sc_cmd_trb == NULL)((sc->sc_cmd_trb == trb || sc->sc_cmd_trb == ((void *)0 )) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/usb/xhci.c" , 1906, "sc->sc_cmd_trb == trb || sc->sc_cmd_trb == NULL" )); | |||
1907 | /* | |||
1908 | * Just because the timeout expired does not mean that the | |||
1909 | * TRB isn't active anymore! We could get an interrupt from | |||
1910 | * this TRB later on and then wonder what to do with it. | |||
1911 | * We'd rather abort it. | |||
1912 | */ | |||
1913 | xhci_command_abort(sc); | |||
1914 | sc->sc_cmd_trb = NULL((void *)0); | |||
1915 | splx(s)spllower(s); | |||
1916 | return (error); | |||
1917 | } | |||
1918 | splx(s)spllower(s); | |||
1919 | ||||
1920 | memcpy(trb0, &sc->sc_result_trb, sizeof(struct xhci_trb))__builtin_memcpy((trb0), (&sc->sc_result_trb), (sizeof (struct xhci_trb))); | |||
1921 | ||||
1922 | if (XHCI_TRB_GET_CODE(letoh32(trb0->trb_status))(((((__uint32_t)(trb0->trb_status))) >> 24) & 0xff ) == XHCI_CODE_SUCCESS1) | |||
1923 | return (0); | |||
1924 | ||||
1925 | #ifdef XHCI_DEBUG | |||
1926 | printf("%s: event error code=%d, result=%d \n", DEVNAME(sc)((sc)->sc_bus.bdev.dv_xname), | |||
1927 | XHCI_TRB_GET_CODE(letoh32(trb0->trb_status))(((((__uint32_t)(trb0->trb_status))) >> 24) & 0xff ), | |||
1928 | XHCI_TRB_TYPE(letoh32(trb0->trb_flags))(((((__uint32_t)(trb0->trb_flags))) & 0xfc00) >> 10)); | |||
1929 | xhci_dump_trb(trb0); | |||
1930 | #endif | |||
1931 | return (EIO5); | |||
1932 | } | |||
1933 | ||||
1934 | int | |||
1935 | xhci_command_abort(struct xhci_softc *sc) | |||
1936 | { | |||
1937 | uint32_t reg; | |||
1938 | int i; | |||
1939 | ||||
1940 | reg = XOREAD4(sc, XHCI_CRCR_LO)(((sc)->iot)->read_4(((sc)->ioh), ((sc)->sc_oper_off + (0x18)))); | |||
1941 | if ((reg & XHCI_CRCR_LO_CRR0x00000008) == 0) | |||
1942 | return (0); | |||
1943 | ||||
1944 | XOWRITE4(sc, XHCI_CRCR_LO, reg | XHCI_CRCR_LO_CA)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (0x18)), ((reg | 0x00000004)))); | |||
1945 | XOWRITE4(sc, XHCI_CRCR_HI, 0)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (0x1C)), ((0)))); | |||
1946 | ||||
1947 | for (i = 0; i < 2500; i++) { | |||
1948 | DELAY(100)(*delay_func)(100); | |||
1949 | reg = XOREAD4(sc, XHCI_CRCR_LO)(((sc)->iot)->read_4(((sc)->ioh), ((sc)->sc_oper_off + (0x18)))) & XHCI_CRCR_LO_CRR0x00000008; | |||
1950 | if (!reg) | |||
1951 | break; | |||
1952 | } | |||
1953 | ||||
1954 | if (reg) { | |||
1955 | printf("%s: command ring abort timeout\n", DEVNAME(sc)((sc)->sc_bus.bdev.dv_xname)); | |||
1956 | return (1); | |||
1957 | } | |||
1958 | ||||
1959 | return (0); | |||
1960 | } | |||
1961 | ||||
1962 | int | |||
1963 | xhci_cmd_configure_ep(struct xhci_softc *sc, uint8_t slot, uint64_t addr) | |||
1964 | { | |||
1965 | struct xhci_trb trb; | |||
1966 | int error; | |||
1967 | ||||
1968 | DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot)); | |||
1969 | ||||
1970 | trb.trb_paddr = htole64(addr)((__uint64_t)(addr)); | |||
1971 | trb.trb_status = 0; | |||
1972 | trb.trb_flags = htole32(((__uint32_t)((((slot) & 0xff) << 24) | (12 << 10))) | |||
1973 | XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_CONFIG_EP((__uint32_t)((((slot) & 0xff) << 24) | (12 << 10))) | |||
1974 | )((__uint32_t)((((slot) & 0xff) << 24) | (12 << 10))); | |||
1975 | ||||
1976 | rw_enter_write(&sc->sc_cmd_lock); | |||
1977 | error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUTMSEC_TO_NSEC(500)); | |||
1978 | rw_exit_write(&sc->sc_cmd_lock); | |||
1979 | return (error); | |||
1980 | } | |||
1981 | ||||
1982 | int | |||
1983 | xhci_cmd_stop_ep(struct xhci_softc *sc, uint8_t slot, uint8_t dci) | |||
1984 | { | |||
1985 | struct xhci_trb trb; | |||
1986 | int error; | |||
1987 | ||||
1988 | DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci)); | |||
1989 | ||||
1990 | trb.trb_paddr = 0; | |||
1991 | trb.trb_status = 0; | |||
1992 | trb.trb_flags = htole32(((__uint32_t)((((slot) & 0xff) << 24) | (((dci) & 0x1f) << 16) | (15 << 10))) | |||
1993 | XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_STOP_EP((__uint32_t)((((slot) & 0xff) << 24) | (((dci) & 0x1f) << 16) | (15 << 10))) | |||
1994 | )((__uint32_t)((((slot) & 0xff) << 24) | (((dci) & 0x1f) << 16) | (15 << 10))); | |||
1995 | ||||
1996 | rw_enter_write(&sc->sc_cmd_lock); | |||
1997 | error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUTMSEC_TO_NSEC(500)); | |||
1998 | rw_exit_write(&sc->sc_cmd_lock); | |||
1999 | return (error); | |||
2000 | } | |||
2001 | ||||
2002 | void | |||
2003 | xhci_cmd_reset_ep_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci) | |||
2004 | { | |||
2005 | struct xhci_trb trb; | |||
2006 | ||||
2007 | DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci)); | |||
2008 | ||||
2009 | trb.trb_paddr = 0; | |||
2010 | trb.trb_status = 0; | |||
2011 | trb.trb_flags = htole32(((__uint32_t)((((slot) & 0xff) << 24) | (((dci) & 0x1f) << 16) | (14 << 10))) | |||
2012 | XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_RESET_EP((__uint32_t)((((slot) & 0xff) << 24) | (((dci) & 0x1f) << 16) | (14 << 10))) | |||
2013 | )((__uint32_t)((((slot) & 0xff) << 24) | (((dci) & 0x1f) << 16) | (14 << 10))); | |||
2014 | ||||
2015 | xhci_command_submit(sc, &trb, 0); | |||
2016 | } | |||
2017 | ||||
2018 | void | |||
2019 | xhci_cmd_set_tr_deq_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci, | |||
2020 | uint64_t addr) | |||
2021 | { | |||
2022 | struct xhci_trb trb; | |||
2023 | ||||
2024 | DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci)); | |||
2025 | ||||
2026 | trb.trb_paddr = htole64(addr)((__uint64_t)(addr)); | |||
2027 | trb.trb_status = 0; | |||
2028 | trb.trb_flags = htole32(((__uint32_t)((((slot) & 0xff) << 24) | (((dci) & 0x1f) << 16) | (16 << 10))) | |||
2029 | XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_SET_TR_DEQ((__uint32_t)((((slot) & 0xff) << 24) | (((dci) & 0x1f) << 16) | (16 << 10))) | |||
2030 | )((__uint32_t)((((slot) & 0xff) << 24) | (((dci) & 0x1f) << 16) | (16 << 10))); | |||
2031 | ||||
2032 | xhci_command_submit(sc, &trb, 0); | |||
2033 | } | |||
2034 | ||||
2035 | int | |||
2036 | xhci_cmd_slot_control(struct xhci_softc *sc, uint8_t *slotp, int enable) | |||
2037 | { | |||
2038 | struct xhci_trb trb; | |||
2039 | int error; | |||
2040 | ||||
2041 | DPRINTF(("%s: %s\n", DEVNAME(sc), __func__)); | |||
2042 | ||||
2043 | trb.trb_paddr = 0; | |||
2044 | trb.trb_status = 0; | |||
2045 | if (enable) | |||
2046 | trb.trb_flags = htole32(XHCI_CMD_ENABLE_SLOT)((__uint32_t)((9 << 10))); | |||
2047 | else | |||
2048 | trb.trb_flags = htole32(((__uint32_t)((((*slotp) & 0xff) << 24) | (10 << 10))) | |||
2049 | XHCI_TRB_SET_SLOT(*slotp) | XHCI_CMD_DISABLE_SLOT((__uint32_t)((((*slotp) & 0xff) << 24) | (10 << 10))) | |||
2050 | )((__uint32_t)((((*slotp) & 0xff) << 24) | (10 << 10))); | |||
2051 | ||||
2052 | rw_enter_write(&sc->sc_cmd_lock); | |||
2053 | error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUTMSEC_TO_NSEC(500)); | |||
2054 | rw_exit_write(&sc->sc_cmd_lock); | |||
2055 | if (error != 0) | |||
2056 | return (EIO5); | |||
2057 | ||||
2058 | if (enable) | |||
2059 | *slotp = XHCI_TRB_GET_SLOT(letoh32(trb.trb_flags))(((((__uint32_t)(trb.trb_flags))) >> 24) & 0xff); | |||
2060 | ||||
2061 | return (0); | |||
2062 | } | |||
2063 | ||||
2064 | int | |||
2065 | xhci_cmd_set_address(struct xhci_softc *sc, uint8_t slot, uint64_t addr, | |||
2066 | uint32_t bsr) | |||
2067 | { | |||
2068 | struct xhci_trb trb; | |||
2069 | int error; | |||
2070 | ||||
2071 | DPRINTF(("%s: %s BSR=%u\n", DEVNAME(sc), __func__, bsr ? 1 : 0)); | |||
2072 | ||||
2073 | trb.trb_paddr = htole64(addr)((__uint64_t)(addr)); | |||
2074 | trb.trb_status = 0; | |||
2075 | trb.trb_flags = htole32(((__uint32_t)((((slot) & 0xff) << 24) | (11 << 10) | bsr)) | |||
2076 | XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_ADDRESS_DEVICE | bsr((__uint32_t)((((slot) & 0xff) << 24) | (11 << 10) | bsr)) | |||
2077 | )((__uint32_t)((((slot) & 0xff) << 24) | (11 << 10) | bsr)); | |||
2078 | ||||
2079 | rw_enter_write(&sc->sc_cmd_lock); | |||
2080 | error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUTMSEC_TO_NSEC(500)); | |||
2081 | rw_exit_write(&sc->sc_cmd_lock); | |||
2082 | return (error); | |||
2083 | } | |||
2084 | ||||
2085 | #ifdef XHCI_DEBUG | |||
2086 | int | |||
2087 | xhci_cmd_noop(struct xhci_softc *sc) | |||
2088 | { | |||
2089 | struct xhci_trb trb; | |||
2090 | int error; | |||
2091 | ||||
2092 | DPRINTF(("%s: %s\n", DEVNAME(sc), __func__)); | |||
2093 | ||||
2094 | trb.trb_paddr = 0; | |||
2095 | trb.trb_status = 0; | |||
2096 | trb.trb_flags = htole32(XHCI_CMD_NOOP)((__uint32_t)((23 << 10))); | |||
2097 | ||||
2098 | rw_enter_write(&sc->sc_cmd_lock); | |||
2099 | error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUTMSEC_TO_NSEC(500)); | |||
2100 | rw_exit_write(&sc->sc_cmd_lock); | |||
2101 | return (error); | |||
2102 | } | |||
2103 | #endif | |||
2104 | ||||
2105 | int | |||
2106 | xhci_softdev_alloc(struct xhci_softc *sc, uint8_t slot) | |||
2107 | { | |||
2108 | struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot]; | |||
2109 | int i, error; | |||
2110 | uint8_t *kva; | |||
2111 | ||||
2112 | /* | |||
2113 | * Setup input context. Even with 64 byte context size, it | |||
2114 | * fits into the smallest supported page size, so use that. | |||
2115 | */ | |||
2116 | error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->ictx_dma, | |||
2117 | (void **)&kva, sc->sc_pagesize, XHCI_ICTX_ALIGN64, sc->sc_pagesize); | |||
2118 | if (error) | |||
2119 | return (ENOMEM12); | |||
2120 | ||||
2121 | sdev->input_ctx = (struct xhci_inctx *)kva; | |||
2122 | sdev->slot_ctx = (struct xhci_sctx *)(kva + sc->sc_ctxsize); | |||
2123 | for (i = 0; i < 31; i++) | |||
2124 | sdev->ep_ctx[i] = | |||
2125 | (struct xhci_epctx *)(kva + (i + 2) * sc->sc_ctxsize); | |||
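	/*
	 * Layout of the input context page, with sc_ctxsize of 32 or 64
	 * bytes depending on the Context Size bit of HCCPARAMS: the Input
	 * Control Context sits at offset 0, the Slot Context at
	 * sc_ctxsize, and the Endpoint Context for DCI n at
	 * (n + 1) * sc_ctxsize, so ep_ctx[0] (DCI 1, the default control
	 * endpoint) starts at 2 * sc_ctxsize.
	 */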
2126 | ||||
2127 | DPRINTF(("%s: dev %d, input=%p slot=%p ep0=%p\n", DEVNAME(sc), | |||
2128 | slot, sdev->input_ctx, sdev->slot_ctx, sdev->ep_ctx[0])); | |||
2129 | ||||
2130 | /* Setup output context */ | |||
2131 | error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->octx_dma, NULL((void *)0), | |||
2132 | sc->sc_pagesize, XHCI_OCTX_ALIGN32, sc->sc_pagesize); | |||
2133 | if (error) { | |||
2134 | usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma); | |||
2135 | return (ENOMEM12); | |||
2136 | } | |||
2137 | ||||
2138 | memset(&sdev->pipes, 0, sizeof(sdev->pipes))__builtin_memset((&sdev->pipes), (0), (sizeof(sdev-> pipes))); | |||
2139 | ||||
2140 | DPRINTF(("%s: dev %d, setting DCBAA to 0x%016llx\n", DEVNAME(sc), | |||
2141 | slot, (long long)sdev->octx_dma.paddr)); | |||
2142 | ||||
2143 | sc->sc_dcbaa.segs[slot] = htole64(sdev->octx_dma.paddr)((__uint64_t)(sdev->octx_dma.paddr)); | |||
2144 | bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,(*(sc->sc_dcbaa.dma.tag)->_dmamap_sync)((sc->sc_dcbaa .dma.tag), (sc->sc_dcbaa.dma.map), (slot * sizeof(uint64_t )), (sizeof(uint64_t)), (0x01 | 0x04)) | |||
2145 | slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |(*(sc->sc_dcbaa.dma.tag)->_dmamap_sync)((sc->sc_dcbaa .dma.tag), (sc->sc_dcbaa.dma.map), (slot * sizeof(uint64_t )), (sizeof(uint64_t)), (0x01 | 0x04)) | |||
2146 | BUS_DMASYNC_PREWRITE)(*(sc->sc_dcbaa.dma.tag)->_dmamap_sync)((sc->sc_dcbaa .dma.tag), (sc->sc_dcbaa.dma.map), (slot * sizeof(uint64_t )), (sizeof(uint64_t)), (0x01 | 0x04)); | |||
2147 | ||||
2148 | return (0); | |||
2149 | } | |||
2150 | ||||
2151 | void | |||
2152 | xhci_softdev_free(struct xhci_softc *sc, uint8_t slot) | |||
2153 | { | |||
2154 | struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot]; | |||
2155 | ||||
2156 | sc->sc_dcbaa.segs[slot] = 0; | |||
2157 | bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,(*(sc->sc_dcbaa.dma.tag)->_dmamap_sync)((sc->sc_dcbaa .dma.tag), (sc->sc_dcbaa.dma.map), (slot * sizeof(uint64_t )), (sizeof(uint64_t)), (0x01 | 0x04)) | |||
2158 | slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |(*(sc->sc_dcbaa.dma.tag)->_dmamap_sync)((sc->sc_dcbaa .dma.tag), (sc->sc_dcbaa.dma.map), (slot * sizeof(uint64_t )), (sizeof(uint64_t)), (0x01 | 0x04)) | |||
2159 | BUS_DMASYNC_PREWRITE)(*(sc->sc_dcbaa.dma.tag)->_dmamap_sync)((sc->sc_dcbaa .dma.tag), (sc->sc_dcbaa.dma.map), (slot * sizeof(uint64_t )), (sizeof(uint64_t)), (0x01 | 0x04)); | |||
2160 | ||||
2161 | usbd_dma_contig_free(&sc->sc_bus, &sdev->octx_dma); | |||
2162 | usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma); | |||
2163 | ||||
2164 | memset(sdev, 0, sizeof(struct xhci_soft_dev))__builtin_memset((sdev), (0), (sizeof(struct xhci_soft_dev))); | |||
2165 | } | |||
2166 | ||||
2167 | /* Root hub descriptors. */ | |||
2168 | usb_device_descriptor_t xhci_devd = { | |||
2169 | USB_DEVICE_DESCRIPTOR_SIZE18, | |||
2170 | UDESC_DEVICE0x01, /* type */ | |||
2171 | {0x00, 0x03}, /* USB version */ | |||
2172 | UDCLASS_HUB0x09, /* class */ | |||
2173 | UDSUBCLASS_HUB0x00, /* subclass */ | |||
2174 | UDPROTO_HSHUBSTT0x01, /* protocol */ | |||
2175 | 9, /* max packet */ | |||
2176 | {0},{0},{0x00,0x01}, /* device id */ | |||
2177 | 1,2,0, /* string indexes */ | |||
2178 | 1 /* # of configurations */ | |||
2179 | }; | |||
2180 | ||||
2181 | const usb_config_descriptor_t xhci_confd = { | |||
2182 | USB_CONFIG_DESCRIPTOR_SIZE9, | |||
2183 | UDESC_CONFIG0x02, | |||
2184 | {USB_CONFIG_DESCRIPTOR_SIZE9 + | |||
2185 | USB_INTERFACE_DESCRIPTOR_SIZE9 + | |||
2186 | USB_ENDPOINT_DESCRIPTOR_SIZE7}, | |||
2187 | 1, | |||
2188 | 1, | |||
2189 | 0, | |||
2190 | UC_BUS_POWERED0x80 | UC_SELF_POWERED0x40, | |||
2191 | 0 /* max power */ | |||
2192 | }; | |||
2193 | ||||
2194 | const usb_interface_descriptor_t xhci_ifcd = { | |||
2195 | USB_INTERFACE_DESCRIPTOR_SIZE9, | |||
2196 | UDESC_INTERFACE0x04, | |||
2197 | 0, | |||
2198 | 0, | |||
2199 | 1, | |||
2200 | UICLASS_HUB0x09, | |||
2201 | UISUBCLASS_HUB0, | |||
2202 | UIPROTO_HSHUBSTT0, | |||
2203 | 0 | |||
2204 | }; | |||
2205 | ||||
2206 | const usb_endpoint_descriptor_t xhci_endpd = { | |||
2207 | USB_ENDPOINT_DESCRIPTOR_SIZE7, | |||
2208 | UDESC_ENDPOINT0x05, | |||
2209 | UE_DIR_IN0x80 | XHCI_INTR_ENDPT1, | |||
2210 | UE_INTERRUPT0x03, | |||
2211 | {2, 0}, /* max 15 ports */ | |||
2212 | 255 | |||
2213 | }; | |||
2214 | ||||
2215 | const usb_endpoint_ss_comp_descriptor_t xhci_endpcd = { | |||
2216 | USB_ENDPOINT_SS_COMP_DESCRIPTOR_SIZE6, | |||
2217 | UDESC_ENDPOINT_SS_COMP0x30, | |||
2218 | 0, | |||
2219 | 0, | |||
2220 | {0, 0} | |||
2221 | }; | |||
2222 | ||||
2223 | const usb_hub_descriptor_t xhci_hubd = { | |||
2224 | USB_HUB_DESCRIPTOR_SIZE8, | |||
2225 | UDESC_SS_HUB0x2A, | |||
2226 | 0, | |||
2227 | {0,0}, | |||
2228 | 0, | |||
2229 | 0, | |||
2230 | {0}, | |||
2231 | }; | |||
2232 | ||||
2233 | void | |||
2234 | xhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status) | |||
2235 | { | |||
2236 | struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; | |||
2237 | struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe; | |||
2238 | int error; | |||
2239 | ||||
2240 | splsoftassert(IPL_SOFTUSB)do { if (splassert_ctl > 0) { splassert_check(0x5, __func__ ); } } while (0); | |||
2241 | ||||
2242 | DPRINTF(("%s: xfer=%p status=%s err=%s actlen=%d len=%d idx=%d\n", | |||
2243 | __func__, xfer, usbd_errstr(xfer->status), usbd_errstr(status), | |||
2244 | xfer->actlen, xfer->length, ((struct xhci_xfer *)xfer)->index)); | |||
2245 | ||||
2246 | /* XXX The stack should not call abort() in this case. */ | |||
2247 | if (sc->sc_bus.dying || xfer->status == USBD_NOT_STARTED) { | |||
2248 | xfer->status = status; | |||
2249 | timeout_del(&xfer->timeout_handle); | |||
2250 | usb_rem_task(xfer->device, &xfer->abort_task); | |||
2251 | usb_transfer_complete(xfer); | |||
2252 | return; | |||
2253 | } | |||
2254 | ||||
2255 | /* Transfer is already done. */ | |||
2256 | if (xfer->status != USBD_IN_PROGRESS) { | |||
2257 | DPRINTF(("%s: already done \n", __func__)); | |||
2258 | return; | |||
2259 | } | |||
2260 | ||||
2261 | /* Prevent any timeout from kicking in. */ | |||
2262 | timeout_del(&xfer->timeout_handle); | |||
2263 | usb_rem_task(xfer->device, &xfer->abort_task); | |||
2264 | ||||
2265 | /* Indicate that we are aborting this transfer. */ | |||
2266 | xp->halted = status; | |||
2267 | xp->aborted_xfer = xfer; | |||
2268 | ||||
2269 | /* Stop the endpoint and wait until the hardware says so. */ | |||
2270 | if (xhci_cmd_stop_ep(sc, xp->slot, xp->dci)) { | |||
2271 | DPRINTF(("%s: error stopping endpoint\n", DEVNAME(sc))); | |||
2272 | /* Assume the device is gone. */ | |||
2273 | xp->halted = 0; | |||
2274 | xp->aborted_xfer = NULL((void *)0); | |||
2275 | xfer->status = status; | |||
2276 | usb_transfer_complete(xfer); | |||
2277 | return; | |||
2278 | } | |||
2279 | ||||
2280 | /* | |||
2281 | * The transfer was already completed when we stopped the | |||
2282 | * endpoint; no need to move the dequeue pointer past its | |||
2283 | * TRBs. | |||
2284 | */ | |||
2285 | if (xp->aborted_xfer == NULL((void *)0)) { | |||
2286 | DPRINTF(("%s: done before stopping the endpoint\n", __func__)); | |||
2287 | xp->halted = 0; | |||
2288 | return; | |||
2289 | } | |||
2290 | ||||
2291 | /* | |||
2292 | * At this stage the endpoint has been stopped, so update its | |||
2293 | * dequeue pointer past the last TRB of the transfer. | |||
2294 | * | |||
2295 | * Note: This assumes that only one transfer per endpoint has | |||
2296 | * pending TRBs on the ring. | |||
2297 | */ | |||
2298 | xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci, | |||
2299 | DEQPTR(xp->ring)((xp->ring).dma.paddr + (sizeof(struct xhci_trb) * (xp-> ring).index)) | xp->ring.toggle); | |||
2300 | error = tsleep_nsec(xp, PZERO22, "xhciab", XHCI_CMD_TIMEOUTMSEC_TO_NSEC(500)); | |||
2301 | if (error) | |||
2302 | printf("%s: timeout aborting transfer\n", DEVNAME(sc)((sc)->sc_bus.bdev.dv_xname)); | |||
2303 | } | |||
2304 | ||||
2305 | void | |||
2306 | xhci_timeout(void *addr) | |||
2307 | { | |||
2308 | struct usbd_xfer *xfer = addr; | |||
2309 | struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; | |||
2310 | ||||
2311 | if (sc->sc_bus.dying) { | |||
2312 | xhci_timeout_task(addr); | |||
2313 | return; | |||
2314 | } | |||
2315 | ||||
2316 | usb_init_task(&xfer->abort_task, xhci_timeout_task, addr,((&xfer->abort_task)->fun = (xhci_timeout_task), (& xfer->abort_task)->arg = (addr), (&xfer->abort_task )->type = (2), (&xfer->abort_task)->state = 0x0) | |||
2317 | USB_TASK_TYPE_ABORT)((&xfer->abort_task)->fun = (xhci_timeout_task), (& xfer->abort_task)->arg = (addr), (&xfer->abort_task )->type = (2), (&xfer->abort_task)->state = 0x0); | |||
2318 | usb_add_task(xfer->device, &xfer->abort_task); | |||
2319 | } | |||
2320 | ||||
2321 | void | |||
2322 | xhci_timeout_task(void *addr) | |||
2323 | { | |||
2324 | struct usbd_xfer *xfer = addr; | |||
2325 | int s; | |||
2326 | ||||
2327 | s = splusb()splraise(0x5); | |||
2328 | xhci_abort_xfer(xfer, USBD_TIMEOUT); | |||
2329 | splx(s)spllower(s); | |||
2330 | } | |||
2331 | ||||
2332 | usbd_status | |||
2333 | xhci_root_ctrl_transfer(struct usbd_xfer *xfer) | |||
2334 | { | |||
2335 | usbd_status err; | |||
2336 | ||||
2337 | err = usb_insert_transfer(xfer); | |||
2338 | if (err) | |||
2339 | return (err); | |||
2340 | ||||
2341 | return (xhci_root_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)((&xfer->pipe->queue)->sqh_first))); | |||
2342 | } | |||
2343 | ||||
2344 | usbd_status | |||
2345 | xhci_root_ctrl_start(struct usbd_xfer *xfer) | |||
2346 | { | |||
2347 | struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; | |||
2348 | usb_port_status_t ps; | |||
2349 | usb_device_request_t *req; | |||
2350 | void *buf = NULL((void *)0); | |||
2351 | usb_hub_descriptor_t hubd; | |||
2352 | usbd_status err; | |||
2353 | int s, len, value, index; | |||
2354 | int l, totlen = 0; | |||
2355 | int port, i; | |||
2356 | uint32_t v; | |||
2357 | ||||
2358 | KASSERT(xfer->rqflags & URQ_REQUEST)((xfer->rqflags & 0x01) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/dev/usb/xhci.c", 2358, "xfer->rqflags & URQ_REQUEST" )); | |||
2359 | ||||
2360 | if (sc->sc_bus.dying) | |||
2361 | return (USBD_IOERROR); | |||
2362 | ||||
2363 | req = &xfer->request; | |||
2364 | ||||
2365 | DPRINTFN(4,("%s: type=0x%02x request=%02x\n", __func__, | |||
2366 | req->bmRequestType, req->bRequest)); | |||
2367 | ||||
2368 | len = UGETW(req->wLength)(*(u_int16_t *)(req->wLength)); | |||
2369 | value = UGETW(req->wValue)(*(u_int16_t *)(req->wValue)); | |||
2370 | index = UGETW(req->wIndex)(*(u_int16_t *)(req->wIndex)); | |||
2371 | ||||
2372 | if (len != 0) | |||
2373 | buf = KERNADDR(&xfer->dmabuf, 0)((void *)((char *)((&xfer->dmabuf)->block->kaddr + (&xfer->dmabuf)->offs) + (0))); | |||
2374 | ||||
2375 | #define C(x,y)((x) | ((y) << 8)) ((x) | ((y) << 8)) | |||
2376 | switch(C(req->bRequest, req->bmRequestType)((req->bRequest) | ((req->bmRequestType) << 8))) { | |||
2377 | case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE)((0x01) | (((0x00 | 0x00 | 0x00)) << 8)): | |||
2378 | case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE)((0x01) | (((0x00 | 0x00 | 0x01)) << 8)): | |||
2379 | case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT)((0x01) | (((0x00 | 0x00 | 0x02)) << 8)): | |||
2380 | /* | |||
2381 | * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops | |||
2382 | * for the integrated root hub. | |||
2383 | */ | |||
2384 | break; | |||
2385 | case C(UR_GET_CONFIG, UT_READ_DEVICE)((0x08) | (((0x80 | 0x00 | 0x00)) << 8)): | |||
2386 | if (len > 0) { | |||
2387 | *(uint8_t *)buf = sc->sc_conf; | |||
2388 | totlen = 1; | |||
2389 | } | |||
2390 | break; | |||
2391 | case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE)((0x06) | (((0x80 | 0x00 | 0x00)) << 8)): | |||
2392 | DPRINTFN(8,("xhci_root_ctrl_start: wValue=0x%04x\n", value)); | |||
2393 | switch(value >> 8) { | |||
2394 | case UDESC_DEVICE0x01: | |||
2395 | if ((value & 0xff) != 0) { | |||
2396 | err = USBD_IOERROR; | |||
2397 | goto ret; | |||
2398 | } | |||
2399 | totlen = l = min(len, USB_DEVICE_DESCRIPTOR_SIZE18); | |||
2400 | USETW(xhci_devd.idVendor, sc->sc_id_vendor)(*(u_int16_t *)(xhci_devd.idVendor) = (sc->sc_id_vendor)); | |||
2401 | memcpy(buf, &xhci_devd, l)__builtin_memcpy((buf), (&xhci_devd), (l)); | |||
2402 | break; | |||
2403 | /* | |||
2404 | * We can't really operate at another speed, but the spec says | |||
2405 | * we need this descriptor. | |||
2406 | */ | |||
2407 | case UDESC_OTHER_SPEED_CONFIGURATION0x07: | |||
2408 | case UDESC_CONFIG0x02: | |||
2409 | if ((value & 0xff) != 0) { | |||
2410 | err = USBD_IOERROR; | |||
2411 | goto ret; | |||
2412 | } | |||
2413 | totlen = l = min(len, USB_CONFIG_DESCRIPTOR_SIZE9); | |||
2414 | memcpy(buf, &xhci_confd, l)__builtin_memcpy((buf), (&xhci_confd), (l)); | |||
2415 | ((usb_config_descriptor_t *)buf)->bDescriptorType = | |||
2416 | value >> 8; | |||
2417 | buf = (char *)buf + l; | |||
2418 | len -= l; | |||
2419 | l = min(len, USB_INTERFACE_DESCRIPTOR_SIZE9); | |||
2420 | totlen += l; | |||
2421 | memcpy(buf, &xhci_ifcd, l)__builtin_memcpy((buf), (&xhci_ifcd), (l)); | |||
2422 | buf = (char *)buf + l; | |||
2423 | len -= l; | |||
2424 | l = min(len, USB_ENDPOINT_DESCRIPTOR_SIZE7); | |||
2425 | totlen += l; | |||
2426 | memcpy(buf, &xhci_endpd, l)__builtin_memcpy((buf), (&xhci_endpd), (l)); | |||
2427 | break; | |||
2428 | case UDESC_STRING0x03: | |||
2429 | if (len == 0) | |||
2430 | break; | |||
2431 | *(u_int8_t *)buf = 0; | |||
2432 | totlen = 1; | |||
2433 | switch (value & 0xff) { | |||
2434 | case 0: /* Language table */ | |||
2435 | totlen = usbd_str(buf, len, "\001"); | |||
2436 | break; | |||
2437 | case 1: /* Vendor */ | |||
2438 | totlen = usbd_str(buf, len, sc->sc_vendor); | |||
2439 | break; | |||
2440 | case 2: /* Product */ | |||
2441 | totlen = usbd_str(buf, len, "xHCI root hub"); | |||
2442 | break; | |||
2443 | } | |||
2444 | break; | |||
2445 | default: | |||
2446 | err = USBD_IOERROR; | |||
2447 | goto ret; | |||
2448 | } | |||
2449 | break; | |||
2450 | case C(UR_GET_INTERFACE, UT_READ_INTERFACE)((0x0a) | (((0x80 | 0x00 | 0x01)) << 8)): | |||
2451 | if (len > 0) { | |||
2452 | *(uint8_t *)buf = 0; | |||
2453 | totlen = 1; | |||
2454 | } | |||
2455 | break; | |||
2456 | case C(UR_GET_STATUS, UT_READ_DEVICE)((0x00) | (((0x80 | 0x00 | 0x00)) << 8)): | |||
2457 | if (len > 1) { | |||
2458 | USETW(((usb_status_t *)buf)->wStatus,UDS_SELF_POWERED)(*(u_int16_t *)(((usb_status_t *)buf)->wStatus) = (0x0001) ); | |||
2459 | totlen = 2; | |||
2460 | } | |||
2461 | break; | |||
2462 | case C(UR_GET_STATUS, UT_READ_INTERFACE)((0x00) | (((0x80 | 0x00 | 0x01)) << 8)): | |||
2463 | case C(UR_GET_STATUS, UT_READ_ENDPOINT)((0x00) | (((0x80 | 0x00 | 0x02)) << 8)): | |||
2464 | if (len > 1) { | |||
2465 | USETW(((usb_status_t *)buf)->wStatus, 0)(*(u_int16_t *)(((usb_status_t *)buf)->wStatus) = (0)); | |||
2466 | totlen = 2; | |||
2467 | } | |||
2468 | break; | |||
2469 | case C(UR_SET_ADDRESS, UT_WRITE_DEVICE)((0x05) | (((0x00 | 0x00 | 0x00)) << 8)): | |||
2470 | if (value >= USB_MAX_DEVICES128) { | |||
2471 | err = USBD_IOERROR; | |||
2472 | goto ret; | |||
2473 | } | |||
2474 | break; | |||
2475 | case C(UR_SET_CONFIG, UT_WRITE_DEVICE)((0x09) | (((0x00 | 0x00 | 0x00)) << 8)): | |||
2476 | if (value != 0 && value != 1) { | |||
2477 | err = USBD_IOERROR; | |||
2478 | goto ret; | |||
2479 | } | |||
2480 | sc->sc_conf = value; | |||
2481 | break; | |||
2482 | case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE)((0x07) | (((0x00 | 0x00 | 0x00)) << 8)): | |||
2483 | break; | |||
2484 | case C(UR_SET_FEATURE, UT_WRITE_DEVICE)((0x03) | (((0x00 | 0x00 | 0x00)) << 8)): | |||
2485 | case C(UR_SET_FEATURE, UT_WRITE_INTERFACE)((0x03) | (((0x00 | 0x00 | 0x01)) << 8)): | |||
2486 | case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT)((0x03) | (((0x00 | 0x00 | 0x02)) << 8)): | |||
2487 | err = USBD_IOERROR; | |||
2488 | goto ret; | |||
2489 | case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE)((0x0b) | (((0x00 | 0x00 | 0x01)) << 8)): | |||
2490 | break; | |||
2491 | case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT)((0x0c) | (((0x00 | 0x00 | 0x02)) << 8)): | |||
2492 | break; | |||
2493 | /* Hub requests */ | |||
2494 | case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE)((0x01) | (((0x00 | 0x20 | 0x00)) << 8)): | |||
2495 | break; | |||
2496 | case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER)((0x01) | (((0x00 | 0x20 | 0x03)) << 8)): | |||
2497 | DPRINTFN(8, ("xhci_root_ctrl_start: UR_CLEAR_PORT_FEATURE " | |||
2498 | "port=%d feature=%d\n", index, value)); | |||
2499 | if (index < 1 || index > sc->sc_noport) { | |||
2500 | err = USBD_IOERROR; | |||
2501 | goto ret; | |||
2502 | } | |||
2503 | port = XHCI_PORTSC(index)(0x3f0 + (0x10 * (index))); | |||
2504 | v = XOREAD4(sc, port)(((sc)->iot)->read_4(((sc)->ioh), ((sc)->sc_oper_off + (port)))) & ~XHCI_PS_CLEAR0x80ff01ffu; | |||
2505 | switch (value) { | |||
2506 | case UHF_PORT_ENABLE1: | |||
2507 | XOWRITE4(sc, port, v | XHCI_PS_PED)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (port)), ((v | 0x00000002)))); | |||
2508 | break; | |||
2509 | case UHF_PORT_SUSPEND2: | |||
2510 | /* TODO */ | |||
2511 | break; | |||
2512 | case UHF_PORT_POWER8: | |||
2513 | XOWRITE4(sc, port, v & ~XHCI_PS_PP)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (port)), ((v & ~0x00000200)))); | |||
2514 | break; | |||
2515 | case UHF_PORT_INDICATOR22: | |||
2516 | XOWRITE4(sc, port, v & ~XHCI_PS_SET_PIC(3))(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (port)), ((v & ~(((3) & 0x3) << 14))))); | |||
2517 | break; | |||
2518 | case UHF_C_PORT_CONNECTION16: | |||
2519 | XOWRITE4(sc, port, v | XHCI_PS_CSC)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (port)), ((v | 0x00020000)))); | |||
2520 | break; | |||
2521 | case UHF_C_PORT_ENABLE17: | |||
2522 | XOWRITE4(sc, port, v | XHCI_PS_PEC)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (port)), ((v | 0x00040000)))); | |||
2523 | break; | |||
2524 | case UHF_C_PORT_SUSPEND18: | |||
2525 | case UHF_C_PORT_LINK_STATE25: | |||
2526 | XOWRITE4(sc, port, v | XHCI_PS_PLC)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (port)), ((v | 0x00400000)))); | |||
2527 | break; | |||
2528 | case UHF_C_PORT_OVER_CURRENT19: | |||
2529 | XOWRITE4(sc, port, v | XHCI_PS_OCC)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (port)), ((v | 0x00100000)))); | |||
2530 | break; | |||
2531 | case UHF_C_PORT_RESET20: | |||
2532 | XOWRITE4(sc, port, v | XHCI_PS_PRC)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (port)), ((v | 0x00200000)))); | |||
2533 | break; | |||
2534 | case UHF_C_BH_PORT_RESET29: | |||
2535 | XOWRITE4(sc, port, v | XHCI_PS_WRC)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (port)), ((v | 0x00080000)))); | |||
2536 | break; | |||
2537 | default: | |||
2538 | err = USBD_IOERROR; | |||
2539 | goto ret; | |||
2540 | } | |||
2541 | break; | |||
2542 | ||||
2543 | case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE)((0x06) | (((0x80 | 0x20 | 0x00)) << 8)): | |||
2544 | if (len == 0) | |||
2545 | break; | |||
2546 | if ((value & 0xff) != 0) { | |||
2547 | err = USBD_IOERROR; | |||
2548 | goto ret; | |||
2549 | } | |||
2550 | v = XREAD4(sc, XHCI_HCCPARAMS)(((sc)->iot)->read_4(((sc)->ioh), ((0x10)))); | |||
2551 | hubd = xhci_hubd; | |||
2552 | hubd.bNbrPorts = sc->sc_noport; | |||
2553 | USETW(hubd.wHubCharacteristics,(*(u_int16_t *)(hubd.wHubCharacteristics) = (((((v) >> 3 ) & 0x1) ? 0x0001 : 0x0000) | ((((v) >> 4) & 0x1 ) ? 0x0080 : 0))) | |||
2554 | (XHCI_HCC_PPC(v) ? UHD_PWR_INDIVIDUAL : UHD_PWR_GANGED) |(*(u_int16_t *)(hubd.wHubCharacteristics) = (((((v) >> 3 ) & 0x1) ? 0x0001 : 0x0000) | ((((v) >> 4) & 0x1 ) ? 0x0080 : 0))) | |||
2555 | (XHCI_HCC_PIND(v) ? UHD_PORT_IND : 0))(*(u_int16_t *)(hubd.wHubCharacteristics) = (((((v) >> 3 ) & 0x1) ? 0x0001 : 0x0000) | ((((v) >> 4) & 0x1 ) ? 0x0080 : 0))); | |||
2556 | hubd.bPwrOn2PwrGood = 10; /* xHCI section 5.4.9 */ | |||
2557 | for (i = 1; i <= sc->sc_noport; i++) { | |||
2558 | v = XOREAD4(sc, XHCI_PORTSC(i))(((sc)->iot)->read_4(((sc)->ioh), ((sc)->sc_oper_off + ((0x3f0 + (0x10 * (i))))))); | |||
2559 | if (v & XHCI_PS_DR0x40000000) | |||
2560 | hubd.DeviceRemovable[i / 8] |= 1U << (i % 8); | |||
2561 | } | |||
2562 | hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE8 + i; | |||
2563 | l = min(len, hubd.bDescLength); | |||
2564 | totlen = l; | |||
2565 | memcpy(buf, &hubd, l)__builtin_memcpy((buf), (&hubd), (l)); | |||
2566 | break; | |||
2567 | case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE)((0x00) | (((0x80 | 0x20 | 0x00)) << 8)): | |||
2568 | if (len != 16) { | |||
2569 | err = USBD_IOERROR; | |||
2570 | goto ret; | |||
2571 | } | |||
2572 | memset(buf, 0, len)__builtin_memset((buf), (0), (len)); | |||
2573 | totlen = len; | |||
2574 | break; | |||
2575 | case C(UR_GET_STATUS, UT_READ_CLASS_OTHER)((0x00) | (((0x80 | 0x20 | 0x03)) << 8)): | |||
2576 | DPRINTFN(8,("xhci_root_ctrl_start: get port status i=%d\n", | |||
2577 | index)); | |||
2578 | if (index < 1 || index > sc->sc_noport) { | |||
2579 | err = USBD_IOERROR; | |||
2580 | goto ret; | |||
2581 | } | |||
2582 | if (len != 4) { | |||
2583 | err = USBD_IOERROR; | |||
2584 | goto ret; | |||
2585 | } | |||
2586 | v = XOREAD4(sc, XHCI_PORTSC(index))(((sc)->iot)->read_4(((sc)->ioh), ((sc)->sc_oper_off + ((0x3f0 + (0x10 * (index))))))); | |||
2587 | DPRINTFN(8,("xhci_root_ctrl_start: port status=0x%04x\n", v)); | |||
2588 | i = UPS_PORT_LS_SET(XHCI_PS_GET_PLS(v))((((((v) >> 5) & 0xf)) & 0xf) << 5); | |||
2589 | switch (XHCI_PS_SPEED(v)(((v) >> 10) & 0xf)) { | |||
2590 | case XHCI_SPEED_FULL1: | |||
2591 | i |= UPS_FULL_SPEED0x0000; | |||
2592 | break; | |||
2593 | case XHCI_SPEED_LOW2: | |||
2594 | i |= UPS_LOW_SPEED0x0200; | |||
2595 | break; | |||
2596 | case XHCI_SPEED_HIGH3: | |||
2597 | i |= UPS_HIGH_SPEED0x0400; | |||
2598 | break; | |||
2599 | case XHCI_SPEED_SUPER4: | |||
2600 | default: | |||
2601 | break; | |||
2602 | } | |||
2603 | if (v & XHCI_PS_CCS0x00000001) i |= UPS_CURRENT_CONNECT_STATUS0x0001; | |||
2604 | if (v & XHCI_PS_PED0x00000002) i |= UPS_PORT_ENABLED0x0002; | |||
2605 | if (v & XHCI_PS_OCA0x00000008) i |= UPS_OVERCURRENT_INDICATOR0x0008; | |||
2606 | if (v & XHCI_PS_PR0x00000010) i |= UPS_RESET0x0010; | |||
2607 | if (v & XHCI_PS_PP0x00000200) { | |||
2608 | if (XHCI_PS_SPEED(v)(((v) >> 10) & 0xf) >= XHCI_SPEED_FULL1 && | |||
2609 | XHCI_PS_SPEED(v)(((v) >> 10) & 0xf) <= XHCI_SPEED_HIGH3) | |||
2610 | i |= UPS_PORT_POWER0x0100; | |||
2611 | else | |||
2612 | i |= UPS_PORT_POWER_SS0x0200; | |||
2613 | } | |||
2614 | USETW(ps.wPortStatus, i)(*(u_int16_t *)(ps.wPortStatus) = (i)); | |||
2615 | i = 0; | |||
2616 | if (v & XHCI_PS_CSC0x00020000) i |= UPS_C_CONNECT_STATUS0x0001; | |||
2617 | if (v & XHCI_PS_PEC0x00040000) i |= UPS_C_PORT_ENABLED0x0002; | |||
2618 | if (v & XHCI_PS_OCC0x00100000) i |= UPS_C_OVERCURRENT_INDICATOR0x0008; | |||
2619 | if (v & XHCI_PS_PRC0x00200000) i |= UPS_C_PORT_RESET0x0010; | |||
2620 | if (v & XHCI_PS_WRC0x00080000) i |= UPS_C_BH_PORT_RESET0x0020; | |||
2621 | if (v & XHCI_PS_PLC0x00400000) i |= UPS_C_PORT_LINK_STATE0x0040; | |||
2622 | if (v & XHCI_PS_CEC0x00800000) i |= UPS_C_PORT_CONFIG_ERROR0x0080; | |||
2623 | USETW(ps.wPortChange, i)(*(u_int16_t *)(ps.wPortChange) = (i)); | |||
2624 | l = min(len, sizeof ps); | |||
2625 | memcpy(buf, &ps, l)__builtin_memcpy((buf), (&ps), (l)); | |||
2626 | totlen = l; | |||
2627 | break; | |||
2628 | case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE)((0x07) | (((0x00 | 0x20 | 0x00)) << 8)): | |||
2629 | err = USBD_IOERROR; | |||
2630 | goto ret; | |||
2631 | case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE)((0x03) | (((0x00 | 0x20 | 0x00)) << 8)): | |||
2632 | break; | |||
2633 | case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER)((0x03) | (((0x00 | 0x20 | 0x03)) << 8)): | |||
2634 | ||||
2635 | i = index >> 8; | |||
2636 | index &= 0x00ff; | |||
2637 | ||||
2638 | if (index < 1 || index > sc->sc_noport) { | |||
2639 | err = USBD_IOERROR; | |||
2640 | goto ret; | |||
2641 | } | |||
2642 | port = XHCI_PORTSC(index)(0x3f0 + (0x10 * (index))); | |||
2643 | v = XOREAD4(sc, port)(((sc)->iot)->read_4(((sc)->ioh), ((sc)->sc_oper_off + (port)))) & ~XHCI_PS_CLEAR0x80ff01ffu; | |||
2644 | ||||
2645 | switch (value) { | |||
2646 | case UHF_PORT_ENABLE1: | |||
2647 | XOWRITE4(sc, port, v | XHCI_PS_PED)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (port)), ((v | 0x00000002)))); | |||
2648 | break; | |||
2649 | case UHF_PORT_SUSPEND2: | |||
2650 | DPRINTFN(6, ("suspend port %u (LPM=%u)\n", index, i)); | |||
2651 | if (XHCI_PS_SPEED(v)(((v) >> 10) & 0xf) == XHCI_SPEED_SUPER4) { | |||
2652 | err = USBD_IOERROR; | |||
2653 | goto ret; | |||
2654 | } | |||
2655 | XOWRITE4(sc, port, v |(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (port)), ((v | (((i ? 2 : 3) & 0xf) << 5) | 0x00010000 )))) | |||
2656 | XHCI_PS_SET_PLS(i ? 2 /* LPM */ : 3) | XHCI_PS_LWS)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (port)), ((v | (((i ? 2 : 3) & 0xf) << 5) | 0x00010000 )))); | |||
2657 | break; | |||
2658 | case UHF_PORT_RESET4: | |||
2659 | DPRINTFN(6, ("reset port %d\n", index)); | |||
2660 | XOWRITE4(sc, port, v | XHCI_PS_PR)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (port)), ((v | 0x00000010)))); | |||
2661 | break; | |||
2662 | case UHF_PORT_POWER8: | |||
2663 | DPRINTFN(3, ("set port power %d\n", index)); | |||
2664 | XOWRITE4(sc, port, v | XHCI_PS_PP)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (port)), ((v | 0x00000200)))); | |||
2665 | break; | |||
2666 | case UHF_PORT_INDICATOR22: | |||
2667 | DPRINTFN(3, ("set port indicator %d\n", index)); | |||
2668 | ||||
2669 | v &= ~XHCI_PS_SET_PIC(3)(((3) & 0x3) << 14); | |||
2670 | v |= XHCI_PS_SET_PIC(1)(((1) & 0x3) << 14); | |||
2671 | ||||
2672 | XOWRITE4(sc, port, v)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (port)), ((v)))); | |||
2673 | break; | |||
2674 | case UHF_C_PORT_RESET20: | |||
2675 | XOWRITE4(sc, port, v | XHCI_PS_PRC)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (port)), ((v | 0x00200000)))); | |||
2676 | break; | |||
2677 | case UHF_C_BH_PORT_RESET29: | |||
2678 | XOWRITE4(sc, port, v | XHCI_PS_WRC)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_oper_off + (port)), ((v | 0x00080000)))); | |||
2679 | break; | |||
2680 | default: | |||
2681 | err = USBD_IOERROR; | |||
2682 | goto ret; | |||
2683 | } | |||
2684 | break; | |||
2685 | case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER)((0x08) | (((0x00 | 0x20 | 0x03)) << 8)): | |||
2686 | case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER)((0x09) | (((0x00 | 0x20 | 0x03)) << 8)): | |||
2687 | case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER)((0x0a) | (((0x80 | 0x20 | 0x03)) << 8)): | |||
2688 | case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER)((0x0b) | (((0x00 | 0x20 | 0x03)) << 8)): | |||
2689 | break; | |||
2690 | default: | |||
2691 | err = USBD_IOERROR; | |||
2692 | goto ret; | |||
2693 | } | |||
2694 | xfer->actlen = totlen; | |||
2695 | err = USBD_NORMAL_COMPLETION; | |||
2696 | ret: | |||
2697 | xfer->status = err; | |||
2698 | s = splusb()splraise(0x5); | |||
2699 | usb_transfer_complete(xfer); | |||
2700 | splx(s)spllower(s); | |||
2701 | return (err); | |||
2702 | } | |||
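/*
 * The switch above keys on the pair (bRequest, bmRequestType) packed by
 * the local C() macro: the request code occupies the low byte and the
 * request type bits 8-15.  As a worked example using the values visible
 * above, UR_GET_DESCRIPTOR (0x06) combined with UT_READ_CLASS_DEVICE
 * (0x80 | 0x20 | 0x00 == 0xa0) yields 0x06 | (0xa0 << 8) == 0xa006,
 * the case that builds the hub descriptor.
 */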
2703 | ||||
2704 | ||||
2705 | void | |||
2706 | xhci_noop(struct usbd_xfer *xfer) | |||
2707 | { | |||
2708 | } | |||
2709 | ||||
2710 | ||||
2711 | usbd_status | |||
2712 | xhci_root_intr_transfer(struct usbd_xfer *xfer) | |||
2713 | { | |||
2714 | usbd_status err; | |||
2715 | ||||
2716 | err = usb_insert_transfer(xfer); | |||
2717 | if (err) | |||
2718 | return (err); | |||
2719 | ||||
2720 | return (xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)((&xfer->pipe->queue)->sqh_first))); | |||
2721 | } | |||
2722 | ||||
2723 | usbd_status | |||
2724 | xhci_root_intr_start(struct usbd_xfer *xfer) | |||
2725 | { | |||
2726 | struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; | |||
2727 | ||||
2728 | if (sc->sc_bus.dying) | |||
2729 | return (USBD_IOERROR); | |||
2730 | ||||
2731 | sc->sc_intrxfer = xfer; | |||
2732 | ||||
2733 | return (USBD_IN_PROGRESS); | |||
2734 | } | |||
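/*
 * Nothing is queued on the hardware for the root hub interrupt pipe;
 * the xfer is only stashed in sc_intrxfer and this routine returns
 * USBD_IN_PROGRESS.  The pending xfer is presumably completed later
 * from the port status change handling on the event ring, which fills
 * in the bitmap of changed ports before calling usb_transfer_complete().
 */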
2735 | ||||
2736 | void | |||
2737 | xhci_root_intr_abort(struct usbd_xfer *xfer) | |||
2738 | { | |||
2739 | struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; | |||
2740 | int s; | |||
2741 | ||||
2742 | sc->sc_intrxfer = NULL((void *)0); | |||
2743 | ||||
2744 | xfer->status = USBD_CANCELLED; | |||
2745 | s = splusb()splraise(0x5); | |||
2746 | usb_transfer_complete(xfer); | |||
2747 | splx(s)spllower(s); | |||
2748 | } | |||
2749 | ||||
2750 | void | |||
2751 | xhci_root_intr_done(struct usbd_xfer *xfer) | |||
2752 | { | |||
2753 | } | |||
2754 | ||||
2755 | /* | |||
2756 | * Number of packets remaining in the TD after the corresponding TRB. | |||
2757 | * | |||
2758 | * Section 4.11.2.4 of xHCI specification r1.1. | |||
2759 | */ | |||
2760 | static inline uint32_t | |||
2761 | xhci_xfer_tdsize(struct usbd_xfer *xfer, uint32_t remain, uint32_t len) | |||
2762 | { | |||
2763 | uint32_t npkt, mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize)(*(u_int16_t *)(xfer->pipe->endpoint->edesc->wMaxPacketSize )); | |||
2764 | ||||
2765 | if (len == 0) | |||
2766 | return XHCI_TRB_TDREM(0)(((0) & 0x1f) << 17); | |||
2767 | ||||
2768 | npkt = howmany(remain - len, UE_GET_SIZE(mps))(((remain - len) + ((((mps) & 0x7ff)) - 1)) / (((mps) & 0x7ff))); | |||
2769 | if (npkt > 31) | |||
2770 | npkt = 31; | |||
2771 | ||||
2772 | return XHCI_TRB_TDREM(npkt)(((npkt) & 0x1f) << 17); | |||
2773 | } | |||
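/*
 * Worked example for the TD Size computation above, assuming a bulk
 * endpoint with a 512-byte wMaxPacketSize and a 4096-byte TD split
 * into two 2048-byte TRBs: the first TRB is computed with
 * remain = 4096 and len = 2048, so npkt = howmany(4096 - 2048, 512) = 4
 * packets still outstanding after it; the second and last TRB has
 * remain == len, so npkt = howmany(0, 512) = 0, matching the rule that
 * the final TRB of a TD reports a TD Size of zero.
 */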
2774 | ||||
2775 | /* | |||
2776 | * Transfer Burst Count (TBC) and Transfer Last Burst Packet Count (TLBPC). | |||
2777 | * | |||
2778 | * Section 4.11.2.3 of xHCI specification r1.1. | |||
2779 | */ | |||
2780 | static inline uint32_t | |||
2781 | xhci_xfer_tbc(struct usbd_xfer *xfer, uint32_t len, uint32_t *tlbpc) | |||
2782 | { | |||
2783 | uint32_t mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize)(*(u_int16_t *)(xfer->pipe->endpoint->edesc->wMaxPacketSize )); | |||
2784 | uint32_t maxb, tdpc, residue, tbc; | |||
2785 | ||||
2786 | /* Transfer Descriptor Packet Count, section 4.14.1. */ | |||
2787 | tdpc = howmany(len, UE_GET_SIZE(mps))(((len) + ((((mps) & 0x7ff)) - 1)) / (((mps) & 0x7ff) )); | |||
2788 | if (tdpc == 0) | |||
2789 | tdpc = 1; | |||
2790 | ||||
2791 | /* Transfer Burst Count */ | |||
2792 | maxb = xhci_pipe_maxburst(xfer->pipe); | |||
2793 | tbc = howmany(tdpc, maxb + 1)(((tdpc) + ((maxb + 1) - 1)) / (maxb + 1)) - 1; | |||
2794 | ||||
2795 | /* Transfer Last Burst Packet Count */ | |||
2796 | if (xfer->device->speed == USB_SPEED_SUPER4) { | |||
2797 | residue = tdpc % (maxb + 1); | |||
2798 | if (residue == 0) | |||
2799 | *tlbpc = maxb; | |||
2800 | else | |||
2801 | *tlbpc = residue - 1; | |||
2802 | } else { | |||
2803 | *tlbpc = tdpc - 1; | |||
2804 | } | |||
2805 | ||||
2806 | return (tbc); | |||
2807 | } | |||
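/*
 * Worked example for the burst math above, assuming a SuperSpeed
 * isochronous endpoint with a 1024-byte wMaxPacketSize, a maximum
 * burst of 2 (maxb) and a 5000-byte frame: tdpc = howmany(5000, 1024)
 * = 5 packets, tbc = howmany(5, 3) - 1 = 1, and the residue 5 % 3 = 2
 * gives tlbpc = 1, i.e. two packets in the last burst.  For devices
 * that are not SuperSpeed the else branch simply reports tdpc - 1.
 */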
2808 | ||||
2809 | usbd_status | |||
2810 | xhci_device_ctrl_transfer(struct usbd_xfer *xfer) | |||
2811 | { | |||
2812 | usbd_status err; | |||
2813 | ||||
2814 | err = usb_insert_transfer(xfer); | |||
2815 | if (err) | |||
2816 | return (err); | |||
2817 | ||||
2818 | return (xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)((&xfer->pipe->queue)->sqh_first))); | |||
2819 | } | |||
2820 | ||||
2821 | usbd_status | |||
2822 | xhci_device_ctrl_start(struct usbd_xfer *xfer) | |||
2823 | { | |||
2824 | struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; | |||
2825 | struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe; | |||
2826 | struct xhci_trb *trb0, *trb; | |||
2827 | uint32_t flags, len = UGETW(xfer->request.wLength)(*(u_int16_t *)(xfer->request.wLength)); | |||
2828 | uint8_t toggle; | |||
2829 | int s; | |||
2830 | ||||
2831 | KASSERT(xfer->rqflags & URQ_REQUEST)((xfer->rqflags & 0x01) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/dev/usb/xhci.c", 2831, "xfer->rqflags & URQ_REQUEST" )); | |||
2832 | ||||
2833 | if (sc->sc_bus.dying || xp->halted) | |||
2834 | return (USBD_IOERROR); | |||
2835 | ||||
2836 | if (xp->free_trbs < 3) | |||
2837 | return (USBD_NOMEM); | |||
2838 | ||||
2839 | if (len != 0) | |||
2840 | usb_syncmem(&xfer->dmabuf, 0, len, | |||
2841 | usbd_xfer_isread(xfer) ? | |||
2842 | BUS_DMASYNC_PREREAD0x01 : BUS_DMASYNC_PREWRITE0x04); | |||
2843 | ||||
2844 | /* We'll toggle the setup TRB once we're finished with the stages. */ | |||
2845 | trb0 = xhci_xfer_get_trb(sc, xfer, &toggle, 0); | |||
2846 | ||||
2847 | flags = XHCI_TRB_TYPE_SETUP(2 << 10) | XHCI_TRB_IDT(1 << 6) | (toggle ^ 1); | |||
2848 | if (len != 0) { | |||
2849 | if (usbd_xfer_isread(xfer)) | |||
2850 | flags |= XHCI_TRB_TRT_IN(3 << 16); | |||
2851 | else | |||
2852 | flags |= XHCI_TRB_TRT_OUT(2 << 16); | |||
2853 | } | |||
2854 | ||||
2855 | memcpy(&trb0->trb_paddr, &xfer->request, sizeof(trb0->trb_paddr))__builtin_memcpy((&trb0->trb_paddr), (&xfer->request ), (sizeof(trb0->trb_paddr))); | |||
2856 | trb0->trb_status = htole32(XHCI_TRB_INTR(0) | XHCI_TRB_LEN(8))((__uint32_t)((((0) & 0x3ff) << 22) | ((8) & 0x1ffff ))); | |||
2857 | trb0->trb_flags = htole32(flags)((__uint32_t)(flags)); | |||
2858 | bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb0) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
2859 | TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb0) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
2860 | BUS_DMASYNC_PREWRITE)(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb0) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)); | |||
2861 | ||||
2862 | /* Data TRB */ | |||
2863 | if (len != 0) { | |||
2864 | trb = xhci_xfer_get_trb(sc, xfer, &toggle, 0); | |||
2865 | ||||
2866 | flags = XHCI_TRB_TYPE_DATA(3 << 10) | toggle; | |||
2867 | if (usbd_xfer_isread(xfer)) | |||
2868 | flags |= XHCI_TRB_DIR_IN(1 << 16) | XHCI_TRB_ISP(1 << 2); | |||
2869 | ||||
2870 | trb->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0))((__uint64_t)(((&xfer->dmabuf)->block->map->dm_segs [0].ds_addr + (&xfer->dmabuf)->offs + (0)))); | |||
2871 | trb->trb_status = htole32(((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, len, len))) | |||
2872 | XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, len, len))) | |||
2873 | xhci_xfer_tdsize(xfer, len, len)((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, len, len))) | |||
2874 | )((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, len, len))); | |||
2875 | trb->trb_flags = htole32(flags)((__uint32_t)(flags)); | |||
2876 | ||||
2877 | bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
2878 | TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
2879 | BUS_DMASYNC_PREWRITE)(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)); | |||
2880 | } | |||
2881 | ||||
2882 | /* Status TRB */ | |||
2883 | trb = xhci_xfer_get_trb(sc, xfer, &toggle, 1); | |||
2884 | ||||
2885 | flags = XHCI_TRB_TYPE_STATUS(4 << 10) | XHCI_TRB_IOC(1 << 5) | toggle; | |||
2886 | if (len == 0 || !usbd_xfer_isread(xfer)) | |||
2887 | flags |= XHCI_TRB_DIR_IN(1 << 16); | |||
2888 | ||||
2889 | trb->trb_paddr = 0; | |||
2890 | trb->trb_status = htole32(XHCI_TRB_INTR(0))((__uint32_t)((((0) & 0x3ff) << 22))); | |||
2891 | trb->trb_flags = htole32(flags)((__uint32_t)(flags)); | |||
2892 | ||||
2893 | bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
2894 | TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
2895 | BUS_DMASYNC_PREWRITE)(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)); | |||
2896 | ||||
2897 | /* Setup TRB */ | |||
2898 | trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE)((__uint32_t)((1 << 0))); | |||
2899 | bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb0) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
2900 | TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb0) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
2901 | BUS_DMASYNC_PREWRITE)(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb0) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)); | |||
2902 | ||||
2903 | s = splusb()splraise(0x5); | |||
2904 | XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_door_off + ((0x0000 + (4 * (xp->slot))))), ((xp->dci)))); | |||
2905 | ||||
2906 | xfer->status = USBD_IN_PROGRESS; | |||
2907 | if (xfer->timeout && !sc->sc_bus.use_polling) { | |||
2908 | timeout_del(&xfer->timeout_handle); | |||
2909 | timeout_set(&xfer->timeout_handle, xhci_timeout, xfer); | |||
2910 | timeout_add_msec(&xfer->timeout_handle, xfer->timeout); | |||
2911 | } | |||
2912 | splx(s)spllower(s); | |||
2913 | ||||
2914 | return (USBD_IN_PROGRESS); | |||
2915 | } | |||
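/*
 * The control transfer above is queued as the usual three-stage TD:
 * a Setup TRB carrying the 8-byte request as immediate data
 * (XHCI_TRB_IDT), an optional Data TRB pointing at the xfer buffer,
 * and a Status TRB with IOC set so a Transfer Event is raised on
 * completion.  The Setup TRB is written with an inverted cycle bit and
 * only flipped to the ring's current cycle once the later TRBs are in
 * place, so the controller cannot start fetching a half-built TD when
 * the doorbell is rung.
 */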
2916 | ||||
2917 | void | |||
2918 | xhci_device_ctrl_abort(struct usbd_xfer *xfer) | |||
2919 | { | |||
2920 | xhci_abort_xfer(xfer, USBD_CANCELLED); | |||
2921 | } | |||
2922 | ||||
2923 | usbd_status | |||
2924 | xhci_device_generic_transfer(struct usbd_xfer *xfer) | |||
2925 | { | |||
2926 | usbd_status err; | |||
2927 | ||||
2928 | err = usb_insert_transfer(xfer); | |||
2929 | if (err) | |||
2930 | return (err); | |||
2931 | ||||
2932 | return (xhci_device_generic_start(SIMPLEQ_FIRST(&xfer->pipe->queue)((&xfer->pipe->queue)->sqh_first))); | |||
2933 | } | |||
2934 | ||||
2935 | usbd_status | |||
2936 | xhci_device_generic_start(struct usbd_xfer *xfer) | |||
2937 | { | |||
2938 | struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; | |||
2939 | struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe; | |||
2940 | struct xhci_trb *trb0, *trb; | |||
2941 | uint32_t len, remain, flags; | |||
2942 | uint32_t mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize)(*(u_int16_t *)(xfer->pipe->endpoint->edesc->wMaxPacketSize )); | |||
2943 | uint64_t paddr = DMAADDR(&xfer->dmabuf, 0)((&xfer->dmabuf)->block->map->dm_segs[0].ds_addr + (&xfer->dmabuf)->offs + (0)); | |||
2944 | uint8_t toggle; | |||
2945 | int s, i, ntrb, zerotd = 0; | |||
2946 | ||||
2947 | KASSERT(!(xfer->rqflags & URQ_REQUEST))((!(xfer->rqflags & 0x01)) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/dev/usb/xhci.c", 2947, "!(xfer->rqflags & URQ_REQUEST)" )); | |||
2948 | ||||
2949 | if (sc->sc_bus.dying || xp->halted) | |||
2950 | return (USBD_IOERROR); | |||
2951 | ||||
2952 | /* How many TRBs do we need for this transfer? */ | |||
2953 | ntrb = howmany(xfer->length, XHCI_TRB_MAXSIZE)(((xfer->length) + (((64 * 1024)) - 1)) / ((64 * 1024))); | |||
2954 | ||||
2955 | /* If the buffer crosses a 64k boundary, we need one more. */ | |||
2956 | len = XHCI_TRB_MAXSIZE(64 * 1024) - (paddr & (XHCI_TRB_MAXSIZE(64 * 1024) - 1)); | |||
2957 | if (len < xfer->length) | |||
2958 | ntrb = howmany(xfer->length - len, XHCI_TRB_MAXSIZE)(((xfer->length - len) + (((64 * 1024)) - 1)) / ((64 * 1024 ))) + 1; | |||
2959 | else | |||
2960 | len = xfer->length; | |||
2961 | ||||
2962 | /* If we need to append a zero length packet, we need one more. */ | |||
2963 | if ((xfer->flags & USBD_FORCE_SHORT_XFER0x08 || xfer->length == 0) && | |||
2964 | (xfer->length % UE_GET_SIZE(mps)((mps) & 0x7ff) == 0)) | |||
2965 | zerotd = 1; | |||
2966 | ||||
2967 | if (xp->free_trbs < (ntrb + zerotd)) | |||
2968 | return (USBD_NOMEM); | |||
2969 | ||||
2970 | usb_syncmem(&xfer->dmabuf, 0, xfer->length, | |||
2971 | usbd_xfer_isread(xfer) ? | |||
2972 | BUS_DMASYNC_PREREAD0x01 : BUS_DMASYNC_PREWRITE0x04); | |||
2973 | ||||
2974 | /* We'll toggle the first TRB once we're finished with the chain. */ | |||
2975 | trb0 = xhci_xfer_get_trb(sc, xfer, &toggle, (ntrb == 1)); | |||
2976 | flags = XHCI_TRB_TYPE_NORMAL(1 << 10) | (toggle ^ 1); | |||
2977 | if (usbd_xfer_isread(xfer)) | |||
2978 | flags |= XHCI_TRB_ISP(1 << 2); | |||
2979 | flags |= (ntrb == 1) ? XHCI_TRB_IOC(1 << 5) : XHCI_TRB_CHAIN(1 << 4); | |||
2980 | ||||
2981 | trb0->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0))((__uint64_t)(((&xfer->dmabuf)->block->map->dm_segs [0].ds_addr + (&xfer->dmabuf)->offs + (0)))); | |||
2982 | trb0->trb_status = htole32(((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, xfer->length, len))) | |||
2983 | XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, xfer->length, len))) | |||
2984 | xhci_xfer_tdsize(xfer, xfer->length, len)((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, xfer->length, len))) | |||
2985 | )((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, xfer->length, len))); | |||
2986 | trb0->trb_flags = htole32(flags)((__uint32_t)(flags)); | |||
2987 | bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb0) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
2988 | TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb0) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
2989 | BUS_DMASYNC_PREWRITE)(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb0) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)); | |||
2990 | ||||
2991 | remain = xfer->length - len; | |||
2992 | paddr += len; | |||
2993 | ||||
2994 | /* Chain more TRBs if needed. */ | |||
2995 | for (i = ntrb - 1; i > 0; i--) { | |||
2996 | len = min(remain, XHCI_TRB_MAXSIZE(64 * 1024)); | |||
2997 | ||||
2998 | /* Next (or Last) TRB. */ | |||
2999 | trb = xhci_xfer_get_trb(sc, xfer, &toggle, (i == 1)); | |||
3000 | flags = XHCI_TRB_TYPE_NORMAL(1 << 10) | toggle; | |||
3001 | if (usbd_xfer_isread(xfer)) | |||
3002 | flags |= XHCI_TRB_ISP(1 << 2); | |||
3003 | flags |= (i == 1) ? XHCI_TRB_IOC(1 << 5) : XHCI_TRB_CHAIN(1 << 4); | |||
3004 | ||||
3005 | trb->trb_paddr = htole64(paddr)((__uint64_t)(paddr)); | |||
3006 | trb->trb_status = htole32(((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, remain, len))) | |||
3007 | XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, remain, len))) | |||
3008 | xhci_xfer_tdsize(xfer, remain, len)((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, remain, len))) | |||
3009 | )((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, remain, len))); | |||
3010 | trb->trb_flags = htole32(flags)((__uint32_t)(flags)); | |||
3011 | ||||
3012 | bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
3013 | TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
3014 | BUS_DMASYNC_PREWRITE)(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)); | |||
3015 | ||||
3016 | remain -= len; | |||
3017 | paddr += len; | |||
3018 | } | |||
3019 | ||||
3020 | /* Do we need to issue a zero length transfer? */ | |||
3021 | if (zerotd == 1) { | |||
3022 | trb = xhci_xfer_get_trb(sc, xfer, &toggle, -1); | |||
3023 | trb->trb_paddr = 0; | |||
3024 | trb->trb_status = 0; | |||
3025 | trb->trb_flags = htole32(XHCI_TRB_TYPE_NORMAL | XHCI_TRB_IOC | toggle)((__uint32_t)((1 << 10) | (1 << 5) | toggle)); | |||
3026 | bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
3027 | TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
3028 | BUS_DMASYNC_PREWRITE)(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)); | |||
3029 | } | |||
3030 | ||||
3031 | /* First TRB. */ | |||
3032 | trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE)((__uint32_t)((1 << 0))); | |||
3033 | bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb0) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
3034 | TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb0) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
3035 | BUS_DMASYNC_PREWRITE)(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb0) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)); | |||
3036 | ||||
3037 | s = splusb()splraise(0x5); | |||
3038 | XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_door_off + ((0x0000 + (4 * (xp->slot))))), ((xp->dci)))); | |||
3039 | ||||
3040 | xfer->status = USBD_IN_PROGRESS; | |||
3041 | if (xfer->timeout && !sc->sc_bus.use_polling) { | |||
3042 | timeout_del(&xfer->timeout_handle); | |||
3043 | timeout_set(&xfer->timeout_handle, xhci_timeout, xfer); | |||
3044 | timeout_add_msec(&xfer->timeout_handle, xfer->timeout); | |||
3045 | } | |||
3046 | splx(s)spllower(s); | |||
3047 | ||||
3048 | return (USBD_IN_PROGRESS); | |||
3049 | } | |||
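/*
 * Example of the TRB accounting above, assuming a 100000-byte bulk
 * transfer whose buffer starts 0x100 bytes below a 64k boundary:
 * howmany(100000, 65536) = 2, but the first TRB may only cover the
 * 0x100 bytes up to the boundary, so ntrb becomes
 * howmany(100000 - 0x100, 65536) + 1 = 3 and the TD is queued as
 * 0x100 + 65536 + 34208 bytes, the last TRB carrying IOC.  A trailing
 * zero-length Normal TRB with its own IOC (zerotd) is queued in
 * addition when USBD_FORCE_SHORT_XFER is set and the length is already
 * a multiple of wMaxPacketSize.
 */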
3050 | ||||
3051 | void | |||
3052 | xhci_device_generic_done(struct usbd_xfer *xfer) | |||
3053 | { | |||
3054 | /* Only happens with interrupt transfers. */ | |||
3055 | if (xfer->pipe->repeat) { | |||
3056 | xfer->actlen = 0; | |||
3057 | xhci_device_generic_start(xfer); | |||
3058 | } | |||
3059 | } | |||
3060 | ||||
3061 | void | |||
3062 | xhci_device_generic_abort(struct usbd_xfer *xfer) | |||
3063 | { | |||
3064 | KASSERT(!xfer->pipe->repeat || xfer->pipe->intrxfer == xfer)((!xfer->pipe->repeat || xfer->pipe->intrxfer == xfer ) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/usb/xhci.c" , 3064, "!xfer->pipe->repeat || xfer->pipe->intrxfer == xfer" )); | |||
3065 | ||||
3066 | xhci_abort_xfer(xfer, USBD_CANCELLED); | |||
3067 | } | |||
3068 | ||||
3069 | usbd_status | |||
3070 | xhci_device_isoc_transfer(struct usbd_xfer *xfer) | |||
3071 | { | |||
3072 | usbd_status err; | |||
3073 | ||||
3074 | err = usb_insert_transfer(xfer); | |||
3075 | if (err && err != USBD_IN_PROGRESS) | |||
| ||||
3076 | return (err); | |||
3077 | ||||
3078 | return (xhci_device_isoc_start(xfer)); | |||
3079 | } | |||
3080 | ||||
3081 | usbd_status | |||
3082 | xhci_device_isoc_start(struct usbd_xfer *xfer) | |||
3083 | { | |||
3084 | struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; | |||
3085 | struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe; | |||
3086 | struct xhci_xfer *xx = (struct xhci_xfer *)xfer; | |||
3087 | struct xhci_trb *trb0, *trb; | |||
3088 | uint32_t len, remain, flags; | |||
3089 | uint64_t paddr; | |||
3090 | uint32_t tbc, tlbpc; | |||
3091 | int s, i, j, ntrb = xfer->nframes; | |||
3092 | uint8_t toggle; | |||
3093 | ||||
3094 | KASSERT(!(xfer->rqflags & URQ_REQUEST))((!(xfer->rqflags & 0x01)) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/dev/usb/xhci.c", 3094, "!(xfer->rqflags & URQ_REQUEST)" )); | |||
3095 | ||||
3096 | /* | |||
3097 | * To allow continuous transfers, xhci_device_isoc_transfer() above | |||
3098 | * starts every transfer immediately. However, usbd_start_next will | |||
3099 | * still call this function when another xfer completes, so check | |||
3100 | * whether this xfer is already in progress. | |||
3101 | */ | |||
3102 | if (xx->ntrb > 0) | |||
3103 | return (USBD_IN_PROGRESS); | |||
3104 | ||||
3105 | if (sc->sc_bus.dying || xp->halted) | |||
3106 | return (USBD_IOERROR); | |||
3107 | ||||
3108 | /* Why would you do that anyway? */ | |||
3109 | if (sc->sc_bus.use_polling) | |||
3110 | return (USBD_INVAL); | |||
3111 | ||||
3112 | paddr = DMAADDR(&xfer->dmabuf, 0)((&xfer->dmabuf)->block->map->dm_segs[0].ds_addr + (&xfer->dmabuf)->offs + (0)); | |||
3113 | ||||
3114 | /* How many TRBs do we need for all transfers? */ | |||
3115 | for (i = 0, ntrb = 0; i < xfer->nframes; i++) { | |||
3116 | /* How many TRBs do we need for this transfer? */ | |||
3117 | ntrb += howmany(xfer->frlengths[i], XHCI_TRB_MAXSIZE)(((xfer->frlengths[i]) + (((64 * 1024)) - 1)) / ((64 * 1024 ))); | |||
3118 | ||||
3119 | /* If the buffer crosses a 64k boundary, we need one more. */ | |||
3120 | len = XHCI_TRB_MAXSIZE(64 * 1024) - (paddr & (XHCI_TRB_MAXSIZE(64 * 1024) - 1)); | |||
3121 | if (len < xfer->frlengths[i]) | |||
3122 | ntrb++; | |||
3123 | ||||
3124 | paddr += xfer->frlengths[i]; | |||
3125 | } | |||
3126 | ||||
3127 | if (xp->free_trbs < ntrb) | |||
3128 | return (USBD_NOMEM); | |||
3129 | ||||
3130 | usb_syncmem(&xfer->dmabuf, 0, xfer->length, | |||
3131 | usbd_xfer_isread(xfer) ? | |||
3132 | BUS_DMASYNC_PREREAD0x01 : BUS_DMASYNC_PREWRITE0x04); | |||
3133 | ||||
3134 | paddr = DMAADDR(&xfer->dmabuf, 0)((&xfer->dmabuf)->block->map->dm_segs[0].ds_addr + (&xfer->dmabuf)->offs + (0)); | |||
3135 | ||||
3136 | for (i = 0, trb0 = NULL((void *)0); i < xfer->nframes; i++) { | |||
3137 | /* How many TRBs do we need for this transfer? */ | |||
3138 | ntrb = howmany(xfer->frlengths[i], XHCI_TRB_MAXSIZE)(((xfer->frlengths[i]) + (((64 * 1024)) - 1)) / ((64 * 1024 ))); | |||
3139 | ||||
3140 | /* If the buffer crosses a 64k boundary, we need one more. */ | |||
3141 | len = XHCI_TRB_MAXSIZE(64 * 1024) - (paddr & (XHCI_TRB_MAXSIZE(64 * 1024) - 1)); | |||
3142 | if (len < xfer->frlengths[i]) | |||
3143 | ntrb++; | |||
3144 | else | |||
3145 | len = xfer->frlengths[i]; | |||
3146 | ||||
3147 | KASSERT(ntrb < 3)((ntrb < 3) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/usb/xhci.c" , 3147, "ntrb < 3")); | |||
3148 | ||||
3149 | /* | |||
3150 | * We'll commit the first TRB once we're finished with the | |||
3151 | * chain. | |||
3152 | */ | |||
3153 | trb = xhci_xfer_get_trb(sc, xfer, &toggle, (ntrb == 1)); | |||
3154 | ||||
3155 | DPRINTFN(4, ("%s:%d: ring %p trb0_idx %lu ntrb %d paddr %llx " | |||
3156 | "len %u\n", __func__, __LINE__, | |||
3157 | &xp->ring.trbs[0], (trb - &xp->ring.trbs[0]), ntrb, paddr, | |||
3158 | len)); | |||
3159 | ||||
3160 | /* Record the first TRB so we can toggle later. */ | |||
3161 | if (trb0 == NULL((void *)0)) { | |||
3162 | trb0 = trb; | |||
3163 | toggle ^= 1; | |||
3164 | } | |||
3165 | ||||
3166 | flags = XHCI_TRB_TYPE_ISOCH(5 << 10) | XHCI_TRB_SIA(1U << 31) | toggle; | |||
3167 | if (usbd_xfer_isread(xfer)) | |||
3168 | flags |= XHCI_TRB_ISP(1 << 2); | |||
3169 | flags |= (ntrb == 1) ? XHCI_TRB_IOC(1 << 5) : XHCI_TRB_CHAIN(1 << 4); | |||
3170 | ||||
3171 | tbc = xhci_xfer_tbc(xfer, xfer->frlengths[i], &tlbpc); | |||
3172 | flags |= XHCI_TRB_ISOC_TBC(tbc)(((tbc) & 0x3) << 7) | XHCI_TRB_ISOC_TLBPC(tlbpc)(((tlbpc) & 0xf) << 16); | |||
3173 | ||||
3174 | trb->trb_paddr = htole64(paddr)((__uint64_t)(paddr)); | |||
3175 | trb->trb_status = htole32(((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, xfer->frlengths[i], len))) | |||
3176 | XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, xfer->frlengths[i], len))) | |||
3177 | xhci_xfer_tdsize(xfer, xfer->frlengths[i], len)((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, xfer->frlengths[i], len))) | |||
3178 | )((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, xfer->frlengths[i], len))); | |||
3179 | trb->trb_flags = htole32(flags)((__uint32_t)(flags)); | |||
3180 | ||||
3181 | bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
3182 | TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
3183 | BUS_DMASYNC_PREWRITE)(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)); | |||
3184 | ||||
3185 | remain = xfer->frlengths[i] - len; | |||
3186 | paddr += len; | |||
3187 | ||||
3188 | /* Chain more TRBs if needed. */ | |||
3189 | for (j = ntrb - 1; j > 0; j--) { | |||
3190 | len = min(remain, XHCI_TRB_MAXSIZE(64 * 1024)); | |||
3191 | ||||
3192 | /* Next (or Last) TRB. */ | |||
3193 | trb = xhci_xfer_get_trb(sc, xfer, &toggle, (j == 1)); | |||
3194 | flags = XHCI_TRB_TYPE_NORMAL(1 << 10) | toggle; | |||
3195 | if (usbd_xfer_isread(xfer)) | |||
3196 | flags |= XHCI_TRB_ISP(1 << 2); | |||
3197 | flags |= (j == 1) ? XHCI_TRB_IOC(1 << 5) : XHCI_TRB_CHAIN(1 << 4); | |||
3198 | DPRINTFN(3, ("%s:%d: ring %p trb0_idx %lu ntrb %d " | |||
3199 | "paddr %llx len %u\n", __func__, __LINE__, | |||
3200 | &xp->ring.trbs[0], (trb - &xp->ring.trbs[0]), ntrb, | |||
3201 | paddr, len)); | |||
3202 | ||||
3203 | trb->trb_paddr = htole64(paddr)((__uint64_t)(paddr)); | |||
3204 | trb->trb_status = htole32(((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, remain, len))) | |||
3205 | XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, remain, len))) | |||
3206 | xhci_xfer_tdsize(xfer, remain, len)((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, remain, len))) | |||
3207 | )((__uint32_t)((((0) & 0x3ff) << 22) | ((len) & 0x1ffff ) | xhci_xfer_tdsize(xfer, remain, len))); | |||
3208 | trb->trb_flags = htole32(flags)((__uint32_t)(flags)); | |||
3209 | ||||
3210 | bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
3211 | TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
3212 | BUS_DMASYNC_PREWRITE)(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)); | |||
3213 | ||||
3214 | remain -= len; | |||
3215 | paddr += len; | |||
3216 | } | |||
3217 | ||||
3218 | xfer->frlengths[i] = 0; | |||
3219 | } | |||
3220 | ||||
3221 | /* First TRB. */ | |||
3222 | trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE)((__uint32_t)((1 << 0))); | |||
| ||||
3223 | bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb0) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
3224 | TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb0) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)) | |||
3225 | BUS_DMASYNC_PREWRITE)(*(xp->ring.dma.tag)->_dmamap_sync)((xp->ring.dma.tag ), (xp->ring.dma.map), (((char *)(trb0) - (char *)((&xp ->ring)->trbs))), (sizeof(struct xhci_trb)), (0x04)); | |||
3226 | ||||
3227 | s = splusb()splraise(0x5); | |||
3228 | XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci)(((sc)->iot)->write_4(((sc)->ioh), ((sc)->sc_door_off + ((0x0000 + (4 * (xp->slot))))), ((xp->dci)))); | |||
3229 | ||||
3230 | xfer->status = USBD_IN_PROGRESS; | |||
3231 | ||||
3232 | if (xfer->timeout) { | |||
3233 | timeout_del(&xfer->timeout_handle); | |||
3234 | timeout_set(&xfer->timeout_handle, xhci_timeout, xfer); | |||
3235 | timeout_add_msec(&xfer->timeout_handle, xfer->timeout); | |||
3236 | } | |||
3237 | splx(s)spllower(s); | |||
3238 | ||||
3239 | return (USBD_IN_PROGRESS); | |||
3240 | } |