File: | dev/ic/nvme.c |
Warning: | line 788, column 2: Value stored to 'ns' is never read |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | /* $OpenBSD: nvme.c,v 1.103 2021/08/31 04:21:04 dlg Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 2014 David Gwynne <dlg@openbsd.org> |
5 | * |
6 | * Permission to use, copy, modify, and distribute this software for any |
7 | * purpose with or without fee is hereby granted, provided that the above |
8 | * copyright notice and this permission notice appear in all copies. |
9 | * |
10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
16 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
17 | */ |
18 | |
19 | #include <sys/param.h> |
20 | #include <sys/systm.h> |
21 | #include <sys/buf.h> |
22 | #include <sys/kernel.h> |
23 | #include <sys/malloc.h> |
24 | #include <sys/device.h> |
25 | #include <sys/queue.h> |
26 | #include <sys/mutex.h> |
27 | #include <sys/pool.h> |
28 | |
29 | #include <sys/atomic.h> |
30 | |
31 | #include <machine/bus.h> |
32 | |
33 | #include <scsi/scsi_all.h> |
34 | #include <scsi/scsi_disk.h> |
35 | #include <scsi/scsiconf.h> |
36 | |
37 | #include <dev/ic/nvmereg.h> |
38 | #include <dev/ic/nvmevar.h> |
39 | |
/* Autoconf driver glue for nvme(4): no attached-device list yet, dull class. */
40 | struct cfdriver nvme_cd = { |
41 | NULL((void *)0), |
42 | "nvme", |
43 | DV_DULL |
44 | }; |
45 | |
46 | int nvme_ready(struct nvme_softc *, u_int32_t); |
47 | int nvme_enable(struct nvme_softc *); |
48 | int nvme_disable(struct nvme_softc *); |
49 | int nvme_shutdown(struct nvme_softc *); |
50 | int nvme_resume(struct nvme_softc *); |
51 | |
52 | void nvme_dumpregs(struct nvme_softc *); |
53 | int nvme_identify(struct nvme_softc *, u_int); |
54 | void nvme_fill_identify(struct nvme_softc *, struct nvme_ccb *, void *); |
55 | |
56 | int nvme_ccbs_alloc(struct nvme_softc *, u_int); |
57 | void nvme_ccbs_free(struct nvme_softc *, u_int); |
58 | |
59 | void * nvme_ccb_get(void *); |
60 | void nvme_ccb_put(void *, void *); |
61 | |
62 | int nvme_poll(struct nvme_softc *, struct nvme_queue *, struct nvme_ccb *, |
63 | void (*)(struct nvme_softc *, struct nvme_ccb *, void *)); |
64 | void nvme_poll_fill(struct nvme_softc *, struct nvme_ccb *, void *); |
65 | void nvme_poll_done(struct nvme_softc *, struct nvme_ccb *, |
66 | struct nvme_cqe *); |
67 | void nvme_sqe_fill(struct nvme_softc *, struct nvme_ccb *, void *); |
68 | void nvme_empty_done(struct nvme_softc *, struct nvme_ccb *, |
69 | struct nvme_cqe *); |
70 | |
71 | struct nvme_queue * |
72 | nvme_q_alloc(struct nvme_softc *, u_int16_t, u_int, u_int); |
73 | int nvme_q_create(struct nvme_softc *, struct nvme_queue *); |
74 | int nvme_q_reset(struct nvme_softc *, struct nvme_queue *); |
75 | int nvme_q_delete(struct nvme_softc *, struct nvme_queue *); |
76 | void nvme_q_submit(struct nvme_softc *, |
77 | struct nvme_queue *, struct nvme_ccb *, |
78 | void (*)(struct nvme_softc *, struct nvme_ccb *, void *)); |
79 | int nvme_q_complete(struct nvme_softc *, struct nvme_queue *); |
80 | void nvme_q_free(struct nvme_softc *, struct nvme_queue *); |
81 | |
82 | void nvme_scsi_cmd(struct scsi_xfer *); |
83 | void nvme_minphys(struct buf *, struct scsi_link *); |
84 | int nvme_scsi_probe(struct scsi_link *); |
85 | void nvme_scsi_free(struct scsi_link *); |
86 | |
87 | #ifdef HIBERNATE1 |
88 | #include <uvm/uvm_extern.h> |
89 | #include <sys/hibernate.h> |
90 | #include <sys/disk.h> |
91 | #include <sys/disklabel.h> |
92 | |
93 | int nvme_hibernate_io(dev_t, daddr_t, vaddr_t, size_t, int, void *); |
94 | #endif |
95 | |
/*
 * SCSI adapter entry points: each NVMe namespace is presented to the
 * midlayer as a SCSI target via these callbacks.
 */
96 | struct scsi_adapter nvme_switch = { |
97 | nvme_scsi_cmd, nvme_minphys, nvme_scsi_probe, nvme_scsi_free, NULL((void *)0) |
98 | }; |
99 | |
100 | void nvme_scsi_io(struct scsi_xfer *, int); |
101 | void nvme_scsi_io_fill(struct nvme_softc *, struct nvme_ccb *, void *); |
102 | void nvme_scsi_io_done(struct nvme_softc *, struct nvme_ccb *, |
103 | struct nvme_cqe *); |
104 | |
105 | void nvme_scsi_sync(struct scsi_xfer *); |
106 | void nvme_scsi_sync_fill(struct nvme_softc *, struct nvme_ccb *, void *); |
107 | void nvme_scsi_sync_done(struct nvme_softc *, struct nvme_ccb *, |
108 | struct nvme_cqe *); |
109 | |
110 | void nvme_scsi_inq(struct scsi_xfer *); |
111 | void nvme_scsi_inquiry(struct scsi_xfer *); |
112 | void nvme_scsi_capacity16(struct scsi_xfer *); |
113 | void nvme_scsi_capacity(struct scsi_xfer *); |
114 | |
115 | uint32_t nvme_op_sq_enter(struct nvme_softc *, |
116 | struct nvme_queue *, struct nvme_ccb *); |
117 | void nvme_op_sq_leave(struct nvme_softc *, |
118 | struct nvme_queue *, struct nvme_ccb *); |
119 | uint32_t nvme_op_sq_enter_locked(struct nvme_softc *, |
120 | struct nvme_queue *, struct nvme_ccb *); |
121 | void nvme_op_sq_leave_locked(struct nvme_softc *, |
122 | struct nvme_queue *, struct nvme_ccb *); |
123 | |
124 | void nvme_op_cq_done(struct nvme_softc *, |
125 | struct nvme_queue *, struct nvme_ccb *); |
126 | |
/*
 * Default queue-doorbell operations, used when the bus front-end does not
 * install its own sc_ops (see nvme_attach()).
 */
127 | static const struct nvme_ops nvme_ops = { |
128 | .op_sq_enter = nvme_op_sq_enter, |
129 | .op_sq_leave = nvme_op_sq_leave, |
130 | .op_sq_enter_locked = nvme_op_sq_enter_locked, |
131 | .op_sq_leave_locked = nvme_op_sq_leave_locked, |
132 | |
133 | .op_cq_done = nvme_op_cq_done, |
134 | }; |
135 | |
136 | /* |
137 | * Some controllers, at least Apple NVMe, always require split |
138 | * transfers, so don't use bus_space_{read,write}_8() on LP64. |
139 | */ |
/*
 * Read a 64-bit controller register as two 32-bit accesses (low word first,
 * high word at r + 4); see the note above about controllers that require
 * split transfers.
 */
140 | u_int64_t |
141 | nvme_read8(struct nvme_softc *sc, bus_size_t r) |
142 | { |
143 | u_int64_t v; |
144 | |
145 | v = (u_int64_t)nvme_read4(sc, r)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((r)))) | |
146 | (u_int64_t)nvme_read4(sc, r + 4)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((r + 4)))) << 32; |
147 | |
148 | return (v); |
149 | } |
150 | |
/*
 * Write a 64-bit controller register as two 32-bit accesses (low word
 * first, then the high word at r + 4).
 */
151 | void |
152 | nvme_write8(struct nvme_softc *sc, bus_size_t r, u_int64_t v) |
153 | { |
154 | nvme_write4(sc, r, v)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((r)), ((v) ))); |
155 | nvme_write4(sc, r + 4, v >> 32)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((r + 4)), ( (v >> 32)))); |
156 | } |
157 | |
/*
 * Debug helper: dump the controller's CAP, VS, CC, CSTS, AQA, ASQ and ACQ
 * registers, decoding the individual CAP and CC fields.
 */
158 | void |
159 | nvme_dumpregs(struct nvme_softc *sc) |
160 | { |
161 | u_int64_t r8; |
162 | u_int32_t r4; |
163 | |
164 | r8 = nvme_read8(sc, NVME_CAP0x0000); |
165 | printf("%s: cap 0x%016llx\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), nvme_read8(sc, NVME_CAP0x0000)); |
166 | printf("%s: mpsmax %u (%u)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
167 | (u_int)NVME_CAP_MPSMAX(r8)(12 + (((r8) >> 52) & 0xf)), (1 << NVME_CAP_MPSMAX(r8)(12 + (((r8) >> 52) & 0xf)))); |
168 | printf("%s: mpsmin %u (%u)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
169 | (u_int)NVME_CAP_MPSMIN(r8)(12 + (((r8) >> 48) & 0xf)), (1 << NVME_CAP_MPSMIN(r8)(12 + (((r8) >> 48) & 0xf)))); |
170 | printf("%s: css %llu\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CAP_CSS(r8)(((r8) >> 37) & 0x7f)); |
171 | printf("%s: nssrs %llu\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CAP_NSSRS(r8)(((r8)) & ((1ULL << 36)))); |
172 | printf("%s: dstrd %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CAP_DSTRD(r8)(1 << (2 + (((r8) >> 32) & 0xf)))); |
173 | printf("%s: to %llu msec\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CAP_TO(r8)(500 * (((r8) >> 24) & 0xff))); |
174 | printf("%s: ams %llu\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CAP_AMS(r8)(((r8) >> 17) & 0x3)); |
175 | printf("%s: cqr %llu\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CAP_CQR(r8)(((r8)) & ((1 << 16)))); |
176 | printf("%s: mqes %llu\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CAP_MQES(r8)(((r8) & 0xffff) + 1)); |
177 | |
178 | printf("%s: vs 0x%04x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), nvme_read4(sc, NVME_VS)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x0008))))); |
179 | |
180 | r4 = nvme_read4(sc, NVME_CC)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x0014)))); |
181 | printf("%s: cc 0x%04x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), r4); |
182 | printf("%s: iocqes %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CC_IOCQES_R(r4)(((r4) >> 20) & 0xf)); |
183 | printf("%s: iosqes %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CC_IOSQES_R(r4)(((r4) >> 16) & 0xf)); |
184 | printf("%s: shn %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CC_SHN_R(r4)(((r4) >> 15) & 0x3)); |
185 | printf("%s: ams %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CC_AMS_R(r4)(((r4) >> 11) & 0xf)); |
186 | printf("%s: mps %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CC_MPS_R(r4)(12 + (((r4) >> 7) & 0xf))); |
187 | printf("%s: css %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CC_CSS_R(r4)(((r4) >> 4) & 0x7)); |
188 | printf("%s: en %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), ISSET(r4, NVME_CC_EN)((r4) & ((1 << 0)))); |
189 | |
190 | printf("%s: csts 0x%08x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), nvme_read4(sc, NVME_CSTS)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x001c))))); |
191 | printf("%s: aqa 0x%08x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), nvme_read4(sc, NVME_AQA)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x0024))))); |
192 | printf("%s: asq 0x%016llx\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), nvme_read8(sc, NVME_ASQ0x0028)); |
193 | printf("%s: acq 0x%016llx\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), nvme_read8(sc, NVME_ACQ0x0030)); |
194 | } |
195 | |
/*
 * Poll CSTS.RDY until it equals the requested state (rdy is either
 * NVME_CSTS_RDY or 0).  Delays 1ms per iteration, for at most sc_rdy_to
 * iterations (derived from CAP.TO in nvme_attach()).
 *
 * Returns 0 once the state is reached, 1 on timeout.
 */
196 | int |
197 | nvme_ready(struct nvme_softc *sc, u_int32_t rdy) |
198 | { |
199 | u_int i = 0; |
200 | |
201 | while ((nvme_read4(sc, NVME_CSTS)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x001c)))) & NVME_CSTS_RDY(1 << 0)) != rdy) { |
202 | if (i++ > sc->sc_rdy_to) |
203 | return (1); |
204 | |
205 | delay(1000)(*delay_func)(1000); |
206 | nvme_barrier(sc, NVME_CSTS, 4, BUS_SPACE_BARRIER_READ)bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0x001c), (4), (0x01)); |
207 | } |
208 | |
209 | return (0); |
210 | } |
211 | |
/*
 * Bring the controller to the enabled state: if CC.EN is already set just
 * wait for CSTS.RDY; otherwise program the admin queue registers (AQA,
 * ASQ, ACQ), rebuild CC (queue entry sizes, shutdown/arbitration/page-size
 * fields), set CC.EN and wait for ready.
 *
 * Returns 0 on success, 1 if the controller never became ready.
 */
212 | int |
213 | nvme_enable(struct nvme_softc *sc) |
214 | { |
215 | u_int32_t cc; |
216 | |
217 | cc = nvme_read4(sc, NVME_CC)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x0014)))); |
218 | if (ISSET(cc, NVME_CC_EN)((cc) & ((1 << 0)))) |
219 | return (nvme_ready(sc, NVME_CSTS_RDY(1 << 0))); |
220 | |
221 | if (sc->sc_ops->op_enable != NULL((void *)0)) |
222 | sc->sc_ops->op_enable(sc); |
223 | |
224 | nvme_write4(sc, NVME_AQA, NVME_AQA_ACQS(sc->sc_admin_q->q_entries) |(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x0024)), (((((sc->sc_admin_q->q_entries) - 1) << 16) | (( (sc->sc_admin_q->q_entries) - 1) << 0))))) |
225 | NVME_AQA_ASQS(sc->sc_admin_q->q_entries))(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x0024)), (((((sc->sc_admin_q->q_entries) - 1) << 16) | (( (sc->sc_admin_q->q_entries) - 1) << 0))))); |
226 | nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE)bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x02)); |
227 | |
228 | nvme_write8(sc, NVME_ASQ0x0028, NVME_DMA_DVA(sc->sc_admin_q->q_sq_dmamem)((u_int64_t)(sc->sc_admin_q->q_sq_dmamem)->ndm_map-> dm_segs[0].ds_addr)); |
229 | nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE)bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x02)); |
230 | nvme_write8(sc, NVME_ACQ0x0030, NVME_DMA_DVA(sc->sc_admin_q->q_cq_dmamem)((u_int64_t)(sc->sc_admin_q->q_cq_dmamem)->ndm_map-> dm_segs[0].ds_addr)); |
231 | nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE)bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x02)); |
232 | |
233 | CLR(cc, NVME_CC_IOCQES_MASK | NVME_CC_IOSQES_MASK | NVME_CC_SHN_MASK |((cc) &= ~((((0xf) & 0xf) << 20) | (((0xf) & 0xf) << 16) | (((0x3) & 0x3) << 14) | (((0x7 ) & 0x7) << 11) | (0xf << 7) | (((0x7) & 0x7 ) << 4))) |
234 | NVME_CC_AMS_MASK | NVME_CC_MPS_MASK | NVME_CC_CSS_MASK)((cc) &= ~((((0xf) & 0xf) << 20) | (((0xf) & 0xf) << 16) | (((0x3) & 0x3) << 14) | (((0x7 ) & 0x7) << 11) | (0xf << 7) | (((0x7) & 0x7 ) << 4))); |
235 | SET(cc, NVME_CC_IOSQES(6))((cc) |= ((((6) & 0xf) << 16))); /* Submission queue size == 2**6 (64) */ |
236 | SET(cc, NVME_CC_IOCQES(4))((cc) |= ((((4) & 0xf) << 20))); /* Completion queue size == 2**4 (16) */ |
237 | SET(cc, NVME_CC_SHN(NVME_CC_SHN_NONE))((cc) |= ((((0) & 0x3) << 14))); |
238 | SET(cc, NVME_CC_CSS(NVME_CC_CSS_NVM))((cc) |= ((((0) & 0x7) << 4))); |
239 | SET(cc, NVME_CC_AMS(NVME_CC_AMS_RR))((cc) |= ((((0) & 0x7) << 11))); |
240 | SET(cc, NVME_CC_MPS(ffs(sc->sc_mps) - 1))((cc) |= (((((ffs(sc->sc_mps) - 1) - 12) & 0xf) << 7))); |
241 | SET(cc, NVME_CC_EN)((cc) |= ((1 << 0))); |
242 | |
243 | nvme_write4(sc, NVME_CC, cc)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x0014)), ((cc)))); |
244 | nvme_barrier(sc, 0, sc->sc_ios,bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x01 | 0x02)) |
245 | BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x01 | 0x02)); |
246 | |
247 | return (nvme_ready(sc, NVME_CSTS_RDY(1 << 0))); |
248 | } |
249 | |
/*
 * Disable the controller: if it is currently enabled and not in a fatal
 * state (CSTS.CFS), first wait for it to report ready, then clear CC.EN
 * and wait for CSTS.RDY to drop.
 *
 * Returns 0 on success, 1 on timeout.
 */
250 | int |
251 | nvme_disable(struct nvme_softc *sc) |
252 | { |
253 | u_int32_t cc, csts; |
254 | |
255 | cc = nvme_read4(sc, NVME_CC)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x0014)))); |
256 | if (ISSET(cc, NVME_CC_EN)((cc) & ((1 << 0)))) { |
257 | csts = nvme_read4(sc, NVME_CSTS)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x001c)))); |
258 | if (!ISSET(csts, NVME_CSTS_CFS)((csts) & ((1 << 1))) && |
259 | nvme_ready(sc, NVME_CSTS_RDY(1 << 0)) != 0) |
260 | return (1); |
261 | } |
262 | |
263 | CLR(cc, NVME_CC_EN)((cc) &= ~((1 << 0))); |
264 | |
265 | nvme_write4(sc, NVME_CC, cc)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x0014)), ((cc)))); |
266 | nvme_barrier(sc, 0, sc->sc_ios,bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x01 | 0x02)) |
267 | BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x01 | 0x02)); |
268 | |
269 | return (nvme_ready(sc, 0)); |
270 | } |
271 | |
/*
 * Common attach path called by the bus front-ends: validate the register
 * mapping, derive page size / timeout / transfer limits from CAP, reset
 * and re-enable the controller, identify it, create the I/O queue and
 * attach a scsibus on top (one SCSI target per namespace).
 *
 * Returns 0 on success, 1 on failure (resources are unwound via the
 * goto labels at the bottom).
 */
272 | int |
273 | nvme_attach(struct nvme_softc *sc) |
274 | { |
275 | struct scsibus_attach_args saa; |
276 | u_int64_t cap; |
277 | u_int32_t reg; |
278 | u_int nccbs = 0; |
279 | |
280 | mtx_init(&sc->sc_ccb_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc-> sc_ccb_mtx), ((((0x6)) > 0x0 && ((0x6)) < 0x9) ? 0x9 : ((0x6)))); } while (0); |
281 | SIMPLEQ_INIT(&sc->sc_ccb_list)do { (&sc->sc_ccb_list)->sqh_first = ((void *)0); ( &sc->sc_ccb_list)->sqh_last = &(&sc->sc_ccb_list )->sqh_first; } while (0); |
282 | scsi_iopool_init(&sc->sc_iopool, sc, nvme_ccb_get, nvme_ccb_put); |
283 | if (sc->sc_ops == NULL((void *)0)) |
284 | sc->sc_ops = &nvme_ops; |
285 | if (sc->sc_openings == 0) |
286 | sc->sc_openings = 64; |
287 | |
288 | reg = nvme_read4(sc, NVME_VS)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x0008)))); |
289 | if (reg == 0xffffffff) { |
290 | printf("invalid mapping\n"); |
291 | return (1); |
292 | } |
293 | |
294 | printf("NVMe %d.%d\n", NVME_VS_MJR(reg)(((reg) & 0xffff0000) >> 16), NVME_VS_MNR(reg)(((reg) & 0x0000ff00) >> 8)); |
295 | |
296 | cap = nvme_read8(sc, NVME_CAP0x0000); |
297 | sc->sc_dstrd = NVME_CAP_DSTRD(cap)(1 << (2 + (((cap) >> 32) & 0xf))); |
298 | if (NVME_CAP_MPSMIN(cap)(12 + (((cap) >> 48) & 0xf)) > PAGE_SHIFT12) { |
299 | printf("%s: NVMe minimum page size %u " |
300 | "is greater than CPU page size %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
301 | 1 << NVME_CAP_MPSMIN(cap)(12 + (((cap) >> 48) & 0xf)), 1 << PAGE_SHIFT12); |
302 | return (1); |
303 | } |
304 | if (NVME_CAP_MPSMAX(cap)(12 + (((cap) >> 52) & 0xf)) < PAGE_SHIFT12) |
305 | sc->sc_mps = 1 << NVME_CAP_MPSMAX(cap)(12 + (((cap) >> 52) & 0xf)); |
306 | else |
307 | sc->sc_mps = 1 << PAGE_SHIFT12; |
308 | |
309 | sc->sc_rdy_to = NVME_CAP_TO(cap)(500 * (((cap) >> 24) & 0xff)); |
310 | sc->sc_mdts = MAXPHYS(64 * 1024); |
311 | sc->sc_max_prpl = sc->sc_mdts / sc->sc_mps; |
312 | |
313 | if (nvme_disable(sc) != 0) { |
314 | printf("%s: unable to disable controller\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
315 | return (1); |
316 | } |
317 | |
318 | sc->sc_admin_q = nvme_q_alloc(sc, NVME_ADMIN_Q0, 128, sc->sc_dstrd); |
319 | if (sc->sc_admin_q == NULL((void *)0)) { |
320 | printf("%s: unable to allocate admin queue\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
321 | return (1); |
322 | } |
323 | |
/* A small initial ccb pool is enough to run IDENTIFY; it is replaced below. */
324 | if (nvme_ccbs_alloc(sc, 16) != 0) { |
325 | printf("%s: unable to allocate initial ccbs\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
326 | goto free_admin_q; |
327 | } |
328 | nccbs = 16; |
329 | |
330 | if (nvme_enable(sc) != 0) { |
331 | printf("%s: unable to enable controller\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
332 | goto free_ccbs; |
333 | } |
334 | |
335 | if (nvme_identify(sc, NVME_CAP_MPSMIN(cap)(12 + (((cap) >> 48) & 0xf))) != 0) { |
336 | printf("%s: unable to identify controller\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
337 | goto disable; |
338 | } |
339 | |
340 | /* We now know the real values of sc_mdts and sc_max_prpl. */ |
341 | nvme_ccbs_free(sc, nccbs); |
342 | if (nvme_ccbs_alloc(sc, 64) != 0) { |
343 | printf("%s: unable to allocate ccbs\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
344 | goto free_admin_q; |
345 | } |
346 | nccbs = 64; |
347 | |
348 | sc->sc_q = nvme_q_alloc(sc, NVME_IO_Q1, 128, sc->sc_dstrd); |
349 | if (sc->sc_q == NULL((void *)0)) { |
350 | printf("%s: unable to allocate io q\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
351 | goto disable; |
352 | } |
353 | |
354 | if (nvme_q_create(sc, sc->sc_q) != 0) { |
355 | printf("%s: unable to create io q\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
356 | goto free_q; |
357 | } |
358 | |
359 | #ifdef HIBERNATE1 |
360 | sc->sc_hib_q = nvme_q_alloc(sc, NVME_HIB_Q2, 4, sc->sc_dstrd); |
361 | if (sc->sc_hib_q == NULL((void *)0)) { |
362 | printf("%s: unable to allocate hibernate io queue\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
363 | goto free_q; |
364 | } |
365 | #endif |
366 | |
/* Unmask controller interrupts now that the queues exist. */
367 | nvme_write4(sc, NVME_INTMC, 1)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x0010)), ((1)))); |
368 | |
369 | sc->sc_namespaces = mallocarray(sc->sc_nn + 1, |
370 | sizeof(*sc->sc_namespaces), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008); |
371 | |
372 | saa.saa_adapter = &nvme_switch; |
373 | saa.saa_adapter_softc = sc; |
374 | saa.saa_adapter_buswidth = sc->sc_nn + 1; |
375 | saa.saa_luns = 1; |
376 | saa.saa_adapter_target = 0; |
377 | saa.saa_openings = sc->sc_openings; |
378 | saa.saa_pool = &sc->sc_iopool; |
379 | saa.saa_quirks = saa.saa_flags = 0; |
380 | saa.saa_wwpn = saa.saa_wwnn = 0; |
381 | |
382 | config_found(&sc->sc_dev, &saa, scsiprint)config_found_sm((&sc->sc_dev), (&saa), (scsiprint) , ((void *)0)); |
383 | |
384 | return (0); |
385 | |
386 | free_q: |
387 | nvme_q_free(sc, sc->sc_q); |
388 | disable: |
389 | nvme_disable(sc); |
390 | free_ccbs: |
391 | nvme_ccbs_free(sc, nccbs); |
392 | free_admin_q: |
393 | nvme_q_free(sc, sc->sc_admin_q); |
394 | |
395 | return (1); |
396 | } |
397 | |
/*
 * Resume after suspend: reset and re-enable the controller, rebuild the
 * admin queue state, allocate and create a fresh I/O queue, then unmask
 * interrupts.  Returns 0 on success, 1 on failure.
 */
398 | int |
399 | nvme_resume(struct nvme_softc *sc) |
400 | { |
401 | if (nvme_disable(sc) != 0) { |
402 | printf("%s: unable to disable controller\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
403 | return (1); |
404 | } |
405 | |
406 | if (nvme_q_reset(sc, sc->sc_admin_q) != 0) { |
407 | printf("%s: unable to reset admin queue\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
408 | return (1); |
409 | } |
410 | |
411 | if (nvme_enable(sc) != 0) { |
412 | printf("%s: unable to enable controller\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
413 | return (1); |
414 | } |
415 | |
416 | sc->sc_q = nvme_q_alloc(sc, NVME_IO_Q1, 128, sc->sc_dstrd); |
417 | if (sc->sc_q == NULL((void *)0)) { |
418 | printf("%s: unable to allocate io q\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
419 | goto disable; |
420 | } |
421 | |
422 | if (nvme_q_create(sc, sc->sc_q) != 0) { |
423 | printf("%s: unable to create io q\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
424 | goto free_q; |
425 | } |
426 | |
427 | nvme_write4(sc, NVME_INTMC, 1)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x0010)), ((1)))); |
428 | |
429 | return (0); |
430 | |
431 | free_q: |
432 | nvme_q_free(sc, sc->sc_q); |
433 | disable: |
434 | nvme_disable(sc); |
435 | |
436 | return (1); |
437 | } |
438 | |
439 | int |
440 | nvme_scsi_probe(struct scsi_link *link) |
441 | { |
442 | struct nvme_softc *sc = link->bus->sb_adapter_softc; |
443 | struct nvme_sqe sqe; |
444 | struct nvm_identify_namespace *identify; |
445 | struct nvme_dmamem *mem; |
446 | struct nvme_ccb *ccb; |
447 | int rv; |
448 | |
449 | ccb = scsi_io_get(&sc->sc_iopool, 0); |
450 | KASSERT(ccb != NULL)((ccb != ((void *)0)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/ic/nvme.c" , 450, "ccb != NULL")); |
451 | |
452 | mem = nvme_dmamem_alloc(sc, sizeof(*identify)); |
453 | if (mem == NULL((void *)0)) |
454 | return (ENOMEM12); |
455 | |
456 | memset(&sqe, 0, sizeof(sqe))__builtin_memset((&sqe), (0), (sizeof(sqe))); |
457 | sqe.opcode = NVM_ADMIN_IDENTIFY0x06; |
458 | htolem32(&sqe.nsid, link->target)(*(__uint32_t *)(&sqe.nsid) = ((__uint32_t)(link->target ))); |
459 | htolem64(&sqe.entry.prp[0], NVME_DMA_DVA(mem))(*(__uint64_t *)(&sqe.entry.prp[0]) = ((__uint64_t)(((u_int64_t )(mem)->ndm_map->dm_segs[0].ds_addr)))); |
460 | htolem32(&sqe.cdw10, 0)(*(__uint32_t *)(&sqe.cdw10) = ((__uint32_t)(0))); |
461 | |
462 | ccb->ccb_done = nvme_empty_done; |
463 | ccb->ccb_cookie = &sqe; |
464 | |
465 | nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD0x01); |
466 | rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill); |
467 | nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD0x02); |
468 | |
469 | scsi_io_put(&sc->sc_iopool, ccb); |
470 | |
471 | identify = NVME_DMA_KVA(mem)((void *)(mem)->ndm_kva); |
472 | if (rv == 0) { |
473 | if (lemtoh64(&identify->nsze)((__uint64_t)(*(__uint64_t *)(&identify->nsze))) > 0) { |
474 | /* Commit namespace if it has a size greater than zero. */ |
475 | identify = malloc(sizeof(*identify), M_DEVBUF2, M_WAITOK0x0001); |
476 | memcpy(identify, NVME_DMA_KVA(mem), sizeof(*identify))__builtin_memcpy((identify), (((void *)(mem)->ndm_kva)), ( sizeof(*identify))); |
477 | sc->sc_namespaces[link->target].ident = identify; |
478 | } else { |
479 | /* Don't attach a namespace if its size is zero. */ |
480 | rv = ENXIO6; |
481 | } |
482 | } |
483 | |
484 | nvme_dmamem_free(sc, mem); |
485 | |
486 | return (rv); |
487 | } |
488 | |
/*
 * Orderly controller shutdown: mask interrupts, delete the I/O queue,
 * request a normal shutdown via CC.SHN, then poll CSTS.SHST (1ms per
 * iteration, up to 4000 tries) for shutdown-complete.  Falls back to a
 * plain disable if any step fails or the poll times out.  Always
 * returns 0.
 */
489 | int |
490 | nvme_shutdown(struct nvme_softc *sc) |
491 | { |
492 | u_int32_t cc, csts; |
493 | int i; |
494 | |
495 | nvme_write4(sc, NVME_INTMC, 0)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x0010)), ((0)))); |
496 | |
497 | if (nvme_q_delete(sc, sc->sc_q) != 0) { |
498 | printf("%s: unable to delete q, disabling\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
499 | goto disable; |
500 | } |
501 | |
502 | cc = nvme_read4(sc, NVME_CC)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x0014)))); |
503 | CLR(cc, NVME_CC_SHN_MASK)((cc) &= ~((((0x3) & 0x3) << 14))); |
504 | SET(cc, NVME_CC_SHN(NVME_CC_SHN_NORMAL))((cc) |= ((((1) & 0x3) << 14))); |
505 | nvme_write4(sc, NVME_CC, cc)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x0014)), ((cc)))); |
506 | |
507 | for (i = 0; i < 4000; i++) { |
508 | nvme_barrier(sc, 0, sc->sc_ios,bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x01 | 0x02)) |
509 | BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x01 | 0x02)); |
510 | csts = nvme_read4(sc, NVME_CSTS)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x001c)))); |
511 | if ((csts & NVME_CSTS_SHST_MASK(0x3 << 2)) == NVME_CSTS_SHST_DONE(0x2 << 2)) |
512 | return (0); |
513 | |
514 | delay(1000)(*delay_func)(1000); |
515 | } |
516 | |
517 | printf("%s: unable to shutdown, disabling\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
518 | |
519 | disable: |
520 | nvme_disable(sc); |
521 | return (0); |
522 | } |
523 | |
/*
 * Power-management hook: shut the controller down after the children on
 * powerdown, and bring it back up before the children on resume.  All
 * other actions are simply propagated to the children.
 */
524 | int |
525 | nvme_activate(struct nvme_softc *sc, int act) |
526 | { |
527 | int rv; |
528 | |
529 | switch (act) { |
530 | case DVACT_POWERDOWN6: |
531 | rv = config_activate_children(&sc->sc_dev, act); |
532 | nvme_shutdown(sc); |
533 | break; |
534 | case DVACT_RESUME4: |
535 | rv = nvme_resume(sc); |
536 | if (rv == 0) |
537 | rv = config_activate_children(&sc->sc_dev, act); |
538 | break; |
539 | default: |
540 | rv = config_activate_children(&sc->sc_dev, act); |
541 | break; |
542 | } |
543 | |
544 | return (rv); |
545 | } |
546 | |
/*
 * SCSI command dispatch: translate the midlayer opcode into the matching
 * NVMe operation (reads/writes, flush, inquiry, capacity), complete a few
 * state commands inline with XS_NOERROR, and fail everything else with
 * XS_DRIVER_STUFFUP.
 */
547 | void |
548 | nvme_scsi_cmd(struct scsi_xfer *xs) |
549 | { |
550 | switch (xs->cmd.opcode) { |
551 | case READ_COMMAND0x08: |
552 | case READ_100x28: |
553 | case READ_120xa8: |
554 | case READ_160x88: |
555 | nvme_scsi_io(xs, SCSI_DATA_IN0x00800); |
556 | return; |
557 | case WRITE_COMMAND0x0a: |
558 | case WRITE_100x2a: |
559 | case WRITE_120xaa: |
560 | case WRITE_160x8a: |
561 | nvme_scsi_io(xs, SCSI_DATA_OUT0x01000); |
562 | return; |
563 | |
564 | case SYNCHRONIZE_CACHE0x35: |
565 | nvme_scsi_sync(xs); |
566 | return; |
567 | |
568 | case INQUIRY0x12: |
569 | nvme_scsi_inq(xs); |
570 | return; |
571 | case READ_CAPACITY_160x9e: |
572 | nvme_scsi_capacity16(xs); |
573 | return; |
574 | case READ_CAPACITY0x25: |
575 | nvme_scsi_capacity(xs); |
576 | return; |
577 | |
578 | case TEST_UNIT_READY0x00: |
579 | case PREVENT_ALLOW0x1e: |
580 | case START_STOP0x1b: |
581 | xs->error = XS_NOERROR0; |
582 | scsi_done(xs); |
583 | return; |
584 | |
585 | default: |
586 | break; |
587 | } |
588 | |
589 | xs->error = XS_DRIVER_STUFFUP2; |
590 | scsi_done(xs); |
591 | } |
592 | |
/* Clamp a buffer's transfer size to the controller's maximum (sc_mdts). */
593 | void |
594 | nvme_minphys(struct buf *bp, struct scsi_link *link) |
595 | { |
596 | struct nvme_softc *sc = link->bus->sb_adapter_softc; |
597 | |
598 | if (bp->b_bcount > sc->sc_mdts) |
599 | bp->b_bcount = sc->sc_mdts; |
600 | } |
601 | |
/*
 * Start a SCSI read or write: verify the transfer direction, DMA-map the
 * data buffer, and when the mapping needs more than two segments build the
 * per-ccb PRP list (segments 1..n-1; segment 0 rides in the sqe itself,
 * see nvme_scsi_io_fill()).  Polled for SCSI_POLL, queued otherwise.
 * Failures complete the xfer with XS_DRIVER_STUFFUP.
 */
602 | void |
603 | nvme_scsi_io(struct scsi_xfer *xs, int dir) |
604 | { |
605 | struct scsi_link *link = xs->sc_link; |
606 | struct nvme_softc *sc = link->bus->sb_adapter_softc; |
607 | struct nvme_ccb *ccb = xs->io; |
608 | bus_dmamap_t dmap = ccb->ccb_dmamap; |
609 | int i; |
610 | |
611 | if ((xs->flags & (SCSI_DATA_IN0x00800|SCSI_DATA_OUT0x01000)) != dir) |
612 | goto stuffup; |
613 | |
614 | ccb->ccb_done = nvme_scsi_io_done; |
615 | ccb->ccb_cookie = xs; |
616 | |
617 | if (bus_dmamap_load(sc->sc_dmat, dmap,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap) , (xs->data), (xs->datalen), (((void *)0)), (((xs->flags ) & (0x00001)) ? 0x0001 : 0x0000)) |
618 | xs->data, xs->datalen, NULL, ISSET(xs->flags, SCSI_NOSLEEP) ?(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap) , (xs->data), (xs->datalen), (((void *)0)), (((xs->flags ) & (0x00001)) ? 0x0001 : 0x0000)) |
619 | BUS_DMA_NOWAIT : BUS_DMA_WAITOK)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap) , (xs->data), (xs->datalen), (((void *)0)), (((xs->flags ) & (0x00001)) ? 0x0001 : 0x0000)) != 0) |
620 | goto stuffup; |
621 | |
622 | bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap) , (0), (dmap->dm_mapsize), (((xs->flags) & (0x00800 )) ? 0x01 : 0x04)) |
623 | ISSET(xs->flags, SCSI_DATA_IN) ?(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap) , (0), (dmap->dm_mapsize), (((xs->flags) & (0x00800 )) ? 0x01 : 0x04)) |
624 | BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap) , (0), (dmap->dm_mapsize), (((xs->flags) & (0x00800 )) ? 0x01 : 0x04)); |
625 | |
626 | if (dmap->dm_nsegs > 2) { |
627 | for (i = 1; i < dmap->dm_nsegs; i++) { |
628 | htolem64(&ccb->ccb_prpl[i - 1],(*(__uint64_t *)(&ccb->ccb_prpl[i - 1]) = ((__uint64_t )(dmap->dm_segs[i].ds_addr))) |
629 | dmap->dm_segs[i].ds_addr)(*(__uint64_t *)(&ccb->ccb_prpl[i - 1]) = ((__uint64_t )(dmap->dm_segs[i].ds_addr))); |
630 | } |
631 | bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x04)) |
632 | NVME_DMA_MAP(sc->sc_ccb_prpls),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x04)) |
633 | ccb->ccb_prpl_off,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x04)) |
634 | sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x04)) |
635 | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x04)); |
636 | } |
637 | |
638 | if (ISSET(xs->flags, SCSI_POLL)((xs->flags) & (0x00002))) { |
639 | nvme_poll(sc, sc->sc_q, ccb, nvme_scsi_io_fill); |
640 | return; |
641 | } |
642 | |
643 | nvme_q_submit(sc, sc->sc_q, ccb, nvme_scsi_io_fill); |
644 | return; |
645 | |
646 | stuffup: |
647 | xs->error = XS_DRIVER_STUFFUP2; |
648 | scsi_done(xs); |
649 | } |
650 | |
/*
 * Fill an NVM read/write submission queue entry for the xfer attached to
 * the ccb: decode lba/length from the SCSI CDB, set prp[0] to the first
 * DMA segment, and point prp[1] at either the second segment or the
 * pre-built PRP list (set up in nvme_scsi_io()).  nlb is zero-based,
 * hence blocks - 1.
 */
651 | void |
652 | nvme_scsi_io_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot) |
653 | { |
654 | struct nvme_sqe_io *sqe = slot; |
655 | struct scsi_xfer *xs = ccb->ccb_cookie; |
656 | struct scsi_link *link = xs->sc_link; |
657 | bus_dmamap_t dmap = ccb->ccb_dmamap; |
658 | u_int64_t lba; |
659 | u_int32_t blocks; |
660 | |
661 | scsi_cmd_rw_decode(&xs->cmd, &lba, &blocks); |
662 | |
663 | sqe->opcode = ISSET(xs->flags, SCSI_DATA_IN)((xs->flags) & (0x00800)) ? |
664 | NVM_CMD_READ0x02 : NVM_CMD_WRITE0x01; |
665 | htolem32(&sqe->nsid, link->target)(*(__uint32_t *)(&sqe->nsid) = ((__uint32_t)(link-> target))); |
666 | |
667 | htolem64(&sqe->entry.prp[0], dmap->dm_segs[0].ds_addr)(*(__uint64_t *)(&sqe->entry.prp[0]) = ((__uint64_t)(dmap ->dm_segs[0].ds_addr))); |
668 | switch (dmap->dm_nsegs) { |
669 | case 1: |
670 | break; |
671 | case 2: |
672 | htolem64(&sqe->entry.prp[1], dmap->dm_segs[1].ds_addr)(*(__uint64_t *)(&sqe->entry.prp[1]) = ((__uint64_t)(dmap ->dm_segs[1].ds_addr))); |
673 | break; |
674 | default: |
675 | /* the prp list is already set up and synced */ |
676 | htolem64(&sqe->entry.prp[1], ccb->ccb_prpl_dva)(*(__uint64_t *)(&sqe->entry.prp[1]) = ((__uint64_t)(ccb ->ccb_prpl_dva))); |
677 | break; |
678 | } |
679 | |
680 | htolem64(&sqe->slba, lba)(*(__uint64_t *)(&sqe->slba) = ((__uint64_t)(lba))); |
681 | htolem16(&sqe->nlb, blocks - 1)(*(__uint16_t *)(&sqe->nlb) = ((__uint16_t)(blocks - 1 ))); |
682 | } |
683 | |
/*
 * Completion handler for NVM read/write: sync (and, for PRP-list xfers,
 * post-write-sync the list) then unload the DMA map, translate the NVMe
 * status code into XS_NOERROR/XS_DRIVER_STUFFUP, and finish the xfer.
 */
684 | void |
685 | nvme_scsi_io_done(struct nvme_softc *sc, struct nvme_ccb *ccb, |
686 | struct nvme_cqe *cqe) |
687 | { |
688 | struct scsi_xfer *xs = ccb->ccb_cookie; |
689 | bus_dmamap_t dmap = ccb->ccb_dmamap; |
690 | u_int16_t flags; |
691 | |
692 | if (dmap->dm_nsegs > 2) { |
693 | bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x08)) |
694 | NVME_DMA_MAP(sc->sc_ccb_prpls),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x08)) |
695 | ccb->ccb_prpl_off,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x08)) |
696 | sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x08)) |
697 | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x08)); |
698 | } |
699 | |
700 | bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap) , (0), (dmap->dm_mapsize), (((xs->flags) & (0x00800 )) ? 0x02 : 0x08)) |
701 | ISSET(xs->flags, SCSI_DATA_IN) ?(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap) , (0), (dmap->dm_mapsize), (((xs->flags) & (0x00800 )) ? 0x02 : 0x08)) |
702 | BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap) , (0), (dmap->dm_mapsize), (((xs->flags) & (0x00800 )) ? 0x02 : 0x08)); |
703 | |
704 | bus_dmamap_unload(sc->sc_dmat, dmap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (dmap )); |
705 | |
706 | flags = lemtoh16(&cqe->flags)((__uint16_t)(*(__uint16_t *)(&cqe->flags))); |
707 | |
708 | xs->error = (NVME_CQE_SC(flags)((flags) & (0x7f << 1)) == NVME_CQE_SC_SUCCESS(0x00 << 1)) ? |
709 | XS_NOERROR0 : XS_DRIVER_STUFFUP2; |
710 | xs->status = SCSI_OK0x00; |
711 | xs->resid = 0; |
712 | scsi_done(xs); |
713 | } |
714 | |
715 | void |
716 | nvme_scsi_sync(struct scsi_xfer *xs) |
717 | { |
718 | struct scsi_link *link = xs->sc_link; |
719 | struct nvme_softc *sc = link->bus->sb_adapter_softc; |
720 | struct nvme_ccb *ccb = xs->io; |
721 | |
722 | ccb->ccb_done = nvme_scsi_sync_done; |
723 | ccb->ccb_cookie = xs; |
724 | |
725 | if (ISSET(xs->flags, SCSI_POLL)((xs->flags) & (0x00002))) { |
726 | nvme_poll(sc, sc->sc_q, ccb, nvme_scsi_sync_fill); |
727 | return; |
728 | } |
729 | |
730 | nvme_q_submit(sc, sc->sc_q, ccb, nvme_scsi_sync_fill); |
731 | } |
732 | |
/*
 * Fill a submission queue entry with an NVMe FLUSH command for the
 * namespace matching the scsi target.
 */
void
nvme_scsi_sync_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe *sqe = slot;
	struct scsi_xfer *xs = ccb->ccb_cookie;
	struct scsi_link *link = xs->sc_link;

	sqe->opcode = NVM_CMD_FLUSH;
	/* scsi target id doubles as the NVMe namespace id here */
	htolem32(&sqe->nsid, link->target);
}
743 | |
744 | void |
745 | nvme_scsi_sync_done(struct nvme_softc *sc, struct nvme_ccb *ccb, |
746 | struct nvme_cqe *cqe) |
747 | { |
748 | struct scsi_xfer *xs = ccb->ccb_cookie; |
749 | u_int16_t flags; |
750 | |
751 | flags = lemtoh16(&cqe->flags)((__uint16_t)(*(__uint16_t *)(&cqe->flags))); |
752 | |
753 | xs->error = (NVME_CQE_SC(flags)((flags) & (0x7f << 1)) == NVME_CQE_SC_SUCCESS(0x00 << 1)) ? |
754 | XS_NOERROR0 : XS_DRIVER_STUFFUP2; |
755 | xs->status = SCSI_OK0x00; |
756 | xs->resid = 0; |
757 | scsi_done(xs); |
758 | } |
759 | |
/*
 * Handle a SCSI INQUIRY. Standard inquiries are answered via
 * nvme_scsi_inquiry(); no EVPD pages are implemented, so any vital
 * product data request fails with XS_DRIVER_STUFFUP.
 */
void
nvme_scsi_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry *inq = (struct scsi_inquiry *)&xs->cmd;

	if (!ISSET(inq->flags, SI_EVPD)) {
		nvme_scsi_inquiry(xs);
		return;
	}

	/* placeholder: no EVPD pages are supported yet */
	switch (inq->pagecode) {
	default:
		/* printf("%s: %d\n", __func__, inq->pagecode); */
		break;
	}

	xs->error = XS_DRIVER_STUFFUP;
	scsi_done(xs);
}
779 | |
780 | void |
781 | nvme_scsi_inquiry(struct scsi_xfer *xs) |
782 | { |
783 | struct scsi_inquiry_data inq; |
784 | struct scsi_link *link = xs->sc_link; |
785 | struct nvme_softc *sc = link->bus->sb_adapter_softc; |
786 | struct nvm_identify_namespace *ns; |
787 | |
788 | ns = sc->sc_namespaces[link->target].ident; |
Value stored to 'ns' is never read | |
789 | |
790 | memset(&inq, 0, sizeof(inq))__builtin_memset((&inq), (0), (sizeof(inq))); |
791 | |
792 | inq.device = T_DIRECT0x00; |
793 | inq.version = SCSI_REV_SPC40x06; |
794 | inq.response_format = SID_SCSI2_RESPONSE0x02; |
795 | inq.additional_length = SID_SCSI2_ALEN31; |
796 | inq.flags |= SID_CmdQue0x02; |
797 | memcpy(inq.vendor, "NVMe ", sizeof(inq.vendor))__builtin_memcpy((inq.vendor), ("NVMe "), (sizeof(inq.vendor ))); |
798 | memcpy(inq.product, sc->sc_identify.mn, sizeof(inq.product))__builtin_memcpy((inq.product), (sc->sc_identify.mn), (sizeof (inq.product))); |
799 | memcpy(inq.revision, sc->sc_identify.fr, sizeof(inq.revision))__builtin_memcpy((inq.revision), (sc->sc_identify.fr), (sizeof (inq.revision))); |
800 | |
801 | scsi_copy_internal_data(xs, &inq, sizeof(inq)); |
802 | |
803 | xs->error = XS_NOERROR0; |
804 | scsi_done(xs); |
805 | } |
806 | |
/*
 * Handle SCSI READ CAPACITY(16) using the namespace identify data.
 * Reports thin provisioning (READ_CAP_16_TPE) unconditionally.
 */
void
nvme_scsi_capacity16(struct scsi_xfer *xs)
{
	struct scsi_read_cap_data_16 rcd;
	struct scsi_link *link = xs->sc_link;
	struct nvme_softc *sc = link->bus->sb_adapter_softc;
	struct nvm_identify_namespace *ns;
	struct nvm_namespace_format *f;
	u_int64_t nsze;
	u_int16_t tpe = READ_CAP_16_TPE;

	ns = sc->sc_namespaces[link->target].ident;

	if (xs->cmdlen != sizeof(struct scsi_read_capacity_16)) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	/* sd_read_cap_16() will add one */
	nsze = lemtoh64(&ns->nsze) - 1;
	/* the currently formatted LBA format selects the sector size */
	f = &ns->lbaf[NVME_ID_NS_FLBAS(ns->flbas)];

	memset(&rcd, 0, sizeof(rcd));
	_lto8b(nsze, rcd.addr);
	_lto4b(1 << f->lbads, rcd.length);	/* lbads is log2(sector size) */
	_lto2b(tpe, rcd.lowest_aligned);

	memcpy(xs->data, &rcd, MIN(sizeof(rcd), xs->datalen));

	xs->error = XS_NOERROR;
	scsi_done(xs);
}
840 | |
/*
 * Handle SCSI READ CAPACITY(10). The LBA count is clamped to 32 bits;
 * larger devices must be probed with READ CAPACITY(16).
 */
void
nvme_scsi_capacity(struct scsi_xfer *xs)
{
	struct scsi_read_cap_data rcd;
	struct scsi_link *link = xs->sc_link;
	struct nvme_softc *sc = link->bus->sb_adapter_softc;
	struct nvm_identify_namespace *ns;
	struct nvm_namespace_format *f;
	u_int64_t nsze;

	ns = sc->sc_namespaces[link->target].ident;

	if (xs->cmdlen != sizeof(struct scsi_read_capacity)) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	/* sd_read_cap_10() will add one */
	nsze = lemtoh64(&ns->nsze) - 1;
	if (nsze > 0xffffffff)
		nsze = 0xffffffff;	/* 10-byte CDB limit */

	f = &ns->lbaf[NVME_ID_NS_FLBAS(ns->flbas)];

	memset(&rcd, 0, sizeof(rcd));
	_lto4b(nsze, rcd.addr);
	_lto4b(1 << f->lbads, rcd.length);

	memcpy(xs->data, &rcd, MIN(sizeof(rcd), xs->datalen));

	xs->error = XS_NOERROR;
	scsi_done(xs);
}
875 | |
876 | void |
877 | nvme_scsi_free(struct scsi_link *link) |
878 | { |
879 | struct nvme_softc *sc = link->bus->sb_adapter_softc; |
880 | struct nvm_identify_namespace *identify; |
881 | |
882 | identify = sc->sc_namespaces[link->target].ident; |
883 | sc->sc_namespaces[link->target].ident = NULL((void *)0); |
884 | |
885 | free(identify, M_DEVBUF2, sizeof(*identify)); |
886 | } |
887 | |
/*
 * Default op_sq_enter: take the submission queue mutex, then return
 * the slot index via the locked variant.
 */
uint32_t
nvme_op_sq_enter(struct nvme_softc *sc,
    struct nvme_queue *q, struct nvme_ccb *ccb)
{
	mtx_enter(&q->q_sq_mtx);
	return (nvme_op_sq_enter_locked(sc, q, ccb));
}
895 | |
/*
 * Default op_sq_enter_locked: hand out the current SQ tail slot.
 * Caller must hold q_sq_mtx (or otherwise be single-threaded).
 */
uint32_t
nvme_op_sq_enter_locked(struct nvme_softc *sc,
    struct nvme_queue *q, struct nvme_ccb *ccb)
{
	return (q->q_sq_tail);
}
902 | |
903 | void |
904 | nvme_op_sq_leave_locked(struct nvme_softc *sc, |
905 | struct nvme_queue *q, struct nvme_ccb *ccb) |
906 | { |
907 | uint32_t tail; |
908 | |
909 | tail = ++q->q_sq_tail; |
910 | if (tail >= q->q_entries) |
911 | tail = 0; |
912 | q->q_sq_tail = tail; |
913 | nvme_write4(sc, q->q_sqtdbl, tail)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((q->q_sqtdbl )), ((tail)))); |
914 | } |
915 | |
/*
 * Default op_sq_leave: ring the doorbell and drop the SQ mutex taken
 * by nvme_op_sq_enter().
 */
void
nvme_op_sq_leave(struct nvme_softc *sc,
    struct nvme_queue *q, struct nvme_ccb *ccb)
{
	nvme_op_sq_leave_locked(sc, q, ccb);
	mtx_leave(&q->q_sq_mtx);
}
923 | |
/*
 * Place one command on a submission queue. The fill callback writes
 * the command into the zeroed slot; the surrounding syncs make the
 * entry visible to the device before the doorbell is rung by
 * op_sq_leave.
 */
void
nvme_q_submit(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
    void (*fill)(struct nvme_softc *, struct nvme_ccb *, void *))
{
	struct nvme_sqe *sqe = NVME_DMA_KVA(q->q_sq_dmamem);
	u_int32_t tail;

	/* reserves a slot and (in the default ops) takes q_sq_mtx */
	tail = sc->sc_ops->op_sq_enter(sc, q, ccb);

	sqe += tail;

	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_POSTWRITE);
	memset(sqe, 0, sizeof(*sqe));
	(*fill)(sc, ccb, sqe);
	/* cid links the eventual completion entry back to this ccb */
	sqe->cid = ccb->ccb_id;
	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_PREWRITE);

	sc->sc_ops->op_sq_leave(sc, q, ccb);
}
945 | |
/*
 * Scratch state for nvme_poll(): the prebuilt submission entry and
 * space for the completion entry copied back by nvme_poll_done().
 */
struct nvme_poll_state {
	struct nvme_sqe s;
	struct nvme_cqe c;
};
950 | |
/*
 * Submit a command and busy-wait for its completion. The ccb's done
 * handler and cookie are temporarily hijacked so nvme_poll_done() can
 * capture the completion entry into local state; they are restored
 * before the caller's done handler runs.
 *
 * Returns the completion flags with the phase bit masked off, so 0
 * means success.
 */
int
nvme_poll(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
    void (*fill)(struct nvme_softc *, struct nvme_ccb *, void *))
{
	struct nvme_poll_state state;
	void (*done)(struct nvme_softc *, struct nvme_ccb *, struct nvme_cqe *);
	void *cookie;
	u_int16_t flags;

	memset(&state, 0, sizeof(state));
	(*fill)(sc, ccb, &state.s);

	/* stash the caller's handler/cookie while we intercept completion */
	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = nvme_poll_done;
	ccb->ccb_cookie = &state;

	nvme_q_submit(sc, q, ccb, nvme_poll_fill);
	/* phase bit in state.c is set by nvme_poll_done() on completion */
	while (!ISSET(state.c.flags, htole16(NVME_CQE_PHASE))) {
		if (nvme_q_complete(sc, q) == 0)
			delay(10);

		/* XXX no timeout? */
	}

	ccb->ccb_cookie = cookie;
	done(sc, ccb, &state.c);

	flags = lemtoh16(&state.c.flags);

	return (flags & ~NVME_CQE_PHASE);
}
984 | |
/*
 * Fill callback used by nvme_poll(): copy the prebuilt submission
 * entry from the poll state into the queue slot.
 */
void
nvme_poll_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe *sqe = slot;
	struct nvme_poll_state *state = ccb->ccb_cookie;

	*sqe = state->s;
}
993 | |
/*
 * Completion callback used by nvme_poll(): copy the completion entry
 * into the poll state and force the phase bit on, which is the signal
 * nvme_poll()'s busy-wait loop looks for.
 */
void
nvme_poll_done(struct nvme_softc *sc, struct nvme_ccb *ccb,
    struct nvme_cqe *cqe)
{
	struct nvme_poll_state *state = ccb->ccb_cookie;

	state->c = *cqe;
	SET(state->c.flags, htole16(NVME_CQE_PHASE));
}
1003 | |
/*
 * Generic fill callback: copy a caller-provided submission entry
 * (passed via ccb_cookie) into the queue slot.
 */
void
nvme_sqe_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe *src = ccb->ccb_cookie;
	struct nvme_sqe *dst = slot;

	*dst = *src;
}
1012 | |
/*
 * No-op completion handler, for commands whose result is inspected
 * elsewhere (e.g. via nvme_poll()'s return value).
 */
void
nvme_empty_done(struct nvme_softc *sc, struct nvme_ccb *ccb,
    struct nvme_cqe *cqe)
{
}
1018 | |
/*
 * Default op_cq_done hook: nothing to do. Controller-specific ops may
 * override this in sc_ops.
 */
void
nvme_op_cq_done(struct nvme_softc *sc,
    struct nvme_queue *q, struct nvme_ccb *ccb)
{
	/* nop */
}
1025 | |
/*
 * Drain a completion queue. Entries belong to the host while their
 * phase bit matches q_cq_phase; the phase flips each time the ring
 * wraps. Returns 1 if any entries were processed, 0 if none, and -1
 * if the CQ mutex was contended (caller may retry).
 */
int
nvme_q_complete(struct nvme_softc *sc, struct nvme_queue *q)
{
	struct nvme_ccb *ccb;
	struct nvme_cqe *ring = NVME_DMA_KVA(q->q_cq_dmamem), *cqe;
	u_int32_t head;
	u_int16_t flags;
	int rv = 0;

	if (!mtx_enter_try(&q->q_cq_mtx))
		return (-1);

	head = q->q_cq_head;

	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
	for (;;) {
		cqe = &ring[head];
		flags = lemtoh16(&cqe->flags);
		if ((flags & NVME_CQE_PHASE) != q->q_cq_phase)
			break;	/* entry not yet written by the device */

		/* read the rest of the entry only after seeing its phase */
		membar_consumer();

		ccb = &sc->sc_ccbs[cqe->cid];
		sc->sc_ops->op_cq_done(sc, q, ccb);
		ccb->ccb_done(sc, ccb, cqe);

		if (++head >= q->q_entries) {
			head = 0;
			q->q_cq_phase ^= NVME_CQE_PHASE;
		}

		rv = 1;
	}
	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);

	/* tell the device how far we got via the CQ head doorbell */
	if (rv)
		nvme_write4(sc, q->q_cqhdbl, q->q_cq_head = head);
	mtx_leave(&q->q_cq_mtx);

	return (rv);
}
1068 | |
1069 | int |
1070 | nvme_identify(struct nvme_softc *sc, u_int mpsmin) |
1071 | { |
1072 | char sn[41], mn[81], fr[17]; |
1073 | struct nvm_identify_controller *identify; |
1074 | struct nvme_dmamem *mem; |
1075 | struct nvme_ccb *ccb; |
1076 | int rv = 1; |
1077 | |
1078 | ccb = nvme_ccb_get(sc); |
1079 | if (ccb == NULL((void *)0)) |
1080 | panic("nvme_identify: nvme_ccb_get returned NULL"); |
1081 | |
1082 | mem = nvme_dmamem_alloc(sc, sizeof(*identify)); |
1083 | if (mem == NULL((void *)0)) |
1084 | return (1); |
1085 | |
1086 | ccb->ccb_done = nvme_empty_done; |
1087 | ccb->ccb_cookie = mem; |
1088 | |
1089 | nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD0x01); |
1090 | rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_fill_identify); |
1091 | nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD0x02); |
1092 | |
1093 | nvme_ccb_put(sc, ccb); |
1094 | |
1095 | if (rv != 0) |
1096 | goto done; |
1097 | |
1098 | identify = NVME_DMA_KVA(mem)((void *)(mem)->ndm_kva); |
1099 | |
1100 | scsi_strvis(sn, identify->sn, sizeof(identify->sn)); |
1101 | scsi_strvis(mn, identify->mn, sizeof(identify->mn)); |
1102 | scsi_strvis(fr, identify->fr, sizeof(identify->fr)); |
1103 | |
1104 | printf("%s: %s, firmware %s, serial %s\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), mn, fr, sn); |
1105 | |
1106 | if (identify->mdts > 0) { |
1107 | sc->sc_mdts = (1 << identify->mdts) * (1 << mpsmin); |
1108 | if (sc->sc_mdts > NVME_MAXPHYS(128 * 1024)) |
1109 | sc->sc_mdts = NVME_MAXPHYS(128 * 1024); |
1110 | sc->sc_max_prpl = sc->sc_mdts / sc->sc_mps; |
1111 | } |
1112 | |
1113 | sc->sc_nn = lemtoh32(&identify->nn)((__uint32_t)(*(__uint32_t *)(&identify->nn))); |
1114 | |
1115 | /* |
1116 | * At least one Apple NVMe device presents a second, bogus disk that is |
1117 | * inaccessible, so cap targets at 1. |
1118 | * |
1119 | * sd1 at scsibus1 targ 2 lun 0: <NVMe, APPLE SSD AP0512, 16.1> [..] |
1120 | * sd1: 0MB, 4096 bytes/sector, 2 sectors |
1121 | */ |
1122 | if (sc->sc_nn > 1 && |
1123 | mn[0] == 'A' && mn[1] == 'P' && mn[2] == 'P' && mn[3] == 'L' && |
1124 | mn[4] == 'E') |
1125 | sc->sc_nn = 1; |
1126 | |
1127 | memcpy(&sc->sc_identify, identify, sizeof(sc->sc_identify))__builtin_memcpy((&sc->sc_identify), (identify), (sizeof (sc->sc_identify))); |
1128 | |
1129 | done: |
1130 | nvme_dmamem_free(sc, mem); |
1131 | |
1132 | return (rv); |
1133 | } |
1134 | |
/*
 * Create an I/O queue pair on the controller: the completion queue
 * first (the submission queue references it by qid), then the
 * submission queue. Returns the nvme_poll() status, 0 on success.
 */
int
nvme_q_create(struct nvme_softc *sc, struct nvme_queue *q)
{
	struct nvme_sqe_q sqe;
	struct nvme_ccb *ccb;
	int rv;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	KASSERT(ccb != NULL);

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_ADD_IOCQ;
	htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_cq_dmamem));
	htolem16(&sqe.qsize, q->q_entries - 1);	/* 0's based value */
	htolem16(&sqe.qid, q->q_id);
	sqe.qflags = NVM_SQE_CQ_IEN | NVM_SQE_Q_PC;

	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
	if (rv != 0)
		goto fail;

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_ADD_IOSQ;
	htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_sq_dmamem));
	htolem16(&sqe.qsize, q->q_entries - 1);
	htolem16(&sqe.qid, q->q_id);
	htolem16(&sqe.cqid, q->q_id);	/* completions go to our own CQ */
	sqe.qflags = NVM_SQE_Q_PC;

	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
	if (rv != 0)
		goto fail;

fail:
	scsi_io_put(&sc->sc_iopool, ccb);
	return (rv);
}
1178 | |
/*
 * Delete an I/O queue pair on the controller — submission queue first,
 * then its completion queue (the reverse of creation order) — and free
 * the host-side resources on success.
 */
int
nvme_q_delete(struct nvme_softc *sc, struct nvme_queue *q)
{
	struct nvme_sqe_q sqe;
	struct nvme_ccb *ccb;
	int rv;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	KASSERT(ccb != NULL);

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_DEL_IOSQ;
	htolem16(&sqe.qid, q->q_id);

	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
	if (rv != 0)
		goto fail;

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_DEL_IOCQ;
	htolem16(&sqe.qid, q->q_id);

	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill);
	if (rv != 0)
		goto fail;

	/* only tear down host memory once the controller let go of it */
	nvme_q_free(sc, q);

fail:
	scsi_io_put(&sc->sc_iopool, ccb);
	return (rv);

}
1218 | |
/*
 * Fill callback for IDENTIFY: cdw10=1 selects the IDENTIFY CONTROLLER
 * data structure, written to the DMA buffer passed via ccb_cookie.
 */
void
nvme_fill_identify(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
{
	struct nvme_sqe *sqe = slot;
	struct nvme_dmamem *mem = ccb->ccb_cookie;

	sqe->opcode = NVM_ADMIN_IDENTIFY;
	htolem64(&sqe->entry.prp[0], NVME_DMA_DVA(mem));
	htolem32(&sqe->cdw10, 1);
}
1229 | |
1230 | int |
1231 | nvme_ccbs_alloc(struct nvme_softc *sc, u_int nccbs) |
1232 | { |
1233 | struct nvme_ccb *ccb; |
1234 | bus_addr_t off; |
1235 | u_int64_t *prpl; |
1236 | u_int i; |
1237 | |
1238 | sc->sc_ccbs = mallocarray(nccbs, sizeof(*ccb), M_DEVBUF2, |
1239 | M_WAITOK0x0001 | M_CANFAIL0x0004); |
1240 | if (sc->sc_ccbs == NULL((void *)0)) |
1241 | return (1); |
1242 | |
1243 | sc->sc_ccb_prpls = nvme_dmamem_alloc(sc, |
1244 | sizeof(*prpl) * sc->sc_max_prpl * nccbs); |
1245 | |
1246 | prpl = NVME_DMA_KVA(sc->sc_ccb_prpls)((void *)(sc->sc_ccb_prpls)->ndm_kva); |
1247 | off = 0; |
1248 | |
1249 | for (i = 0; i < nccbs; i++) { |
1250 | ccb = &sc->sc_ccbs[i]; |
1251 | |
1252 | if (bus_dmamap_create(sc->sc_dmat, sc->sc_mdts,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (sc-> sc_mdts), (sc->sc_max_prpl + 1), (sc->sc_mps), (sc-> sc_mps), (0x0000 | 0x0002), (&ccb->ccb_dmamap)) |
1253 | sc->sc_max_prpl + 1, /* we get a free prp in the sqe */(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (sc-> sc_mdts), (sc->sc_max_prpl + 1), (sc->sc_mps), (sc-> sc_mps), (0x0000 | 0x0002), (&ccb->ccb_dmamap)) |
1254 | sc->sc_mps, sc->sc_mps, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (sc-> sc_mdts), (sc->sc_max_prpl + 1), (sc->sc_mps), (sc-> sc_mps), (0x0000 | 0x0002), (&ccb->ccb_dmamap)) |
1255 | &ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (sc-> sc_mdts), (sc->sc_max_prpl + 1), (sc->sc_mps), (sc-> sc_mps), (0x0000 | 0x0002), (&ccb->ccb_dmamap)) != 0) |
1256 | goto free_maps; |
1257 | |
1258 | ccb->ccb_id = i; |
1259 | ccb->ccb_prpl = prpl; |
1260 | ccb->ccb_prpl_off = off; |
1261 | ccb->ccb_prpl_dva = NVME_DMA_DVA(sc->sc_ccb_prpls)((u_int64_t)(sc->sc_ccb_prpls)->ndm_map->dm_segs[0]. ds_addr) + off; |
1262 | |
1263 | SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_list, ccb, ccb_entry)do { (ccb)->ccb_entry.sqe_next = ((void *)0); *(&sc-> sc_ccb_list)->sqh_last = (ccb); (&sc->sc_ccb_list)-> sqh_last = &(ccb)->ccb_entry.sqe_next; } while (0); |
1264 | |
1265 | prpl += sc->sc_max_prpl; |
1266 | off += sizeof(*prpl) * sc->sc_max_prpl; |
1267 | } |
1268 | |
1269 | return (0); |
1270 | |
1271 | free_maps: |
1272 | nvme_ccbs_free(sc, nccbs); |
1273 | return (1); |
1274 | } |
1275 | |
/*
 * iopool get hook: pop a free ccb off the list, or NULL if none are
 * available.
 */
void *
nvme_ccb_get(void *cookie)
{
	struct nvme_softc *sc = cookie;
	struct nvme_ccb *ccb;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_list);
	if (ccb != NULL)
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_list, ccb_entry);
	mtx_leave(&sc->sc_ccb_mtx);

	return (ccb);
}
1290 | |
/*
 * iopool put hook: return a ccb to the head of the free list.
 */
void
nvme_ccb_put(void *cookie, void *io)
{
	struct nvme_softc *sc = cookie;
	struct nvme_ccb *ccb = io;

	mtx_enter(&sc->sc_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_list, ccb, ccb_entry);
	mtx_leave(&sc->sc_ccb_mtx);
}
1301 | |
/*
 * Undo nvme_ccbs_alloc(): destroy each ccb's DMA map, then release
 * the shared PRP region and the ccb array. Assumes all ccbs are back
 * on the free list.
 */
void
nvme_ccbs_free(struct nvme_softc *sc, unsigned int nccbs)
{
	struct nvme_ccb *ccb;

	while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_list, ccb_entry);
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	}

	nvme_dmamem_free(sc, sc->sc_ccb_prpls);
	free(sc->sc_ccbs, M_DEVBUF, nccbs * sizeof(*ccb));
}
1315 | |
/*
 * Allocate host-side state for a queue pair: the queue structure, DMA
 * memory for the SQ and CQ rings, mutexes, and the doorbell offsets
 * derived from the queue id and doorbell stride. Returns NULL on
 * failure with everything unwound.
 */
struct nvme_queue *
nvme_q_alloc(struct nvme_softc *sc, u_int16_t id, u_int entries, u_int dstrd)
{
	struct nvme_queue *q;

	q = malloc(sizeof(*q), M_DEVBUF, M_WAITOK | M_CANFAIL);
	if (q == NULL)
		return (NULL);

	q->q_sq_dmamem = nvme_dmamem_alloc(sc,
	    sizeof(struct nvme_sqe) * entries);
	if (q->q_sq_dmamem == NULL)
		goto free;

	q->q_cq_dmamem = nvme_dmamem_alloc(sc,
	    sizeof(struct nvme_cqe) * entries);
	if (q->q_cq_dmamem == NULL)
		goto free_sq;

	memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
	memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));

	mtx_init(&q->q_sq_mtx, IPL_BIO);
	mtx_init(&q->q_cq_mtx, IPL_BIO);
	q->q_sqtdbl = NVME_SQTDBL(id, dstrd);
	q->q_cqhdbl = NVME_CQHDBL(id, dstrd);

	q->q_id = id;
	q->q_entries = entries;
	q->q_sq_tail = 0;
	q->q_cq_head = 0;
	/* a fresh (zeroed) ring means host-owned entries have phase 1 */
	q->q_cq_phase = NVME_CQE_PHASE;

	/* optional controller-specific per-queue allocation hook */
	if (sc->sc_ops->op_q_alloc != NULL) {
		if (sc->sc_ops->op_q_alloc(sc, q) != 0)
			goto free_cq;
	}

	nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_PREWRITE);
	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);

	return (q);

free_cq:
	nvme_dmamem_free(sc, q->q_cq_dmamem);
free_sq:
	nvme_dmamem_free(sc, q->q_sq_dmamem);
free:
	free(q, M_DEVBUF, sizeof *q);

	return (NULL);
}
1368 | |
/*
 * Reset a queue pair's host-side state to its post-allocation values
 * (zeroed rings, head/tail at 0, phase 1), e.g. after a controller
 * reset. Always returns 0.
 */
int
nvme_q_reset(struct nvme_softc *sc, struct nvme_queue *q)
{
	memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
	memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));

	q->q_sq_tail = 0;
	q->q_cq_head = 0;
	q->q_cq_phase = NVME_CQE_PHASE;

	nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_PREWRITE);
	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);

	return (0);
}
1384 | |
/*
 * Free host-side state for a queue pair. The controller must no
 * longer reference the rings (see nvme_q_delete()).
 */
void
nvme_q_free(struct nvme_softc *sc, struct nvme_queue *q)
{
	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
	nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_POSTWRITE);

	/* presence of op_q_alloc implies a matching op_q_free */
	if (sc->sc_ops->op_q_alloc != NULL)
		sc->sc_ops->op_q_free(sc, q);

	nvme_dmamem_free(sc, q->q_cq_dmamem);
	nvme_dmamem_free(sc, q->q_sq_dmamem);
	free(q, M_DEVBUF, sizeof *q);
}
1398 | |
1399 | int |
1400 | nvme_intr(void *xsc) |
1401 | { |
1402 | struct nvme_softc *sc = xsc; |
1403 | int rv = 0; |
1404 | |
1405 | if (nvme_q_complete(sc, sc->sc_q)) |
1406 | rv = 1; |
1407 | if (nvme_q_complete(sc, sc->sc_admin_q)) |
1408 | rv = 1; |
1409 | |
1410 | return (rv); |
1411 | } |
1412 | |
/*
 * Interrupt handler for INTx (pin-based) interrupts: mask the
 * controller interrupt while processing completions, then unmask.
 */
int
nvme_intr_intx(void *xsc)
{
	struct nvme_softc *sc = xsc;
	int rv;

	nvme_write4(sc, NVME_INTMS, 1);		/* interrupt mask set */
	rv = nvme_intr(sc);
	nvme_write4(sc, NVME_INTMC, 1);		/* interrupt mask clear */

	return (rv);
}
1425 | |
/*
 * Allocate a single-segment, page-aligned, zeroed DMA buffer and wrap
 * it in an nvme_dmamem. Uses the standard goto-unwind pattern; each
 * label undoes one acquisition. Returns NULL on any failure.
 */
struct nvme_dmamem *
nvme_dmamem_alloc(struct nvme_softc *sc, size_t size)
{
	struct nvme_dmamem *ndm;
	int nsegs;

	ndm = malloc(sizeof(*ndm), M_DEVBUF, M_WAITOK | M_ZERO);
	if (ndm == NULL)
		return (NULL);

	ndm->ndm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ndm->ndm_map) != 0)
		goto ndmfree;

	/* sc_mps alignment keeps the buffer on a device page boundary */
	if (bus_dmamem_alloc(sc->sc_dmat, size, sc->sc_mps, 0, &ndm->ndm_seg,
	    1, &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &ndm->ndm_seg, nsegs, size,
	    &ndm->ndm_kva, BUS_DMA_WAITOK) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, ndm->ndm_map, ndm->ndm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (ndm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
ndmfree:
	free(ndm, M_DEVBUF, sizeof *ndm);

	return (NULL);
}
1467 | |
/*
 * Convenience wrapper: sync the whole mapped length of an nvme_dmamem
 * with the given BUS_DMASYNC_* ops.
 */
void
nvme_dmamem_sync(struct nvme_softc *sc, struct nvme_dmamem *mem, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(mem),
	    0, NVME_DMA_LEN(mem), ops);
}
1474 | |
/*
 * Release everything nvme_dmamem_alloc() acquired, in reverse order
 * of acquisition.
 */
void
nvme_dmamem_free(struct nvme_softc *sc, struct nvme_dmamem *ndm)
{
	bus_dmamap_unload(sc->sc_dmat, ndm->ndm_map);
	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, ndm->ndm_size);
	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
	free(ndm, M_DEVBUF, sizeof *ndm);
}
1484 | |
1485 | #ifdef HIBERNATE1 |
1486 | |
/*
 * Issue one admin command synchronously during hibernate, when the
 * normal ccb/interrupt machinery is unavailable. Uses the locked
 * op_sq_enter/leave variants directly (single-threaded context, so no
 * ccb and no mutex contention) and spins on the completion queue.
 * Returns 0 on success, EIO on command error or cid mismatch.
 */
int
nvme_hibernate_admin_cmd(struct nvme_softc *sc, struct nvme_sqe *sqe,
    struct nvme_cqe *cqe, int cid)
{
	struct nvme_sqe *asqe = NVME_DMA_KVA(sc->sc_admin_q->q_sq_dmamem);
	struct nvme_cqe *acqe = NVME_DMA_KVA(sc->sc_admin_q->q_cq_dmamem);
	struct nvme_queue *q = sc->sc_admin_q;
	int tail;
	u_int16_t flags;

	/* submit command */
	tail = sc->sc_ops->op_sq_enter_locked(sc, q, /* XXX ccb */ NULL);

	asqe += tail;
	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_POSTWRITE);
	*asqe = *sqe;
	asqe->cid = cid;
	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_PREWRITE);

	/* rings the SQ tail doorbell */
	sc->sc_ops->op_sq_leave_locked(sc, q, /* XXX ccb */ NULL);

	/* wait for completion */
	acqe += q->q_cq_head;
	for (;;) {
		nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
		flags = lemtoh16(&acqe->flags);
		if ((flags & NVME_CQE_PHASE) == q->q_cq_phase)
			break;	/* device has written the entry */

		delay(10);
	}

	/* advance the CQ head, flipping phase on wrap */
	if (++q->q_cq_head >= q->q_entries) {
		q->q_cq_head = 0;
		q->q_cq_phase ^= NVME_CQE_PHASE;
	}
	nvme_write4(sc, q->q_cqhdbl, q->q_cq_head);
	if ((NVME_CQE_SC(flags) != NVME_CQE_SC_SUCCESS) || (acqe->cid != cid))
		return (EIO);

	return (0);
}
1531 | |
1532 | int |
1533 | nvme_hibernate_io(dev_t dev, daddr_t blkno, vaddr_t addr, size_t size, |
1534 | int op, void *page) |
1535 | { |
1536 | struct nvme_hibernate_page { |
1537 | u_int64_t prpl[MAXPHYS(64 * 1024) / PAGE_SIZE(1 << 12)]; |
1538 | |
1539 | struct nvme_softc *sc; |
1540 | int nsid; |
1541 | int sq_tail; |
1542 | int cq_head; |
1543 | int cqe_phase; |
1544 | |
1545 | daddr_t poffset; |
1546 | size_t psize; |
1547 | } *my = page; |
1548 | struct nvme_sqe_io *isqe; |
1549 | struct nvme_cqe *icqe; |
1550 | paddr_t data_phys, page_phys; |
1551 | u_int64_t data_bus_phys, page_bus_phys; |
1552 | u_int16_t flags; |
1553 | int i; |
1554 | int error; |
1555 | |
1556 | if (op == HIB_INIT-1) { |
1557 | struct device *disk; |
1558 | struct device *scsibus; |
1559 | extern struct cfdriver sd_cd; |
1560 | struct scsi_link *link; |
1561 | struct scsibus_softc *bus_sc; |
1562 | struct nvme_sqe_q qsqe; |
1563 | struct nvme_cqe qcqe; |
1564 | |
1565 | /* find nvme softc */ |
1566 | disk = disk_lookup(&sd_cd, DISKUNIT(dev)(((unsigned)((dev) & 0xff) | (((dev) & 0xffff0000) >> 8)) / 16)); |
1567 | scsibus = disk->dv_parent; |
1568 | my->sc = (struct nvme_softc *)disk->dv_parent->dv_parent; |
1569 | |
1570 | /* find scsi_link, which tells us the target */ |
1571 | my->nsid = 0; |
1572 | bus_sc = (struct scsibus_softc *)scsibus; |
1573 | SLIST_FOREACH(link, &bus_sc->sc_link_list, bus_list)for((link) = ((&bus_sc->sc_link_list)->slh_first); ( link) != ((void *)0); (link) = ((link)->bus_list.sle_next) ) { |
1574 | if (link->device_softc == disk) { |
1575 | my->nsid = link->target; |
1576 | break; |
1577 | } |
1578 | } |
1579 | if (my->nsid == 0) |
1580 | return (EIO5); |
1581 | |
1582 | my->poffset = blkno; |
1583 | my->psize = size; |
1584 | |
1585 | memset(NVME_DMA_KVA(my->sc->sc_hib_q->q_cq_dmamem), 0,__builtin_memset((((void *)(my->sc->sc_hib_q->q_cq_dmamem )->ndm_kva)), (0), (my->sc->sc_hib_q->q_entries * sizeof(struct nvme_cqe))) |
1586 | my->sc->sc_hib_q->q_entries * sizeof(struct nvme_cqe))__builtin_memset((((void *)(my->sc->sc_hib_q->q_cq_dmamem )->ndm_kva)), (0), (my->sc->sc_hib_q->q_entries * sizeof(struct nvme_cqe))); |
1587 | memset(NVME_DMA_KVA(my->sc->sc_hib_q->q_sq_dmamem), 0,__builtin_memset((((void *)(my->sc->sc_hib_q->q_sq_dmamem )->ndm_kva)), (0), (my->sc->sc_hib_q->q_entries * sizeof(struct nvme_sqe))) |
1588 | my->sc->sc_hib_q->q_entries * sizeof(struct nvme_sqe))__builtin_memset((((void *)(my->sc->sc_hib_q->q_sq_dmamem )->ndm_kva)), (0), (my->sc->sc_hib_q->q_entries * sizeof(struct nvme_sqe))); |
1589 | |
1590 | my->sq_tail = 0; |
1591 | my->cq_head = 0; |
1592 | my->cqe_phase = NVME_CQE_PHASE(1 << 0); |
1593 | |
1594 | pmap_extract(pmap_kernel()(&kernel_pmap_store), (vaddr_t)page, &page_phys); |
1595 | |
1596 | memset(&qsqe, 0, sizeof(qsqe))__builtin_memset((&qsqe), (0), (sizeof(qsqe))); |
1597 | qsqe.opcode = NVM_ADMIN_ADD_IOCQ0x05; |
1598 | htolem64(&qsqe.prp1,(*(__uint64_t *)(&qsqe.prp1) = ((__uint64_t)(((u_int64_t) (my->sc->sc_hib_q->q_cq_dmamem)->ndm_map->dm_segs [0].ds_addr)))) |
1599 | NVME_DMA_DVA(my->sc->sc_hib_q->q_cq_dmamem))(*(__uint64_t *)(&qsqe.prp1) = ((__uint64_t)(((u_int64_t) (my->sc->sc_hib_q->q_cq_dmamem)->ndm_map->dm_segs [0].ds_addr)))); |
1600 | htolem16(&qsqe.qsize, my->sc->sc_hib_q->q_entries - 1)(*(__uint16_t *)(&qsqe.qsize) = ((__uint16_t)(my->sc-> sc_hib_q->q_entries - 1))); |
1601 | htolem16(&qsqe.qid, my->sc->sc_hib_q->q_id)(*(__uint16_t *)(&qsqe.qid) = ((__uint16_t)(my->sc-> sc_hib_q->q_id))); |
1602 | qsqe.qflags = NVM_SQE_CQ_IEN(1 << 1) | NVM_SQE_Q_PC(1 << 0); |
1603 | if (nvme_hibernate_admin_cmd(my->sc, (struct nvme_sqe *)&qsqe, |
1604 | &qcqe, 1) != 0) |
1605 | return (EIO5); |
1606 | |
1607 | memset(&qsqe, 0, sizeof(qsqe))__builtin_memset((&qsqe), (0), (sizeof(qsqe))); |
1608 | qsqe.opcode = NVM_ADMIN_ADD_IOSQ0x01; |
1609 | htolem64(&qsqe.prp1,(*(__uint64_t *)(&qsqe.prp1) = ((__uint64_t)(((u_int64_t) (my->sc->sc_hib_q->q_sq_dmamem)->ndm_map->dm_segs [0].ds_addr)))) |
1610 | NVME_DMA_DVA(my->sc->sc_hib_q->q_sq_dmamem))(*(__uint64_t *)(&qsqe.prp1) = ((__uint64_t)(((u_int64_t) (my->sc->sc_hib_q->q_sq_dmamem)->ndm_map->dm_segs [0].ds_addr)))); |
1611 | htolem16(&qsqe.qsize, my->sc->sc_hib_q->q_entries - 1)(*(__uint16_t *)(&qsqe.qsize) = ((__uint16_t)(my->sc-> sc_hib_q->q_entries - 1))); |
1612 | htolem16(&qsqe.qid, my->sc->sc_hib_q->q_id)(*(__uint16_t *)(&qsqe.qid) = ((__uint16_t)(my->sc-> sc_hib_q->q_id))); |
1613 | htolem16(&qsqe.cqid, my->sc->sc_hib_q->q_id)(*(__uint16_t *)(&qsqe.cqid) = ((__uint16_t)(my->sc-> sc_hib_q->q_id))); |
1614 | qsqe.qflags = NVM_SQE_Q_PC(1 << 0); |
1615 | if (nvme_hibernate_admin_cmd(my->sc, (struct nvme_sqe *)&qsqe, |
1616 | &qcqe, 2) != 0) |
1617 | return (EIO5); |
1618 | |
1619 | return (0); |
1620 | } |
1621 | |
1622 | if (op != HIB_W1) |
1623 | return (0); |
1624 | |
1625 | isqe = NVME_DMA_KVA(my->sc->sc_hib_q->q_sq_dmamem)((void *)(my->sc->sc_hib_q->q_sq_dmamem)->ndm_kva ); |
1626 | isqe += my->sq_tail; |
1627 | if (++my->sq_tail == my->sc->sc_hib_q->q_entries) |
1628 | my->sq_tail = 0; |
1629 | |
1630 | memset(isqe, 0, sizeof(*isqe))__builtin_memset((isqe), (0), (sizeof(*isqe))); |
1631 | isqe->opcode = NVM_CMD_WRITE0x01; |
1632 | htolem32(&isqe->nsid, my->nsid)(*(__uint32_t *)(&isqe->nsid) = ((__uint32_t)(my->nsid ))); |
1633 | |
1634 | pmap_extract(pmap_kernel()(&kernel_pmap_store), addr, &data_phys); |
1635 | data_bus_phys = data_phys; |
1636 | htolem64(&isqe->entry.prp[0], data_bus_phys)(*(__uint64_t *)(&isqe->entry.prp[0]) = ((__uint64_t)( data_bus_phys))); |
1637 | if ((size > my->sc->sc_mps) && (size <= my->sc->sc_mps * 2)) { |
1638 | htolem64(&isqe->entry.prp[1], data_bus_phys + my->sc->sc_mps)(*(__uint64_t *)(&isqe->entry.prp[1]) = ((__uint64_t)( data_bus_phys + my->sc->sc_mps))); |
1639 | } else if (size > my->sc->sc_mps * 2) { |
1640 | pmap_extract(pmap_kernel()(&kernel_pmap_store), (vaddr_t)page, &page_phys); |
1641 | page_bus_phys = page_phys; |
1642 | htolem64(&isqe->entry.prp[1], page_bus_phys +(*(__uint64_t *)(&isqe->entry.prp[1]) = ((__uint64_t)( page_bus_phys + __builtin_offsetof(struct nvme_hibernate_page , prpl)))) |
1643 | offsetof(struct nvme_hibernate_page, prpl))(*(__uint64_t *)(&isqe->entry.prp[1]) = ((__uint64_t)( page_bus_phys + __builtin_offsetof(struct nvme_hibernate_page , prpl)))); |
1644 | for (i = 1; i < (size / my->sc->sc_mps); i++) { |
1645 | htolem64(&my->prpl[i - 1], data_bus_phys +(*(__uint64_t *)(&my->prpl[i - 1]) = ((__uint64_t)(data_bus_phys + (i * my->sc->sc_mps)))) |
1646 | (i * my->sc->sc_mps))(*(__uint64_t *)(&my->prpl[i - 1]) = ((__uint64_t)(data_bus_phys + (i * my->sc->sc_mps)))); |
1647 | } |
1648 | } |
1649 | |
1650 | isqe->slba = blkno + my->poffset; |
1651 | isqe->nlb = (size / DEV_BSIZE(1 << 9)) - 1; |
1652 | isqe->cid = blkno % 0xffff; |
1653 | |
1654 | nvme_write4(my->sc, NVME_SQTDBL(NVME_HIB_Q, my->sc->sc_dstrd),(((my->sc)->sc_iot)->write_4(((my->sc)->sc_ioh ), (((0x1000 + (2 * (2) + 0) * (my->sc->sc_dstrd)))), ( (my->sq_tail)))) |
1655 | my->sq_tail)(((my->sc)->sc_iot)->write_4(((my->sc)->sc_ioh ), (((0x1000 + (2 * (2) + 0) * (my->sc->sc_dstrd)))), ( (my->sq_tail)))); |
1656 | nvme_barrier(my->sc, NVME_SQTDBL(NVME_HIB_Q, my->sc->sc_dstrd), 4,bus_space_barrier((my->sc)->sc_iot, (my->sc)->sc_ioh , ((0x1000 + (2 * (2) + 0) * (my->sc->sc_dstrd))), (4), (0x02)) |
1657 | BUS_SPACE_BARRIER_WRITE)bus_space_barrier((my->sc)->sc_iot, (my->sc)->sc_ioh , ((0x1000 + (2 * (2) + 0) * (my->sc->sc_dstrd))), (4), (0x02)); |
1658 | |
1659 | error = 0; |
1660 | |
1661 | icqe = NVME_DMA_KVA(my->sc->sc_hib_q->q_cq_dmamem)((void *)(my->sc->sc_hib_q->q_cq_dmamem)->ndm_kva ); |
1662 | icqe += my->cq_head; |
1663 | |
1664 | nvme_dmamem_sync(my->sc, my->sc->sc_hib_q->q_cq_dmamem, |
1665 | BUS_DMASYNC_POSTREAD0x02); |
1666 | for (;;) { |
1667 | flags = lemtoh16(&icqe->flags)((__uint16_t)(*(__uint16_t *)(&icqe->flags))); |
1668 | if ((flags & NVME_CQE_PHASE(1 << 0)) == my->cqe_phase) { |
1669 | if ((NVME_CQE_SC(flags)((flags) & (0x7f << 1)) != NVME_CQE_SC_SUCCESS(0x00 << 1)) || |
1670 | (icqe->cid != blkno % 0xffff)) |
1671 | error = EIO5; |
1672 | |
1673 | break; |
1674 | } |
1675 | |
1676 | delay(1)(*delay_func)(1); |
1677 | nvme_dmamem_sync(my->sc, my->sc->sc_hib_q->q_cq_dmamem, |
1678 | BUS_DMASYNC_PREREAD0x01|BUS_DMASYNC_POSTREAD0x02); |
1679 | } |
1680 | nvme_dmamem_sync(my->sc, my->sc->sc_hib_q->q_cq_dmamem, |
1681 | BUS_DMASYNC_PREREAD0x01); |
1682 | |
1683 | if (++my->cq_head == my->sc->sc_hib_q->q_entries) { |
1684 | my->cq_head = 0; |
1685 | my->cqe_phase ^= NVME_CQE_PHASE(1 << 0); |
1686 | } |
1687 | |
1688 | nvme_write4(my->sc, NVME_CQHDBL(NVME_HIB_Q, my->sc->sc_dstrd),(((my->sc)->sc_iot)->write_4(((my->sc)->sc_ioh ), (((0x1000 + (2 * (2) + 1) * (my->sc->sc_dstrd)))), ( (my->cq_head)))) |
1689 | my->cq_head)(((my->sc)->sc_iot)->write_4(((my->sc)->sc_ioh ), (((0x1000 + (2 * (2) + 1) * (my->sc->sc_dstrd)))), ( (my->cq_head)))); |
1690 | nvme_barrier(my->sc, NVME_CQHDBL(NVME_HIB_Q, my->sc->sc_dstrd), 4,bus_space_barrier((my->sc)->sc_iot, (my->sc)->sc_ioh , ((0x1000 + (2 * (2) + 1) * (my->sc->sc_dstrd))), (4), (0x02)) |
1691 | BUS_SPACE_BARRIER_WRITE)bus_space_barrier((my->sc)->sc_iot, (my->sc)->sc_ioh , ((0x1000 + (2 * (2) + 1) * (my->sc->sc_dstrd))), (4), (0x02)); |
1692 | |
1693 | return (error); |
1694 | } |
1695 | |
1696 | #endif |