File: | dev/ic/nvme.c |
Warning: | line 789, column 2: Value stored to 'ns' is never read |
1 | /* $OpenBSD: nvme.c,v 1.107 2023/12/20 13:37:25 krw Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 2014 David Gwynne <dlg@openbsd.org> |
5 | * |
6 | * Permission to use, copy, modify, and distribute this software for any |
7 | * purpose with or without fee is hereby granted, provided that the above |
8 | * copyright notice and this permission notice appear in all copies. |
9 | * |
10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
16 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
17 | */ |
18 | |
19 | #include <sys/param.h> |
20 | #include <sys/systm.h> |
21 | #include <sys/buf.h> |
22 | #include <sys/kernel.h> |
23 | #include <sys/malloc.h> |
24 | #include <sys/device.h> |
25 | #include <sys/queue.h> |
26 | #include <sys/mutex.h> |
27 | #include <sys/pool.h> |
28 | |
29 | #include <sys/atomic.h> |
30 | |
31 | #include <machine/bus.h> |
32 | |
33 | #include <scsi/scsi_all.h> |
34 | #include <scsi/scsi_disk.h> |
35 | #include <scsi/scsiconf.h> |
36 | |
37 | #include <dev/ic/nvmereg.h> |
38 | #include <dev/ic/nvmevar.h> |
39 | |
40 | struct cfdriver nvme_cd = { |
41 | NULL((void *)0), |
42 | "nvme", |
43 | DV_DULL |
44 | }; |
45 | |
46 | int nvme_ready(struct nvme_softc *, u_int32_t); |
47 | int nvme_enable(struct nvme_softc *); |
48 | int nvme_disable(struct nvme_softc *); |
49 | int nvme_shutdown(struct nvme_softc *); |
50 | int nvme_resume(struct nvme_softc *); |
51 | |
52 | void nvme_dumpregs(struct nvme_softc *); |
53 | int nvme_identify(struct nvme_softc *, u_int); |
54 | void nvme_fill_identify(struct nvme_softc *, struct nvme_ccb *, void *); |
55 | |
56 | int nvme_ccbs_alloc(struct nvme_softc *, u_int); |
57 | void nvme_ccbs_free(struct nvme_softc *, u_int); |
58 | |
59 | void * nvme_ccb_get(void *); |
60 | void nvme_ccb_put(void *, void *); |
61 | |
62 | int nvme_poll(struct nvme_softc *, struct nvme_queue *, struct nvme_ccb *, |
63 | void (*)(struct nvme_softc *, struct nvme_ccb *, void *)); |
64 | void nvme_poll_fill(struct nvme_softc *, struct nvme_ccb *, void *); |
65 | void nvme_poll_done(struct nvme_softc *, struct nvme_ccb *, |
66 | struct nvme_cqe *); |
67 | void nvme_sqe_fill(struct nvme_softc *, struct nvme_ccb *, void *); |
68 | void nvme_empty_done(struct nvme_softc *, struct nvme_ccb *, |
69 | struct nvme_cqe *); |
70 | |
71 | struct nvme_queue * |
72 | nvme_q_alloc(struct nvme_softc *, u_int16_t, u_int, u_int); |
73 | int nvme_q_create(struct nvme_softc *, struct nvme_queue *); |
74 | int nvme_q_reset(struct nvme_softc *, struct nvme_queue *); |
75 | int nvme_q_delete(struct nvme_softc *, struct nvme_queue *); |
76 | void nvme_q_submit(struct nvme_softc *, |
77 | struct nvme_queue *, struct nvme_ccb *, |
78 | void (*)(struct nvme_softc *, struct nvme_ccb *, void *)); |
79 | int nvme_q_complete(struct nvme_softc *, struct nvme_queue *); |
80 | void nvme_q_free(struct nvme_softc *, struct nvme_queue *); |
81 | |
82 | void nvme_scsi_cmd(struct scsi_xfer *); |
83 | void nvme_minphys(struct buf *, struct scsi_link *); |
84 | int nvme_scsi_probe(struct scsi_link *); |
85 | void nvme_scsi_free(struct scsi_link *); |
86 | uint64_t nvme_scsi_size(struct nvm_identify_namespace *); |
87 | |
88 | #ifdef HIBERNATE1 |
89 | #include <uvm/uvm_extern.h> |
90 | #include <sys/hibernate.h> |
91 | #include <sys/disk.h> |
92 | #include <sys/disklabel.h> |
93 | |
94 | int nvme_hibernate_io(dev_t, daddr_t, vaddr_t, size_t, int, void *); |
95 | #endif |
96 | |
97 | const struct scsi_adapter nvme_switch = { |
98 | nvme_scsi_cmd, nvme_minphys, nvme_scsi_probe, nvme_scsi_free, NULL((void *)0) |
99 | }; |
100 | |
101 | void nvme_scsi_io(struct scsi_xfer *, int); |
102 | void nvme_scsi_io_fill(struct nvme_softc *, struct nvme_ccb *, void *); |
103 | void nvme_scsi_io_done(struct nvme_softc *, struct nvme_ccb *, |
104 | struct nvme_cqe *); |
105 | |
106 | void nvme_scsi_sync(struct scsi_xfer *); |
107 | void nvme_scsi_sync_fill(struct nvme_softc *, struct nvme_ccb *, void *); |
108 | void nvme_scsi_sync_done(struct nvme_softc *, struct nvme_ccb *, |
109 | struct nvme_cqe *); |
110 | |
111 | void nvme_scsi_inq(struct scsi_xfer *); |
112 | void nvme_scsi_inquiry(struct scsi_xfer *); |
113 | void nvme_scsi_capacity16(struct scsi_xfer *); |
114 | void nvme_scsi_capacity(struct scsi_xfer *); |
115 | |
116 | uint32_t nvme_op_sq_enter(struct nvme_softc *, |
117 | struct nvme_queue *, struct nvme_ccb *); |
118 | void nvme_op_sq_leave(struct nvme_softc *, |
119 | struct nvme_queue *, struct nvme_ccb *); |
120 | uint32_t nvme_op_sq_enter_locked(struct nvme_softc *, |
121 | struct nvme_queue *, struct nvme_ccb *); |
122 | void nvme_op_sq_leave_locked(struct nvme_softc *, |
123 | struct nvme_queue *, struct nvme_ccb *); |
124 | |
125 | void nvme_op_cq_done(struct nvme_softc *, |
126 | struct nvme_queue *, struct nvme_ccb *); |
127 | |
128 | static const struct nvme_ops nvme_ops = { |
129 | .op_sq_enter = nvme_op_sq_enter, |
130 | .op_sq_leave = nvme_op_sq_leave, |
131 | .op_sq_enter_locked = nvme_op_sq_enter_locked, |
132 | .op_sq_leave_locked = nvme_op_sq_leave_locked, |
133 | |
134 | .op_cq_done = nvme_op_cq_done, |
135 | }; |
136 | |
137 | /* |
138 | * Some controllers, at least Apple NVMe, always require split |
139 | * transfers, so don't use bus_space_{read,write}_8() on LP64. |
140 | */ |
141 | u_int64_t |
142 | nvme_read8(struct nvme_softc *sc, bus_size_t r) |
143 | { |
144 | u_int64_t v; |
145 | |
146 | v = (u_int64_t)nvme_read4(sc, r)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((r)))) | |
147 | (u_int64_t)nvme_read4(sc, r + 4)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((r + 4)))) << 32; |
148 | |
149 | return (v); |
150 | } |
151 | |
152 | void |
153 | nvme_write8(struct nvme_softc *sc, bus_size_t r, u_int64_t v) |
154 | { |
155 | nvme_write4(sc, r, v)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((r)), ((v) ))); |
156 | nvme_write4(sc, r + 4, v >> 32)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((r + 4)), ( (v >> 32)))); |
157 | } |
158 | |
159 | void |
160 | nvme_dumpregs(struct nvme_softc *sc) |
161 | { |
162 | u_int64_t r8; |
163 | u_int32_t r4; |
164 | |
165 | r8 = nvme_read8(sc, NVME_CAP0x0000); |
166 | printf("%s: cap 0x%016llx\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), nvme_read8(sc, NVME_CAP0x0000)); |
167 | printf("%s: mpsmax %u (%u)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
168 | (u_int)NVME_CAP_MPSMAX(r8)(12 + (((r8) >> 52) & 0xf)), (1 << NVME_CAP_MPSMAX(r8)(12 + (((r8) >> 52) & 0xf)))); |
169 | printf("%s: mpsmin %u (%u)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
170 | (u_int)NVME_CAP_MPSMIN(r8)(12 + (((r8) >> 48) & 0xf)), (1 << NVME_CAP_MPSMIN(r8)(12 + (((r8) >> 48) & 0xf)))); |
171 | printf("%s: css %llu\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CAP_CSS(r8)(((r8) >> 37) & 0x7f)); |
172 | printf("%s: nssrs %llu\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CAP_NSSRS(r8)(((r8)) & ((1ULL << 36)))); |
173 | printf("%s: dstrd %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CAP_DSTRD(r8)(1 << (2 + (((r8) >> 32) & 0xf)))); |
174 | printf("%s: to %llu msec\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CAP_TO(r8)(500 * (((r8) >> 24) & 0xff))); |
175 | printf("%s: ams %llu\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CAP_AMS(r8)(((r8) >> 17) & 0x3)); |
176 | printf("%s: cqr %llu\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CAP_CQR(r8)(((r8)) & ((1 << 16)))); |
177 | printf("%s: mqes %llu\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CAP_MQES(r8)(((r8) & 0xffff) + 1)); |
178 | |
179 | printf("%s: vs 0x%04x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), nvme_read4(sc, NVME_VS)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x0008))))); |
180 | |
181 | r4 = nvme_read4(sc, NVME_CC)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x0014)))); |
182 | printf("%s: cc 0x%04x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), r4); |
183 | printf("%s: iocqes %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CC_IOCQES_R(r4)(((r4) >> 20) & 0xf)); |
184 | printf("%s: iosqes %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CC_IOSQES_R(r4)(((r4) >> 16) & 0xf)); |
185 | printf("%s: shn %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CC_SHN_R(r4)(((r4) >> 15) & 0x3)); |
186 | printf("%s: ams %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CC_AMS_R(r4)(((r4) >> 11) & 0xf)); |
187 | printf("%s: mps %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CC_MPS_R(r4)(12 + (((r4) >> 7) & 0xf))); |
188 | printf("%s: css %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), NVME_CC_CSS_R(r4)(((r4) >> 4) & 0x7)); |
189 | printf("%s: en %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), ISSET(r4, NVME_CC_EN)((r4) & ((1 << 0)))); |
190 | |
191 | printf("%s: csts 0x%08x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), nvme_read4(sc, NVME_CSTS)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x001c))))); |
192 | printf("%s: aqa 0x%08x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), nvme_read4(sc, NVME_AQA)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x0024))))); |
193 | printf("%s: asq 0x%016llx\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), nvme_read8(sc, NVME_ASQ0x0028)); |
194 | printf("%s: acq 0x%016llx\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), nvme_read8(sc, NVME_ACQ0x0030)); |
195 | } |
196 | |
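 | /* |
 |  * Wait for CSTS.RDY to match the requested state, checking roughly |
 |  * once per millisecond for up to sc_rdy_to iterations (derived from |
 |  * CAP.TO at attach time).  Returns non-zero on timeout. |
 |  */ |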
197 | int |
198 | nvme_ready(struct nvme_softc *sc, u_int32_t rdy) |
199 | { |
200 | u_int i = 0; |
201 | |
202 | while ((nvme_read4(sc, NVME_CSTS)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x001c)))) & NVME_CSTS_RDY(1 << 0)) != rdy) { |
203 | if (i++ > sc->sc_rdy_to) |
204 | return (1); |
205 | |
206 | delay(1000)(*delay_func)(1000); |
207 | nvme_barrier(sc, NVME_CSTS, 4, BUS_SPACE_BARRIER_READ)bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0x001c), (4), (0x01)); |
208 | } |
209 | |
210 | return (0); |
211 | } |
212 | |
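 | /* |
 |  * Enable the controller.  If it is already enabled just wait for it |
 |  * to come ready; otherwise point AQA/ASQ/ACQ at the admin queue, |
 |  * program CC (64-byte SQ entries, 16-byte CQ entries, NVM command |
 |  * set, round-robin arbitration, page size from sc_mps), set EN and |
 |  * wait for RDY. |
 |  */ |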
213 | int |
214 | nvme_enable(struct nvme_softc *sc) |
215 | { |
216 | u_int32_t cc; |
217 | |
218 | cc = nvme_read4(sc, NVME_CC)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x0014)))); |
219 | if (ISSET(cc, NVME_CC_EN)((cc) & ((1 << 0)))) |
220 | return (nvme_ready(sc, NVME_CSTS_RDY(1 << 0))); |
221 | |
222 | if (sc->sc_ops->op_enable != NULL((void *)0)) |
223 | sc->sc_ops->op_enable(sc); |
224 | |
225 | nvme_write4(sc, NVME_AQA, NVME_AQA_ACQS(sc->sc_admin_q->q_entries) |(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x0024)), (((((sc->sc_admin_q->q_entries) - 1) << 16) | (( (sc->sc_admin_q->q_entries) - 1) << 0))))) |
226 | NVME_AQA_ASQS(sc->sc_admin_q->q_entries))(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x0024)), (((((sc->sc_admin_q->q_entries) - 1) << 16) | (( (sc->sc_admin_q->q_entries) - 1) << 0))))); |
227 | nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE)bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x02)); |
228 | |
229 | nvme_write8(sc, NVME_ASQ0x0028, NVME_DMA_DVA(sc->sc_admin_q->q_sq_dmamem)((u_int64_t)(sc->sc_admin_q->q_sq_dmamem)->ndm_map-> dm_segs[0].ds_addr)); |
230 | nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE)bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x02)); |
231 | nvme_write8(sc, NVME_ACQ0x0030, NVME_DMA_DVA(sc->sc_admin_q->q_cq_dmamem)((u_int64_t)(sc->sc_admin_q->q_cq_dmamem)->ndm_map-> dm_segs[0].ds_addr)); |
232 | nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE)bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x02)); |
233 | |
234 | CLR(cc, NVME_CC_IOCQES_MASK | NVME_CC_IOSQES_MASK | NVME_CC_SHN_MASK |((cc) &= ~((((0xf) & 0xf) << 20) | (((0xf) & 0xf) << 16) | (((0x3) & 0x3) << 14) | (((0x7 ) & 0x7) << 11) | (0xf << 7) | (((0x7) & 0x7 ) << 4))) |
235 | NVME_CC_AMS_MASK | NVME_CC_MPS_MASK | NVME_CC_CSS_MASK)((cc) &= ~((((0xf) & 0xf) << 20) | (((0xf) & 0xf) << 16) | (((0x3) & 0x3) << 14) | (((0x7 ) & 0x7) << 11) | (0xf << 7) | (((0x7) & 0x7 ) << 4))); |
236 | SET(cc, NVME_CC_IOSQES(6))((cc) |= ((((6) & 0xf) << 16))); /* Submission queue size == 2**6 (64) */ |
237 | SET(cc, NVME_CC_IOCQES(4))((cc) |= ((((4) & 0xf) << 20))); /* Completion queue size == 2**4 (16) */ |
238 | SET(cc, NVME_CC_SHN(NVME_CC_SHN_NONE))((cc) |= ((((0) & 0x3) << 14))); |
239 | SET(cc, NVME_CC_CSS(NVME_CC_CSS_NVM))((cc) |= ((((0) & 0x7) << 4))); |
240 | SET(cc, NVME_CC_AMS(NVME_CC_AMS_RR))((cc) |= ((((0) & 0x7) << 11))); |
241 | SET(cc, NVME_CC_MPS(ffs(sc->sc_mps) - 1))((cc) |= (((((ffs(sc->sc_mps) - 1) - 12) & 0xf) << 7))); |
242 | SET(cc, NVME_CC_EN)((cc) |= ((1 << 0))); |
243 | |
244 | nvme_write4(sc, NVME_CC, cc)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x0014)), ((cc)))); |
245 | nvme_barrier(sc, 0, sc->sc_ios,bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x01 | 0x02)) |
246 | BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x01 | 0x02)); |
247 | |
248 | return (nvme_ready(sc, NVME_CSTS_RDY(1 << 0))); |
249 | } |
250 | |
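 | /* |
 |  * Disable the controller: if it is enabled and not reporting a fatal |
 |  * status (CFS), let it finish coming ready first, then clear CC.EN |
 |  * and wait for RDY to drop. |
 |  */ |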
251 | int |
252 | nvme_disable(struct nvme_softc *sc) |
253 | { |
254 | u_int32_t cc, csts; |
255 | |
256 | cc = nvme_read4(sc, NVME_CC)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x0014)))); |
257 | if (ISSET(cc, NVME_CC_EN)((cc) & ((1 << 0)))) { |
258 | csts = nvme_read4(sc, NVME_CSTS)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x001c)))); |
259 | if (!ISSET(csts, NVME_CSTS_CFS)((csts) & ((1 << 1))) && |
260 | nvme_ready(sc, NVME_CSTS_RDY(1 << 0)) != 0) |
261 | return (1); |
262 | } |
263 | |
264 | CLR(cc, NVME_CC_EN)((cc) &= ~((1 << 0))); |
265 | |
266 | nvme_write4(sc, NVME_CC, cc)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x0014)), ((cc)))); |
267 | nvme_barrier(sc, 0, sc->sc_ios,bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x01 | 0x02)) |
268 | BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x01 | 0x02)); |
269 | |
270 | return (nvme_ready(sc, 0)); |
271 | } |
272 | |
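 | /* |
 |  * Controller bring-up: size pages and the doorbell stride from CAP, |
 |  * reset the controller, set up the admin queue and an initial ccb |
 |  * pool, enable and identify it (which fixes up sc_mdts/sc_max_prpl), |
 |  * re-allocate the ccbs to match, create the IO queue and attach a |
 |  * scsibus with one target per namespace. |
 |  */ |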
273 | int |
274 | nvme_attach(struct nvme_softc *sc) |
275 | { |
276 | struct scsibus_attach_args saa; |
277 | u_int64_t cap; |
278 | u_int32_t reg; |
279 | u_int nccbs = 0; |
280 | |
281 | mtx_init(&sc->sc_ccb_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc-> sc_ccb_mtx), ((((0x3)) > 0x0 && ((0x3)) < 0x9) ? 0x9 : ((0x3)))); } while (0); |
282 | SIMPLEQ_INIT(&sc->sc_ccb_list)do { (&sc->sc_ccb_list)->sqh_first = ((void *)0); ( &sc->sc_ccb_list)->sqh_last = &(&sc->sc_ccb_list )->sqh_first; } while (0); |
283 | scsi_iopool_init(&sc->sc_iopool, sc, nvme_ccb_get, nvme_ccb_put); |
284 | if (sc->sc_ops == NULL((void *)0)) |
285 | sc->sc_ops = &nvme_ops; |
286 | if (sc->sc_openings == 0) |
287 | sc->sc_openings = 64; |
288 | |
289 | reg = nvme_read4(sc, NVME_VS)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x0008)))); |
290 | if (reg == 0xffffffff) { |
291 | printf("invalid mapping\n"); |
292 | return (1); |
293 | } |
294 | |
295 | printf("NVMe %d.%d\n", NVME_VS_MJR(reg)(((reg) & 0xffff0000) >> 16), NVME_VS_MNR(reg)(((reg) & 0x0000ff00) >> 8)); |
296 | |
297 | cap = nvme_read8(sc, NVME_CAP0x0000); |
298 | sc->sc_dstrd = NVME_CAP_DSTRD(cap)(1 << (2 + (((cap) >> 32) & 0xf))); |
299 | if (NVME_CAP_MPSMIN(cap)(12 + (((cap) >> 48) & 0xf)) > PAGE_SHIFT12) { |
300 | printf("%s: NVMe minimum page size %u " |
301 | "is greater than CPU page size %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
302 | 1 << NVME_CAP_MPSMIN(cap)(12 + (((cap) >> 48) & 0xf)), 1 << PAGE_SHIFT12); |
303 | return (1); |
304 | } |
305 | if (NVME_CAP_MPSMAX(cap)(12 + (((cap) >> 52) & 0xf)) < PAGE_SHIFT12) |
306 | sc->sc_mps = 1 << NVME_CAP_MPSMAX(cap)(12 + (((cap) >> 52) & 0xf)); |
307 | else |
308 | sc->sc_mps = 1 << PAGE_SHIFT12; |
309 | |
310 | sc->sc_rdy_to = NVME_CAP_TO(cap)(500 * (((cap) >> 24) & 0xff)); |
311 | sc->sc_mdts = MAXPHYS(64 * 1024); |
312 | sc->sc_max_prpl = sc->sc_mdts / sc->sc_mps; |
313 | |
314 | if (nvme_disable(sc) != 0) { |
315 | printf("%s: unable to disable controller\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
316 | return (1); |
317 | } |
318 | |
319 | sc->sc_admin_q = nvme_q_alloc(sc, NVME_ADMIN_Q0, 128, sc->sc_dstrd); |
320 | if (sc->sc_admin_q == NULL((void *)0)) { |
321 | printf("%s: unable to allocate admin queue\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
322 | return (1); |
323 | } |
324 | |
325 | if (nvme_ccbs_alloc(sc, 16) != 0) { |
326 | printf("%s: unable to allocate initial ccbs\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
327 | goto free_admin_q; |
328 | } |
329 | nccbs = 16; |
330 | |
331 | if (nvme_enable(sc) != 0) { |
332 | printf("%s: unable to enable controller\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
333 | goto free_ccbs; |
334 | } |
335 | |
336 | if (nvme_identify(sc, NVME_CAP_MPSMIN(cap)(12 + (((cap) >> 48) & 0xf))) != 0) { |
337 | printf("%s: unable to identify controller\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
338 | goto disable; |
339 | } |
340 | |
341 | /* We now know the real values of sc_mdts and sc_max_prpl. */ |
342 | nvme_ccbs_free(sc, nccbs); |
343 | if (nvme_ccbs_alloc(sc, 64) != 0) { |
344 | printf("%s: unable to allocate ccbs\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
345 | goto free_admin_q; |
346 | } |
347 | nccbs = 64; |
348 | |
349 | sc->sc_q = nvme_q_alloc(sc, NVME_IO_Q1, 128, sc->sc_dstrd); |
350 | if (sc->sc_q == NULL((void *)0)) { |
351 | printf("%s: unable to allocate io q\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
352 | goto disable; |
353 | } |
354 | |
355 | if (nvme_q_create(sc, sc->sc_q) != 0) { |
356 | printf("%s: unable to create io q\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
357 | goto free_q; |
358 | } |
359 | |
360 | #ifdef HIBERNATE1 |
361 | sc->sc_hib_q = nvme_q_alloc(sc, NVME_HIB_Q2, 4, sc->sc_dstrd); |
362 | if (sc->sc_hib_q == NULL((void *)0)) { |
363 | printf("%s: unable to allocate hibernate io queue\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
364 | goto free_q; |
365 | } |
366 | #endif |
367 | |
368 | nvme_write4(sc, NVME_INTMC, 1)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x0010)), ((1)))); |
369 | |
370 | sc->sc_namespaces = mallocarray(sc->sc_nn + 1, |
371 | sizeof(*sc->sc_namespaces), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008); |
372 | |
373 | saa.saa_adapter = &nvme_switch; |
374 | saa.saa_adapter_softc = sc; |
375 | saa.saa_adapter_buswidth = sc->sc_nn + 1; |
376 | saa.saa_luns = 1; |
377 | saa.saa_adapter_target = 0; |
378 | saa.saa_openings = sc->sc_openings; |
379 | saa.saa_pool = &sc->sc_iopool; |
380 | saa.saa_quirks = saa.saa_flags = 0; |
381 | saa.saa_wwpn = saa.saa_wwnn = 0; |
382 | |
383 | config_found(&sc->sc_dev, &saa, scsiprint)config_found_sm((&sc->sc_dev), (&saa), (scsiprint) , ((void *)0)); |
384 | |
385 | return (0); |
386 | |
387 | free_q: |
388 | nvme_q_free(sc, sc->sc_q); |
389 | disable: |
390 | nvme_disable(sc); |
391 | free_ccbs: |
392 | nvme_ccbs_free(sc, nccbs); |
393 | free_admin_q: |
394 | nvme_q_free(sc, sc->sc_admin_q); |
395 | |
396 | return (1); |
397 | } |
398 | |
399 | int |
400 | nvme_resume(struct nvme_softc *sc) |
401 | { |
402 | if (nvme_disable(sc) != 0) { |
403 | printf("%s: unable to disable controller\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
404 | return (1); |
405 | } |
406 | |
407 | if (nvme_q_reset(sc, sc->sc_admin_q) != 0) { |
408 | printf("%s: unable to reset admin queue\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
409 | return (1); |
410 | } |
411 | |
412 | if (nvme_enable(sc) != 0) { |
413 | printf("%s: unable to enable controller\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
414 | return (1); |
415 | } |
416 | |
417 | sc->sc_q = nvme_q_alloc(sc, NVME_IO_Q1, 128, sc->sc_dstrd); |
418 | if (sc->sc_q == NULL((void *)0)) { |
419 | printf("%s: unable to allocate io q\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
420 | goto disable; |
421 | } |
422 | |
423 | if (nvme_q_create(sc, sc->sc_q) != 0) { |
424 | printf("%s: unable to create io q\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
425 | goto free_q; |
426 | } |
427 | |
428 | nvme_write4(sc, NVME_INTMC, 1)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x0010)), ((1)))); |
429 | |
430 | return (0); |
431 | |
432 | free_q: |
433 | nvme_q_free(sc, sc->sc_q); |
434 | disable: |
435 | nvme_disable(sc); |
436 | |
437 | return (1); |
438 | } |
439 | |
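 | /* |
 |  * Probe a target by issuing IDENTIFY NAMESPACE for it.  The identify |
 |  * data is cached in sc_namespaces[]; namespaces that report a size |
 |  * of zero are not attached. |
 |  */ |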
440 | int |
441 | nvme_scsi_probe(struct scsi_link *link) |
442 | { |
443 | struct nvme_softc *sc = link->bus->sb_adapter_softc; |
444 | struct nvme_sqe sqe; |
445 | struct nvm_identify_namespace *identify; |
446 | struct nvme_dmamem *mem; |
447 | struct nvme_ccb *ccb; |
448 | int rv; |
449 | |
450 | ccb = scsi_io_get(&sc->sc_iopool, 0); |
451 | KASSERT(ccb != NULL)((ccb != ((void *)0)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/ic/nvme.c" , 451, "ccb != NULL")); |
452 | |
453 | mem = nvme_dmamem_alloc(sc, sizeof(*identify)); |
454 | if (mem == NULL((void *)0)) |
455 | return (ENOMEM12); |
456 | |
457 | memset(&sqe, 0, sizeof(sqe))__builtin_memset((&sqe), (0), (sizeof(sqe))); |
458 | sqe.opcode = NVM_ADMIN_IDENTIFY0x06; |
459 | htolem32(&sqe.nsid, link->target)(*(__uint32_t *)(&sqe.nsid) = ((__uint32_t)(link->target ))); |
460 | htolem64(&sqe.entry.prp[0], NVME_DMA_DVA(mem))(*(__uint64_t *)(&sqe.entry.prp[0]) = ((__uint64_t)(((u_int64_t )(mem)->ndm_map->dm_segs[0].ds_addr)))); |
461 | htolem32(&sqe.cdw10, 0)(*(__uint32_t *)(&sqe.cdw10) = ((__uint32_t)(0))); |
462 | |
463 | ccb->ccb_done = nvme_empty_done; |
464 | ccb->ccb_cookie = &sqe; |
465 | |
466 | nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD0x01); |
467 | rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill); |
468 | nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD0x02); |
469 | |
470 | scsi_io_put(&sc->sc_iopool, ccb); |
471 | |
472 | identify = NVME_DMA_KVA(mem)((void *)(mem)->ndm_kva); |
473 | if (rv == 0) { |
474 | if (nvme_scsi_size(identify) > 0) { |
475 | /* Commit namespace if it has a size greater than zero. */ |
476 | identify = malloc(sizeof(*identify), M_DEVBUF2, M_WAITOK0x0001); |
477 | memcpy(identify, NVME_DMA_KVA(mem), sizeof(*identify))__builtin_memcpy((identify), (((void *)(mem)->ndm_kva)), ( sizeof(*identify))); |
478 | sc->sc_namespaces[link->target].ident = identify; |
479 | } else { |
480 | /* Don't attach a namespace if its size is zero. */ |
481 | rv = ENXIO6; |
482 | } |
483 | } |
484 | |
485 | nvme_dmamem_free(sc, mem); |
486 | |
487 | return (rv); |
488 | } |
489 | |
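 | /* |
 |  * Shut the controller down: delete the IO queue, request a normal |
 |  * shutdown via CC.SHN and poll CSTS.SHST for up to ~4 seconds before |
 |  * falling back to simply disabling the controller. |
 |  */ |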
490 | int |
491 | nvme_shutdown(struct nvme_softc *sc) |
492 | { |
493 | u_int32_t cc, csts; |
494 | int i; |
495 | |
496 | nvme_write4(sc, NVME_INTMC, 0)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x0010)), ((0)))); |
497 | |
498 | if (nvme_q_delete(sc, sc->sc_q) != 0) { |
499 | printf("%s: unable to delete q, disabling\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
500 | goto disable; |
501 | } |
502 | |
503 | cc = nvme_read4(sc, NVME_CC)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x0014)))); |
504 | CLR(cc, NVME_CC_SHN_MASK)((cc) &= ~((((0x3) & 0x3) << 14))); |
505 | SET(cc, NVME_CC_SHN(NVME_CC_SHN_NORMAL))((cc) |= ((((1) & 0x3) << 14))); |
506 | nvme_write4(sc, NVME_CC, cc)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x0014)), ((cc)))); |
507 | |
508 | for (i = 0; i < 4000; i++) { |
509 | nvme_barrier(sc, 0, sc->sc_ios,bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x01 | 0x02)) |
510 | BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, (0), (sc-> sc_ios), (0x01 | 0x02)); |
511 | csts = nvme_read4(sc, NVME_CSTS)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), ((0x001c)))); |
512 | if ((csts & NVME_CSTS_SHST_MASK(0x3 << 2)) == NVME_CSTS_SHST_DONE(0x2 << 2)) |
513 | return (0); |
514 | |
515 | delay(1000)(*delay_func)(1000); |
516 | } |
517 | |
518 | printf("%s: unable to shutdown, disabling\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
519 | |
520 | disable: |
521 | nvme_disable(sc); |
522 | return (0); |
523 | } |
524 | |
525 | int |
526 | nvme_activate(struct nvme_softc *sc, int act) |
527 | { |
528 | int rv; |
529 | |
530 | switch (act) { |
531 | case DVACT_POWERDOWN6: |
532 | rv = config_activate_children(&sc->sc_dev, act); |
533 | nvme_shutdown(sc); |
534 | break; |
535 | case DVACT_RESUME4: |
536 | rv = nvme_resume(sc); |
537 | if (rv == 0) |
538 | rv = config_activate_children(&sc->sc_dev, act); |
539 | break; |
540 | default: |
541 | rv = config_activate_children(&sc->sc_dev, act); |
542 | break; |
543 | } |
544 | |
545 | return (rv); |
546 | } |
547 | |
548 | void |
549 | nvme_scsi_cmd(struct scsi_xfer *xs) |
550 | { |
551 | switch (xs->cmd.opcode) { |
552 | case READ_COMMAND0x08: |
553 | case READ_100x28: |
554 | case READ_120xa8: |
555 | case READ_160x88: |
556 | nvme_scsi_io(xs, SCSI_DATA_IN0x00800); |
557 | return; |
558 | case WRITE_COMMAND0x0a: |
559 | case WRITE_100x2a: |
560 | case WRITE_120xaa: |
561 | case WRITE_160x8a: |
562 | nvme_scsi_io(xs, SCSI_DATA_OUT0x01000); |
563 | return; |
564 | |
565 | case SYNCHRONIZE_CACHE0x35: |
566 | nvme_scsi_sync(xs); |
567 | return; |
568 | |
569 | case INQUIRY0x12: |
570 | nvme_scsi_inq(xs); |
571 | return; |
572 | case READ_CAPACITY_160x9e: |
573 | nvme_scsi_capacity16(xs); |
574 | return; |
575 | case READ_CAPACITY0x25: |
576 | nvme_scsi_capacity(xs); |
577 | return; |
578 | |
579 | case TEST_UNIT_READY0x00: |
580 | case PREVENT_ALLOW0x1e: |
581 | case START_STOP0x1b: |
582 | xs->error = XS_NOERROR0; |
583 | scsi_done(xs); |
584 | return; |
585 | |
586 | default: |
587 | break; |
588 | } |
589 | |
590 | xs->error = XS_DRIVER_STUFFUP2; |
591 | scsi_done(xs); |
592 | } |
593 | |
594 | void |
595 | nvme_minphys(struct buf *bp, struct scsi_link *link) |
596 | { |
597 | struct nvme_softc *sc = link->bus->sb_adapter_softc; |
598 | |
599 | if (bp->b_bcount > sc->sc_mdts) |
600 | bp->b_bcount = sc->sc_mdts; |
601 | } |
602 | |
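 | /* |
 |  * Map the xfer data for DMA.  The first segment is placed in PRP1 of |
 |  * the SQE; with exactly two segments the second goes in PRP2, and |
 |  * with more the remaining segments are written into this ccb's PRP |
 |  * list, which PRP2 then points at (see nvme_scsi_io_fill below). |
 |  */ |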
603 | void |
604 | nvme_scsi_io(struct scsi_xfer *xs, int dir) |
605 | { |
606 | struct scsi_link *link = xs->sc_link; |
607 | struct nvme_softc *sc = link->bus->sb_adapter_softc; |
608 | struct nvme_ccb *ccb = xs->io; |
609 | bus_dmamap_t dmap = ccb->ccb_dmamap; |
610 | int i; |
611 | |
612 | if ((xs->flags & (SCSI_DATA_IN0x00800|SCSI_DATA_OUT0x01000)) != dir) |
613 | goto stuffup; |
614 | |
615 | ccb->ccb_done = nvme_scsi_io_done; |
616 | ccb->ccb_cookie = xs; |
617 | |
618 | if (bus_dmamap_load(sc->sc_dmat, dmap,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap) , (xs->data), (xs->datalen), (((void *)0)), (((xs->flags ) & (0x00001)) ? 0x0001 : 0x0000)) |
619 | xs->data, xs->datalen, NULL, ISSET(xs->flags, SCSI_NOSLEEP) ?(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap) , (xs->data), (xs->datalen), (((void *)0)), (((xs->flags ) & (0x00001)) ? 0x0001 : 0x0000)) |
620 | BUS_DMA_NOWAIT : BUS_DMA_WAITOK)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap) , (xs->data), (xs->datalen), (((void *)0)), (((xs->flags ) & (0x00001)) ? 0x0001 : 0x0000)) != 0) |
621 | goto stuffup; |
622 | |
623 | bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap) , (0), (dmap->dm_mapsize), (((xs->flags) & (0x00800 )) ? 0x01 : 0x04)) |
624 | ISSET(xs->flags, SCSI_DATA_IN) ?(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap) , (0), (dmap->dm_mapsize), (((xs->flags) & (0x00800 )) ? 0x01 : 0x04)) |
625 | BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap) , (0), (dmap->dm_mapsize), (((xs->flags) & (0x00800 )) ? 0x01 : 0x04)); |
626 | |
627 | if (dmap->dm_nsegs > 2) { |
628 | for (i = 1; i < dmap->dm_nsegs; i++) { |
629 | htolem64(&ccb->ccb_prpl[i - 1],(*(__uint64_t *)(&ccb->ccb_prpl[i - 1]) = ((__uint64_t )(dmap->dm_segs[i].ds_addr))) |
630 | dmap->dm_segs[i].ds_addr)(*(__uint64_t *)(&ccb->ccb_prpl[i - 1]) = ((__uint64_t )(dmap->dm_segs[i].ds_addr))); |
631 | } |
632 | bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x04)) |
633 | NVME_DMA_MAP(sc->sc_ccb_prpls),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x04)) |
634 | ccb->ccb_prpl_off,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x04)) |
635 | sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x04)) |
636 | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x04)); |
637 | } |
638 | |
639 | if (ISSET(xs->flags, SCSI_POLL)((xs->flags) & (0x00002))) { |
640 | nvme_poll(sc, sc->sc_q, ccb, nvme_scsi_io_fill); |
641 | return; |
642 | } |
643 | |
644 | nvme_q_submit(sc, sc->sc_q, ccb, nvme_scsi_io_fill); |
645 | return; |
646 | |
647 | stuffup: |
648 | xs->error = XS_DRIVER_STUFFUP2; |
649 | scsi_done(xs); |
650 | } |
651 | |
652 | void |
653 | nvme_scsi_io_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot) |
654 | { |
655 | struct nvme_sqe_io *sqe = slot; |
656 | struct scsi_xfer *xs = ccb->ccb_cookie; |
657 | struct scsi_link *link = xs->sc_link; |
658 | bus_dmamap_t dmap = ccb->ccb_dmamap; |
659 | u_int64_t lba; |
660 | u_int32_t blocks; |
661 | |
662 | scsi_cmd_rw_decode(&xs->cmd, &lba, &blocks); |
663 | |
664 | sqe->opcode = ISSET(xs->flags, SCSI_DATA_IN)((xs->flags) & (0x00800)) ? |
665 | NVM_CMD_READ0x02 : NVM_CMD_WRITE0x01; |
666 | htolem32(&sqe->nsid, link->target)(*(__uint32_t *)(&sqe->nsid) = ((__uint32_t)(link-> target))); |
667 | |
668 | htolem64(&sqe->entry.prp[0], dmap->dm_segs[0].ds_addr)(*(__uint64_t *)(&sqe->entry.prp[0]) = ((__uint64_t)(dmap ->dm_segs[0].ds_addr))); |
669 | switch (dmap->dm_nsegs) { |
670 | case 1: |
671 | break; |
672 | case 2: |
673 | htolem64(&sqe->entry.prp[1], dmap->dm_segs[1].ds_addr)(*(__uint64_t *)(&sqe->entry.prp[1]) = ((__uint64_t)(dmap ->dm_segs[1].ds_addr))); |
674 | break; |
675 | default: |
676 | /* the prp list is already set up and synced */ |
677 | htolem64(&sqe->entry.prp[1], ccb->ccb_prpl_dva)(*(__uint64_t *)(&sqe->entry.prp[1]) = ((__uint64_t)(ccb ->ccb_prpl_dva))); |
678 | break; |
679 | } |
680 | |
681 | htolem64(&sqe->slba, lba)(*(__uint64_t *)(&sqe->slba) = ((__uint64_t)(lba))); |
682 | htolem16(&sqe->nlb, blocks - 1)(*(__uint16_t *)(&sqe->nlb) = ((__uint16_t)(blocks - 1 ))); |
683 | } |
684 | |
685 | void |
686 | nvme_scsi_io_done(struct nvme_softc *sc, struct nvme_ccb *ccb, |
687 | struct nvme_cqe *cqe) |
688 | { |
689 | struct scsi_xfer *xs = ccb->ccb_cookie; |
690 | bus_dmamap_t dmap = ccb->ccb_dmamap; |
691 | u_int16_t flags; |
692 | |
693 | if (dmap->dm_nsegs > 2) { |
694 | bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x08)) |
695 | NVME_DMA_MAP(sc->sc_ccb_prpls),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x08)) |
696 | ccb->ccb_prpl_off,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x08)) |
697 | sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x08)) |
698 | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc-> sc_ccb_prpls)->ndm_map)), (ccb->ccb_prpl_off), (sizeof( *ccb->ccb_prpl) * (dmap->dm_nsegs - 1)), (0x08)); |
699 | } |
700 | |
701 | bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap) , (0), (dmap->dm_mapsize), (((xs->flags) & (0x00800 )) ? 0x02 : 0x08)) |
702 | ISSET(xs->flags, SCSI_DATA_IN) ?(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap) , (0), (dmap->dm_mapsize), (((xs->flags) & (0x00800 )) ? 0x02 : 0x08)) |
703 | BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap) , (0), (dmap->dm_mapsize), (((xs->flags) & (0x00800 )) ? 0x02 : 0x08)); |
704 | |
705 | bus_dmamap_unload(sc->sc_dmat, dmap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (dmap )); |
706 | |
707 | flags = lemtoh16(&cqe->flags)((__uint16_t)(*(__uint16_t *)(&cqe->flags))); |
708 | |
709 | xs->error = (NVME_CQE_SC(flags)((flags) & (0xff << 1)) == NVME_CQE_SC_SUCCESS(0x00 << 1)) ? |
710 | XS_NOERROR0 : XS_DRIVER_STUFFUP2; |
711 | xs->status = SCSI_OK0x00; |
712 | xs->resid = 0; |
713 | scsi_done(xs); |
714 | } |
715 | |
716 | void |
717 | nvme_scsi_sync(struct scsi_xfer *xs) |
718 | { |
719 | struct scsi_link *link = xs->sc_link; |
720 | struct nvme_softc *sc = link->bus->sb_adapter_softc; |
721 | struct nvme_ccb *ccb = xs->io; |
722 | |
723 | ccb->ccb_done = nvme_scsi_sync_done; |
724 | ccb->ccb_cookie = xs; |
725 | |
726 | if (ISSET(xs->flags, SCSI_POLL)((xs->flags) & (0x00002))) { |
727 | nvme_poll(sc, sc->sc_q, ccb, nvme_scsi_sync_fill); |
728 | return; |
729 | } |
730 | |
731 | nvme_q_submit(sc, sc->sc_q, ccb, nvme_scsi_sync_fill); |
732 | } |
733 | |
734 | void |
735 | nvme_scsi_sync_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot) |
736 | { |
737 | struct nvme_sqe *sqe = slot; |
738 | struct scsi_xfer *xs = ccb->ccb_cookie; |
739 | struct scsi_link *link = xs->sc_link; |
740 | |
741 | sqe->opcode = NVM_CMD_FLUSH0x00; |
742 | htolem32(&sqe->nsid, link->target)(*(__uint32_t *)(&sqe->nsid) = ((__uint32_t)(link-> target))); |
743 | } |
744 | |
745 | void |
746 | nvme_scsi_sync_done(struct nvme_softc *sc, struct nvme_ccb *ccb, |
747 | struct nvme_cqe *cqe) |
748 | { |
749 | struct scsi_xfer *xs = ccb->ccb_cookie; |
750 | u_int16_t flags; |
751 | |
752 | flags = lemtoh16(&cqe->flags)((__uint16_t)(*(__uint16_t *)(&cqe->flags))); |
753 | |
754 | xs->error = (NVME_CQE_SC(flags)((flags) & (0xff << 1)) == NVME_CQE_SC_SUCCESS(0x00 << 1)) ? |
755 | XS_NOERROR0 : XS_DRIVER_STUFFUP2; |
756 | xs->status = SCSI_OK0x00; |
757 | xs->resid = 0; |
758 | scsi_done(xs); |
759 | } |
760 | |
761 | void |
762 | nvme_scsi_inq(struct scsi_xfer *xs) |
763 | { |
764 | struct scsi_inquiry *inq = (struct scsi_inquiry *)&xs->cmd; |
765 | |
766 | if (!ISSET(inq->flags, SI_EVPD)((inq->flags) & (0x01))) { |
767 | nvme_scsi_inquiry(xs); |
768 | return; |
769 | } |
770 | |
771 | switch (inq->pagecode) { |
772 | default: |
773 | /* printf("%s: %d\n", __func__, inq->pagecode); */ |
774 | break; |
775 | } |
776 | |
777 | xs->error = XS_DRIVER_STUFFUP2; |
778 | scsi_done(xs); |
779 | } |
780 | |
781 | void |
782 | nvme_scsi_inquiry(struct scsi_xfer *xs) |
783 | { |
784 | struct scsi_inquiry_data inq; |
785 | struct scsi_link *link = xs->sc_link; |
786 | struct nvme_softc *sc = link->bus->sb_adapter_softc; |
787 | struct nvm_identify_namespace *ns; |
788 | |
789 | ns = sc->sc_namespaces[link->target].ident; |
 | Value stored to 'ns' is never read |
790 | |
791 | memset(&inq, 0, sizeof(inq))__builtin_memset((&inq), (0), (sizeof(inq))); |
792 | |
793 | inq.device = T_DIRECT0x00; |
794 | inq.version = SCSI_REV_SPC40x06; |
795 | inq.response_format = SID_SCSI2_RESPONSE0x02; |
796 | inq.additional_length = SID_SCSI2_ALEN31; |
797 | inq.flags |= SID_CmdQue0x02; |
798 | memcpy(inq.vendor, "NVMe ", sizeof(inq.vendor))__builtin_memcpy((inq.vendor), ("NVMe "), (sizeof(inq.vendor ))); |
799 | memcpy(inq.product, sc->sc_identify.mn, sizeof(inq.product))__builtin_memcpy((inq.product), (sc->sc_identify.mn), (sizeof (inq.product))); |
800 | memcpy(inq.revision, sc->sc_identify.fr, sizeof(inq.revision))__builtin_memcpy((inq.revision), (sc->sc_identify.fr), (sizeof (inq.revision))); |
801 | |
802 | scsi_copy_internal_data(xs, &inq, sizeof(inq)); |
803 | |
804 | xs->error = XS_NOERROR0; |
805 | scsi_done(xs); |
806 | } |
807 | |
808 | void |
809 | nvme_scsi_capacity16(struct scsi_xfer *xs) |
810 | { |
811 | struct scsi_read_cap_data_16 rcd; |
812 | struct scsi_link *link = xs->sc_link; |
813 | struct nvme_softc *sc = link->bus->sb_adapter_softc; |
814 | struct nvm_identify_namespace *ns; |
815 | struct nvm_namespace_format *f; |
816 | u_int64_t addr; |
817 | u_int16_t tpe = READ_CAP_16_TPE0x8000; |
818 | |
819 | ns = sc->sc_namespaces[link->target].ident; |
820 | |
821 | if (xs->cmdlen != sizeof(struct scsi_read_capacity_16)) { |
822 | xs->error = XS_DRIVER_STUFFUP2; |
823 | scsi_done(xs); |
824 | return; |
825 | } |
826 | |
827 | addr = nvme_scsi_size(ns) - 1; |
828 | f = &ns->lbaf[NVME_ID_NS_FLBAS(ns->flbas)((ns->flbas) & 0x0f)]; |
829 | |
830 | memset(&rcd, 0, sizeof(rcd))__builtin_memset((&rcd), (0), (sizeof(rcd))); |
831 | _lto8b(addr, rcd.addr); |
832 | _lto4b(1 << f->lbads, rcd.length); |
833 | _lto2b(tpe, rcd.lowest_aligned); |
834 | |
835 | memcpy(xs->data, &rcd, MIN(sizeof(rcd), xs->datalen))__builtin_memcpy((xs->data), (&rcd), ((((sizeof(rcd))< (xs->datalen))?(sizeof(rcd)):(xs->datalen)))); |
836 | |
837 | xs->error = XS_NOERROR0; |
838 | scsi_done(xs); |
839 | } |
840 | |
841 | void |
842 | nvme_scsi_capacity(struct scsi_xfer *xs) |
843 | { |
844 | struct scsi_read_cap_data rcd; |
845 | struct scsi_link *link = xs->sc_link; |
846 | struct nvme_softc *sc = link->bus->sb_adapter_softc; |
847 | struct nvm_identify_namespace *ns; |
848 | struct nvm_namespace_format *f; |
849 | u_int64_t addr; |
850 | |
851 | ns = sc->sc_namespaces[link->target].ident; |
852 | |
853 | if (xs->cmdlen != sizeof(struct scsi_read_capacity)) { |
854 | xs->error = XS_DRIVER_STUFFUP2; |
855 | scsi_done(xs); |
856 | return; |
857 | } |
858 | |
859 | addr = nvme_scsi_size(ns) - 1; |
860 | if (addr > 0xffffffff) |
861 | addr = 0xffffffff; |
862 | |
863 | f = &ns->lbaf[NVME_ID_NS_FLBAS(ns->flbas)((ns->flbas) & 0x0f)]; |
864 | |
865 | memset(&rcd, 0, sizeof(rcd))__builtin_memset((&rcd), (0), (sizeof(rcd))); |
866 | _lto4b(addr, rcd.addr); |
867 | _lto4b(1 << f->lbads, rcd.length); |
868 | |
869 | memcpy(xs->data, &rcd, MIN(sizeof(rcd), xs->datalen))__builtin_memcpy((xs->data), (&rcd), ((((sizeof(rcd))< (xs->datalen))?(sizeof(rcd)):(xs->datalen)))); |
870 | |
871 | xs->error = XS_NOERROR0; |
872 | scsi_done(xs); |
873 | } |
874 | |
875 | void |
876 | nvme_scsi_free(struct scsi_link *link) |
877 | { |
878 | struct nvme_softc *sc = link->bus->sb_adapter_softc; |
879 | struct nvm_identify_namespace *identify; |
880 | |
881 | identify = sc->sc_namespaces[link->target].ident; |
882 | sc->sc_namespaces[link->target].ident = NULL((void *)0); |
883 | |
884 | free(identify, M_DEVBUF2, sizeof(*identify)); |
885 | } |
886 | |
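 | /* |
 |  * Namespace size in logical blocks.  For thin provisioned namespaces |
 |  * report the capacity (ncap) when it is smaller than the size (nsze). |
 |  */ |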
887 | uint64_t |
888 | nvme_scsi_size(struct nvm_identify_namespace *ns) |
889 | { |
890 | uint64_t ncap, nsze; |
891 | |
892 | ncap = lemtoh64(&ns->ncap)((__uint64_t)(*(__uint64_t *)(&ns->ncap))); /* Max allowed allocation. */ |
893 | nsze = lemtoh64(&ns->nsze)((__uint64_t)(*(__uint64_t *)(&ns->nsze))); |
894 | |
895 | if ((ns->nsfeat & NVME_ID_NS_NSFEAT_THIN_PROV(1 << 0)) && ncap < nsze) |
896 | return ncap; |
897 | else |
898 | return nsze; |
899 | } |
900 | |
901 | uint32_t |
902 | nvme_op_sq_enter(struct nvme_softc *sc, |
903 | struct nvme_queue *q, struct nvme_ccb *ccb) |
904 | { |
905 | mtx_enter(&q->q_sq_mtx); |
906 | return (nvme_op_sq_enter_locked(sc, q, ccb)); |
907 | } |
908 | |
909 | uint32_t |
910 | nvme_op_sq_enter_locked(struct nvme_softc *sc, |
911 | struct nvme_queue *q, struct nvme_ccb *ccb) |
912 | { |
913 | return (q->q_sq_tail); |
914 | } |
915 | |
916 | void |
917 | nvme_op_sq_leave_locked(struct nvme_softc *sc, |
918 | struct nvme_queue *q, struct nvme_ccb *ccb) |
919 | { |
920 | uint32_t tail; |
921 | |
922 | tail = ++q->q_sq_tail; |
923 | if (tail >= q->q_entries) |
924 | tail = 0; |
925 | q->q_sq_tail = tail; |
926 | nvme_write4(sc, q->q_sqtdbl, tail)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((q->q_sqtdbl )), ((tail)))); |
927 | } |
928 | |
929 | void |
930 | nvme_op_sq_leave(struct nvme_softc *sc, |
931 | struct nvme_queue *q, struct nvme_ccb *ccb) |
932 | { |
933 | nvme_op_sq_leave_locked(sc, q, ccb); |
934 | mtx_leave(&q->q_sq_mtx); |
935 | } |
936 | |
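 | /* |
 |  * Reserve the next submission queue slot (op_sq_enter), build the |
 |  * SQE in place with the ccb id as the command identifier, then ring |
 |  * the SQ tail doorbell on the way out (op_sq_leave). |
 |  */ |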
937 | void |
938 | nvme_q_submit(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb, |
939 | void (*fill)(struct nvme_softc *, struct nvme_ccb *, void *)) |
940 | { |
941 | struct nvme_sqe *sqe = NVME_DMA_KVA(q->q_sq_dmamem)((void *)(q->q_sq_dmamem)->ndm_kva); |
942 | u_int32_t tail; |
943 | |
944 | tail = sc->sc_ops->op_sq_enter(sc, q, ccb); |
945 | |
946 | sqe += tail; |
947 | |
948 | bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((q-> q_sq_dmamem)->ndm_map)), (sizeof(*sqe) * tail), (sizeof(*sqe )), (0x08)) |
949 | sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((q-> q_sq_dmamem)->ndm_map)), (sizeof(*sqe) * tail), (sizeof(*sqe )), (0x08)); |
950 | memset(sqe, 0, sizeof(*sqe))__builtin_memset((sqe), (0), (sizeof(*sqe))); |
951 | (*fill)(sc, ccb, sqe); |
952 | sqe->cid = ccb->ccb_id; |
953 | bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((q-> q_sq_dmamem)->ndm_map)), (sizeof(*sqe) * tail), (sizeof(*sqe )), (0x04)) |
954 | sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((q-> q_sq_dmamem)->ndm_map)), (sizeof(*sqe) * tail), (sizeof(*sqe )), (0x04)); |
955 | |
956 | sc->sc_ops->op_sq_leave(sc, q, ccb); |
957 | } |
958 | |
959 | struct nvme_poll_state { |
960 | struct nvme_sqe s; |
961 | struct nvme_cqe c; |
962 | }; |
963 | |
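 | /* |
 |  * Submit a command and spin for its completion.  The caller's fill |
 |  * and done handlers are parked in a nvme_poll_state; the CQ is |
 |  * polled until nvme_poll_done sets the phase bit in the saved CQE. |
 |  * Returns the CQE status flags with the phase bit cleared, so 0 |
 |  * means success. |
 |  */ |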
964 | int |
965 | nvme_poll(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb, |
966 | void (*fill)(struct nvme_softc *, struct nvme_ccb *, void *)) |
967 | { |
968 | struct nvme_poll_state state; |
969 | void (*done)(struct nvme_softc *, struct nvme_ccb *, struct nvme_cqe *); |
970 | void *cookie; |
971 | u_int16_t flags; |
972 | |
973 | memset(&state, 0, sizeof(state))__builtin_memset((&state), (0), (sizeof(state))); |
974 | (*fill)(sc, ccb, &state.s); |
975 | |
976 | done = ccb->ccb_done; |
977 | cookie = ccb->ccb_cookie; |
978 | |
979 | ccb->ccb_done = nvme_poll_done; |
980 | ccb->ccb_cookie = &state; |
981 | |
982 | nvme_q_submit(sc, q, ccb, nvme_poll_fill); |
983 | while (!ISSET(state.c.flags, htole16(NVME_CQE_PHASE))((state.c.flags) & (((__uint16_t)((1 << 0)))))) { |
984 | if (nvme_q_complete(sc, q) == 0) |
985 | delay(10)(*delay_func)(10); |
986 | |
987 | /* XXX no timeout? */ |
988 | } |
989 | |
990 | ccb->ccb_cookie = cookie; |
991 | done(sc, ccb, &state.c); |
992 | |
993 | flags = lemtoh16(&state.c.flags)((__uint16_t)(*(__uint16_t *)(&state.c.flags))); |
994 | |
995 | return (flags & ~NVME_CQE_PHASE(1 << 0)); |
996 | } |
997 | |
998 | void |
999 | nvme_poll_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot) |
1000 | { |
1001 | struct nvme_sqe *sqe = slot; |
1002 | struct nvme_poll_state *state = ccb->ccb_cookie; |
1003 | |
1004 | *sqe = state->s; |
1005 | } |
1006 | |
1007 | void |
1008 | nvme_poll_done(struct nvme_softc *sc, struct nvme_ccb *ccb, |
1009 | struct nvme_cqe *cqe) |
1010 | { |
1011 | struct nvme_poll_state *state = ccb->ccb_cookie; |
1012 | |
1013 | state->c = *cqe; |
1014 | SET(state->c.flags, htole16(NVME_CQE_PHASE))((state->c.flags) |= (((__uint16_t)((1 << 0))))); |
1015 | } |
1016 | |
1017 | void |
1018 | nvme_sqe_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot) |
1019 | { |
1020 | struct nvme_sqe *src = ccb->ccb_cookie; |
1021 | struct nvme_sqe *dst = slot; |
1022 | |
1023 | *dst = *src; |
1024 | } |
1025 | |
1026 | void |
1027 | nvme_empty_done(struct nvme_softc *sc, struct nvme_ccb *ccb, |
1028 | struct nvme_cqe *cqe) |
1029 | { |
1030 | } |
1031 | |
1032 | void |
1033 | nvme_op_cq_done(struct nvme_softc *sc, |
1034 | struct nvme_queue *q, struct nvme_ccb *ccb) |
1035 | { |
1036 | /* nop */ |
1037 | } |
1038 | |
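 | /* |
 |  * Drain the completion queue.  A CQE is new while its phase bit |
 |  * matches q_cq_phase; the expected phase flips each time the head |
 |  * wraps.  The CQ head doorbell is written once after the loop. |
 |  * Returns 1 if anything completed, 0 if not, -1 if the lock was busy. |
 |  */ |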
1039 | int |
1040 | nvme_q_complete(struct nvme_softc *sc, struct nvme_queue *q) |
1041 | { |
1042 | struct nvme_ccb *ccb; |
1043 | struct nvme_cqe *ring = NVME_DMA_KVA(q->q_cq_dmamem)((void *)(q->q_cq_dmamem)->ndm_kva), *cqe; |
1044 | u_int32_t head; |
1045 | u_int16_t flags; |
1046 | int rv = 0; |
1047 | |
1048 | if (!mtx_enter_try(&q->q_cq_mtx)) |
1049 | return (-1); |
1050 | |
1051 | head = q->q_cq_head; |
1052 | |
1053 | nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD0x02); |
1054 | for (;;) { |
1055 | cqe = &ring[head]; |
1056 | flags = lemtoh16(&cqe->flags)((__uint16_t)(*(__uint16_t *)(&cqe->flags))); |
1057 | if ((flags & NVME_CQE_PHASE(1 << 0)) != q->q_cq_phase) |
1058 | break; |
1059 | |
1060 | membar_consumer()do { __asm volatile("" ::: "memory"); } while (0); |
1061 | |
1062 | ccb = &sc->sc_ccbs[cqe->cid]; |
1063 | sc->sc_ops->op_cq_done(sc, q, ccb); |
1064 | ccb->ccb_done(sc, ccb, cqe); |
1065 | |
1066 | if (++head >= q->q_entries) { |
1067 | head = 0; |
1068 | q->q_cq_phase ^= NVME_CQE_PHASE(1 << 0); |
1069 | } |
1070 | |
1071 | rv = 1; |
1072 | } |
1073 | nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD0x01); |
1074 | |
1075 | if (rv) |
1076 | nvme_write4(sc, q->q_cqhdbl, q->q_cq_head = head)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((q->q_cqhdbl )), ((q->q_cq_head = head)))); |
1077 | mtx_leave(&q->q_cq_mtx); |
1078 | |
1079 | return (rv); |
1080 | } |
1081 | |
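 | /* |
 |  * IDENTIFY CONTROLLER (CNS 1): print the model, firmware and serial, |
 |  * derive the maximum transfer size from MDTS and the minimum page |
 |  * size, record the namespace count and clamp it to 1 for Apple |
 |  * controllers that expose a bogus second namespace. |
 |  */ |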
1082 | int |
1083 | nvme_identify(struct nvme_softc *sc, u_int mpsmin) |
1084 | { |
1085 | char sn[41], mn[81], fr[17]; |
1086 | struct nvm_identify_controller *identify; |
1087 | struct nvme_dmamem *mem; |
1088 | struct nvme_ccb *ccb; |
1089 | int rv = 1; |
1090 | |
1091 | ccb = nvme_ccb_get(sc); |
1092 | if (ccb == NULL((void *)0)) |
1093 | panic("nvme_identify: nvme_ccb_get returned NULL"); |
1094 | |
1095 | mem = nvme_dmamem_alloc(sc, sizeof(*identify)); |
1096 | if (mem == NULL((void *)0)) |
1097 | return (1); |
1098 | |
1099 | ccb->ccb_done = nvme_empty_done; |
1100 | ccb->ccb_cookie = mem; |
1101 | |
1102 | nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD0x01); |
1103 | rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_fill_identify); |
1104 | nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD0x02); |
1105 | |
1106 | nvme_ccb_put(sc, ccb); |
1107 | |
1108 | if (rv != 0) |
1109 | goto done; |
1110 | |
1111 | identify = NVME_DMA_KVA(mem)((void *)(mem)->ndm_kva); |
1112 | |
1113 | scsi_strvis(sn, identify->sn, sizeof(identify->sn)); |
1114 | scsi_strvis(mn, identify->mn, sizeof(identify->mn)); |
1115 | scsi_strvis(fr, identify->fr, sizeof(identify->fr)); |
1116 | |
1117 | printf("%s: %s, firmware %s, serial %s\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), mn, fr, sn); |
1118 | |
1119 | if (identify->mdts > 0) { |
1120 | sc->sc_mdts = (1 << identify->mdts) * (1 << mpsmin); |
1121 | if (sc->sc_mdts > NVME_MAXPHYS(128 * 1024)) |
1122 | sc->sc_mdts = NVME_MAXPHYS(128 * 1024); |
1123 | sc->sc_max_prpl = sc->sc_mdts / sc->sc_mps; |
1124 | } |
1125 | |
1126 | sc->sc_nn = lemtoh32(&identify->nn)((__uint32_t)(*(__uint32_t *)(&identify->nn))); |
1127 | |
1128 | /* |
1129 | * At least one Apple NVMe device presents a second, bogus disk that is |
1130 | * inaccessible, so cap targets at 1. |
1131 | * |
1132 | * sd1 at scsibus1 targ 2 lun 0: <NVMe, APPLE SSD AP0512, 16.1> [..] |
1133 | * sd1: 0MB, 4096 bytes/sector, 2 sectors |
1134 | */ |
1135 | if (sc->sc_nn > 1 && |
1136 | mn[0] == 'A' && mn[1] == 'P' && mn[2] == 'P' && mn[3] == 'L' && |
1137 | mn[4] == 'E') |
1138 | sc->sc_nn = 1; |
1139 | |
1140 | memcpy(&sc->sc_identify, identify, sizeof(sc->sc_identify))__builtin_memcpy((&sc->sc_identify), (identify), (sizeof (sc->sc_identify))); |
1141 | |
1142 | done: |
1143 | nvme_dmamem_free(sc, mem); |
1144 | |
1145 | return (rv); |
1146 | } |
1147 | |
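 | /* |
 |  * Create an IO queue pair: the completion queue first (physically |
 |  * contiguous, interrupts enabled), then the submission queue bound |
 |  * to it via the same queue id. |
 |  */ |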
1148 | int |
1149 | nvme_q_create(struct nvme_softc *sc, struct nvme_queue *q) |
1150 | { |
1151 | struct nvme_sqe_q sqe; |
1152 | struct nvme_ccb *ccb; |
1153 | int rv; |
1154 | |
1155 | ccb = scsi_io_get(&sc->sc_iopool, 0); |
1156 | KASSERT(ccb != NULL)((ccb != ((void *)0)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/ic/nvme.c" , 1156, "ccb != NULL")); |
1157 | |
1158 | ccb->ccb_done = nvme_empty_done; |
1159 | ccb->ccb_cookie = &sqe; |
1160 | |
1161 | memset(&sqe, 0, sizeof(sqe))__builtin_memset((&sqe), (0), (sizeof(sqe))); |
1162 | sqe.opcode = NVM_ADMIN_ADD_IOCQ0x05; |
1163 | htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_cq_dmamem))(*(__uint64_t *)(&sqe.prp1) = ((__uint64_t)(((u_int64_t)( q->q_cq_dmamem)->ndm_map->dm_segs[0].ds_addr)))); |
1164 | htolem16(&sqe.qsize, q->q_entries - 1)(*(__uint16_t *)(&sqe.qsize) = ((__uint16_t)(q->q_entries - 1))); |
1165 | htolem16(&sqe.qid, q->q_id)(*(__uint16_t *)(&sqe.qid) = ((__uint16_t)(q->q_id))); |
1166 | sqe.qflags = NVM_SQE_CQ_IEN(1 << 1) | NVM_SQE_Q_PC(1 << 0); |
1167 | |
1168 | rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill); |
1169 | if (rv != 0) |
1170 | goto fail; |
1171 | |
1172 | ccb->ccb_done = nvme_empty_done; |
1173 | ccb->ccb_cookie = &sqe; |
1174 | |
1175 | memset(&sqe, 0, sizeof(sqe))__builtin_memset((&sqe), (0), (sizeof(sqe))); |
1176 | sqe.opcode = NVM_ADMIN_ADD_IOSQ0x01; |
1177 | htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_sq_dmamem))(*(__uint64_t *)(&sqe.prp1) = ((__uint64_t)(((u_int64_t)( q->q_sq_dmamem)->ndm_map->dm_segs[0].ds_addr)))); |
1178 | htolem16(&sqe.qsize, q->q_entries - 1)(*(__uint16_t *)(&sqe.qsize) = ((__uint16_t)(q->q_entries - 1))); |
1179 | htolem16(&sqe.qid, q->q_id)(*(__uint16_t *)(&sqe.qid) = ((__uint16_t)(q->q_id))); |
1180 | htolem16(&sqe.cqid, q->q_id)(*(__uint16_t *)(&sqe.cqid) = ((__uint16_t)(q->q_id))); |
1181 | sqe.qflags = NVM_SQE_Q_PC(1 << 0); |
1182 | |
1183 | rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill); |
1184 | if (rv != 0) |
1185 | goto fail; |
1186 | |
1187 | fail: |
1188 | scsi_io_put(&sc->sc_iopool, ccb); |
1189 | return (rv); |
1190 | } |
1191 | |
1192 | int |
1193 | nvme_q_delete(struct nvme_softc *sc, struct nvme_queue *q) |
1194 | { |
1195 | struct nvme_sqe_q sqe; |
1196 | struct nvme_ccb *ccb; |
1197 | int rv; |
1198 | |
1199 | ccb = scsi_io_get(&sc->sc_iopool, 0); |
1200 | KASSERT(ccb != NULL)((ccb != ((void *)0)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/ic/nvme.c" , 1200, "ccb != NULL")); |
1201 | |
1202 | ccb->ccb_done = nvme_empty_done; |
1203 | ccb->ccb_cookie = &sqe; |
1204 | |
1205 | memset(&sqe, 0, sizeof(sqe))__builtin_memset((&sqe), (0), (sizeof(sqe))); |
1206 | sqe.opcode = NVM_ADMIN_DEL_IOSQ0x00; |
1207 | htolem16(&sqe.qid, q->q_id)(*(__uint16_t *)(&sqe.qid) = ((__uint16_t)(q->q_id))); |
1208 | |
1209 | rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill); |
1210 | if (rv != 0) |
1211 | goto fail; |
1212 | |
1213 | ccb->ccb_done = nvme_empty_done; |
1214 | ccb->ccb_cookie = &sqe; |
1215 | |
1216 | memset(&sqe, 0, sizeof(sqe))__builtin_memset((&sqe), (0), (sizeof(sqe))); |
1217 | sqe.opcode = NVM_ADMIN_DEL_IOCQ0x04; |
1218 | htolem16(&sqe.qid, q->q_id)(*(__uint16_t *)(&sqe.qid) = ((__uint16_t)(q->q_id))); |
1219 | |
1220 | rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill); |
1221 | if (rv != 0) |
1222 | goto fail; |
1223 | |
1224 | nvme_q_free(sc, q); |
1225 | |
1226 | fail: |
1227 | scsi_io_put(&sc->sc_iopool, ccb); |
1228 | return (rv); |
1229 | |
1230 | } |
1231 | |
1232 | void |
1233 | nvme_fill_identify(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot) |
1234 | { |
1235 | struct nvme_sqe *sqe = slot; |
1236 | struct nvme_dmamem *mem = ccb->ccb_cookie; |
1237 | |
1238 | sqe->opcode = NVM_ADMIN_IDENTIFY0x06; |
1239 | htolem64(&sqe->entry.prp[0], NVME_DMA_DVA(mem))(*(__uint64_t *)(&sqe->entry.prp[0]) = ((__uint64_t)(( (u_int64_t)(mem)->ndm_map->dm_segs[0].ds_addr)))); |
1240 | htolem32(&sqe->cdw10, 1)(*(__uint32_t *)(&sqe->cdw10) = ((__uint32_t)(1))); |
1241 | } |
1242 | |
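 | /* |
 |  * Allocate the ccb array plus one DMA region holding every ccb's PRP |
 |  * list.  Each ccb gets a dmamap covering sc_mdts with sc_max_prpl + 1 |
 |  * segments, since the first PRP entry is carried in the SQE itself. |
 |  */ |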
1243 | int |
1244 | nvme_ccbs_alloc(struct nvme_softc *sc, u_int nccbs) |
1245 | { |
1246 | struct nvme_ccb *ccb; |
1247 | bus_addr_t off; |
1248 | u_int64_t *prpl; |
1249 | u_int i; |
1250 | |
1251 | sc->sc_ccbs = mallocarray(nccbs, sizeof(*ccb), M_DEVBUF2, |
1252 | M_WAITOK0x0001 | M_CANFAIL0x0004); |
1253 | if (sc->sc_ccbs == NULL((void *)0)) |
1254 | return (1); |
1255 | |
1256 | sc->sc_ccb_prpls = nvme_dmamem_alloc(sc, |
1257 | sizeof(*prpl) * sc->sc_max_prpl * nccbs); |
1258 | |
1259 | prpl = NVME_DMA_KVA(sc->sc_ccb_prpls)((void *)(sc->sc_ccb_prpls)->ndm_kva); |
1260 | off = 0; |
1261 | |
1262 | for (i = 0; i < nccbs; i++) { |
1263 | ccb = &sc->sc_ccbs[i]; |
1264 | |
1265 | if (bus_dmamap_create(sc->sc_dmat, sc->sc_mdts,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (sc-> sc_mdts), (sc->sc_max_prpl + 1), (sc->sc_mps), (sc-> sc_mps), (0x0000 | 0x0002 | 0x2000), (&ccb->ccb_dmamap )) |
1266 | sc->sc_max_prpl + 1, /* we get a free prp in the sqe */(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (sc-> sc_mdts), (sc->sc_max_prpl + 1), (sc->sc_mps), (sc-> sc_mps), (0x0000 | 0x0002 | 0x2000), (&ccb->ccb_dmamap )) |
1267 | sc->sc_mps, sc->sc_mps,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (sc-> sc_mdts), (sc->sc_max_prpl + 1), (sc->sc_mps), (sc-> sc_mps), (0x0000 | 0x0002 | 0x2000), (&ccb->ccb_dmamap )) |
1268 | BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (sc-> sc_mdts), (sc->sc_max_prpl + 1), (sc->sc_mps), (sc-> sc_mps), (0x0000 | 0x0002 | 0x2000), (&ccb->ccb_dmamap )) |
1269 | &ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (sc-> sc_mdts), (sc->sc_max_prpl + 1), (sc->sc_mps), (sc-> sc_mps), (0x0000 | 0x0002 | 0x2000), (&ccb->ccb_dmamap )) != 0) |
1270 | goto free_maps; |
1271 | |
1272 | ccb->ccb_id = i; |
1273 | ccb->ccb_prpl = prpl; |
1274 | ccb->ccb_prpl_off = off; |
1275 | ccb->ccb_prpl_dva = NVME_DMA_DVA(sc->sc_ccb_prpls)((u_int64_t)(sc->sc_ccb_prpls)->ndm_map->dm_segs[0]. ds_addr) + off; |
1276 | |
1277 | SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_list, ccb, ccb_entry)do { (ccb)->ccb_entry.sqe_next = ((void *)0); *(&sc-> sc_ccb_list)->sqh_last = (ccb); (&sc->sc_ccb_list)-> sqh_last = &(ccb)->ccb_entry.sqe_next; } while (0); |
1278 | |
1279 | prpl += sc->sc_max_prpl; |
1280 | off += sizeof(*prpl) * sc->sc_max_prpl; |
1281 | } |
1282 | |
1283 | return (0); |
1284 | |
1285 | free_maps: |
1286 | nvme_ccbs_free(sc, nccbs); |
1287 | return (1); |
1288 | } |
1289 | |
1290 | void * |
1291 | nvme_ccb_get(void *cookie) |
1292 | { |
1293 | struct nvme_softc *sc = cookie; |
1294 | struct nvme_ccb *ccb; |
1295 | |
1296 | mtx_enter(&sc->sc_ccb_mtx); |
1297 | ccb = SIMPLEQ_FIRST(&sc->sc_ccb_list)((&sc->sc_ccb_list)->sqh_first); |
1298 | if (ccb != NULL((void *)0)) |
1299 | SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_list, ccb_entry)do { if (((&sc->sc_ccb_list)->sqh_first = (&sc-> sc_ccb_list)->sqh_first->ccb_entry.sqe_next) == ((void * )0)) (&sc->sc_ccb_list)->sqh_last = &(&sc-> sc_ccb_list)->sqh_first; } while (0); |
1300 | mtx_leave(&sc->sc_ccb_mtx); |
1301 | |
1302 | return (ccb); |
1303 | } |
1304 | |
1305 | void |
1306 | nvme_ccb_put(void *cookie, void *io) |
1307 | { |
1308 | struct nvme_softc *sc = cookie; |
1309 | struct nvme_ccb *ccb = io; |
1310 | |
1311 | mtx_enter(&sc->sc_ccb_mtx); |
1312 | SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_list, ccb, ccb_entry)do { if (((ccb)->ccb_entry.sqe_next = (&sc->sc_ccb_list )->sqh_first) == ((void *)0)) (&sc->sc_ccb_list)-> sqh_last = &(ccb)->ccb_entry.sqe_next; (&sc->sc_ccb_list )->sqh_first = (ccb); } while (0); |
1313 | mtx_leave(&sc->sc_ccb_mtx); |
1314 | } |
1315 | |
1316 | void |
1317 | nvme_ccbs_free(struct nvme_softc *sc, unsigned int nccbs) |
1318 | { |
1319 | struct nvme_ccb *ccb; |
1320 | |
1321 | while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_list)((&sc->sc_ccb_list)->sqh_first)) != NULL((void *)0)) { |
1322 | SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_list, ccb_entry)do { if (((&sc->sc_ccb_list)->sqh_first = (&sc-> sc_ccb_list)->sqh_first->ccb_entry.sqe_next) == ((void * )0)) (&sc->sc_ccb_list)->sqh_last = &(&sc-> sc_ccb_list)->sqh_first; } while (0); |
1323 | bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (ccb ->ccb_dmamap)); |
1324 | } |
1325 | |
1326 | nvme_dmamem_free(sc, sc->sc_ccb_prpls); |
1327 | free(sc->sc_ccbs, M_DEVBUF2, nccbs * sizeof(*ccb)); |
1328 | } |
1329 | |
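 | /* |
 |  * Allocate DMA memory for the submission and completion rings, |
 |  * derive this queue's doorbell offsets from its id and the doorbell |
 |  * stride, and start with the expected completion phase bit set. |
 |  */ |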
1330 | struct nvme_queue * |
1331 | nvme_q_alloc(struct nvme_softc *sc, u_int16_t id, u_int entries, u_int dstrd) |
1332 | { |
1333 | struct nvme_queue *q; |
1334 | |
1335 | q = malloc(sizeof(*q), M_DEVBUF2, M_WAITOK0x0001 | M_CANFAIL0x0004); |
1336 | if (q == NULL((void *)0)) |
1337 | return (NULL((void *)0)); |
1338 | |
1339 | q->q_sq_dmamem = nvme_dmamem_alloc(sc, |
1340 | sizeof(struct nvme_sqe) * entries); |
1341 | if (q->q_sq_dmamem == NULL((void *)0)) |
1342 | goto free; |
1343 | |
1344 | q->q_cq_dmamem = nvme_dmamem_alloc(sc, |
1345 | sizeof(struct nvme_cqe) * entries); |
1346 | if (q->q_cq_dmamem == NULL)
1347 | goto free_sq; |
1348 | |
1349 | memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
1350 | memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));
1351 | 
1352 | mtx_init(&q->q_sq_mtx, IPL_BIO);
1353 | mtx_init(&q->q_cq_mtx, IPL_BIO);
1354 | q->q_sqtdbl = NVME_SQTDBL(id, dstrd);
1355 | q->q_cqhdbl = NVME_CQHDBL(id, dstrd);
1356 | |
1357 | q->q_id = id; |
1358 | q->q_entries = entries; |
1359 | q->q_sq_tail = 0; |
1360 | q->q_cq_head = 0; |
1361 | q->q_cq_phase = NVME_CQE_PHASE;
1362 | |
1363 | if (sc->sc_ops->op_q_alloc != NULL) {
1364 | if (sc->sc_ops->op_q_alloc(sc, q) != 0) |
1365 | goto free_cq; |
1366 | } |
1367 | |
1368 | nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_PREWRITE);
1369 | nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);
1370 | |
1371 | return (q); |
1372 | |
1373 | free_cq: |
1374 | nvme_dmamem_free(sc, q->q_cq_dmamem); |
1375 | free_sq: |
1376 | nvme_dmamem_free(sc, q->q_sq_dmamem); |
1377 | free: |
1378 | free(q, M_DEVBUF, sizeof *q);
1379 | 
1380 | return (NULL);
1381 | } |
1382 | |
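/*
 * Return an existing queue pair to its freshly-allocated state: zero the
 * SQ/CQ memory, rewind the tail and head indexes and restart the phase tag.
 */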
1383 | int |
1384 | nvme_q_reset(struct nvme_softc *sc, struct nvme_queue *q) |
1385 | { |
1386 | memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
1387 | memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));
1388 | |
1389 | q->q_sq_tail = 0; |
1390 | q->q_cq_head = 0; |
1391 | q->q_cq_phase = NVME_CQE_PHASE;
1392 | |
1393 | nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_PREWRITE);
1394 | nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);
1395 | |
1396 | return (0); |
1397 | } |
1398 | |
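/*
 * Release a queue pair created by nvme_q_alloc(): sync and free the CQ and
 * SQ DMA memory, invoking the bus-specific op_q_free hook when one is set.
 */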
1399 | void |
1400 | nvme_q_free(struct nvme_softc *sc, struct nvme_queue *q) |
1401 | { |
1402 | nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
1403 | nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_POSTWRITE);
1404 | |
1405 | if (sc->sc_ops->op_q_alloc != NULL)
1406 | sc->sc_ops->op_q_free(sc, q); |
1407 | |
1408 | nvme_dmamem_free(sc, q->q_cq_dmamem); |
1409 | nvme_dmamem_free(sc, q->q_sq_dmamem); |
1410 | free(q, M_DEVBUF, sizeof *q);
1411 | } |
1412 | |
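/*
 * Interrupt handlers: nvme_intr() drains the I/O and admin completion
 * queues; nvme_intr_intx() additionally masks the controller interrupt
 * via INTMS while the handler runs and unmasks it via INTMC afterwards.
 */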
1413 | int |
1414 | nvme_intr(void *xsc) |
1415 | { |
1416 | struct nvme_softc *sc = xsc; |
1417 | int rv = 0; |
1418 | |
1419 | if (nvme_q_complete(sc, sc->sc_q)) |
1420 | rv = 1; |
1421 | if (nvme_q_complete(sc, sc->sc_admin_q)) |
1422 | rv = 1; |
1423 | |
1424 | return (rv); |
1425 | } |
1426 | |
1427 | int |
1428 | nvme_intr_intx(void *xsc) |
1429 | { |
1430 | struct nvme_softc *sc = xsc; |
1431 | int rv; |
1432 | |
1433 | nvme_write4(sc, NVME_INTMS, 1);
1434 | rv = nvme_intr(sc);
1435 | nvme_write4(sc, NVME_INTMC, 1);
1436 | |
1437 | return (rv); |
1438 | } |
1439 | |
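/*
 * Allocate a single-segment chunk of DMA-safe memory, map it into kernel
 * virtual address space and load the dmamap, so that both the CPU address
 * (ndm_kva) and the device address (the loaded map) are available.
 */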
1440 | struct nvme_dmamem * |
1441 | nvme_dmamem_alloc(struct nvme_softc *sc, size_t size) |
1442 | { |
1443 | struct nvme_dmamem *ndm; |
1444 | int nsegs; |
1445 | |
1446 | ndm = malloc(sizeof(*ndm), M_DEVBUF, M_WAITOK | M_ZERO);
1447 | if (ndm == NULL)
1448 | return (NULL);
1449 | |
1450 | ndm->ndm_size = size; |
1451 | |
1452 | if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1453 | BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
1454 | &ndm->ndm_map) != 0)
1455 | goto ndmfree; |
1456 | |
1457 | if (bus_dmamem_alloc(sc->sc_dmat, size, sc->sc_mps, 0, &ndm->ndm_seg,
1458 | 1, &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
1459 | goto destroy; |
1460 | |
1461 | if (bus_dmamem_map(sc->sc_dmat, &ndm->ndm_seg, nsegs, size,
1462 | &ndm->ndm_kva, BUS_DMA_WAITOK) != 0)
1463 | goto free; |
1464 | |
1465 | if (bus_dmamap_load(sc->sc_dmat, ndm->ndm_map, ndm->ndm_kva, size,
1466 | NULL, BUS_DMA_WAITOK) != 0)
1467 | goto unmap; |
1468 | |
1469 | return (ndm); |
1470 | |
1471 | unmap: |
1472 | bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, size);
1473 | free:
1474 | bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
1475 | destroy:
1476 | bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
1477 | ndmfree:
1478 | free(ndm, M_DEVBUF, sizeof *ndm);
1479 | 
1480 | return (NULL);
1481 | } |
1482 | |
1483 | void |
1484 | nvme_dmamem_sync(struct nvme_softc *sc, struct nvme_dmamem *mem, int ops) |
1485 | { |
1486 | bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(mem),
1487 | 0, NVME_DMA_LEN(mem), ops);
1488 | } |
1489 | |
1490 | void |
1491 | nvme_dmamem_free(struct nvme_softc *sc, struct nvme_dmamem *ndm) |
1492 | { |
1493 | bus_dmamap_unload(sc->sc_dmat, ndm->ndm_map);
1494 | bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, ndm->ndm_size);
1495 | bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
1496 | bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
1497 | free(ndm, M_DEVBUF, sizeof *ndm);
1498 | } |
1499 | |
1500 | #ifdef HIBERNATE
1501 | |
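/*
 * Submit one admin command on the admin queue and busy-wait for its
 * completion by polling the phase bit, rather than relying on interrupts;
 * used only from the hibernate path below.
 */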
1502 | int |
1503 | nvme_hibernate_admin_cmd(struct nvme_softc *sc, struct nvme_sqe *sqe, |
1504 | struct nvme_cqe *cqe, int cid) |
1505 | { |
1506 | struct nvme_sqe *asqe = NVME_DMA_KVA(sc->sc_admin_q->q_sq_dmamem);
1507 | struct nvme_cqe *acqe = NVME_DMA_KVA(sc->sc_admin_q->q_cq_dmamem);
1508 | struct nvme_queue *q = sc->sc_admin_q; |
1509 | int tail; |
1510 | u_int16_t flags; |
1511 | |
1512 | /* submit command */ |
1513 | tail = sc->sc_ops->op_sq_enter_locked(sc, q, /* XXX ccb */ NULL);
1514 | |
1515 | asqe += tail; |
1516 | bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
1517 | sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_POSTWRITE);
1518 | *asqe = *sqe;
1519 | asqe->cid = cid;
1520 | bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
1521 | sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_PREWRITE);
1522 | 
1523 | sc->sc_ops->op_sq_leave_locked(sc, q, /* XXX ccb */ NULL);
1524 | |
1525 | /* wait for completion */ |
1526 | acqe += q->q_cq_head; |
1527 | for (;;) { |
1528 | nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
1529 | flags = lemtoh16(&acqe->flags);
1530 | if ((flags & NVME_CQE_PHASE) == q->q_cq_phase)
1531 | break;
1532 | 
1533 | delay(10);
1534 | } |
1535 | |
1536 | if (++q->q_cq_head >= q->q_entries) { |
1537 | q->q_cq_head = 0; |
1538 | q->q_cq_phase ^= NVME_CQE_PHASE;
1539 | }
1540 | nvme_write4(sc, q->q_cqhdbl, q->q_cq_head);
1541 | if ((NVME_CQE_SC(flags) != NVME_CQE_SC_SUCCESS) || (acqe->cid != cid))
1542 | return (EIO);
1543 | |
1544 | return (0); |
1545 | } |
1546 | |
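/*
 * Hibernate write path.  HIB_INIT locates the nvme softc and namespace
 * behind the hibernate device and creates a dedicated I/O queue pair with
 * polled admin commands; HIB_W issues a polled NVM write, building a PRP
 * list in the scratch page when the transfer spans more than two pages.
 */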
1547 | int |
1548 | nvme_hibernate_io(dev_t dev, daddr_t blkno, vaddr_t addr, size_t size, |
1549 | int op, void *page) |
1550 | { |
1551 | struct nvme_hibernate_page { |
1552 | u_int64_t prpl[MAXPHYS / PAGE_SIZE];
1553 | |
1554 | struct nvme_softc *sc; |
1555 | int nsid; |
1556 | int sq_tail; |
1557 | int cq_head; |
1558 | int cqe_phase; |
1559 | |
1560 | daddr_t poffset; |
1561 | size_t psize; |
1562 | } *my = page; |
1563 | struct nvme_sqe_io *isqe; |
1564 | struct nvme_cqe *icqe; |
1565 | paddr_t data_phys, page_phys; |
1566 | u_int64_t data_bus_phys, page_bus_phys; |
1567 | u_int16_t flags; |
1568 | int i; |
1569 | int error; |
1570 | |
1571 | if (op == HIB_INIT) {
1572 | struct device *disk; |
1573 | struct device *scsibus; |
1574 | extern struct cfdriver sd_cd; |
1575 | struct scsi_link *link; |
1576 | struct scsibus_softc *bus_sc; |
1577 | struct nvme_sqe_q qsqe; |
1578 | struct nvme_cqe qcqe; |
1579 | |
1580 | /* find nvme softc */ |
1581 | disk = disk_lookup(&sd_cd, DISKUNIT(dev));
1582 | scsibus = disk->dv_parent; |
1583 | my->sc = (struct nvme_softc *)disk->dv_parent->dv_parent; |
1584 | |
1585 | /* find scsi_link, which tells us the target */ |
1586 | my->nsid = 0; |
1587 | bus_sc = (struct scsibus_softc *)scsibus; |
1588 | SLIST_FOREACH(link, &bus_sc->sc_link_list, bus_list) {
1589 | if (link->device_softc == disk) { |
1590 | my->nsid = link->target; |
1591 | break; |
1592 | } |
1593 | } |
1594 | if (my->nsid == 0) |
1595 | return (EIO);
1596 | |
1597 | my->poffset = blkno; |
1598 | my->psize = size; |
1599 | |
1600 | memset(NVME_DMA_KVA(my->sc->sc_hib_q->q_cq_dmamem), 0,
1601 | my->sc->sc_hib_q->q_entries * sizeof(struct nvme_cqe));
1602 | memset(NVME_DMA_KVA(my->sc->sc_hib_q->q_sq_dmamem), 0,
1603 | my->sc->sc_hib_q->q_entries * sizeof(struct nvme_sqe));
1604 | |
1605 | my->sq_tail = 0; |
1606 | my->cq_head = 0; |
1607 | my->cqe_phase = NVME_CQE_PHASE;
1608 | 
1609 | pmap_extract(pmap_kernel(), (vaddr_t)page, &page_phys);
1610 | |
1611 | memset(&qsqe, 0, sizeof(qsqe));
1612 | qsqe.opcode = NVM_ADMIN_ADD_IOCQ;
1613 | htolem64(&qsqe.prp1,
1614 | NVME_DMA_DVA(my->sc->sc_hib_q->q_cq_dmamem));
1615 | htolem16(&qsqe.qsize, my->sc->sc_hib_q->q_entries - 1);
1616 | htolem16(&qsqe.qid, my->sc->sc_hib_q->q_id);
1617 | qsqe.qflags = NVM_SQE_CQ_IEN | NVM_SQE_Q_PC;
1618 | if (nvme_hibernate_admin_cmd(my->sc, (struct nvme_sqe *)&qsqe,
1619 | &qcqe, 1) != 0)
1620 | return (EIO);
1621 | |
1622 | memset(&qsqe, 0, sizeof(qsqe));
1623 | qsqe.opcode = NVM_ADMIN_ADD_IOSQ;
1624 | htolem64(&qsqe.prp1,
1625 | NVME_DMA_DVA(my->sc->sc_hib_q->q_sq_dmamem));
1626 | htolem16(&qsqe.qsize, my->sc->sc_hib_q->q_entries - 1);
1627 | htolem16(&qsqe.qid, my->sc->sc_hib_q->q_id);
1628 | htolem16(&qsqe.cqid, my->sc->sc_hib_q->q_id);
1629 | qsqe.qflags = NVM_SQE_Q_PC;
1630 | if (nvme_hibernate_admin_cmd(my->sc, (struct nvme_sqe *)&qsqe,
1631 | &qcqe, 2) != 0)
1632 | return (EIO);
1633 | |
1634 | return (0); |
1635 | } |
1636 | |
1637 | if (op != HIB_W)
1638 | return (0); |
1639 | |
1640 | isqe = NVME_DMA_KVA(my->sc->sc_hib_q->q_sq_dmamem);
1641 | isqe += my->sq_tail; |
1642 | if (++my->sq_tail == my->sc->sc_hib_q->q_entries) |
1643 | my->sq_tail = 0; |
1644 | |
1645 | memset(isqe, 0, sizeof(*isqe));
1646 | isqe->opcode = NVM_CMD_WRITE;
1647 | htolem32(&isqe->nsid, my->nsid);
1648 | |
1649 | pmap_extract(pmap_kernel(), addr, &data_phys);
1650 | data_bus_phys = data_phys;
1651 | htolem64(&isqe->entry.prp[0], data_bus_phys);
1652 | if ((size > my->sc->sc_mps) && (size <= my->sc->sc_mps * 2)) {
1653 | htolem64(&isqe->entry.prp[1], data_bus_phys + my->sc->sc_mps);
1654 | } else if (size > my->sc->sc_mps * 2) {
1655 | pmap_extract(pmap_kernel(), (vaddr_t)page, &page_phys);
1656 | page_bus_phys = page_phys;
1657 | htolem64(&isqe->entry.prp[1], page_bus_phys +
1658 | offsetof(struct nvme_hibernate_page, prpl));
1659 | for (i = 1; i < (size / my->sc->sc_mps); i++) {
1660 | htolem64(&my->prpl[i - 1], data_bus_phys +
1661 | (i * my->sc->sc_mps));
1662 | } |
1663 | } |
1664 | |
1665 | isqe->slba = blkno + my->poffset; |
1666 | isqe->nlb = (size / DEV_BSIZE) - 1;
1667 | isqe->cid = blkno % 0xffff; |
1668 | |
1669 | nvme_write4(my->sc, NVME_SQTDBL(NVME_HIB_Q, my->sc->sc_dstrd),
1670 | my->sq_tail);
1671 | nvme_barrier(my->sc, NVME_SQTDBL(NVME_HIB_Q, my->sc->sc_dstrd), 4,
1672 | BUS_SPACE_BARRIER_WRITE);
1673 | |
1674 | error = 0; |
1675 | |
1676 | icqe = NVME_DMA_KVA(my->sc->sc_hib_q->q_cq_dmamem);
1677 | icqe += my->cq_head; |
1678 | |
1679 | nvme_dmamem_sync(my->sc, my->sc->sc_hib_q->q_cq_dmamem, |
1680 | BUS_DMASYNC_POSTREAD);
1681 | for (;;) { |
1682 | flags = lemtoh16(&icqe->flags);
1683 | if ((flags & NVME_CQE_PHASE) == my->cqe_phase) {
1684 | if ((NVME_CQE_SC(flags) != NVME_CQE_SC_SUCCESS) ||
1685 | (icqe->cid != blkno % 0xffff))
1686 | error = EIO;
1687 | |
1688 | break; |
1689 | } |
1690 | |
1691 | delay(1);
1692 | nvme_dmamem_sync(my->sc, my->sc->sc_hib_q->q_cq_dmamem,
1693 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_POSTREAD);
1694 | } |
1695 | nvme_dmamem_sync(my->sc, my->sc->sc_hib_q->q_cq_dmamem, |
1696 | BUS_DMASYNC_PREREAD);
1697 | |
1698 | if (++my->cq_head == my->sc->sc_hib_q->q_entries) { |
1699 | my->cq_head = 0; |
1700 | my->cqe_phase ^= NVME_CQE_PHASE;
1701 | } |
1702 | |
1703 | nvme_write4(my->sc, NVME_CQHDBL(NVME_HIB_Q, my->sc->sc_dstrd),
1704 | my->cq_head);
1705 | nvme_barrier(my->sc, NVME_CQHDBL(NVME_HIB_Q, my->sc->sc_dstrd), 4,
1706 | BUS_SPACE_BARRIER_WRITE);
1707 | |
1708 | return (error); |
1709 | } |
1710 | |
1711 | #endif |