Bug Summary

File: dev/pci/if_bwfm_pci.c
Warning: line 821, column 4
3rd function call argument is an uninitialized value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name if_bwfm_pci.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I 
/usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/if_bwfm_pci.c
1/* $OpenBSD: if_bwfm_pci.c,v 1.66 2022/01/01 18:52:26 patrick Exp $ */
2/*
3 * Copyright (c) 2010-2016 Broadcom Corporation
4 * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include "bpfilter.h"
20
21#include <sys/param.h>
22#include <sys/systm.h>
23#include <sys/buf.h>
24#include <sys/kernel.h>
25#include <sys/malloc.h>
26#include <sys/device.h>
27#include <sys/queue.h>
28#include <sys/socket.h>
29
30#if defined(__HAVE_FDT)
31#include <machine/fdt.h>
32#include <dev/ofw/openfirm.h>
33#endif
34
35#if NBPFILTER1 > 0
36#include <net/bpf.h>
37#endif
38#include <net/if.h>
39#include <net/if_dl.h>
40#include <net/if_media.h>
41
42#include <netinet/in.h>
43#include <netinet/if_ether.h>
44
45#include <net80211/ieee80211_var.h>
46
47#include <machine/bus.h>
48
49#include <dev/pci/pcireg.h>
50#include <dev/pci/pcivar.h>
51#include <dev/pci/pcidevs.h>
52
53#include <dev/ic/bwfmvar.h>
54#include <dev/ic/bwfmreg.h>
55#include <dev/pci/if_bwfm_pci.h>
56
57#define BWFM_DMA_D2H_SCRATCH_BUF_LEN8 8
58#define BWFM_DMA_D2H_RINGUPD_BUF_LEN1024 1024
59#define BWFM_DMA_H2D_IOCTL_BUF_LEN1518 ETHER_MAX_LEN1518
60
61#define BWFM_NUM_TX_MSGRINGS2 2
62#define BWFM_NUM_RX_MSGRINGS3 3
63
64#define BWFM_NUM_IOCTL_PKTIDS8 8
65#define BWFM_NUM_TX_PKTIDS2048 2048
66#define BWFM_NUM_RX_PKTIDS1024 1024
67
68#define BWFM_NUM_IOCTL_DESCS1 1
69#define BWFM_NUM_TX_DESCS1 1
70#define BWFM_NUM_RX_DESCS1 1
71
72#ifdef BWFM_DEBUG
73#define DPRINTF(x)do { ; } while (0) do { if (bwfm_debug > 0) printf x; } while (0)
74#define DPRINTFN(n, x)do { ; } while (0) do { if (bwfm_debug >= (n)) printf x; } while (0)
75static int bwfm_debug = 2;
76#else
77#define DPRINTF(x)do { ; } while (0) do { ; } while (0)
78#define DPRINTFN(n, x)do { ; } while (0) do { ; } while (0)
79#endif
80
81#define DEVNAME(sc)((sc)->sc_sc.sc_dev.dv_xname) ((sc)->sc_sc.sc_dev.dv_xname)
82
83enum ring_status {
84 RING_CLOSED,
85 RING_CLOSING,
86 RING_OPEN,
87 RING_OPENING,
88};
89
90struct bwfm_pci_msgring {
91 uint32_t w_idx_addr;
92 uint32_t r_idx_addr;
93 uint32_t w_ptr;
94 uint32_t r_ptr;
95 int nitem;
96 int itemsz;
97 enum ring_status status;
98 struct bwfm_pci_dmamem *ring;
99 struct mbuf *m;
100
101 int fifo;
102 uint8_t mac[ETHER_ADDR_LEN6];
103};
104
105struct bwfm_pci_ioctl {
106 uint16_t transid;
107 uint16_t retlen;
108 int16_t status;
109 struct mbuf *m;
110 TAILQ_ENTRY(bwfm_pci_ioctl)struct { struct bwfm_pci_ioctl *tqe_next; struct bwfm_pci_ioctl
**tqe_prev; }
next;
111};
112
113struct bwfm_pci_buf {
114 bus_dmamap_t bb_map;
115 struct mbuf *bb_m;
116};
117
118struct bwfm_pci_pkts {
119 struct bwfm_pci_buf *pkts;
120 uint32_t npkt;
121 int last;
122};
123
124struct bwfm_pci_softc {
125 struct bwfm_softc sc_sc;
126 pci_chipset_tag_t sc_pc;
127 pcitag_t sc_tag;
128 pcireg_t sc_id;
129 void *sc_ih;
130
131 int sc_initialized;
132
133 bus_space_tag_t sc_reg_iot;
134 bus_space_handle_t sc_reg_ioh;
135 bus_size_t sc_reg_ios;
136
137 bus_space_tag_t sc_tcm_iot;
138 bus_space_handle_t sc_tcm_ioh;
139 bus_size_t sc_tcm_ios;
140
141 bus_dma_tag_t sc_dmat;
142
143 uint32_t sc_shared_address;
144 uint32_t sc_shared_flags;
145 uint8_t sc_shared_version;
146
147 uint8_t sc_dma_idx_sz;
148 struct bwfm_pci_dmamem *sc_dma_idx_buf;
149 size_t sc_dma_idx_bufsz;
150
151 uint16_t sc_max_rxbufpost;
152 uint32_t sc_rx_dataoffset;
153 uint32_t sc_htod_mb_data_addr;
154 uint32_t sc_dtoh_mb_data_addr;
155 uint32_t sc_ring_info_addr;
156
157 uint32_t sc_console_base_addr;
158 uint32_t sc_console_buf_addr;
159 uint32_t sc_console_buf_size;
160 uint32_t sc_console_readidx;
161
162 uint16_t sc_max_flowrings;
163 uint16_t sc_max_submissionrings;
164 uint16_t sc_max_completionrings;
165
166 struct bwfm_pci_msgring sc_ctrl_submit;
167 struct bwfm_pci_msgring sc_rxpost_submit;
168 struct bwfm_pci_msgring sc_ctrl_complete;
169 struct bwfm_pci_msgring sc_tx_complete;
170 struct bwfm_pci_msgring sc_rx_complete;
171 struct bwfm_pci_msgring *sc_flowrings;
172
173 struct bwfm_pci_dmamem *sc_scratch_buf;
174 struct bwfm_pci_dmamem *sc_ringupd_buf;
175
176 TAILQ_HEAD(, bwfm_pci_ioctl)struct { struct bwfm_pci_ioctl *tqh_first; struct bwfm_pci_ioctl
**tqh_last; }
sc_ioctlq;
177 uint16_t sc_ioctl_transid;
178
179 struct if_rxring sc_ioctl_ring;
180 struct if_rxring sc_event_ring;
181 struct if_rxring sc_rxbuf_ring;
182
183 struct bwfm_pci_pkts sc_ioctl_pkts;
184 struct bwfm_pci_pkts sc_rx_pkts;
185 struct bwfm_pci_pkts sc_tx_pkts;
186 int sc_tx_pkts_full;
187
188 uint8_t sc_mbdata_done;
189 uint8_t sc_pcireg64;
190};
191
192struct bwfm_pci_dmamem {
193 bus_dmamap_t bdm_map;
194 bus_dma_segment_t bdm_seg;
195 size_t bdm_size;
196 caddr_t bdm_kva;
197};
198
199#define BWFM_PCI_DMA_MAP(_bdm)((_bdm)->bdm_map) ((_bdm)->bdm_map)
200#define BWFM_PCI_DMA_LEN(_bdm)((_bdm)->bdm_size) ((_bdm)->bdm_size)
201#define BWFM_PCI_DMA_DVA(_bdm)((uint64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr) ((uint64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr)
202#define BWFM_PCI_DMA_KVA(_bdm)((void *)(_bdm)->bdm_kva) ((void *)(_bdm)->bdm_kva)
203
204int bwfm_pci_match(struct device *, void *, void *);
205void bwfm_pci_attach(struct device *, struct device *, void *);
206int bwfm_pci_detach(struct device *, int);
207int bwfm_pci_activate(struct device *, int);
208void bwfm_pci_cleanup(struct bwfm_pci_softc *);
209
210#if defined(__HAVE_FDT)
211int bwfm_pci_read_otp(struct bwfm_pci_softc *);
212void bwfm_pci_process_otp_tuple(struct bwfm_pci_softc *, uint8_t,
213 uint8_t, uint8_t *);
214#endif
215
216int bwfm_pci_intr(void *);
217void bwfm_pci_intr_enable(struct bwfm_pci_softc *);
218void bwfm_pci_intr_disable(struct bwfm_pci_softc *);
219uint32_t bwfm_pci_intr_status(struct bwfm_pci_softc *);
220void bwfm_pci_intr_ack(struct bwfm_pci_softc *, uint32_t);
221uint32_t bwfm_pci_intmask(struct bwfm_pci_softc *);
222void bwfm_pci_hostready(struct bwfm_pci_softc *);
223int bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
224 size_t, const u_char *, size_t);
225void bwfm_pci_select_core(struct bwfm_pci_softc *, int );
226
227struct bwfm_pci_dmamem *
228 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
229 bus_size_t);
230void bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
231int bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
232 struct bwfm_pci_pkts *);
233int bwfm_pci_pktid_new(struct bwfm_pci_softc *,
234 struct bwfm_pci_pkts *, struct mbuf *,
235 uint32_t *, paddr_t *);
236struct mbuf * bwfm_pci_pktid_free(struct bwfm_pci_softc *,
237 struct bwfm_pci_pkts *, uint32_t);
238void bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
239 struct if_rxring *, uint32_t);
240void bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
241void bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
242int bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
243 int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
244int bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
245 int, size_t);
246
247void bwfm_pci_ring_bell(struct bwfm_pci_softc *,
248 struct bwfm_pci_msgring *);
249void bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
250 struct bwfm_pci_msgring *);
251void bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
252 struct bwfm_pci_msgring *);
253void bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
254 struct bwfm_pci_msgring *);
255void bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
256 struct bwfm_pci_msgring *);
257void * bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
258 struct bwfm_pci_msgring *);
259void * bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
260 struct bwfm_pci_msgring *, int, int *);
261void * bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
262 struct bwfm_pci_msgring *, int *);
263void bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
264 struct bwfm_pci_msgring *, int);
265void bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
266 struct bwfm_pci_msgring *);
267void bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
268 struct bwfm_pci_msgring *, int);
269
270void bwfm_pci_ring_rx(struct bwfm_pci_softc *,
271 struct bwfm_pci_msgring *, struct mbuf_list *);
272void bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *,
273 struct mbuf_list *);
274
275uint32_t bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
276void bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
277 uint32_t);
278int bwfm_pci_buscore_prepare(struct bwfm_softc *);
279int bwfm_pci_buscore_reset(struct bwfm_softc *);
280void bwfm_pci_buscore_activate(struct bwfm_softc *, uint32_t);
281
282int bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
283 struct mbuf *);
284void bwfm_pci_flowring_create(struct bwfm_pci_softc *,
285 struct mbuf *);
286void bwfm_pci_flowring_create_cb(struct bwfm_softc *, void *);
287void bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);
288void bwfm_pci_flowring_delete_cb(struct bwfm_softc *, void *);
289
290int bwfm_pci_preinit(struct bwfm_softc *);
291void bwfm_pci_stop(struct bwfm_softc *);
292int bwfm_pci_txcheck(struct bwfm_softc *);
293int bwfm_pci_txdata(struct bwfm_softc *, struct mbuf *);
294
295int bwfm_pci_send_mb_data(struct bwfm_pci_softc *, uint32_t);
296void bwfm_pci_handle_mb_data(struct bwfm_pci_softc *);
297
298#ifdef BWFM_DEBUG
299void bwfm_pci_debug_console(struct bwfm_pci_softc *);
300#endif
301
302int bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
303 int, char *, size_t *);
304int bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
305 int, char *, size_t);
306void bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *,
307 struct msgbuf_ioctl_resp_hdr *);
308
309struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
310 .bc_read = bwfm_pci_buscore_read,
311 .bc_write = bwfm_pci_buscore_write,
312 .bc_prepare = bwfm_pci_buscore_prepare,
313 .bc_reset = bwfm_pci_buscore_reset,
314 .bc_setup = NULL((void *)0),
315 .bc_activate = bwfm_pci_buscore_activate,
316};
317
318struct bwfm_bus_ops bwfm_pci_bus_ops = {
319 .bs_preinit = bwfm_pci_preinit,
320 .bs_stop = bwfm_pci_stop,
321 .bs_txcheck = bwfm_pci_txcheck,
322 .bs_txdata = bwfm_pci_txdata,
323 .bs_txctl = NULL((void *)0),
324};
325
326struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
327 .proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
328 .proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
329 .proto_rx = NULL((void *)0),
330 .proto_rxctl = NULL((void *)0),
331};
332
333struct cfattach bwfm_pci_ca = {
334 sizeof(struct bwfm_pci_softc),
335 bwfm_pci_match,
336 bwfm_pci_attach,
337 bwfm_pci_detach,
338 bwfm_pci_activate,
339};
340
341static const struct pci_matchid bwfm_pci_devices[] = {
342 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM43500x43a3 },
343 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM43560x43ec },
344 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM436020x43ba },
345 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM43710x440d },
346 { PCI_VENDOR_BROADCOM0x14e4, PCI_PRODUCT_BROADCOM_BCM43780x4425 },
347};
348
349int
350bwfm_pci_match(struct device *parent, void *match, void *aux)
351{
352 return (pci_matchbyid(aux, bwfm_pci_devices,
353 nitems(bwfm_pci_devices)(sizeof((bwfm_pci_devices)) / sizeof((bwfm_pci_devices)[0]))));
354}
355
356void
357bwfm_pci_attach(struct device *parent, struct device *self, void *aux)
358{
359 struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
360 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
361 const char *intrstr;
362 pci_intr_handle_t ih;
363
364 if (pci_mapreg_map(pa, PCI_MAPREG_START0x10 + 0x08,
365 PCI_MAPREG_MEM_TYPE_64BIT0x00000004, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
366 NULL((void *)0), &sc->sc_tcm_ios, 0)) {
367 printf(": can't map bar1\n");
368 return;
369 }
370
371 if (pci_mapreg_map(pa, PCI_MAPREG_START0x10 + 0x00,
372 PCI_MAPREG_MEM_TYPE_64BIT0x00000004, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
373 NULL((void *)0), &sc->sc_reg_ios, 0)) {
374 printf(": can't map bar0\n");
375 goto bar1;
376 }
377
378 sc->sc_pc = pa->pa_pc;
379 sc->sc_tag = pa->pa_tag;
380 sc->sc_id = pa->pa_id;
381 sc->sc_dmat = pa->pa_dmat;
382
383 /* Map and establish the interrupt. */
384 if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
385 printf(": couldn't map interrupt\n");
386 goto bar0;
387 }
388 intrstr = pci_intr_string(pa->pa_pc, ih);
389
390 sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET0x7,
391 bwfm_pci_intr, sc, DEVNAME(sc)((sc)->sc_sc.sc_dev.dv_xname));
392 if (sc->sc_ih == NULL((void *)0)) {
393 printf(": couldn't establish interrupt");
394 if (intrstr != NULL((void *)0))
395 printf(" at %s", intrstr);
396 printf("\n");
397 goto bar1;
398 }
399 printf(": %s\n", intrstr);
400
401#if defined(__HAVE_FDT)
402 sc->sc_sc.sc_node = PCITAG_NODE(pa->pa_tag);
403 if (sc->sc_sc.sc_node) {
404 if (OF_getproplen(sc->sc_sc.sc_node, "brcm,cal-blob") > 0) {
405 sc->sc_sc.sc_calsize = OF_getproplen(sc->sc_sc.sc_node,
406 "brcm,cal-blob");
407 sc->sc_sc.sc_cal = malloc(sc->sc_sc.sc_calsize,
408 M_DEVBUF2, M_WAITOK0x0001);
409 OF_getprop(sc->sc_sc.sc_node, "brcm,cal-blob",
410 sc->sc_sc.sc_cal, sc->sc_sc.sc_calsize);
411 }
412 }
413#endif
414
415 sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
416 sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
417 bwfm_attach(&sc->sc_sc);
418 config_mountroot(self, bwfm_attachhook);
419 return;
420
421bar0:
422 bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
423bar1:
424 bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
425}
426
427int
428bwfm_pci_preinit(struct bwfm_softc *bwfm)
429{
430 struct bwfm_pci_softc *sc = (void *)bwfm;
431 struct bwfm_pci_ringinfo ringinfo;
432 const char *chip = NULL((void *)0);
433 u_char *ucode, *nvram;
434 size_t size, nvsize, nvlen;
435 uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
436 uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
437 uint32_t idx_offset, reg;
438 int i;
439
440 if (sc->sc_initialized)
1
Assuming field 'sc_initialized' is 0
2
Taking false branch
441 return 0;
442
443 sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
444 if (bwfm_chip_attach(&sc->sc_sc) != 0) {
3
Assuming the condition is false
4
Taking false branch
445 printf("%s: cannot attach chip\n", DEVNAME(sc)((sc)->sc_sc.sc_dev.dv_xname));
446 return 1;
447 }
448
449#if defined(__HAVE_FDT)
450 if (bwfm_pci_read_otp(sc)) {
451 printf("%s: cannot read OTP\n", DEVNAME(sc)((sc)->sc_sc.sc_dev.dv_xname));
452 return 1;
453 }
454#endif
455
456 bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE20x83C);
5
Calling 'bwfm_pci_select_core'
9
Returning from 'bwfm_pci_select_core'
457 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x120)
, (0x4e0)))
458 BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x120)
, (0x4e0)))
;
459 reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->read_4((sc->sc_reg_ioh), (0x124))
)
460 BWFM_PCI_PCIE2REG_CONFIGDATA)((sc->sc_reg_iot)->read_4((sc->sc_reg_ioh), (0x124))
)
;
461 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x124)
, (reg)))
462 BWFM_PCI_PCIE2REG_CONFIGDATA, reg)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x124)
, (reg)))
;
463
464 switch (bwfm->sc_chip.ch_chip) {
10
Control jumps to 'case 17272:' at line 492
465 case BRCM_CC_4350_CHIP_ID0x4350:
466 if (bwfm->sc_chip.ch_chiprev <= 7)
467 chip = "4350c2";
468 else
469 chip = "4350";
470 break;
471 case BRCM_CC_4355_CHIP_ID0x4355:
472 chip = "4355c1";
473 break;
474 case BRCM_CC_4356_CHIP_ID0x4356:
475 chip = "4356";
476 break;
477 case BRCM_CC_4364_CHIP_ID0x4364:
478 if (bwfm->sc_chip.ch_chiprev <= 3)
479 chip = "4364b2";
480 else
481 chip = "4364b3";
482 break;
483 case BRCM_CC_43602_CHIP_ID43602:
484 chip = "43602";
485 break;
486 case BRCM_CC_4371_CHIP_ID0x4371:
487 chip = "4371";
488 break;
489 case BRCM_CC_4377_CHIP_ID0x4377:
490 chip = "4377b3";
491 break;
492 case BRCM_CC_4378_CHIP_ID0x4378:
493 chip = "4378";
494 break;
11
Execution continues on line 504
495 case BRCM_CC_4387_CHIP_ID0x4387:
496 chip = "4387c2";
497 break;
498 default:
499 printf("%s: unknown firmware for chip %s\n",
500 DEVNAME(sc)((sc)->sc_sc.sc_dev.dv_xname), bwfm->sc_chip.ch_name);
501 return 1;
502 }
503
504 if (bwfm_loadfirmware(bwfm, chip, "-pcie", &ucode, &size,
12
Assuming the condition is false
13
Taking false branch
505 &nvram, &nvsize, &nvlen) != 0)
506 return 1;
507
508 /* Retrieve RAM size from firmware. */
509 if (size >= BWFM_RAMSIZE0x6c + 8) {
14
Assuming the condition is false
15
Taking false branch
510 uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE0x6c];
511 if (letoh32(ramsize[0])((__uint32_t)(ramsize[0])) == BWFM_RAMSIZE_MAGIC0x534d4152)
512 bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1])((__uint32_t)(ramsize[1]));
513 }
514
515 if (bwfm_pci_load_microcode(sc, ucode, size, nvram, nvlen) != 0) {
16
Calling 'bwfm_pci_load_microcode'
516 printf("%s: could not load microcode\n",
517 DEVNAME(sc)((sc)->sc_sc.sc_dev.dv_xname));
518 free(ucode, M_DEVBUF2, size);
519 free(nvram, M_DEVBUF2, nvsize);
520 return 1;
521 }
522 free(ucode, M_DEVBUF2, size);
523 free(nvram, M_DEVBUF2, nvsize);
524
525 sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x000)))
526 sc->sc_shared_address + BWFM_SHARED_INFO)((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x000)))
;
527 sc->sc_shared_version = sc->sc_shared_flags;
528 if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION7 ||
529 sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION5) {
530 printf("%s: PCIe version %d unsupported\n",
531 DEVNAME(sc)((sc)->sc_sc.sc_dev.dv_xname), sc->sc_shared_version);
532 return 1;
533 }
534
535 sc->sc_dma_idx_sz = 0;
536 if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX0x10000) {
537 if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX0x100000)
538 sc->sc_dma_idx_sz = sizeof(uint16_t);
539 else
540 sc->sc_dma_idx_sz = sizeof(uint32_t);
541 }
542
543 /* Maximum RX data buffers in the ring. */
544 sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->read_2((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x22)))
545 sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST)((sc->sc_tcm_iot)->read_2((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x22)))
;
546 if (sc->sc_max_rxbufpost == 0)
547 sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT255;
548
549 /* Alternative offset of data in a packet */
550 sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x24)))
551 sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET)((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x24)))
;
552
553 /* For Power Management */
554 sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x28)))
555 sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR)((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x28)))
;
556 sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x2c)))
557 sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR)((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x2c)))
;
558
559 /* Ring information */
560 sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x30)))
561 sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR)((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x30)))
;
562
563 /* Firmware's "dmesg" */
564 sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x14)))
565 sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR)((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x14)))
;
566 sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_console_base_addr + 0x08)))
567 sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR)((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_console_base_addr + 0x08)))
;
568 sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_console_base_addr + 0x0c)))
569 sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE)((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_console_base_addr + 0x0c)))
;
570
571 /* Read ring information. */
572 bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->read_region_1((sc->sc_tcm_ioh), (
sc->sc_ring_info_addr), ((void *)&ringinfo), (sizeof(ringinfo
))))
573 sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo))((sc->sc_tcm_iot)->read_region_1((sc->sc_tcm_ioh), (
sc->sc_ring_info_addr), ((void *)&ringinfo), (sizeof(ringinfo
))))
;
574
575 if (sc->sc_shared_version >= 6) {
576 sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings)((__uint16_t)(ringinfo.max_submissionrings));
577 sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings)((__uint16_t)(ringinfo.max_flowrings));
578 sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings)((__uint16_t)(ringinfo.max_completionrings));
579 } else {
580 sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings)((__uint16_t)(ringinfo.max_flowrings));
581 sc->sc_max_flowrings = sc->sc_max_submissionrings -
582 BWFM_NUM_TX_MSGRINGS2;
583 sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS3;
584 }
585
586 if (sc->sc_dma_idx_sz == 0) {
587 d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr)((__uint32_t)(ringinfo.d2h_w_idx_ptr));
588 d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr)((__uint32_t)(ringinfo.d2h_r_idx_ptr));
589 h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr)((__uint32_t)(ringinfo.h2d_w_idx_ptr));
590 h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr)((__uint32_t)(ringinfo.h2d_r_idx_ptr));
591 idx_offset = sizeof(uint32_t);
592 } else {
593 uint64_t address;
594
595 /* Each TX/RX Ring has a Read and Write Ptr */
596 sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
597 sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
598 sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
599 sc->sc_dma_idx_bufsz, 8);
600 if (sc->sc_dma_idx_buf == NULL((void *)0)) {
601 /* XXX: Fallback to TCM? */
602 printf("%s: cannot allocate idx buf\n",
603 DEVNAME(sc)((sc)->sc_sc.sc_dev.dv_xname));
604 return 1;
605 }
606
607 idx_offset = sc->sc_dma_idx_sz;
608 h2d_w_idx_ptr = 0;
609 address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf)((uint64_t)(sc->sc_dma_idx_buf)->bdm_map->dm_segs[0]
.ds_addr)
;
610 ringinfo.h2d_w_idx_hostaddr_low =
611 htole32(address & 0xffffffff)((__uint32_t)(address & 0xffffffff));
612 ringinfo.h2d_w_idx_hostaddr_high =
613 htole32(address >> 32)((__uint32_t)(address >> 32));
614
615 h2d_r_idx_ptr = h2d_w_idx_ptr +
616 sc->sc_max_submissionrings * idx_offset;
617 address += sc->sc_max_submissionrings * idx_offset;
618 ringinfo.h2d_r_idx_hostaddr_low =
619 htole32(address & 0xffffffff)((__uint32_t)(address & 0xffffffff));
620 ringinfo.h2d_r_idx_hostaddr_high =
621 htole32(address >> 32)((__uint32_t)(address >> 32));
622
623 d2h_w_idx_ptr = h2d_r_idx_ptr +
624 sc->sc_max_submissionrings * idx_offset;
625 address += sc->sc_max_submissionrings * idx_offset;
626 ringinfo.d2h_w_idx_hostaddr_low =
627 htole32(address & 0xffffffff)((__uint32_t)(address & 0xffffffff));
628 ringinfo.d2h_w_idx_hostaddr_high =
629 htole32(address >> 32)((__uint32_t)(address >> 32));
630
631 d2h_r_idx_ptr = d2h_w_idx_ptr +
632 sc->sc_max_completionrings * idx_offset;
633 address += sc->sc_max_completionrings * idx_offset;
634 ringinfo.d2h_r_idx_hostaddr_low =
635 htole32(address & 0xffffffff)((__uint32_t)(address & 0xffffffff));
636 ringinfo.d2h_r_idx_hostaddr_high =
637 htole32(address >> 32)((__uint32_t)(address >> 32));
638
639 bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->write_region_1((sc->sc_tcm_ioh), (
sc->sc_ring_info_addr), ((void *)&ringinfo), (sizeof(ringinfo
))))
640 sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo))((sc->sc_tcm_iot)->write_region_1((sc->sc_tcm_ioh), (
sc->sc_ring_info_addr), ((void *)&ringinfo), (sizeof(ringinfo
))))
;
641 }
642
643 uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem)((__uint32_t)(ringinfo.ringmem));
644 /* TX ctrl ring: Send ctrl buffers, send IOCTLs */
645 if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
646 h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
647 &ring_mem_ptr))
648 goto cleanup;
649 /* TX rxpost ring: Send clean data mbufs for RX */
650 if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 1024, 32,
651 h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
652 &ring_mem_ptr))
653 goto cleanup;
654 /* RX completion rings: recv our filled buffers back */
655 if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
656 d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
657 &ring_mem_ptr))
658 goto cleanup;
659 if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024,
660 sc->sc_shared_version >= 7 ? 24 : 16,
661 d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
662 &ring_mem_ptr))
663 goto cleanup;
664 if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 1024,
665 sc->sc_shared_version >= 7 ? 40 : 32,
666 d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
667 &ring_mem_ptr))
668 goto cleanup;
669
670 /* Dynamic TX rings for actual data */
671 sc->sc_flowrings = malloc(sc->sc_max_flowrings *
672 sizeof(struct bwfm_pci_msgring), M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008);
673 for (i = 0; i < sc->sc_max_flowrings; i++) {
674 struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
675 ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
676 ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
677 }
678
679 /* Scratch and ring update buffers for firmware */
680 if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
681 BWFM_DMA_D2H_SCRATCH_BUF_LEN8, 8)) == NULL((void *)0))
682 goto cleanup;
683 bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x38), (((uint64_t)(sc->sc_scratch_buf
)->bdm_map->dm_segs[0].ds_addr) & 0xffffffff)))
684 sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x38), (((uint64_t)(sc->sc_scratch_buf
)->bdm_map->dm_segs[0].ds_addr) & 0xffffffff)))
685 BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff)((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x38), (((uint64_t)(sc->sc_scratch_buf
)->bdm_map->dm_segs[0].ds_addr) & 0xffffffff)))
;
686 bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x3c), (((uint64_t)(sc->sc_scratch_buf
)->bdm_map->dm_segs[0].ds_addr) >> 32)))
687 sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x3c), (((uint64_t)(sc->sc_scratch_buf
)->bdm_map->dm_segs[0].ds_addr) >> 32)))
688 BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32)((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x3c), (((uint64_t)(sc->sc_scratch_buf
)->bdm_map->dm_segs[0].ds_addr) >> 32)))
;
689 bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x34), (8)))
690 sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x34), (8)))
691 BWFM_DMA_D2H_SCRATCH_BUF_LEN)((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x34), (8)))
;
692
693 if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
694 BWFM_DMA_D2H_RINGUPD_BUF_LEN1024, 8)) == NULL((void *)0))
695 goto cleanup;
696 bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x44), (((uint64_t)(sc->sc_ringupd_buf
)->bdm_map->dm_segs[0].ds_addr) & 0xffffffff)))
697 sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x44), (((uint64_t)(sc->sc_ringupd_buf
)->bdm_map->dm_segs[0].ds_addr) & 0xffffffff)))
698 BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff)((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x44), (((uint64_t)(sc->sc_ringupd_buf
)->bdm_map->dm_segs[0].ds_addr) & 0xffffffff)))
;
699 bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x48), (((uint64_t)(sc->sc_ringupd_buf
)->bdm_map->dm_segs[0].ds_addr) >> 32)))
700 sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x48), (((uint64_t)(sc->sc_ringupd_buf
)->bdm_map->dm_segs[0].ds_addr) >> 32)))
701 BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32)((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x48), (((uint64_t)(sc->sc_ringupd_buf
)->bdm_map->dm_segs[0].ds_addr) >> 32)))
;
702 bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x40), (1024)))
703 sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x40), (1024)))
704 BWFM_DMA_D2H_RINGUPD_BUF_LEN)((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_shared_address + 0x40), (1024)))
;
705
706 bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE20x83C);
707 bwfm_pci_intr_enable(sc);
708 bwfm_pci_hostready(sc);
709
710 /* Maps RX mbufs to a packet id and back. */
711 sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS1024;
712 sc->sc_rx_pkts.pkts = malloc(BWFM_NUM_RX_PKTIDS1024 *
713 sizeof(struct bwfm_pci_buf), M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008);
714 for (i = 0; i < BWFM_NUM_RX_PKTIDS1024; i++)
715 bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_CTL_PKT_SIZE,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (8192
), (1), (8192), (0), (0x0000), (&sc->sc_rx_pkts.pkts[i
].bb_map))
716 BWFM_NUM_RX_DESCS, MSGBUF_MAX_CTL_PKT_SIZE, 0, BUS_DMA_WAITOK,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (8192
), (1), (8192), (0), (0x0000), (&sc->sc_rx_pkts.pkts[i
].bb_map))
717 &sc->sc_rx_pkts.pkts[i].bb_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (8192
), (1), (8192), (0), (0x0000), (&sc->sc_rx_pkts.pkts[i
].bb_map))
;
718
719 /* Maps TX mbufs to a packet id and back. */
720 sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS2048;
721 sc->sc_tx_pkts.pkts = malloc(BWFM_NUM_TX_PKTIDS2048
722 * sizeof(struct bwfm_pci_buf), M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008);
723 for (i = 0; i < BWFM_NUM_TX_PKTIDS2048; i++)
724 bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (2048
), (1), (2048), (0), (0x0000), (&sc->sc_tx_pkts.pkts[i
].bb_map))
725 BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (2048
), (1), (2048), (0), (0x0000), (&sc->sc_tx_pkts.pkts[i
].bb_map))
726 &sc->sc_tx_pkts.pkts[i].bb_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (2048
), (1), (2048), (0), (0x0000), (&sc->sc_tx_pkts.pkts[i
].bb_map))
;
727 sc->sc_tx_pkts_full = 0;
728
729 /* Maps IOCTL mbufs to a packet id and back. */
730 sc->sc_ioctl_pkts.npkt = BWFM_NUM_IOCTL_PKTIDS8;
731 sc->sc_ioctl_pkts.pkts = malloc(BWFM_NUM_IOCTL_PKTIDS8
732 * sizeof(struct bwfm_pci_buf), M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008);
733 for (i = 0; i < BWFM_NUM_IOCTL_PKTIDS8; i++)
734 bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (2048
), (1), (2048), (0), (0x0000), (&sc->sc_ioctl_pkts.pkts
[i].bb_map))
735 BWFM_NUM_IOCTL_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (2048
), (1), (2048), (0), (0x0000), (&sc->sc_ioctl_pkts.pkts
[i].bb_map))
736 &sc->sc_ioctl_pkts.pkts[i].bb_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (2048
), (1), (2048), (0), (0x0000), (&sc->sc_ioctl_pkts.pkts
[i].bb_map))
;
737
738 /*
739 * For whatever reason, could also be a bug somewhere in this
740 * driver, the firmware needs a bunch of RX buffers otherwise
741 * it won't send any RX complete messages.
742 */
743 if_rxr_init(&sc->sc_rxbuf_ring, min(256, sc->sc_max_rxbufpost),
744 sc->sc_max_rxbufpost);
745 if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
746 if_rxr_init(&sc->sc_event_ring, 8, 8);
747 bwfm_pci_fill_rx_rings(sc);
748
749 TAILQ_INIT(&sc->sc_ioctlq)do { (&sc->sc_ioctlq)->tqh_first = ((void *)0); (&
sc->sc_ioctlq)->tqh_last = &(&sc->sc_ioctlq)
->tqh_first; } while (0)
;
750
751#ifdef BWFM_DEBUG
752 sc->sc_console_readidx = 0;
753 bwfm_pci_debug_console(sc);
754#endif
755
756 sc->sc_initialized = 1;
757 return 0;
758
759cleanup:
760 if (sc->sc_ringupd_buf)
761 bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
762 if (sc->sc_scratch_buf)
763 bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
764 if (sc->sc_rx_complete.ring)
765 bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
766 if (sc->sc_tx_complete.ring)
767 bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
768 if (sc->sc_ctrl_complete.ring)
769 bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
770 if (sc->sc_rxpost_submit.ring)
771 bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
772 if (sc->sc_ctrl_submit.ring)
773 bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
774 if (sc->sc_dma_idx_buf)
775 bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
776 return 1;
777}
778
779int
780bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode, size_t size,
781 const u_char *nvram, size_t nvlen)
782{
783 struct bwfm_softc *bwfm = (void *)sc;
784 struct bwfm_core *core;
785 struct bwfm_pci_random_seed_footer footer;
786 uint32_t addr, shared, written;
787 uint8_t *rndbuf;
788 int i;
789
790 if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID43602) {
17
Assuming field 'ch_chip' is not equal to BRCM_CC_43602_CHIP_ID
18
Taking false branch
791 bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR40x83E);
792 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x40),
(5)))
793 BWFM_PCI_ARMCR4REG_BANKIDX, 5)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x40),
(5)))
;
794 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x4C),
(0)))
795 BWFM_PCI_ARMCR4REG_BANKPDA, 0)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x4C),
(0)))
;
796 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x40),
(7)))
797 BWFM_PCI_ARMCR4REG_BANKIDX, 7)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x40),
(7)))
;
798 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x4C),
(0)))
799 BWFM_PCI_ARMCR4REG_BANKPDA, 0)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x4C),
(0)))
;
800 }
801
802 for (i = 0; i < size; i++)
19
Assuming 'i' is >= 'size'
20
Loop condition is false. Execution continues on line 807
803 bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->write_1((sc->sc_tcm_ioh), (bwfm->
sc_chip.ch_rambase + i), (ucode[i])))
804 bwfm->sc_chip.ch_rambase + i, ucode[i])((sc->sc_tcm_iot)->write_1((sc->sc_tcm_ioh), (bwfm->
sc_chip.ch_rambase + i), (ucode[i])))
;
805
806 /* Firmware replaces this with a pointer once up. */
807 bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (bwfm->
sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4), (0)))
808 bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0)((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (bwfm->
sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4), (0)))
;
809
810 if (nvram) {
21
Assuming 'nvram' is non-null
22
Taking true branch
811 addr = bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize -
812 nvlen;
813 for (i = 0; i < nvlen; i++)
23
Assuming 'i' is >= 'nvlen'
24
Loop condition is false. Execution continues on line 817
814 bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->write_1((sc->sc_tcm_ioh), (addr +
i), (nvram[i])))
815 addr + i, nvram[i])((sc->sc_tcm_iot)->write_1((sc->sc_tcm_ioh), (addr +
i), (nvram[i])))
;
816
817 footer.length = htole32(BWFM_RANDOM_SEED_LENGTH)((__uint32_t)(0x100));
818 footer.magic = htole32(BWFM_RANDOM_SEED_MAGIC)((__uint32_t)(0xfeedc0de));
819 addr -= sizeof(footer);
820 for (i = 0; i < sizeof(footer); i++)
25
Loop condition is true. Entering loop body
26
The value 1 is assigned to 'i'
27
Loop condition is true. Entering loop body
821 bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->write_1((sc->sc_tcm_ioh), (addr +
i), (((uint8_t *)&footer)[i])))
28
3rd function call argument is an uninitialized value
822 addr + i, ((uint8_t *)&footer)[i])((sc->sc_tcm_iot)->write_1((sc->sc_tcm_ioh), (addr +
i), (((uint8_t *)&footer)[i])))
;
823
824 rndbuf = malloc(BWFM_RANDOM_SEED_LENGTH0x100, M_TEMP127, M_WAITOK0x0001);
825 arc4random_buf(rndbuf, BWFM_RANDOM_SEED_LENGTH0x100);
826 addr -= BWFM_RANDOM_SEED_LENGTH0x100;
827 for (i = 0; i < BWFM_RANDOM_SEED_LENGTH0x100; i++)
828 bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->write_1((sc->sc_tcm_ioh), (addr +
i), (rndbuf[i])))
829 addr + i, rndbuf[i])((sc->sc_tcm_iot)->write_1((sc->sc_tcm_ioh), (addr +
i), (rndbuf[i])))
;
830 free(rndbuf, M_TEMP127, BWFM_RANDOM_SEED_LENGTH0x100);
831 }
832
833 written = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (bwfm->
sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4)))
834 bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4)((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (bwfm->
sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4)))
;
835
836 /* Load reset vector from firmware and kickstart core. */
837 if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID43602) {
838 core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM0x80E);
839 bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
840 }
841 bwfm_chip_set_active(bwfm, *(uint32_t *)ucode);
842
843 for (i = 0; i < 100; i++) {
844 delay(50 * 1000)(*delay_func)(50 * 1000);
845 shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (bwfm->
sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4)))
846 bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4)((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (bwfm->
sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4)))
;
847 if (shared != written)
848 break;
849 }
850 if (shared == written) {
851 printf("%s: firmware did not come up\n", DEVNAME(sc)((sc)->sc_sc.sc_dev.dv_xname));
852 return 1;
853 }
854 if (shared < bwfm->sc_chip.ch_rambase ||
855 shared >= bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize) {
856 printf("%s: invalid shared RAM address 0x%08x\n", DEVNAME(sc)((sc)->sc_sc.sc_dev.dv_xname),
857 shared);
858 return 1;
859 }
860
861 sc->sc_shared_address = shared;
862 return 0;
863}
864
865int
866bwfm_pci_detach(struct device *self, int flags)
867{
868 struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
869
870 bwfm_detach(&sc->sc_sc, flags);
871 bwfm_pci_cleanup(sc);
872
873 return 0;
874}
875
876void
877bwfm_pci_cleanup(struct bwfm_pci_softc *sc)
878{
879 int i;
880
881 for (i = 0; i < BWFM_NUM_RX_PKTIDS1024; i++) {
882 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_pkts.pkts[i].bb_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc
->sc_rx_pkts.pkts[i].bb_map))
;
883 if (sc->sc_rx_pkts.pkts[i].bb_m)
884 m_freem(sc->sc_rx_pkts.pkts[i].bb_m);
885 }
886 free(sc->sc_rx_pkts.pkts, M_DEVBUF2, BWFM_NUM_RX_PKTIDS1024 *
887 sizeof(struct bwfm_pci_buf));
888
889 for (i = 0; i < BWFM_NUM_TX_PKTIDS2048; i++) {
890 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_pkts.pkts[i].bb_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc
->sc_tx_pkts.pkts[i].bb_map))
;
891 if (sc->sc_tx_pkts.pkts[i].bb_m)
892 m_freem(sc->sc_tx_pkts.pkts[i].bb_m);
893 }
894 free(sc->sc_tx_pkts.pkts, M_DEVBUF2, BWFM_NUM_TX_PKTIDS2048 *
895 sizeof(struct bwfm_pci_buf));
896
897 for (i = 0; i < BWFM_NUM_IOCTL_PKTIDS8; i++) {
898 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ioctl_pkts.pkts[i].bb_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (sc
->sc_ioctl_pkts.pkts[i].bb_map))
;
899 if (sc->sc_ioctl_pkts.pkts[i].bb_m)
900 m_freem(sc->sc_ioctl_pkts.pkts[i].bb_m);
901 }
902 free(sc->sc_ioctl_pkts.pkts, M_DEVBUF2, BWFM_NUM_IOCTL_PKTIDS8 *
903 sizeof(struct bwfm_pci_buf));
904
905 for (i = 0; i < sc->sc_max_flowrings; i++) {
906 if (sc->sc_flowrings[i].status >= RING_OPEN)
907 bwfm_pci_dmamem_free(sc, sc->sc_flowrings[i].ring);
908 }
909 free(sc->sc_flowrings, M_DEVBUF2, sc->sc_max_flowrings *
910 sizeof(struct bwfm_pci_msgring));
911
912 bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
913 bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
914 bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
915 bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
916 bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
917 bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
918 bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
919 if (sc->sc_dma_idx_buf) {
920 bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
921 sc->sc_dma_idx_buf = NULL((void *)0);
922 }
923
924 sc->sc_initialized = 0;
925}
926
927int
928bwfm_pci_activate(struct device *self, int act)
929{
930 struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
931 struct bwfm_softc *bwfm = (void *)sc;
932 int error = 0;
933
934 switch (act) {
935 case DVACT_QUIESCE2:
936 error = bwfm_activate(bwfm, act);
937 if (error)
938 return error;
939 if (sc->sc_initialized) {
940 sc->sc_mbdata_done = 0;
941 error = bwfm_pci_send_mb_data(sc,
942 BWFM_PCI_H2D_HOST_D3_INFORM0x00000001);
943 if (error)
944 return error;
945 tsleep_nsec(&sc->sc_mbdata_done, PCATCH0x100,
946 DEVNAME(sc)((sc)->sc_sc.sc_dev.dv_xname), SEC_TO_NSEC(2));
947 if (!sc->sc_mbdata_done)
948 return ETIMEDOUT60;
949 }
950 break;
951 case DVACT_WAKEUP5:
952 if (sc->sc_initialized) {
953 /* If device can't be resumed, re-init. */
954 if (bwfm_pci_intmask(sc) == 0 ||
955 bwfm_pci_send_mb_data(sc,
956 BWFM_PCI_H2D_HOST_D0_INFORM0x00000010) != 0) {
957 bwfm_cleanup(bwfm);
958 bwfm_pci_cleanup(sc);
959 }
960 }
961 error = bwfm_activate(bwfm, act);
962 if (error)
963 return error;
964 break;
965 default:
966 break;
967 }
968
969 return 0;
970}
971
972#if defined(__HAVE_FDT)
973int
974bwfm_pci_read_otp(struct bwfm_pci_softc *sc)
975{
976 struct bwfm_softc *bwfm = (void *)sc;
977 struct bwfm_core *core;
978 uint32_t coreid, base, words;
979 uint32_t page, offset, sromctl;
980 uint8_t *otp;
981 int i;
982
983 switch (bwfm->sc_chip.ch_chip) {
984 case BRCM_CC_4355_CHIP_ID0x4355:
985 coreid = BWFM_AGENT_CORE_CHIPCOMMON0x800;
986 base = 0x8c0;
987 words = 0xb2;
988 break;
989 case BRCM_CC_4364_CHIP_ID0x4364:
990 coreid = BWFM_AGENT_CORE_CHIPCOMMON0x800;
991 base = 0x8c0;
992 words = 0x1a0;
993 break;
994 case BRCM_CC_4377_CHIP_ID0x4377:
995 case BRCM_CC_4378_CHIP_ID0x4378:
996 coreid = BWFM_AGENT_CORE_GCI0x840;
997 base = 0x1120;
998 words = 0x170;
999 break;
1000 case BRCM_CC_4387_CHIP_ID0x4387:
1001 coreid = BWFM_AGENT_CORE_GCI0x840;
1002 base = 0x113c;
1003 words = 0x170;
1004 break;
1005 default:
1006 return 0;
1007 }
1008
1009 core = bwfm_chip_get_core(bwfm, coreid);
1010 if (core == NULL((void *)0))
1011 return 1;
1012
1013 /* Map OTP to shadow area */
1014 if (coreid == BWFM_AGENT_CORE_CHIPCOMMON0x800) {
1015 bwfm_pci_select_core(sc, coreid);
1016 sromctl = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->read_4((sc->sc_reg_ioh), (0x00000190
)))
1017 BWFM_CHIP_REG_SROMCONTROL)((sc->sc_reg_iot)->read_4((sc->sc_reg_ioh), (0x00000190
)))
;
1018
1019 if (!(sromctl & BWFM_CHIP_REG_SROMCONTROL_OTP_PRESENT(1 << 5)))
1020 return 0;
1021
1022 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x00000190
), (sromctl | (1 << 4))))
1023 BWFM_CHIP_REG_SROMCONTROL, sromctl |((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x00000190
), (sromctl | (1 << 4))))
1024 BWFM_CHIP_REG_SROMCONTROL_OTPSEL)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x00000190
), (sromctl | (1 << 4))))
;
1025 }
1026
1027 /* Map bus window to SROM/OTP shadow area */
1028 page = (core->co_base + base) & ~(BWFM_PCI_BAR0_REG_SIZE0x1000 - 1);
1029 offset = (core->co_base + base) & (BWFM_PCI_BAR0_REG_SIZE0x1000 - 1);
1030 pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW0x80, page);
1031
1032 otp = mallocarray(words, sizeof(uint16_t), M_TEMP127, M_WAITOK0x0001);
1033 for (i = 0; i < words; i++)
1034 ((uint16_t *)otp)[i] = bus_space_read_2(sc->sc_reg_iot,((sc->sc_reg_iot)->read_2((sc->sc_reg_ioh), (offset +
i * sizeof(uint16_t))))
1035 sc->sc_reg_ioh, offset + i * sizeof(uint16_t))((sc->sc_reg_iot)->read_2((sc->sc_reg_ioh), (offset +
i * sizeof(uint16_t))))
;
1036
1037 /* Unmap OTP */
1038 if (coreid == BWFM_AGENT_CORE_CHIPCOMMON0x800) {
1039 bwfm_pci_select_core(sc, coreid);
1040 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x00000190
), (sromctl)))
1041 BWFM_CHIP_REG_SROMCONTROL, sromctl)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x00000190
), (sromctl)))
;
1042 }
1043
1044 for (i = 0; i < (words * sizeof(uint16_t)) - 1; i += otp[i + 1]) {
1045 if (otp[i + 0] == 0)
1046 break;
1047 if (i + otp[i + 1] > words * sizeof(uint16_t))
1048 break;
1049 bwfm_pci_process_otp_tuple(sc, otp[i + 0], otp[i + 1],
1050 &otp[i + 2]);
1051 }
1052
1053 free(otp, M_TEMP127, words * sizeof(uint16_t));
1054 return 0;
1055}
1056
1057void
1058bwfm_pci_process_otp_tuple(struct bwfm_pci_softc *sc, uint8_t type, uint8_t size,
1059 uint8_t *data)
1060{
1061 struct bwfm_softc *bwfm = (void *)sc;
1062 char chiprev[8] = "", module[8] = "", modrev[8] = "", vendor[8] = "", chip[8] = "";
1063 char product[16] = "unknown";
1064 int len;
1065
1066 switch (type) {
1067 case 0x15: /* system vendor OTP */
1068 DPRINTF(("%s: system vendor OTP\n", DEVNAME(sc)))do { ; } while (0);
1069 if (size < sizeof(uint32_t))
1070 return;
1071 if (data[0] != 0x08 || data[1] != 0x00 ||
1072 data[2] != 0x00 || data[3] != 0x00)
1073 return;
1074 size -= sizeof(uint32_t);
1075 data += sizeof(uint32_t);
1076 while (size) {
1077 /* reached end */
1078 if (data[0] == 0xff)
1079 break;
1080 for (len = 0; len < size; len++)
1081 if (data[len] == 0x00 || data[len] == ' ' ||
1082 data[len] == 0xff)
1083 break;
1084 if (len < 3 || len > 9) /* X=abcdef */
1085 goto next;
1086 if (data[1] != '=')
1087 goto next;
1088 /* NULL-terminate string */
1089 if (data[len] == ' ')
1090 data[len] = '\0';
1091 switch (data[0]) {
1092 case 's':
1093 strlcpy(chiprev, &data[2], sizeof(chiprev));
1094 break;
1095 case 'M':
1096 strlcpy(module, &data[2], sizeof(module));
1097 break;
1098 case 'm':
1099 strlcpy(modrev, &data[2], sizeof(modrev));
1100 break;
1101 case 'V':
1102 strlcpy(vendor, &data[2], sizeof(vendor));
1103 break;
1104 }
1105next:
1106 /* skip content */
1107 data += len;
1108 size -= len;
1109 /* skip spacer tag */
1110 if (size) {
1111 data++;
1112 size--;
1113 }
1114 }
1115 snprintf(chip, sizeof(chip),
1116 bwfm->sc_chip.ch_chip > 40000 ? "%05d" : "%04x",
1117 bwfm->sc_chip.ch_chip);
1118 if (sc->sc_sc.sc_node)
1119 OF_getprop(sc->sc_sc.sc_node, "apple,module-instance",
1120 product, sizeof(product));
1121 printf("%s: firmware C-%s%s%s/P-%s_M-%s_V-%s__m-%s\n",
1122 DEVNAME(sc)((sc)->sc_sc.sc_dev.dv_xname), chip,
1123 *chiprev ? "__s-" : "", *chiprev ? chiprev : "",
1124 product, module, vendor, modrev);
1125 break;
1126 case 0x80: /* Broadcom CIS */
1127 DPRINTF(("%s: Broadcom CIS\n", DEVNAME(sc)))do { ; } while (0);
1128 break;
1129 default:
1130 DPRINTF(("%s: unknown OTP tuple\n", DEVNAME(sc)))do { ; } while (0);
1131 break;
1132 }
1133}
1134#endif
1135
1136/* DMA code */
1137struct bwfm_pci_dmamem *
1138bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size, bus_size_t align)
1139{
1140 struct bwfm_pci_dmamem *bdm;
1141 int nsegs;
1142
1143 bdm = malloc(sizeof(*bdm), M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008);
1144 bdm->bdm_size = size;
1145
1146 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (size
), (1), (size), (0), (0x0000 | 0x0002), (&bdm->bdm_map
))
1147 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (size
), (1), (size), (0), (0x0000 | 0x0002), (&bdm->bdm_map
))
!= 0)
1148 goto bdmfree;
1149
1150 if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (size
), (align), (0), (&bdm->bdm_seg), (1), (&nsegs), (
0x0000))
1151 &nsegs, BUS_DMA_WAITOK)(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (size
), (align), (0), (&bdm->bdm_seg), (1), (&nsegs), (
0x0000))
!= 0)
1152 goto destroy;
1153
1154 if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&bdm
->bdm_seg), (nsegs), (size), (&bdm->bdm_kva), (0x0000
| 0x0004))
1155 &bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT)(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&bdm
->bdm_seg), (nsegs), (size), (&bdm->bdm_kva), (0x0000
| 0x0004))
!= 0)
1156 goto free;
1157
1158 if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (bdm->
bdm_map), (bdm->bdm_kva), (size), (((void *)0)), (0x0000))
1159 NULL, BUS_DMA_WAITOK)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (bdm->
bdm_map), (bdm->bdm_kva), (size), (((void *)0)), (0x0000))
!= 0)
1160 goto unmap;
1161
1162 bzero(bdm->bdm_kva, size)__builtin_bzero((bdm->bdm_kva), (size));
1163
1164 return (bdm);
1165
1166unmap:
1167 bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), (bdm->
bdm_kva), (size))
;
1168free:
1169 bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (&
bdm->bdm_seg), (1))
;
1170destroy:
1171 bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (bdm
->bdm_map))
;
1172bdmfree:
1173 free(bdm, M_DEVBUF2, sizeof(*bdm));
1174
1175 return (NULL((void *)0));
1176}
1177
1178void
1179bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
1180{
1181 bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), (bdm->
bdm_kva), (bdm->bdm_size))
;
1182 bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (&
bdm->bdm_seg), (1))
;
1183 bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (bdm
->bdm_map))
;
1184 free(bdm, M_DEVBUF2, sizeof(*bdm));
1185}
1186
1187/*
1188 * We need a simple mapping from a packet ID to mbufs, because when
1189 * a transfer completed, we only know the ID so we have to look up
1190 * the memory for the ID. This simply looks for an empty slot.
1191 */
1192int
1193bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
1194{
1195 int i, idx;
1196
1197 idx = pkts->last + 1;
1198 for (i = 0; i < pkts->npkt; i++) {
1199 if (idx == pkts->npkt)
1200 idx = 0;
1201 if (pkts->pkts[idx].bb_m == NULL((void *)0))
1202 return 0;
1203 idx++;
1204 }
1205 return ENOBUFS55;
1206}
1207
1208int
1209bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
1210 struct mbuf *m, uint32_t *pktid, paddr_t *paddr)
1211{
1212 int i, idx;
1213
1214 idx = pkts->last + 1;
1215 for (i = 0; i < pkts->npkt; i++) {
1216 if (idx == pkts->npkt)
1217 idx = 0;
1218 if (pkts->pkts[idx].bb_m == NULL((void *)0)) {
1219 if (bus_dmamap_load_mbuf(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
pkts->pkts[idx].bb_map), (m), (0x0001))
1220 pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
pkts->pkts[idx].bb_map), (m), (0x0001))
!= 0) {
1221 if (m_defrag(m, M_DONTWAIT0x0002))
1222 return EFBIG27;
1223 if (bus_dmamap_load_mbuf(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
pkts->pkts[idx].bb_map), (m), (0x0001))
1224 pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
pkts->pkts[idx].bb_map), (m), (0x0001))
!= 0)
1225 return EFBIG27;
1226 }
1227 bus_dmamap_sync(sc->sc_dmat, pkts->pkts[idx].bb_map,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (pkts->
pkts[idx].bb_map), (0), (pkts->pkts[idx].bb_map->dm_mapsize
), (0x01 | 0x04))
1228 0, pkts->pkts[idx].bb_map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (pkts->
pkts[idx].bb_map), (0), (pkts->pkts[idx].bb_map->dm_mapsize
), (0x01 | 0x04))
1229 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (pkts->
pkts[idx].bb_map), (0), (pkts->pkts[idx].bb_map->dm_mapsize
), (0x01 | 0x04))
;
1230 pkts->last = idx;
1231 pkts->pkts[idx].bb_m = m;
1232 *pktid = idx;
1233 *paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
1234 return 0;
1235 }
1236 idx++;
1237 }
1238 return ENOBUFS55;
1239}
1240
1241struct mbuf *
1242bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
1243 uint32_t pktid)
1244{
1245 struct mbuf *m;
1246
1247 if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL((void *)0))
1248 return NULL((void *)0);
1249 bus_dmamap_sync(sc->sc_dmat, pkts->pkts[pktid].bb_map, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (pkts->
pkts[pktid].bb_map), (0), (pkts->pkts[pktid].bb_map->dm_mapsize
), (0x02 | 0x08))
1250 pkts->pkts[pktid].bb_map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (pkts->
pkts[pktid].bb_map), (0), (pkts->pkts[pktid].bb_map->dm_mapsize
), (0x02 | 0x08))
1251 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (pkts->
pkts[pktid].bb_map), (0), (pkts->pkts[pktid].bb_map->dm_mapsize
), (0x02 | 0x08))
;
1252 bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (pkts
->pkts[pktid].bb_map))
;
1253 m = pkts->pkts[pktid].bb_m;
1254 pkts->pkts[pktid].bb_m = NULL((void *)0);
1255 return m;
1256}
1257
1258void
1259bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
1260{
1261 bwfm_pci_fill_rx_buf_ring(sc);
1262 bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
1263 MSGBUF_TYPE_IOCTLRESP_BUF_POST0xB);
1264 bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
1265 MSGBUF_TYPE_EVENT_BUF_POST0xD);
1266}
1267
1268void
1269bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
1270 uint32_t msgtype)
1271{
1272 struct msgbuf_rx_ioctl_resp_or_event *req;
1273 struct mbuf *m;
1274 uint32_t pktid;
1275 paddr_t paddr;
1276 int s, slots;
1277
1278 s = splnet()splraise(0x7);
1279 for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
1280 if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
1281 break;
1282 req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1283 if (req == NULL((void *)0))
1284 break;
1285 m = MCLGETL(NULL, M_DONTWAIT, MSGBUF_MAX_CTL_PKT_SIZE)m_clget((((void *)0)), (0x0002), (8192));
1286 if (m == NULL((void *)0)) {
1287 bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
1288 break;
1289 }
1290 m->m_lenm_hdr.mh_len = m->m_pkthdrM_dat.MH.MH_pkthdr.len = MSGBUF_MAX_CTL_PKT_SIZE8192;
1291 if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
1292 bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
1293 m_freem(m);
1294 break;
1295 }
1296 memset(req, 0, sizeof(*req))__builtin_memset((req), (0), (sizeof(*req)));
1297 req->msg.msgtype = msgtype;
1298 req->msg.request_id = htole32(pktid)((__uint32_t)(pktid));
1299 req->host_buf_len = htole16(MSGBUF_MAX_CTL_PKT_SIZE)((__uint16_t)(8192));
1300 req->host_buf_addr.high_addr = htole32((uint64_t)paddr >> 32)((__uint32_t)((uint64_t)paddr >> 32));
1301 req->host_buf_addr.low_addr = htole32(paddr & 0xffffffff)((__uint32_t)(paddr & 0xffffffff));
1302 bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1303 }
1304 if_rxr_put(rxring, slots)do { (rxring)->rxr_alive -= (slots); } while (0);
1305 splx(s)spllower(s);
1306}
1307
1308void
1309bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
1310{
1311 struct msgbuf_rx_bufpost *req;
1312 struct mbuf *m;
1313 uint32_t pktid;
1314 paddr_t paddr;
1315 int s, slots;
1316
1317 s = splnet()splraise(0x7);
1318 for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
1319 slots > 0; slots--) {
1320 if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
1321 break;
1322 req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
1323 if (req == NULL((void *)0))
1324 break;
1325 m = MCLGETL(NULL, M_DONTWAIT, MSGBUF_MAX_PKT_SIZE)m_clget((((void *)0)), (0x0002), (2048));
1326 if (m == NULL((void *)0)) {
1327 bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
1328 break;
1329 }
1330 m->m_lenm_hdr.mh_len = m->m_pkthdrM_dat.MH.MH_pkthdr.len = MSGBUF_MAX_PKT_SIZE2048;
1331 if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
1332 bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
1333 m_freem(m);
1334 break;
1335 }
1336 memset(req, 0, sizeof(*req))__builtin_memset((req), (0), (sizeof(*req)));
1337 req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST0x11;
1338 req->msg.request_id = htole32(pktid)((__uint32_t)(pktid));
1339 req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE)((__uint16_t)(2048));
1340 req->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32)((__uint32_t)((uint64_t)paddr >> 32));
1341 req->data_buf_addr.low_addr = htole32(paddr & 0xffffffff)((__uint32_t)(paddr & 0xffffffff));
1342 bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
1343 }
1344 if_rxr_put(&sc->sc_rxbuf_ring, slots)do { (&sc->sc_rxbuf_ring)->rxr_alive -= (slots); } while
(0)
;
1345 splx(s)spllower(s);
1346}
1347
/*
 * Initialize one host/device message ring.  Resets the cached ring
 * pointers, allocates the ring's DMA memory and publishes its base
 * address, item count and item size to the firmware's ring-mem
 * descriptor located at *ring_mem in TCM.  On success *ring_mem is
 * advanced past the descriptor so the caller can set up the next ring.
 * Returns 0 on success or ENOMEM if the DMA allocation fails.
 */
int
bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
    int idx, uint32_t idx_off, uint32_t *ring_mem)
{
	/* Per-ring read/write index slots are laid out idx_off apart. */
	ring->w_idx_addr = w_idx + idx * idx_off;
	ring->r_idx_addr = r_idx + idx * idx_off;
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	/* Publish the cleared pointers before the ring goes live. */
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	/* Tell the firmware where the ring lives in host memory. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
	return 0;
}
1378
/*
 * Initialize a TX flowring.  Unlike bwfm_pci_setup_ring() there is no
 * TCM ring-mem descriptor to fill in; the ring's address is handed to
 * the firmware later via a MSGBUF_TYPE_FLOW_RING_CREATE request (see
 * bwfm_pci_flowring_create_cb()).  Returns 0 or ENOMEM.
 */
int
bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz)
{
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	/* Publish the cleared pointers before the ring goes live. */
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	return 0;
}
1395
1396/* Ring helpers */
1397void
1398bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
1399 struct bwfm_pci_msgring *ring)
1400{
1401 if (sc->sc_pcireg64)
1402 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0xA20)
, (1)))
1403 BWFM_PCI_64_PCIE2REG_H2D_MAILBOX_0, 1)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0xA20)
, (1)))
;
1404 else
1405 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x140)
, (1)))
1406 BWFM_PCI_PCIE2REG_H2D_MAILBOX_0, 1)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x140)
, (1)))
;
1407}
1408
/*
 * Refresh the cached read pointer of a ring from the device.  The
 * index lives either in TCM (device memory), or — when the firmware
 * maintains indices in host memory (sc_dma_idx_sz != 0) — in the
 * shared DMA index buffer.
 */
void
bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->r_idx_addr);
	} else {
		/* Sync before reading the index the device last wrote. */
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr);
	}
}
1424
/*
 * Refresh the cached write pointer of a ring from the device.
 * Mirror image of bwfm_pci_ring_update_rptr(): read from TCM, or from
 * the shared host DMA index buffer when the firmware supports it.
 */
void
bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->w_idx_addr);
	} else {
		/* Sync before reading the index the device last wrote. */
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr);
	}
}
1440
/*
 * Publish our cached read pointer to the device, either into TCM or
 * into the shared host DMA index buffer (synced afterwards so the
 * device sees the store).
 */
void
bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->r_idx_addr, ring->r_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr) = ring->r_ptr;
		/* Flush the store before the device reads the index. */
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
1456
/*
 * Publish our cached write pointer to the device.  Mirror image of
 * bwfm_pci_ring_write_rptr().
 */
void
bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->w_idx_addr, ring->w_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr) = ring->w_ptr;
		/* Flush the store before the device reads the index. */
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
1472
1473/*
1474 * Retrieve a free descriptor to put new stuff in, but don't commit
1475 * to it yet so we can rollback later if any error occurs.
1476 */
1477void *
1478bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
1479 struct bwfm_pci_msgring *ring)
1480{
1481 int available;
1482 char *ret;
1483
1484 bwfm_pci_ring_update_rptr(sc, ring);
1485
1486 if (ring->r_ptr > ring->w_ptr)
1487 available = ring->r_ptr - ring->w_ptr;
1488 else
1489 available = ring->r_ptr + (ring->nitem - ring->w_ptr);
1490
1491 if (available <= 1)
1492 return NULL((void *)0);
1493
1494 ret = BWFM_PCI_DMA_KVA(ring->ring)((void *)(ring->ring)->bdm_kva) + (ring->w_ptr * ring->itemsz);
1495 ring->w_ptr += 1;
1496 if (ring->w_ptr == ring->nitem)
1497 ring->w_ptr = 0;
1498 return ret;
1499}
1500
/*
 * Reserve up to count contiguous descriptors at once; *avail is set to
 * how many were actually reserved (clamped at the ring wrap point so
 * the returned region is contiguous).  Returns NULL and leaves *avail
 * untouched if no descriptor is free.  Like the single-slot variant,
 * nothing is committed yet so the caller can roll back.
 */
void *
bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int count, int *avail)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	/* Free space between w_ptr and r_ptr, accounting for wrap. */
	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available <= 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	/* Keep one slot unused; don't hand out past the wrap point. */
	*avail = min(count, available - 1);
	if (*avail + ring->w_ptr > ring->nitem)
		*avail = ring->nitem - ring->w_ptr;
	ring->w_ptr += *avail;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}
1527
1528/*
1529 * Read number of descriptors available (submitted by the firmware)
1530 * and retrieve pointer to first descriptor.
1531 */
1532void *
1533bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
1534 struct bwfm_pci_msgring *ring, int *avail)
1535{
1536 bwfm_pci_ring_update_wptr(sc, ring);
1537
1538 if (ring->w_ptr >= ring->r_ptr)
1539 *avail = ring->w_ptr - ring->r_ptr;
1540 else
1541 *avail = ring->nitem - ring->r_ptr;
1542
1543 if (*avail == 0)
1544 return NULL((void *)0);
1545
1546 bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((ring
->ring)->bdm_map)), (ring->r_ptr * ring->itemsz),
(*avail * ring->itemsz), (0x02 | 0x08))
1547 ring->r_ptr * ring->itemsz, *avail * ring->itemsz,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((ring
->ring)->bdm_map)), (ring->r_ptr * ring->itemsz),
(*avail * ring->itemsz), (0x02 | 0x08))
1548 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((ring
->ring)->bdm_map)), (ring->r_ptr * ring->itemsz),
(*avail * ring->itemsz), (0x02 | 0x08))
;
1549 return BWFM_PCI_DMA_KVA(ring->ring)((void *)(ring->ring)->bdm_kva) + (ring->r_ptr * ring->itemsz);
1550}
1551
1552/*
1553 * Let firmware know we read N descriptors.
1554 */
1555void
1556bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
1557 struct bwfm_pci_msgring *ring, int nitem)
1558{
1559 ring->r_ptr += nitem;
1560 if (ring->r_ptr == ring->nitem)
1561 ring->r_ptr = 0;
1562 bwfm_pci_ring_write_rptr(sc, ring);
1563}
1564
1565/*
1566 * Let firmware know that we submitted some descriptors.
1567 */
1568void
1569bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
1570 struct bwfm_pci_msgring *ring)
1571{
1572 bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((ring
->ring)->bdm_map)), (0), (((ring->ring)->bdm_size
)), (0x01 | 0x04))
1573 0, BWFM_PCI_DMA_LEN(ring->ring), BUS_DMASYNC_PREREAD |(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((ring
->ring)->bdm_map)), (0), (((ring->ring)->bdm_size
)), (0x01 | 0x04))
1574 BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((ring
->ring)->bdm_map)), (0), (((ring->ring)->bdm_size
)), (0x01 | 0x04))
;
1575 bwfm_pci_ring_write_wptr(sc, ring);
1576 bwfm_pci_ring_bell(sc, ring);
1577}
1578
1579/*
1580 * Rollback N descriptors in case we don't actually want
1581 * to commit to it.
1582 */
1583void
1584bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
1585 struct bwfm_pci_msgring *ring, int nitem)
1586{
1587 if (ring->w_ptr == 0)
1588 ring->w_ptr = ring->nitem - nitem;
1589 else
1590 ring->w_ptr -= nitem;
1591}
1592
1593/*
1594 * Foreach written descriptor on the ring, pass the descriptor to
1595 * a message handler and let the firmware know we handled it.
1596 */
1597void
1598bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
1599 struct mbuf_list *ml)
1600{
1601 void *buf;
1602 int avail, processed;
1603
1604again:
1605 buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
1606 if (buf == NULL((void *)0))
1607 return;
1608
1609 processed = 0;
1610 while (avail) {
1611 bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset, ml);
1612 buf += ring->itemsz;
1613 processed++;
1614 if (processed == 48) {
1615 bwfm_pci_ring_read_commit(sc, ring, processed);
1616 processed = 0;
1617 }
1618 avail--;
1619 }
1620 if (processed)
1621 bwfm_pci_ring_read_commit(sc, ring, processed);
1622 if (ring->r_ptr == 0)
1623 goto again;
1624}
1625
1626void
1627bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf, struct mbuf_list *ml)
1628{
1629 struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_ific_ac.ac_if;
1630 struct msgbuf_ioctl_resp_hdr *resp;
1631 struct msgbuf_tx_status *tx;
1632 struct msgbuf_rx_complete *rx;
1633 struct msgbuf_rx_event *event;
1634 struct msgbuf_common_hdr *msg;
1635 struct msgbuf_flowring_create_resp *fcr;
1636 struct msgbuf_flowring_delete_resp *fdr;
1637 struct bwfm_cmd_flowring_create fdcmd;
1638 struct bwfm_pci_msgring *ring;
1639 struct mbuf *m;
1640 int flowid;
1641
1642 msg = (struct msgbuf_common_hdr *)buf;
1643 switch (msg->msgtype)
1644 {
1645 case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT0x4:
1646 fcr = (struct msgbuf_flowring_create_resp *)buf;
1647 flowid = letoh16(fcr->compl_hdr.flow_ring_id)((__uint16_t)(fcr->compl_hdr.flow_ring_id));
1648 if (flowid < 2)
1649 break;
1650 flowid -= 2;
1651 if (flowid >= sc->sc_max_flowrings)
1652 break;
1653 ring = &sc->sc_flowrings[flowid];
1654 if (ring->status != RING_OPENING)
1655 break;
1656 if (fcr->compl_hdr.status) {
1657 printf("%s: failed to open flowring %d\n",
1658 DEVNAME(sc)((sc)->sc_sc.sc_dev.dv_xname), flowid);
1659 ring->status = RING_CLOSED;
1660 if (ring->m) {
1661 m_freem(ring->m);
1662 ring->m = NULL((void *)0);
1663 }
1664 ifq_restart(&ifp->if_snd);
1665 break;
1666 }
1667 ring->status = RING_OPEN;
1668 if (ring->m != NULL((void *)0)) {
1669 m = ring->m;
1670 ring->m = NULL((void *)0);
1671 if (bwfm_pci_txdata(&sc->sc_sc, m))
1672 m_freem(ring->m);
1673 }
1674 ifq_restart(&ifp->if_snd);
1675 break;
1676 case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT0x6:
1677 fdr = (struct msgbuf_flowring_delete_resp *)buf;
1678 flowid = letoh16(fdr->compl_hdr.flow_ring_id)((__uint16_t)(fdr->compl_hdr.flow_ring_id));
1679 if (flowid < 2)
1680 break;
1681 flowid -= 2;
1682 if (flowid >= sc->sc_max_flowrings)
1683 break;
1684 ring = &sc->sc_flowrings[flowid];
1685 if (ring->status != RING_CLOSING)
1686 break;
1687 if (fdr->compl_hdr.status) {
1688 printf("%s: failed to delete flowring %d\n",
1689 DEVNAME(sc)((sc)->sc_sc.sc_dev.dv_xname), flowid);
1690 break;
1691 }
1692 fdcmd.flowid = flowid;
1693 bwfm_do_async(&sc->sc_sc, bwfm_pci_flowring_delete_cb,
1694 &fdcmd, sizeof(fdcmd));
1695 break;
1696 case MSGBUF_TYPE_IOCTLPTR_REQ_ACK0xA:
1697 m = bwfm_pci_pktid_free(sc, &sc->sc_ioctl_pkts,
1698 letoh32(msg->request_id)((__uint32_t)(msg->request_id)));
1699 if (m == NULL((void *)0))
1700 break;
1701 m_freem(m);
1702 break;
1703 case MSGBUF_TYPE_IOCTL_CMPLT0xC:
1704 resp = (struct msgbuf_ioctl_resp_hdr *)buf;
1705 bwfm_pci_msgbuf_rxioctl(sc, resp);
1706 if_rxr_put(&sc->sc_ioctl_ring, 1)do { (&sc->sc_ioctl_ring)->rxr_alive -= (1); } while
(0)
;
1707 bwfm_pci_fill_rx_rings(sc);
1708 break;
1709 case MSGBUF_TYPE_WL_EVENT0xE:
1710 event = (struct msgbuf_rx_event *)buf;
1711 m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1712 letoh32(event->msg.request_id)((__uint32_t)(event->msg.request_id)));
1713 if (m == NULL((void *)0))
1714 break;
1715 m_adj(m, sc->sc_rx_dataoffset);
1716 m->m_lenm_hdr.mh_len = m->m_pkthdrM_dat.MH.MH_pkthdr.len = letoh16(event->event_data_len)((__uint16_t)(event->event_data_len));
1717 bwfm_rx(&sc->sc_sc, m, ml);
1718 if_rxr_put(&sc->sc_event_ring, 1)do { (&sc->sc_event_ring)->rxr_alive -= (1); } while
(0)
;
1719 bwfm_pci_fill_rx_rings(sc);
1720 break;
1721 case MSGBUF_TYPE_TX_STATUS0x10:
1722 tx = (struct msgbuf_tx_status *)buf;
1723 m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
1724 letoh32(tx->msg.request_id)((__uint32_t)(tx->msg.request_id)) - 1);
1725 if (m == NULL((void *)0))
1726 break;
1727 m_freem(m);
1728 if (sc->sc_tx_pkts_full) {
1729 sc->sc_tx_pkts_full = 0;
1730 ifq_restart(&ifp->if_snd);
1731 }
1732 break;
1733 case MSGBUF_TYPE_RX_CMPLT0x12:
1734 rx = (struct msgbuf_rx_complete *)buf;
1735 m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1736 letoh32(rx->msg.request_id)((__uint32_t)(rx->msg.request_id)));
1737 if (m == NULL((void *)0))
1738 break;
1739 if (letoh16(rx->data_offset)((__uint16_t)(rx->data_offset)))
1740 m_adj(m, letoh16(rx->data_offset)((__uint16_t)(rx->data_offset)));
1741 else if (sc->sc_rx_dataoffset)
1742 m_adj(m, sc->sc_rx_dataoffset);
1743 m->m_lenm_hdr.mh_len = m->m_pkthdrM_dat.MH.MH_pkthdr.len = letoh16(rx->data_len)((__uint16_t)(rx->data_len));
1744 bwfm_rx(&sc->sc_sc, m, ml);
1745 if_rxr_put(&sc->sc_rxbuf_ring, 1)do { (&sc->sc_rxbuf_ring)->rxr_alive -= (1); } while
(0)
;
1746 bwfm_pci_fill_rx_rings(sc);
1747 break;
1748 default:
1749 printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
1750 break;
1751 }
1752}
1753
1754/* Bus core helpers */
/*
 * Point the BAR0 window at the register space of the given backplane
 * core so subsequent sc_reg_ioh accesses hit that core.
 */
void
bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;

	core = bwfm_chip_get_core(bwfm, id);
	if (core == NULL) {
		printf("%s: could not find core to select", DEVNAME(sc));
		return;
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW, core->co_base);
	/* Config writes can get lost on some chips; verify and retry once. */
	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    BWFM_PCI_BAR0_WINDOW, core->co_base);
}
1774
/*
 * Buscore read op: window BAR0 onto the 4K page containing reg and
 * read the 32-bit value at its offset within that page.
 */
uint32_t
bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
}
1786
/*
 * Buscore write op: window BAR0 onto the 4K page containing reg and
 * write the 32-bit value at its offset within that page.
 */
void
bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
}
1798
/*
 * Buscore prepare op: nothing to do on PCIe; always succeeds.
 */
int
bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
{
	return 0;
}
1804
/*
 * Reset the chip via the watchdog while ASPM is temporarily disabled,
 * then restore a set of PCIe config registers that older (rev <= 13)
 * PCIe cores lose across the reset.  Also records whether the chip
 * uses the 64-bit PCIe register layout (rev >= 64) and acks any stale
 * interrupt state.  Always returns 0.
 */
int
bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	/* Disable ASPM around the watchdog reset. */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);

	/* Trip the chipcommon watchdog to reset the whole chip. */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_CHIP_REG_WATCHDOG, 4);
	delay(100 * 1000);

	/* Restore the saved link status/control (re-enables ASPM). */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);

	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	/*
	 * NOTE(review): core is dereferenced without a NULL check here,
	 * unlike bwfm_pci_select_core() — presumably the PCIE2 core is
	 * guaranteed to exist on chips this driver attaches to; confirm.
	 */
	if (core->co_rev <= 13) {
		/* Config registers the old core loses across reset. */
		uint16_t cfg_offset[] = {
			BWFM_PCI_CFGREG_STATUS_CMD,
			BWFM_PCI_CFGREG_PM_CSR,
			BWFM_PCI_CFGREG_MSI_CAP,
			BWFM_PCI_CFGREG_MSI_ADDR_L,
			BWFM_PCI_CFGREG_MSI_ADDR_H,
			BWFM_PCI_CFGREG_MSI_DATA,
			BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
			BWFM_PCI_CFGREG_RBAR_CTRL,
			BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
			BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
			BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
		};

		/* Read each register back and rewrite it through the
		 * indirect config-access window. */
		for (i = 0; i < nitems(cfg_offset); i++) {
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA);
			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
			    DEVNAME(sc), cfg_offset[i], reg));
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
		}
	}
	if (core->co_rev >= 64)
		sc->sc_pcireg64 = 1;

	/* Clear any interrupt state left over from before the reset. */
	reg = bwfm_pci_intr_status(sc);
	if (reg != 0xffffffff)
		bwfm_pci_intr_ack(sc, reg);

	return 0;
}
1864
/*
 * Buscore activate op: write the firmware reset vector to the start
 * of TCM so the ARM core begins execution there.
 */
void
bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, uint32_t rstvec)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
}
1871
/* Map 802.11 TID/priority (from ieee80211_classify()) to TX FIFO. */
static int bwfm_pci_prio2fifo[8] = {
	0, /* best effort */
	1, /* IPTOS_PREC_IMMEDIATE */
	1, /* IPTOS_PREC_PRIORITY */
	0, /* IPTOS_PREC_FLASH */
	2, /* IPTOS_PREC_FLASHOVERRIDE */
	2, /* IPTOS_PREC_CRITIC_ECP */
	3, /* IPTOS_PREC_INTERNETCONTROL */
	3, /* IPTOS_PREC_NETCONTROL */
};
1882
1883int
1884bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
1885{
1886 struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1887#ifndef IEEE80211_STA_ONLY
1888 uint8_t *da = mtod(m, uint8_t *)((uint8_t *)((m)->m_hdr.mh_data));
1889#endif
1890 int flowid, prio, fifo;
1891 int i, found;
1892
1893 prio = ieee80211_classify(ic, m);
1894 fifo = bwfm_pci_prio2fifo[prio];
1895
1896 switch (ic->ic_opmode)
1897 {
1898 case IEEE80211_M_STA:
1899 flowid = fifo;
1900 break;
1901#ifndef IEEE80211_STA_ONLY
1902 case IEEE80211_M_HOSTAP:
1903 if (ETHER_IS_MULTICAST(da)(*(da) & 0x01))
1904 da = etherbroadcastaddr;
1905 flowid = da[5] * 2 + fifo;
1906 break;
1907#endif
1908 default:
1909 printf("%s: state not supported\n", DEVNAME(sc)((sc)->sc_sc.sc_dev.dv_xname));
1910 return ENOBUFS55;
1911 }
1912
1913 found = 0;
1914 flowid = flowid % sc->sc_max_flowrings;
1915 for (i = 0; i < sc->sc_max_flowrings; i++) {
1916 if (ic->ic_opmode == IEEE80211_M_STA &&
1917 sc->sc_flowrings[flowid].status >= RING_OPEN &&
1918 sc->sc_flowrings[flowid].fifo == fifo) {
1919 found = 1;
1920 break;
1921 }
1922#ifndef IEEE80211_STA_ONLY
1923 if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
1924 sc->sc_flowrings[flowid].status >= RING_OPEN &&
1925 sc->sc_flowrings[flowid].fifo == fifo &&
1926 !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)__builtin_memcmp((sc->sc_flowrings[flowid].mac), (da), (6)
)
) {
1927 found = 1;
1928 break;
1929 }
1930#endif
1931 flowid = (flowid + 1) % sc->sc_max_flowrings;
1932 }
1933
1934 if (found)
1935 return flowid;
1936
1937 return -1;
1938}
1939
/*
 * Start creation of a flowring for this mbuf's traffic class.  Claims
 * the first RING_CLOSED slot starting at the hash position (same
 * derivation as bwfm_pci_flowring_lookup()) and queues the actual ring
 * setup to process context; the mbuf is held by the async command and
 * transmitted once the firmware confirms the ring is open.
 */
void
bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct bwfm_cmd_flowring_create cmd;
#ifndef IEEE80211_STA_ONLY
	uint8_t *da = mtod(m, uint8_t *);
#endif
	struct bwfm_pci_msgring *ring;
	int flowid, prio, fifo;
	int i, found;

	prio = ieee80211_classify(ic, m);
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = etherbroadcastaddr;
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return;
	}

	/* Claim a closed slot; mark it OPENING so nobody else takes it. */
	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[flowid];
		if (ring->status == RING_CLOSED) {
			ring->status = RING_OPENING;
			found = 1;
			break;
		}
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	/*
	 * We cannot recover from that so far. Only a stop/init
	 * cycle can revive this if it ever happens at all.
	 */
	if (!found) {
		printf("%s: no flowring available\n", DEVNAME(sc));
		return;
	}

	cmd.m = m;
	cmd.prio = prio;
	cmd.flowid = flowid;
	bwfm_do_async(&sc->sc_sc, bwfm_pci_flowring_create_cb, &cmd, sizeof(cmd));
}
1998
/*
 * Async (process context) part of flowring creation: allocate the
 * ring's DMA memory and send a MSGBUF_TYPE_FLOW_RING_CREATE request
 * on the control submit ring.  The firmware answers with
 * FLOW_RING_CREATE_CMPLT, handled in bwfm_pci_msg_rx(), which flips
 * the ring to RING_OPEN and transmits the stashed mbuf.
 */
void
bwfm_pci_flowring_create_cb(struct bwfm_softc *bwfm, void *arg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
#ifndef IEEE80211_STA_ONLY
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
#endif
	struct bwfm_cmd_flowring_create *cmd = arg;
	struct msgbuf_tx_flowring_create_req *req;
	struct bwfm_pci_msgring *ring;
	uint8_t *da, *sa;
	int s;

	/* DA/SA from the 802.3 header at the front of the mbuf. */
	da = mtod(cmd->m, char *) + 0 * ETHER_ADDR_LEN;
	sa = mtod(cmd->m, char *) + 1 * ETHER_ADDR_LEN;

	ring = &sc->sc_flowrings[cmd->flowid];
	if (ring->status != RING_OPENING) {
		printf("%s: flowring not opening\n", DEVNAME(sc));
		return;
	}

	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
		printf("%s: cannot setup flowring\n", DEVNAME(sc));
		return;
	}

	s = splnet();
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		splx(s);
		return;
	}

	ring->status = RING_OPENING;
	ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
	ring->m = cmd->m;
	memcpy(ring->mac, da, ETHER_ADDR_LEN);
#ifndef IEEE80211_STA_ONLY
	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
		memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
#endif

	/* Firmware flowring ids start at 2 (0/1 are control/RX submit). */
	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	req->tid = bwfm_pci_prio2fifo[cmd->prio];
	req->flow_ring_id = letoh16(cmd->flowid + 2);
	memcpy(req->da, da, ETHER_ADDR_LEN);
	memcpy(req->sa, sa, ETHER_ADDR_LEN);
	req->flow_ring_addr.high_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	req->flow_ring_addr.low_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	req->max_items = letoh16(512);
	req->len_item = letoh16(48);

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	splx(s);
}
2060
/*
 * Ask the firmware to tear down an open flowring.  The ring goes to
 * RING_CLOSING; the FLOW_RING_DELETE_CMPLT response (handled in
 * bwfm_pci_msg_rx()) frees the DMA memory and marks it RING_CLOSED.
 */
void
bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
{
	struct msgbuf_tx_flowring_delete_req *req;
	struct bwfm_pci_msgring *ring;
	int s;

	ring = &sc->sc_flowrings[flowid];
	if (ring->status != RING_OPEN) {
		printf("%s: flowring not open\n", DEVNAME(sc));
		return;
	}

	s = splnet();
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		splx(s);
		return;
	}

	ring->status = RING_CLOSING;

	/* Firmware flowring ids start at 2 (0/1 are control/RX submit). */
	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	req->flow_ring_id = letoh16(flowid + 2);
	req->reason = 0;

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	splx(s);
}
2093
/*
 * Async (process context) completion of a flowring delete: release
 * the ring's DMA memory and make the slot reusable.
 */
void
bwfm_pci_flowring_delete_cb(struct bwfm_softc *bwfm, void *arg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_cmd_flowring_create *cmd = arg;
	struct bwfm_pci_msgring *ring;

	ring = &sc->sc_flowrings[cmd->flowid];
	bwfm_pci_dmamem_free(sc, ring->ring);
	ring->status = RING_CLOSED;
}
2105
2106void
2107bwfm_pci_stop(struct bwfm_softc *bwfm)
2108{
2109 struct bwfm_pci_softc *sc = (void *)bwfm;
2110 struct bwfm_pci_msgring *ring;
2111 int i;
2112
2113 for (i = 0; i < sc->sc_max_flowrings; i++) {
2114 ring = &sc->sc_flowrings[i];
2115 if (ring->status == RING_OPEN)
2116 bwfm_pci_flowring_delete(sc, i);
2117 }
2118}
2119
2120int
2121bwfm_pci_txcheck(struct bwfm_softc *bwfm)
2122{
2123 struct bwfm_pci_softc *sc = (void *)bwfm;
2124 struct bwfm_pci_msgring *ring;
2125 int i;
2126
2127 /* If we are transitioning, we cannot send. */
2128 for (i = 0; i < sc->sc_max_flowrings; i++) {
2129 ring = &sc->sc_flowrings[i];
2130 if (ring->status == RING_OPENING)
2131 return ENOBUFS55;
2132 }
2133
2134 if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
2135 sc->sc_tx_pkts_full = 1;
2136 return ENOBUFS55;
2137 }
2138
2139 return 0;
2140}
2141
/*
 * Bus txdata op: queue one mbuf on its flowring as a TX_POST message.
 * If no flowring exists yet for this traffic class, one is created
 * asynchronously and the mbuf is transmitted once it opens.  Returns
 * 0 on success (or deferral), ENOBUFS when the ring or packet-id pool
 * is exhausted.
 */
int
bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf *m)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_msgring *ring;
	struct msgbuf_tx_msghdr *tx;
	uint32_t pktid;
	paddr_t paddr;
	int flowid, ret;

	flowid = bwfm_pci_flowring_lookup(sc, m);
	if (flowid < 0) {
		/*
		 * We cannot send the packet right now as there is
		 * no flowring yet. The flowring will be created
		 * asynchronously. While the ring is transitioning
		 * the TX check will tell the upper layers that we
		 * cannot send packets right now. When the flowring
		 * is created the queue will be restarted and this
		 * mbuf will be transmitted.
		 */
		bwfm_pci_flowring_create(sc, m);
		return 0;
	}

	ring = &sc->sc_flowrings[flowid];
	if (ring->status == RING_OPENING ||
	    ring->status == RING_CLOSING) {
		printf("%s: tried to use a flow that was "
		    "transitioning in status %d\n",
		    DEVNAME(sc), ring->status);
		return ENOBUFS;
	}

	tx = bwfm_pci_ring_write_reserve(sc, ring);
	if (tx == NULL)
		return ENOBUFS;

	memset(tx, 0, sizeof(*tx));
	tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
	tx->msg.ifidx = 0;
	tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
	tx->flags |= ieee80211_classify(&sc->sc_sc.sc_ic, m) <<
	    BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
	tx->seg_cnt = 1;
	/* Ethernet header goes in the descriptor, payload via DMA. */
	memcpy(tx->txhdr, mtod(m, char *), ETHER_HDR_LEN);

	ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, m, &pktid, &paddr);
	if (ret) {
		if (ret == ENOBUFS) {
			printf("%s: no pktid available for TX\n",
			    DEVNAME(sc));
			sc->sc_tx_pkts_full = 1;
		}
		/* Roll back the descriptor we reserved above. */
		bwfm_pci_ring_write_cancel(sc, ring, 1);
		return ret;
	}
	paddr += ETHER_HDR_LEN;

	/*
	 * request_id is stored off-by-one; the TX_STATUS handler in
	 * bwfm_pci_msg_rx() subtracts 1 again to recover the pktid.
	 */
	tx->msg.request_id = htole32(pktid + 1);
	tx->data_len = htole16(m->m_len - ETHER_HDR_LEN);
	tx->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
	tx->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);

	bwfm_pci_ring_write_commit(sc, ring);
	return 0;
}
2209
/*
 * Send a host-to-device mailbox value.  Waits (polling, up to ~1s)
 * for any previous mailbox transaction to be consumed, writes the new
 * value to TCM and pokes the device via the SB mailbox config register
 * (twice on old cores, which can miss a single write).  Returns 0 or
 * EIO if a previous transaction never completed.
 */
int
bwfm_pci_send_mb_data(struct bwfm_pci_softc *sc, uint32_t htod_mb_data)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	for (i = 0; i < 100; i++) {
		reg = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_htod_mb_data_addr);
		if (reg == 0)
			break;
		delay(10 * 1000);
	}
	if (i == 100) {
		DPRINTF(("%s: MB transaction already pending\n", DEVNAME(sc)));
		return EIO;
	}

	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_htod_mb_data_addr, htod_mb_data);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_REG_SBMBX, 1);

	/*
	 * NOTE(review): core is dereferenced without a NULL check —
	 * presumably the PCIE2 core always exists here; confirm against
	 * bwfm_chip_get_core() behavior.
	 */
	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13)
		pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_REG_SBMBX, 1);

	return 0;
}
2240
2241void
2242bwfm_pci_handle_mb_data(struct bwfm_pci_softc *sc)
2243{
2244 uint32_t reg;
2245
2246 reg = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_dtoh_mb_data_addr)))
2247 sc->sc_dtoh_mb_data_addr)((sc->sc_tcm_iot)->read_4((sc->sc_tcm_ioh), (sc->
sc_dtoh_mb_data_addr)))
;
2248 if (reg == 0)
2249 return;
2250
2251 bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_dtoh_mb_data_addr), (0)))
2252 sc->sc_dtoh_mb_data_addr, 0)((sc->sc_tcm_iot)->write_4((sc->sc_tcm_ioh), (sc->
sc_dtoh_mb_data_addr), (0)))
;
2253
2254 if (reg & BWFM_PCI_D2H_DEV_D3_ACK0x00000001) {
2255 sc->sc_mbdata_done = 1;
2256 wakeup(&sc->sc_mbdata_done);
2257 }
2258
2259 /* TODO: support more events */
2260 if (reg & ~BWFM_PCI_D2H_DEV_D3_ACK0x00000001)
2261 printf("%s: handle MB data 0x%08x\n", DEVNAME(sc)((sc)->sc_sc.sc_dev.dv_xname), reg);
2262}
2263
#ifdef BWFM_DEBUG
/*
 * Drain the firmware's in-memory console ring up to its current
 * write index and echo the characters via DPRINTFN.
 */
void
bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
{
	uint32_t widx;
	uint8_t ch;

	widx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);

	if (widx != sc->sc_console_readidx)
		DPRINTFN(3, ("BWFM CONSOLE: "));
	while (widx != sc->sc_console_readidx) {
		ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_console_buf_addr + sc->sc_console_readidx);
		/* The console buffer is a circular ring; wrap the index. */
		if (++sc->sc_console_readidx == sc->sc_console_buf_size)
			sc->sc_console_readidx = 0;
		if (ch == '\r')
			continue;
		DPRINTFN(3, ("%c", ch));
	}
}
#endif
2285
2286int
2287bwfm_pci_intr(void *v)
2288{
2289 struct bwfm_pci_softc *sc = (void *)v;
2290 struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_ific_ac.ac_if;
2291 struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 };
2292 uint32_t status, mask;
2293
2294 if (!sc->sc_initialized)
2295 return 0;
2296
2297 status = bwfm_pci_intr_status(sc);
2298 /* FIXME: interrupt status seems to be zero? */
2299 if (status == 0 && sc->sc_pcireg64)
2300 status |= BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB(1 | 2 | 4 | 8 | 0x10 | 0x20 | 0x40 | 0x80 | 0x100 | 0x200 | 0x400
| 0x800 | 0x1000 | 0x2000 | 0x4000 | 0x8000)
;
2301 if (status == 0)
2302 return 0;
2303
2304 bwfm_pci_intr_disable(sc);
2305 bwfm_pci_intr_ack(sc, status);
2306
2307 if (!sc->sc_pcireg64 &&
2308 (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_00x0100 |
2309 BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_10x0200)))
2310 bwfm_pci_handle_mb_data(sc);
2311
2312 mask = BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB(0x10000 | 0x20000 | 0x40000 | 0x80000 | 0x100000 | 0x200000 |
0x400000 | 0x800000)
;
2313 if (sc->sc_pcireg64)
2314 mask = BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB(1 | 2 | 4 | 8 | 0x10 | 0x20 | 0x40 | 0x80 | 0x100 | 0x200 | 0x400
| 0x800 | 0x1000 | 0x2000 | 0x4000 | 0x8000)
;
2315
2316 if (status & mask) {
2317 bwfm_pci_ring_rx(sc, &sc->sc_rx_complete, &ml);
2318 bwfm_pci_ring_rx(sc, &sc->sc_tx_complete, &ml);
2319 bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete, &ml);
2320
2321 if (ifiq_input(&ifp->if_rcv, &ml))
2322 if_rxr_livelocked(&sc->sc_rxbuf_ring);
2323 }
2324
2325#ifdef BWFM_DEBUG
2326 bwfm_pci_debug_console(sc);
2327#endif
2328
2329 bwfm_pci_intr_enable(sc);
2330 return 1;
2331}
2332
2333void
2334bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
2335{
2336 if (sc->sc_pcireg64)
2337 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0xC34)
, ((1 | 2 | 4 | 8 | 0x10 | 0x20 | 0x40 | 0x80 | 0x100 | 0x200
| 0x400 | 0x800 | 0x1000 | 0x2000 | 0x4000 | 0x8000))))
2338 BWFM_PCI_64_PCIE2REG_MAILBOXMASK,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0xC34)
, ((1 | 2 | 4 | 8 | 0x10 | 0x20 | 0x40 | 0x80 | 0x100 | 0x200
| 0x400 | 0x800 | 0x1000 | 0x2000 | 0x4000 | 0x8000))))
2339 BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0xC34)
, ((1 | 2 | 4 | 8 | 0x10 | 0x20 | 0x40 | 0x80 | 0x100 | 0x200
| 0x400 | 0x800 | 0x1000 | 0x2000 | 0x4000 | 0x8000))))
;
2340 else
2341 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x4C),
(0x0100 | 0x0200 | (0x10000 | 0x20000 | 0x40000 | 0x80000 | 0x100000
| 0x200000 | 0x400000 | 0x800000))))
2342 BWFM_PCI_PCIE2REG_MAILBOXMASK,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x4C),
(0x0100 | 0x0200 | (0x10000 | 0x20000 | 0x40000 | 0x80000 | 0x100000
| 0x200000 | 0x400000 | 0x800000))))
2343 BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x4C),
(0x0100 | 0x0200 | (0x10000 | 0x20000 | 0x40000 | 0x80000 | 0x100000
| 0x200000 | 0x400000 | 0x800000))))
2344 BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x4C),
(0x0100 | 0x0200 | (0x10000 | 0x20000 | 0x40000 | 0x80000 | 0x100000
| 0x200000 | 0x400000 | 0x800000))))
2345 BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x4C),
(0x0100 | 0x0200 | (0x10000 | 0x20000 | 0x40000 | 0x80000 | 0x100000
| 0x200000 | 0x400000 | 0x800000))))
;
2346}
2347
2348void
2349bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
2350{
2351 if (sc->sc_pcireg64)
2352 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0xC34)
, (0)))
2353 BWFM_PCI_64_PCIE2REG_MAILBOXMASK, 0)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0xC34)
, (0)))
;
2354 else
2355 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x4C),
(0)))
2356 BWFM_PCI_PCIE2REG_MAILBOXMASK, 0)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x4C),
(0)))
;
2357}
2358
2359uint32_t
2360bwfm_pci_intr_status(struct bwfm_pci_softc *sc)
2361{
2362 if (sc->sc_pcireg64)
2363 return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->read_4((sc->sc_reg_ioh), (0xC30))
)
2364 BWFM_PCI_64_PCIE2REG_MAILBOXINT)((sc->sc_reg_iot)->read_4((sc->sc_reg_ioh), (0xC30))
)
;
2365 else
2366 return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->read_4((sc->sc_reg_ioh), (0x48)))
2367 BWFM_PCI_PCIE2REG_MAILBOXINT)((sc->sc_reg_iot)->read_4((sc->sc_reg_ioh), (0x48)));
2368}
2369
2370void
2371bwfm_pci_intr_ack(struct bwfm_pci_softc *sc, uint32_t status)
2372{
2373 if (sc->sc_pcireg64)
2374 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0xC30)
, (status)))
2375 BWFM_PCI_64_PCIE2REG_MAILBOXINT, status)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0xC30)
, (status)))
;
2376 else
2377 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x48),
(status)))
2378 BWFM_PCI_PCIE2REG_MAILBOXINT, status)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x48),
(status)))
;
2379}
2380
2381uint32_t
2382bwfm_pci_intmask(struct bwfm_pci_softc *sc)
2383{
2384 if (sc->sc_pcireg64)
2385 return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->read_4((sc->sc_reg_ioh), (0xC14))
)
2386 BWFM_PCI_64_PCIE2REG_INTMASK)((sc->sc_reg_iot)->read_4((sc->sc_reg_ioh), (0xC14))
)
;
2387 else
2388 return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->read_4((sc->sc_reg_ioh), (0x24)))
2389 BWFM_PCI_PCIE2REG_INTMASK)((sc->sc_reg_iot)->read_4((sc->sc_reg_ioh), (0x24)));
2390}
2391
2392void
2393bwfm_pci_hostready(struct bwfm_pci_softc *sc)
2394{
2395 if ((sc->sc_shared_flags & BWFM_SHARED_INFO_HOSTRDY_DB10x10000000) == 0)
2396 return;
2397
2398 if (sc->sc_pcireg64)
2399 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0xA24)
, (1)))
2400 BWFM_PCI_64_PCIE2REG_H2D_MAILBOX_1, 1)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0xA24)
, (1)))
;
2401 else
2402 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x144)
, (1)))
2403 BWFM_PCI_PCIE2REG_H2D_MAILBOX_1, 1)((sc->sc_reg_iot)->write_4((sc->sc_reg_ioh), (0x144)
, (1)))
;
2404}
2405
2406/* Msgbuf protocol implementation */
2407int
2408bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
2409 int cmd, char *buf, size_t *len)
2410{
2411 struct bwfm_pci_softc *sc = (void *)bwfm;
2412 struct msgbuf_ioctl_req_hdr *req;
2413 struct bwfm_pci_ioctl *ctl;
2414 struct mbuf *m;
2415 uint32_t pktid;
2416 paddr_t paddr;
2417 size_t buflen;
2418 int s;
2419
2420 buflen = min(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN1518);
2421 m = MCLGETL(NULL, M_DONTWAIT, buflen)m_clget((((void *)0)), (0x0002), (buflen));
2422 if (m == NULL((void *)0))
2423 return 1;
2424 m->m_lenm_hdr.mh_len = m->m_pkthdrM_dat.MH.MH_pkthdr.len = buflen;
2425
2426 if (buf)
2427 memcpy(mtod(m, char *), buf, buflen)__builtin_memcpy((((char *)((m)->m_hdr.mh_data))), (buf), (
buflen))
;
2428 else
2429 memset(mtod(m, char *), 0, buflen)__builtin_memset((((char *)((m)->m_hdr.mh_data))), (0), (buflen
))
;
2430
2431 s = splnet()splraise(0x7);
2432 req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
2433 if (req == NULL((void *)0)) {
2434 splx(s)spllower(s);
2435 m_freem(m);
2436 return 1;
2437 }
2438
2439 if (bwfm_pci_pktid_new(sc, &sc->sc_ioctl_pkts, m, &pktid, &paddr)) {
2440 bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
2441 splx(s)spllower(s);
2442 m_freem(m);
2443 return 1;
2444 }
2445
2446 ctl = malloc(sizeof(*ctl), M_TEMP127, M_WAITOK0x0001|M_ZERO0x0008);
2447 ctl->transid = sc->sc_ioctl_transid++;
2448 TAILQ_INSERT_TAIL(&sc->sc_ioctlq, ctl, next)do { (ctl)->next.tqe_next = ((void *)0); (ctl)->next.tqe_prev
= (&sc->sc_ioctlq)->tqh_last; *(&sc->sc_ioctlq
)->tqh_last = (ctl); (&sc->sc_ioctlq)->tqh_last =
&(ctl)->next.tqe_next; } while (0)
;
2449
2450 req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ0x9;
2451 req->msg.ifidx = 0;
2452 req->msg.flags = 0;
2453 req->msg.request_id = htole32(pktid)((__uint32_t)(pktid));
2454 req->cmd = htole32(cmd)((__uint32_t)(cmd));
2455 req->output_buf_len = htole16(*len)((__uint16_t)(*len));
2456 req->trans_id = htole16(ctl->transid)((__uint16_t)(ctl->transid));
2457
2458 req->input_buf_len = htole16(m->m_len)((__uint16_t)(m->m_hdr.mh_len));
2459 req->req_buf_addr.high_addr = htole32((uint64_t)paddr >> 32)((__uint32_t)((uint64_t)paddr >> 32));
2460 req->req_buf_addr.low_addr = htole32(paddr & 0xffffffff)((__uint32_t)(paddr & 0xffffffff));
2461
2462 bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
2463 splx(s)spllower(s);
2464
2465 tsleep_nsec(ctl, PWAIT32, "bwfm", SEC_TO_NSEC(1));
2466 TAILQ_REMOVE(&sc->sc_ioctlq, ctl, next)do { if (((ctl)->next.tqe_next) != ((void *)0)) (ctl)->
next.tqe_next->next.tqe_prev = (ctl)->next.tqe_prev; else
(&sc->sc_ioctlq)->tqh_last = (ctl)->next.tqe_prev
; *(ctl)->next.tqe_prev = (ctl)->next.tqe_next; ((ctl)->
next.tqe_prev) = ((void *)-1); ((ctl)->next.tqe_next) = ((
void *)-1); } while (0)
;
2467
2468 if (ctl->m == NULL((void *)0)) {
2469 free(ctl, M_TEMP127, sizeof(*ctl));
2470 return 1;
2471 }
2472
2473 *len = min(ctl->retlen, m->m_lenm_hdr.mh_len);
2474 *len = min(*len, buflen);
2475 if (buf)
2476 m_copydata(ctl->m, 0, *len, buf);
2477 m_freem(ctl->m);
2478
2479 if (ctl->status < 0) {
2480 free(ctl, M_TEMP127, sizeof(*ctl));
2481 return 1;
2482 }
2483
2484 free(ctl, M_TEMP127, sizeof(*ctl));
2485 return 0;
2486}
2487
2488int
2489bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
2490 int cmd, char *buf, size_t len)
2491{
2492 return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
2493}
2494
2495void
2496bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *sc,
2497 struct msgbuf_ioctl_resp_hdr *resp)
2498{
2499 struct bwfm_pci_ioctl *ctl, *tmp;
2500 struct mbuf *m;
2501
2502 m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
2503 letoh32(resp->msg.request_id)((__uint32_t)(resp->msg.request_id)));
2504
2505 TAILQ_FOREACH_SAFE(ctl, &sc->sc_ioctlq, next, tmp)for ((ctl) = ((&sc->sc_ioctlq)->tqh_first); (ctl) !=
((void *)0) && ((tmp) = ((ctl)->next.tqe_next), 1
); (ctl) = (tmp))
{
2506 if (ctl->transid != letoh16(resp->trans_id)((__uint16_t)(resp->trans_id)))
2507 continue;
2508 ctl->m = m;
2509 ctl->retlen = letoh16(resp->resp_len)((__uint16_t)(resp->resp_len));
2510 ctl->status = letoh16(resp->compl_hdr.status)((__uint16_t)(resp->compl_hdr.status));
2511 wakeup(ctl);
2512 return;
2513 }
2514
2515 m_freem(m);
2516}