Bug Summary

File: dev/ic/pgt.c
Warning: line 337, column 3
Value stored to 'fwlen' is never read
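
This is a dead-store report from the deadcode.DeadStores checker: the value
assigned to 'fwlen' at line 337 cannot be observed, because every path from
that store either reassigns the variable before reading it or never reads it
again. A minimal reproducer of the same pattern (hypothetical code, not taken
from pgt.c):

    int
    consume(int len)
    {
            int chunks = 0;

            while (len > 4) {
                    chunks++;
                    len -= 4;       /* read again by the loop condition */
            }
            chunks++;
            len -= 4;               /* dead store: 'len' is never read again */
            return (chunks);
    }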

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name pgt.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/ic/pgt.c
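
(The dead-store diagnostic below is produced by the -analyzer-checker=deadcode
option above, which enables the deadcode.DeadStores check.)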
1/* $OpenBSD: pgt.c,v 1.104 2023/11/10 15:51:20 bluhm Exp $ */
2
3/*
4 * Copyright (c) 2006 Claudio Jeker <claudio@openbsd.org>
5 * Copyright (c) 2006 Marcus Glocker <mglocker@openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20/*
21 * Copyright (c) 2004 Fujitsu Laboratories of America, Inc.
22 * Copyright (c) 2004 Brian Fundakowski Feldman
23 * All rights reserved.
24 *
25 * Redistribution and use in source and binary forms, with or without
26 * modification, are permitted provided that the following conditions
27 * are met:
28 * 1. Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * 2. Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in the
32 * documentation and/or other materials provided with the distribution.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
35 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
38 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44 * SUCH DAMAGE.
45 */
46
47#include "bpfilter.h"
48
49#include <sys/param.h>
50#include <sys/systm.h>
51#include <sys/kernel.h>
52#include <sys/malloc.h>
53#include <sys/socket.h>
54#include <sys/mbuf.h>
55#include <sys/endian.h>
56#include <sys/sockio.h>
57#include <sys/kthread.h>
58#include <sys/time.h>
59#include <sys/ioctl.h>
60#include <sys/device.h>
61
62#include <machine/bus.h>
63#include <machine/intr.h>
64
65#include <net/if.h>
66#include <net/if_llc.h>
67#include <net/if_media.h>
68
69#if NBPFILTER > 0
70#include <net/bpf.h>
71#endif
72
73#include <netinet/in.h>
74#include <netinet/if_ether.h>
75
76#include <net80211/ieee80211_var.h>
77#include <net80211/ieee80211_radiotap.h>
78
79#include <dev/ic/pgtreg.h>
80#include <dev/ic/pgtvar.h>
81
82#include <dev/ic/if_wireg.h>
83#include <dev/ic/if_wi_ieee.h>
84#include <dev/ic/if_wivar.h>
85
86#ifdef PGT_DEBUG
87#define DPRINTF(x) do { printf x; } while (0)
88#else
89#define DPRINTF(x)
90#endif
91
92#define SETOID(oid, var, size) { \
93 if (pgt_oid_set(sc, oid, var, size) != 0) \
94 break; \
95}
96
97/*
98 * This is a driver for the Intersil Prism family of 802.11g network cards,
99 * based upon version 1.2 of the Linux driver.
100 */
101
102#define SCAN_TIMEOUT 5 /* 5 seconds */
103
104struct cfdriver pgt_cd = {
105 NULL, "pgt", DV_IFNET
106};
107
108void pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr);
109int pgt_media_change(struct ifnet *ifp);
110void pgt_write_memory_barrier(struct pgt_softc *);
111uint32_t pgt_read_4(struct pgt_softc *, uint16_t);
112void pgt_write_4(struct pgt_softc *, uint16_t, uint32_t);
113void pgt_write_4_flush(struct pgt_softc *, uint16_t, uint32_t);
114void pgt_debug_events(struct pgt_softc *, const char *);
115uint32_t pgt_queue_frags_pending(struct pgt_softc *, enum pgt_queue);
116void pgt_reinit_rx_desc_frag(struct pgt_softc *, struct pgt_desc *);
117int pgt_load_tx_desc_frag(struct pgt_softc *, enum pgt_queue,
118 struct pgt_desc *);
119void pgt_unload_tx_desc_frag(struct pgt_softc *, struct pgt_desc *);
120int pgt_load_firmware(struct pgt_softc *);
121void pgt_cleanup_queue(struct pgt_softc *, enum pgt_queue,
122 struct pgt_frag *);
123int pgt_reset(struct pgt_softc *);
124void pgt_stop(struct pgt_softc *, unsigned int);
125void pgt_reboot(struct pgt_softc *);
126void pgt_init_intr(struct pgt_softc *);
127void pgt_update_intr(struct pgt_softc *, int);
128struct mbuf
129 *pgt_ieee80211_encap(struct pgt_softc *, struct ether_header *,
130 struct mbuf *, struct ieee80211_node **);
131void pgt_input_frames(struct pgt_softc *, struct mbuf *);
132void pgt_wakeup_intr(struct pgt_softc *);
133void pgt_sleep_intr(struct pgt_softc *);
134void pgt_empty_traps(struct pgt_softc_kthread *);
135void pgt_per_device_kthread(void *);
136void pgt_async_reset(struct pgt_softc *);
137void pgt_async_update(struct pgt_softc *);
138void pgt_txdone(struct pgt_softc *, enum pgt_queue);
139void pgt_rxdone(struct pgt_softc *, enum pgt_queue);
140void pgt_trap_received(struct pgt_softc *, uint32_t, void *, size_t);
141void pgt_mgmtrx_completion(struct pgt_softc *, struct pgt_mgmt_desc *);
142struct mbuf
143 *pgt_datarx_completion(struct pgt_softc *, enum pgt_queue);
144int pgt_oid_get(struct pgt_softc *, enum pgt_oid, void *, size_t);
145int pgt_oid_retrieve(struct pgt_softc *, enum pgt_oid, void *, size_t);
146int pgt_oid_set(struct pgt_softc *, enum pgt_oid, const void *, size_t);
147void pgt_state_dump(struct pgt_softc *);
148int pgt_mgmt_request(struct pgt_softc *, struct pgt_mgmt_desc *);
149void pgt_desc_transmit(struct pgt_softc *, enum pgt_queue,
150 struct pgt_desc *, uint16_t, int);
151void pgt_maybe_trigger(struct pgt_softc *, enum pgt_queue);
152struct ieee80211_node
153 *pgt_ieee80211_node_alloc(struct ieee80211com *);
154void pgt_ieee80211_newassoc(struct ieee80211com *,
155 struct ieee80211_node *, int);
156void pgt_ieee80211_node_free(struct ieee80211com *,
157 struct ieee80211_node *);
158void pgt_ieee80211_node_copy(struct ieee80211com *,
159 struct ieee80211_node *,
160 const struct ieee80211_node *);
161int pgt_ieee80211_send_mgmt(struct ieee80211com *,
162 struct ieee80211_node *, int, int, int);
163int pgt_net_attach(struct pgt_softc *);
164void pgt_start(struct ifnet *);
165int pgt_ioctl(struct ifnet *, u_long, caddr_t);
166void pgt_obj_bss2scanres(struct pgt_softc *,
167 struct pgt_obj_bss *, struct wi_scan_res *, uint32_t);
168void node_mark_active_ap(void *, struct ieee80211_node *);
169void node_mark_active_adhoc(void *, struct ieee80211_node *);
170void pgt_watchdog(struct ifnet *);
171int pgt_init(struct ifnet *);
172void pgt_update_hw_from_sw(struct pgt_softc *, int);
173void pgt_hostap_handle_mlme(struct pgt_softc *, uint32_t,
174 struct pgt_obj_mlme *);
175void pgt_update_sw_from_hw(struct pgt_softc *,
176 struct pgt_async_trap *, struct mbuf *);
177int pgt_newstate(struct ieee80211com *, enum ieee80211_state, int);
178int pgt_drain_tx_queue(struct pgt_softc *, enum pgt_queue);
179int pgt_dma_alloc(struct pgt_softc *);
180int pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq);
181void pgt_dma_free(struct pgt_softc *);
182void pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq);
183void pgt_wakeup(struct pgt_softc *);
184
185void
186pgt_write_memory_barrier(struct pgt_softc *sc)
187{
188 bus_space_barrier(sc->sc_iotag, sc->sc_iohandle, 0, 0,
189 BUS_SPACE_BARRIER_WRITE);
190}
191
192u_int32_t
193pgt_read_4(struct pgt_softc *sc, uint16_t offset)
194{
195 return (bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, offset));
196}
197
198void
199pgt_write_4(struct pgt_softc *sc, uint16_t offset, uint32_t value)
200{
201 bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
202}
203
204/*
205 * Write out 4 bytes and cause a PCI flush by reading back in on a
206 * harmless register.
207 */
208void
209pgt_write_4_flush(struct pgt_softc *sc, uint16_t offset, uint32_t value)
210{
211 bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
212 (void)bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, PGT_REG_INT_EN);
213}
214
215/*
216 * Print the state of events in the queues from an interrupt or a trigger.
217 */
218void
219pgt_debug_events(struct pgt_softc *sc, const char *when)
220{
221#define COUNT(i) \
222 letoh32(sc->sc_cb->pcb_driver_curfrag[i]) - \
223 letoh32(sc->sc_cb->pcb_device_curfrag[i])
224 if (sc->sc_debug & SC_DEBUG_EVENTS)
225 DPRINTF(("%s: ev%s: %u %u %u %u %u %u\n",
226 sc->sc_dev.dv_xname, when, COUNT(0), COUNT(1), COUNT(2),
227 COUNT(3), COUNT(4), COUNT(5)));
228#undef COUNT
229}
230
231uint32_t
232pgt_queue_frags_pending(struct pgt_softc *sc, enum pgt_queue pq)
233{
234 return (letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) -
235 letoh32(sc->sc_cb->pcb_device_curfrag[pq]));
236}
237
238void
239pgt_reinit_rx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
240{
241 pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
242 pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
243 pd->pd_fragp->pf_flags = 0;
244
245 bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
246 BUS_DMASYNC_POSTWRITE);
247}
248
249int
250pgt_load_tx_desc_frag(struct pgt_softc *sc, enum pgt_queue pq,
251 struct pgt_desc *pd)
252{
253 int error;
254
255 error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam, pd->pd_mem,
256 PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
257 if (error) {
258 DPRINTF(("%s: unable to load %s tx DMA: %d\n",
259 sc->sc_dev.dv_xname,
260 pgt_queue_is_data(pq) ? "data" : "mgmt", error));
261 return (error);
262 }
263 pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
264 pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
265 pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
266 pd->pd_fragp->pf_flags = htole16(0);
267
268 bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
269 BUS_DMASYNC_POSTWRITE);
270
271 return (0);
272}
273
274void
275pgt_unload_tx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
276{
277 bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
278 pd->pd_dmaaddr = 0;
279}
280
281int
282pgt_load_firmware(struct pgt_softc *sc)
283{
284 int error, reg, dirreg, fwoff, ucodeoff, fwlen;
285 uint8_t *ucode;
286 uint32_t *uc;
287 size_t size;
288 char *name;
289
290 if (sc->sc_flags & SC_ISL3877)
291 name = "pgt-isl3877";
292 else
293 name = "pgt-isl3890"; /* includes isl3880 */
294
295 error = loadfirmware(name, &ucode, &size);
296
297 if (error != 0) {
298 DPRINTF(("%s: error %d, could not read firmware %s\n",
299 sc->sc_dev.dv_xname, error, name));
300 return (EIO);
301 }
302
303 if (size & 3) {
304 DPRINTF(("%s: bad firmware size %u\n",
305 sc->sc_dev.dv_xname, size));
306 free(ucode, M_DEVBUF, 0);
307 return (EINVAL);
308 }
309
310 pgt_reboot(sc);
311
312 fwoff = 0;
313 ucodeoff = 0;
314 uc = (uint32_t *)ucode;
315 reg = PGT_FIRMWARE_INTERNAL_OFFSET;
316 while (fwoff < size) {
317 pgt_write_4_flush(sc, PGT_REG_DIR_MEM_BASE, reg);
318
319 if ((size - fwoff) >= PGT_DIRECT_MEMORY_SIZE)
320 fwlen = PGT_DIRECT_MEMORY_SIZE;
321 else
322 fwlen = size - fwoff;
323
324 dirreg = PGT_DIRECT_MEMORY_OFFSET;
325 while (fwlen > 4) {
326 pgt_write_4(sc, dirreg, uc[ucodeoff]);
327 fwoff += 4;
328 dirreg += 4;
329 reg += 4;
330 fwlen -= 4;
331 ucodeoff++;
332 }
333 pgt_write_4_flush(sc, dirreg, uc[ucodeoff]);
334 fwoff += 4;
335 dirreg += 4;
336 reg += 4;
337 fwlen -= 4;
Value stored to 'fwlen' is never read
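(The store is dead because control returns to the top of the outer loop at
line 316, where 'fwlen' is unconditionally reassigned at lines 319-322 before
its next read; once the outer loop exits, 'fwlen' is never read again.)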
338 ucodeoff++;
339 }
340 DPRINTF(("%s: %d bytes microcode loaded from %s\n",
341 sc->sc_dev.dv_xname, fwoff, name));
342
343 reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
344 reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_CLOCKRUN);
345 reg |= PGT_CTRL_STAT_RAMBOOT;
346 pgt_write_4_flush(sc, PGT_REG_CTRL_STAT, reg);
347 pgt_write_memory_barrier(sc);
348 DELAY(PGT_WRITEIO_DELAY);
349
350 reg |= PGT_CTRL_STAT_RESET;
351 pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
352 pgt_write_memory_barrier(sc);
353 DELAY(PGT_WRITEIO_DELAY);
354
355 reg &= ~PGT_CTRL_STAT_RESET;
356 pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
357 pgt_write_memory_barrier(sc);
358 DELAY(PGT_WRITEIO_DELAY);
359
360 free(ucode, M_DEVBUF, 0);
361
362 return (0);
363}
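
One possible cleanup (a sketch against the loop above, not a committed fix) is
to fold the trailing flushed write into the loop so that the final decrement
of 'fwlen' is read by the loop condition; since 'size' was verified to be a
multiple of 4, 'fwlen' always reaches exactly 0:

    dirreg = PGT_DIRECT_MEMORY_OFFSET;
    while (fwlen > 0) {
            if (fwlen > 4)
                    pgt_write_4(sc, dirreg, uc[ucodeoff]);
            else
                    /* flush the last write of each chunk */
                    pgt_write_4_flush(sc, dirreg, uc[ucodeoff]);
            fwoff += 4;
            dirreg += 4;
            reg += 4;
            fwlen -= 4;
            ucodeoff++;
    }

Alternatively, simply dropping the dead 'fwlen -= 4;' at line 337 silences the
warning without changing behavior.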
364
365void
366pgt_cleanup_queue(struct pgt_softc *sc, enum pgt_queue pq,
367 struct pgt_frag *pqfrags)
368{
369 struct pgt_desc *pd;
370 unsigned int i;
371
372 sc->sc_cb->pcb_device_curfrag[pq] = 0;
373 i = 0;
374 /* XXX why only freeq ??? */
375 TAILQ_FOREACH(pd, &sc->sc_freeq[pq], pd_link) {
376 pd->pd_fragnum = i;
377 pd->pd_fragp = &pqfrags[i];
378 if (pgt_queue_is_rx(pq))
379 pgt_reinit_rx_desc_frag(sc, pd);
380 i++;
381 }
382 sc->sc_freeq_count[pq] = i;
383 /*
384 * The ring buffer describes how many free buffers are available from
385 * the host (for receive queues) or how many are pending (for
386 * transmit queues).
387 */
388 if (pgt_queue_is_rx(pq))
389 sc->sc_cb->pcb_driver_curfrag[pq] = htole32(i);
390 else
391 sc->sc_cb->pcb_driver_curfrag[pq] = 0;
392}
393
394/*
395 * Turn off interrupts, reset the device (possibly loading firmware),
396 * and put everything in a known state.
397 */
398int
399pgt_reset(struct pgt_softc *sc)
400{
401 int error;
402
403 /* disable all interrupts */
404 pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
405 DELAY(PGT_WRITEIO_DELAY);
406
407 /*
408 * Set up the management receive queue, assuming there are no
409 * requests in progress.
410 */
411 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
412 sc->sc_cbdmam->dm_mapsize,
413 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
414 pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_RX,
415 &sc->sc_cb->pcb_data_low_rx[0]);
416 pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_TX,
417 &sc->sc_cb->pcb_data_low_tx[0]);
418 pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_RX,
419 &sc->sc_cb->pcb_data_high_rx[0]);
420 pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_TX,
421 &sc->sc_cb->pcb_data_high_tx[0]);
422 pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_RX,
423 &sc->sc_cb->pcb_mgmt_rx[0]);
424 pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_TX,
425 &sc->sc_cb->pcb_mgmt_tx[0]);
426 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
427 sc->sc_cbdmam->dm_mapsize,
428 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
429
430 /* load firmware */
431 if (sc->sc_flags & SC_NEEDS_FIRMWARE) {
432 error = pgt_load_firmware(sc);
433 if (error) {
434 printf("%s: firmware load failed\n",
435 sc->sc_dev.dv_xname);
436 return (error);
437 }
438 sc->sc_flags &= ~SC_NEEDS_FIRMWARE;
439 DPRINTF(("%s: firmware loaded\n", sc->sc_dev.dv_xname));
440 }
441
442 /* upload the control block's DMA address */
443 pgt_write_4_flush(sc, PGT_REG_CTRL_BLK_BASE,
444 htole32((uint32_t)sc->sc_cbdmam->dm_segs[0].ds_addr));
445 DELAY(PGT_WRITEIO_DELAY);
446
447 /* send a reset event */
448 pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_RESET);
449 DELAY(PGT_WRITEIO_DELAY);
450
451 /* await only the initialization interrupt */
452 pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_INIT);
453 DELAY(PGT_WRITEIO_DELAY);
454
455 return (0);
456}
457
458/*
459 * If we're trying to reset and the device has seemingly not been detached,
460 * we'll spend a minute seeing if we can't do the reset.
461 */
462void
463pgt_stop(struct pgt_softc *sc, unsigned int flag)
464{
465 struct ieee80211com *ic;
466 unsigned int wokeup;
467 int tryagain = 0;
468
469 ic = &sc->sc_ic;
470
471 ic->ic_if.if_flags &= ~IFF_RUNNING;
472 sc->sc_flags |= SC_UNINITIALIZED;
473 sc->sc_flags |= flag;
474
475 pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
476 pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
477 pgt_drain_tx_queue(sc, PGT_QUEUE_MGMT_TX);
478
479trying_again:
480 /* disable all interrupts */
481 pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
482 DELAY(PGT_WRITEIO_DELAY);
483
484 /* reboot card */
485 pgt_reboot(sc);
486
487 do {
488 wokeup = 0;
489 /*
490 * We don't expect to be woken up, just to drop the lock
491 * and time out. Only tx queues can have anything valid
492 * on them outside of an interrupt.
493 */
494 while (!TAILQ_EMPTY(&sc->sc_mgmtinprog)) {
495 struct pgt_mgmt_desc *pmd;
496
497 pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
498 TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
499 pmd->pmd_error = ENETRESET;
500 wakeup_one(pmd);
501 if (sc->sc_debug & SC_DEBUG_MGMT)
502 DPRINTF(("%s: queue: mgmt %p <- %#x "
503 "(drained)\n", sc->sc_dev.dv_xname,
504 pmd, pmd->pmd_oid));
505 wokeup++;
506 }
507 if (wokeup > 0) {
508 if (flag == SC_NEEDS_RESET && sc->sc_flags & SC_DYING) {
509 sc->sc_flags &= ~flag;
510 return;
511 }
512 }
513 } while (wokeup > 0);
514
515 if (flag == SC_NEEDS_RESET) {
516 int error;
517
518 DPRINTF(("%s: resetting\n", sc->sc_dev.dv_xname));
519 sc->sc_flags &= ~SC_POWERSAVE;
520 sc->sc_flags |= SC_NEEDS_FIRMWARE;
521 error = pgt_reset(sc);
522 if (error == 0) {
523 tsleep_nsec(&sc->sc_flags, 0, "pgtres", SEC_TO_NSEC(1));
524 if (sc->sc_flags & SC_UNINITIALIZED) {
525 printf("%s: not responding\n",
526 sc->sc_dev.dv_xname);
527 /* Thud. It was probably removed. */
528 if (tryagain)
529 panic("pgt went for lunch"); /* XXX */
530 tryagain = 1;
531 } else {
532 /* await all interrupts */
533 pgt_write_4_flush(sc, PGT_REG_INT_EN,
534 PGT_INT_STAT_SOURCES);
535 DELAY(PGT_WRITEIO_DELAY);
536 ic->ic_if.if_flags |= IFF_RUNNING;
537 }
538 }
539
540 if (tryagain)
541 goto trying_again;
542
543 sc->sc_flags &= ~flag;
544 if (ic->ic_if.if_flags & IFF_RUNNING)
545 pgt_update_hw_from_sw(sc,
546 ic->ic_state != IEEE80211_S_INIT);
547 }
548
549 ic->ic_if.if_flags &= ~IFF_RUNNING;
550 ifq_clr_oactive(&ic->ic_if.if_snd);
551 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
552}
553
554void
555pgt_attach(struct device *self)
556{
557 struct pgt_softc *sc = (struct pgt_softc *)self;
558 int error;
559
560 /* debug flags */
561 //sc->sc_debug |= SC_DEBUG_QUEUES; /* super verbose */
562 //sc->sc_debug |= SC_DEBUG_MGMT;
563 sc->sc_debug |= SC_DEBUG_UNEXPECTED;
564 //sc->sc_debug |= SC_DEBUG_TRIGGER; /* verbose */
565 //sc->sc_debug |= SC_DEBUG_EVENTS; /* super verbose */
566 //sc->sc_debug |= SC_DEBUG_POWER;
567 sc->sc_debug |= SC_DEBUG_TRAP;
568 sc->sc_debug |= SC_DEBUG_LINK;
569 //sc->sc_debug |= SC_DEBUG_RXANNEX;
570 //sc->sc_debug |= SC_DEBUG_RXFRAG;
571 //sc->sc_debug |= SC_DEBUG_RXETHER;
572
573 /* enable card if possible */
574 if (sc->sc_enable != NULL)
575 (*sc->sc_enable)(sc);
576
577 error = pgt_dma_alloc(sc);
578 if (error)
579 return;
580
581 sc->sc_ic.ic_if.if_softc = sc;
582 TAILQ_INIT(&sc->sc_mgmtinprog);
583 TAILQ_INIT(&sc->sc_kthread.sck_traps);
584 sc->sc_flags |= SC_NEEDS_FIRMWARE | SC_UNINITIALIZED;
585 sc->sc_80211_ioc_auth = IEEE80211_AUTH_OPEN;
586
587 error = pgt_reset(sc);
588 if (error)
589 return;
590
591 tsleep_nsec(&sc->sc_flags, 0, "pgtres", SEC_TO_NSEC(1));
592 if (sc->sc_flags & SC_UNINITIALIZED) {
593 printf("%s: not responding\n", sc->sc_dev.dv_xname);
594 sc->sc_flags |= SC_NEEDS_FIRMWARE;
595 return;
596 } else {
597 /* await all interrupts */
598 pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_SOURCES);
599 DELAY(PGT_WRITEIO_DELAY);
600 }
601
602 error = pgt_net_attach(sc);
603 if (error)
604 return;
605
606 if (kthread_create(pgt_per_device_kthread, sc, NULL,
607 sc->sc_dev.dv_xname) != 0)
608 return;
609
610 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
611}
612
613int
614pgt_detach(struct pgt_softc *sc)
615{
616 if (sc->sc_flags & SC_NEEDS_FIRMWARE || sc->sc_flags & SC_UNINITIALIZED)
617 /* device was not initialized correctly, so leave early */
618 goto out;
619
620 /* stop card */
621 pgt_stop(sc, SC_DYING);
622 pgt_reboot(sc);
623
624 ieee80211_ifdetach(&sc->sc_ic.ic_if);
625 if_detach(&sc->sc_ic.ic_if);
626
627out:
628 /* disable card if possible */
629 if (sc->sc_disable != NULL)
630 (*sc->sc_disable)(sc);
631
632 pgt_dma_free(sc);
633
634 return (0);
635}
636
637void
638pgt_reboot(struct pgt_softc *sc)
639{
640 uint32_t reg;
641
642 reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
643 reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_RAMBOOT);
644 pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
645 pgt_write_memory_barrier(sc);
646 DELAY(PGT_WRITEIO_DELAY);
647
648 reg |= PGT_CTRL_STAT_RESET;
649 pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
650 pgt_write_memory_barrier(sc);
651 DELAY(PGT_WRITEIO_DELAY);
652
653 reg &= ~PGT_CTRL_STAT_RESET;
654 pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
655 pgt_write_memory_barrier(sc);
656 DELAY(PGT_RESET_DELAY);
657}
658
659void
660pgt_init_intr(struct pgt_softc *sc)
661{
662 if ((sc->sc_flags & SC_UNINITIALIZED) == 0) {
663 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
664 DPRINTF(("%s: spurious initialization\n",
665 sc->sc_dev.dv_xname));
666 } else {
667 sc->sc_flags &= ~SC_UNINITIALIZED;
668 wakeup(&sc->sc_flags);
669 }
670}
671
672/*
673 * If called with a NULL last_nextpkt, only the mgmt queue will be checked
674 * for new packets.
675 */
676void
677pgt_update_intr(struct pgt_softc *sc, int hack)
678{
679 /* priority order */
680 enum pgt_queue pqs[PGT_QUEUE_COUNT] = {
681 PGT_QUEUE_MGMT_TX, PGT_QUEUE_MGMT_RX,
682 PGT_QUEUE_DATA_HIGH_TX, PGT_QUEUE_DATA_HIGH_RX,
683 PGT_QUEUE_DATA_LOW_TX, PGT_QUEUE_DATA_LOW_RX
684 };
685 struct mbuf *m;
686 uint32_t npend;
687 unsigned int dirtycount;
688 int i;
689
690 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
691 sc->sc_cbdmam->dm_mapsize,
692 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
693 pgt_debug_events(sc, "intr");
694 /*
695 * Check for completion of tx in their dirty queues.
696 * Check completion of rx into their dirty queues.
697 */
698 for (i = 0; i < PGT_QUEUE_COUNT; i++) {
699 size_t qdirty, qfree;
700
701 qdirty = sc->sc_dirtyq_count[pqs[i]];
702 qfree = sc->sc_freeq_count[pqs[i]];
703 /*
704 * We want the wrap-around here.
705 */
706 if (pgt_queue_is_rx(pqs[i])) {
707 int data;
708
709 data = pgt_queue_is_data(pqs[i]);
710#ifdef PGT_BUGGY_INTERRUPT_RECOVERY
711 if (hack && data)
712 continue;
713#endif
714 npend = pgt_queue_frags_pending(sc, pqs[i]);
715 /*
716 * Receive queues clean up below, so qdirty must
717 * always be 0.
718 */
719 if (npend > qfree) {
720 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
721 DPRINTF(("%s: rx queue [%u] "
722 "overflowed by %u\n",
723 sc->sc_dev.dv_xname, pqs[i],
724 npend - qfree));
725 sc->sc_flags |= SC_INTR_RESET;
726 break;
727 }
728 while (qfree-- > npend)
729 pgt_rxdone(sc, pqs[i]);
730 } else {
731 npend = pgt_queue_frags_pending(sc, pqs[i]);
732 if (npend > qdirty) {
733 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
734 DPRINTF(("%s: tx queue [%u] "
735 "underflowed by %u\n",
736 sc->sc_dev.dv_xname, pqs[i],
737 npend - qdirty));
738 sc->sc_flags |= SC_INTR_RESET;
739 break;
740 }
741 /*
742 * If the free queue was empty, or the data transmit
743 * queue just became empty, wake up any waiters.
744 */
745 if (qdirty > npend) {
746 if (pgt_queue_is_data(pqs[i])) {
747 sc->sc_ic.ic_if.if_timer = 0;
748 ifq_clr_oactive(
749 &sc->sc_ic.ic_if.if_snd);
750 }
751 while (qdirty-- > npend)
752 pgt_txdone(sc, pqs[i]);
753 }
754 }
755 }
756
757 /*
758 * This is the deferred completion for received management frames
759 * and where we queue network frames for stack input.
760 */
761 dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX];
762 while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX])) {
763 struct pgt_mgmt_desc *pmd;
764
765 pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
766 /*
767 * If there is no mgmt request in progress or the operation
768 * returned is explicitly a trap, this pmd will essentially
769 * be ignored.
770 */
771 pgt_mgmtrx_completion(sc, pmd);
772 }
773 sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX] =
774 htole32(dirtycount +
775 letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX]));
776
777 dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_HIGH_RX];
778 while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_HIGH_RX])) {
779 if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_HIGH_RX)))
780 pgt_input_frames(sc, m);
781 }
782 sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX] =
783 htole32(dirtycount +
784 letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX]));
785
786 dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_RX];
787 while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_LOW_RX])) {
788 if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_LOW_RX)))
789 pgt_input_frames(sc, m);
790 }
791 sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX] =
792 htole32(dirtycount +
793 letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX]));
794
795 /*
796 * Write out what we've finished with.
797 */
798 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
799 sc->sc_cbdmam->dm_mapsize,
800 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
801}
802
803struct mbuf *
804pgt_ieee80211_encap(struct pgt_softc *sc, struct ether_header *eh,
805 struct mbuf *m, struct ieee80211_node **ni)
806{
807 struct ieee80211com *ic;
808 struct ieee80211_frame *frame;
809 struct llc *snap;
810
811 ic = &sc->sc_ic;
812 if (ni != NULL && ic->ic_opmode == IEEE80211_M_MONITOR) {
813 *ni = ieee80211_ref_node(ic->ic_bss);
814 (*ni)->ni_inact = 0;
815 return (m);
816 }
817
818 M_PREPEND(m, sizeof(*frame) + sizeof(*snap), M_DONTWAIT);
819 if (m == NULL)
820 return (m);
821 if (m->m_len < sizeof(*frame) + sizeof(*snap)) {
822 m = m_pullup(m, sizeof(*frame) + sizeof(*snap));
823 if (m == NULL)
824 return (m);
825 }
826 frame = mtod(m, struct ieee80211_frame *);
827 snap = (struct llc *)&frame[1];
828 if (ni != NULL) {
829 if (ic->ic_opmode == IEEE80211_M_STA) {
830 *ni = ieee80211_ref_node(ic->ic_bss);
831 }
832#ifndef IEEE80211_STA_ONLY
833 else {
834 *ni = ieee80211_find_node(ic, eh->ether_shost);
835 /*
836 * Make up associations for ad-hoc mode. To support
837 * ad-hoc WPA, we'll need to maintain a bounded
838 * pool of ad-hoc stations.
839 */
840 if (*ni == NULL &&
841 ic->ic_opmode != IEEE80211_M_HOSTAP) {
842 *ni = ieee80211_dup_bss(ic, eh->ether_shost);
843 if (*ni != NULL) {
844 (*ni)->ni_associd = 1;
845 ic->ic_newassoc(ic, *ni, 1);
846 }
847 }
848 if (*ni == NULL) {
849 m_freem(m);
850 return (NULL);
851 }
852 }
853#endif
854 (*ni)->ni_inact = 0;
855 }
856 snap->llc_dsap = snap->llc_ssap = LLC_SNAP_LSAP;
857 snap->llc_control = LLC_UI;
858 snap->llc_snap.org_code[0] = 0;
859 snap->llc_snap.org_code[1] = 0;
860 snap->llc_snap.org_code[2] = 0;
861 snap->llc_snap.ether_type = eh->ether_type;
862 frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA;
863 /* Doesn't look like much of the 802.11 header is available. */
864 *(uint16_t *)frame->i_dur = *(uint16_t *)frame->i_seq = 0;
865 /*
866 * Translate the addresses; WDS is not handled.
867 */
868 switch (ic->ic_opmode) {
869 case IEEE80211_M_STA:
870 frame->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
871 IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
872 IEEE80211_ADDR_COPY(frame->i_addr2, ic->ic_bss->ni_bssid);
873 IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_shost);
874 break;
875#ifndef IEEE80211_STA_ONLY
876 case IEEE80211_M_IBSS:
877 case IEEE80211_M_AHDEMO:
878 frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
879 IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
880 IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
881 IEEE80211_ADDR_COPY(frame->i_addr3, ic->ic_bss->ni_bssid);
882 break;
883 case IEEE80211_M_HOSTAP:
884 /* HostAP forwarding defaults to being done on firmware. */
885 frame->i_fc[1] = IEEE80211_FC1_DIR_TODS;
886 IEEE80211_ADDR_COPY(frame->i_addr1, ic->ic_bss->ni_bssid);
887 IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
888 IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_dhost);
889 break;
890#endif
891 default:
892 break;
893 }
894 return (m);
895}
896
897void
898pgt_input_frames(struct pgt_softc *sc, struct mbuf *m)
899{
900 struct ether_header eh;
901 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
902 struct ifnet *ifp;
903 struct ieee80211_channel *chan;
904 struct ieee80211_rxinfo rxi;
905 struct ieee80211_node *ni;
906 struct ieee80211com *ic;
907 struct pgt_rx_annex *pra;
908 struct pgt_rx_header *pha;
909 struct mbuf *next;
910 unsigned int n;
911 uint32_t rstamp;
912 uint8_t rssi;
913
914 ic = &sc->sc_ic;
915 ifp = &ic->ic_if;
916 for (next = m; m != NULL; m = next) {
917 next = m->m_nextpkt;
918 m->m_nextpkt = NULL;
919
920 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
921 if (m->m_len < sizeof(*pha)) {
922 m = m_pullup(m, sizeof(*pha));
923 if (m == NULL) {
924 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
925 DPRINTF(("%s: m_pullup "
926 "failure\n",
927 sc->sc_dev.dv_xname));
928 ifp->if_ierrors++;
929 continue;
930 }
931 }
932 pha = mtod(m, struct pgt_rx_header *);
933 pra = NULL;
934 goto input;
935 }
936
937 if (m->m_len < sizeof(*pra)) {
938 m = m_pullup(m, sizeof(*pra));
939 if (m == NULL) {
940 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
941 DPRINTF(("%s: m_pullup failure\n",
942 sc->sc_dev.dv_xname));
943 ifp->if_ierrors++;
944 continue;
945 }
946 }
947 pra = mtod(m, struct pgt_rx_annex *);
948 pha = &pra->pra_header;
949 if (sc->sc_debug & SC_DEBUG_RXANNEX)
950 DPRINTF(("%s: rx annex: ? %04x "
951 "len %u clock %u flags %02x ? %02x rate %u ? %02x "
952 "freq %u ? %04x rssi %u pad %02x%02x%02x\n",
953 sc->sc_dev.dv_xname,
954 letoh16(pha->pra_unknown0),
955 letoh16(pha->pra_length),
956 letoh32(pha->pra_clock), pha->pra_flags,
957 pha->pra_unknown1, pha->pra_rate,
958 pha->pra_unknown2, letoh32(pha->pra_frequency),
959 pha->pra_unknown3, pha->pra_rssi,
960 pha->pra_pad[0], pha->pra_pad[1], pha->pra_pad[2]));
961 if (sc->sc_debug & SC_DEBUG_RXETHER)
962 DPRINTF(("%s: rx ether: %s < %s 0x%04x\n",
963 sc->sc_dev.dv_xname,
964 ether_sprintf(pra->pra_ether_dhost),
965 ether_sprintf(pra->pra_ether_shost),
966 ntohs(pra->pra_ether_type)));
967
968 memcpy(eh.ether_dhost, pra->pra_ether_dhost, ETHER_ADDR_LEN);
969 memcpy(eh.ether_shost, pra->pra_ether_shost, ETHER_ADDR_LEN);
970 eh.ether_type = pra->pra_ether_type;
971
972input:
973 /*
974 * This flag is set if e.g. packet could not be decrypted.
975 */
976 if (pha->pra_flags & PRA_FLAG_BAD) {
977 ifp->if_ierrors++;
978 m_freem(m);
979 continue;
980 }
981
982 /*
983 * After getting what we want, chop off the annex, then
984 * turn into something that looks like it really was
985 * 802.11.
986 */
987 rssi = pha->pra_rssi;
988 rstamp = letoh32(pha->pra_clock);
989 n = ieee80211_mhz2ieee(letoh32(pha->pra_frequency), 0);
990 if (n <= IEEE80211_CHAN_MAX)
991 chan = &ic->ic_channels[n];
992 else
993 chan = ic->ic_bss->ni_chan;
994 /* Send to 802.3 listeners. */
995 if (pra) {
996 m_adj(m, sizeof(*pra));
997 } else
998 m_adj(m, sizeof(*pha));
999
1000 m = pgt_ieee80211_encap(sc, &eh, m, &ni);
1001 if (m != NULL) {
1002#if NBPFILTER > 0
1003 if (sc->sc_drvbpf != NULL) {
1004 struct mbuf mb;
1005 struct pgt_rx_radiotap_hdr *tap = &sc->sc_rxtap;
1006
1007 tap->wr_flags = 0;
1008 tap->wr_chan_freq = htole16(chan->ic_freq);
1009 tap->wr_chan_flags = htole16(chan->ic_flags);
1010 tap->wr_rssi = rssi;
1011 tap->wr_max_rssi = ic->ic_max_rssi;
1012
1013 mb.m_data = (caddr_t)tap;
1014 mb.m_len = sc->sc_rxtap_len;
1015 mb.m_next = m;
1016 mb.m_nextpkt = NULL;
1017 mb.m_type = 0;
1018 mb.m_flags = 0;
1019 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
1020 }
1021#endif
1022 memset(&rxi, 0, sizeof(rxi));
1023 ni->ni_rssi = rxi.rxi_rssi = rssi;
1024 ni->ni_rstamp = rxi.rxi_tstamp = rstamp;
1025 ieee80211_inputm(ifp, m, ni, &rxi, &ml);
1026 /*
1027 * The frame may have caused the node to be marked for
1028 * reclamation (e.g. in response to a DEAUTH message)
1029 * so use free_node here instead of unref_node.
1030 */
1031 if (ni == ic->ic_bss)
1032 ieee80211_unref_node(&ni);
1033 else
1034 ieee80211_release_node(&sc->sc_ic, ni);
1035 } else {
1036 ifp->if_ierrors++;
1037 }
1038 }
1039 if_input(ifp, &ml);
1040}
1041
1042void
1043pgt_wakeup_intr(struct pgt_softc *sc)
1044{
1045 int shouldupdate;
1046 int i;
1047
1048 shouldupdate = 0;
1049 /* Check for any queues being empty before updating. */
1050 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
1051 sc->sc_cbdmam->dm_mapsize,
1052 BUS_DMASYNC_POSTREAD);
1053 for (i = 0; !shouldupdate && i < PGT_QUEUE_COUNT; i++) {
1054 if (pgt_queue_is_tx(i))
1055 shouldupdate = pgt_queue_frags_pending(sc, i);
1056 else
1057 shouldupdate = pgt_queue_frags_pending(sc, i) <
1058 sc->sc_freeq_count[i];
1059 }
1060 if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
1061 shouldupdate = 1;
1062 if (sc->sc_debug & SC_DEBUG_POWER)
1063 DPRINTF(("%s: wakeup interrupt (update = %d)\n",
1064 sc->sc_dev.dv_xname, shouldupdate));
1065 sc->sc_flags &= ~SC_POWERSAVE;
1066 if (shouldupdate) {
1067 pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
1068 DELAY(PGT_WRITEIO_DELAY);
1069 }
1070}
1071
1072void
1073pgt_sleep_intr(struct pgt_softc *sc)
1074{
1075 int allowed;
1076 int i;
1077
1078 allowed = 1;
1079 /* Check for any queues not being empty before allowing. */
1080 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
1081 sc->sc_cbdmam->dm_mapsize,
1082 BUS_DMASYNC_POSTREAD);
1083 for (i = 0; allowed && i < PGT_QUEUE_COUNT; i++) {
1084 if (pgt_queue_is_tx(i))
1085 allowed = pgt_queue_frags_pending(sc, i) == 0;
1086 else
1087 allowed = pgt_queue_frags_pending(sc, i) >=
1088 sc->sc_freeq_count[i];
1089 }
1090 if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
1091 allowed = 0;
1092 if (sc->sc_debug & SC_DEBUG_POWER)
1093 DPRINTF(("%s: sleep interrupt (allowed = %d)\n",
1094 sc->sc_dev.dv_xname, allowed));
1095 if (allowed && sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) {
1096 sc->sc_flags |= SC_POWERSAVE;
1097 pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_SLEEP);
1098 DELAY(PGT_WRITEIO_DELAY);
1099 }
1100}
1101
1102void
1103pgt_empty_traps(struct pgt_softc_kthread *sck)
1104{
1105 struct pgt_async_trap *pa;
1106 struct mbuf *m;
1107
1108 while (!TAILQ_EMPTY(&sck->sck_traps)) {
1109 pa = TAILQ_FIRST(&sck->sck_traps);
1110 TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
1111 m = pa->pa_mbuf;
1112 m_freem(m);
1113 }
1114}
1115
1116void
1117pgt_per_device_kthread(void *argp)
1118{
1119 struct pgt_softc *sc;
1120 struct pgt_softc_kthread *sck;
1121 struct pgt_async_trap *pa;
1122 struct mbuf *m;
1123 int s;
1124
1125 sc = argp;
1126 sck = &sc->sc_kthread;
1127 while (!sck->sck_exit) {
1128 if (!sck->sck_update && !sck->sck_reset &&
1129 TAILQ_EMPTY(&sck->sck_traps))
1130 tsleep_nsec(&sc->sc_kthread, 0, "pgtkth", INFSLP);
1131 if (sck->sck_reset) {
1132 DPRINTF(("%s: [thread] async reset\n",
1133 sc->sc_dev.dv_xname));
1134 sck->sck_reset = 0;
1135 sck->sck_update = 0;
1136 pgt_empty_traps(sck);
1137 s = splnet();
1138 pgt_stop(sc, SC_NEEDS_RESET);
1139 splx(s);
1140 } else if (!TAILQ_EMPTY(&sck->sck_traps)) {
1141 DPRINTF(("%s: [thread] got a trap\n",
1142 sc->sc_dev.dv_xname));
1143 pa = TAILQ_FIRST(&sck->sck_traps);
1144 TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
1145 m = pa->pa_mbuf;
1146 m_adj(m, sizeof(*pa));
1147 pgt_update_sw_from_hw(sc, pa, m);
1148 m_freem(m);
1149 } else if (sck->sck_update) {
1150 sck->sck_update = 0;
1151 pgt_update_sw_from_hw(sc, NULL, NULL);
1152 }
1153 }
1154 pgt_empty_traps(sck);
1155 kthread_exit(0);
1156}
1157
1158void
1159pgt_async_reset(struct pgt_softc *sc)
1160{
1161 if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
1162 return;
1163 sc->sc_kthread.sck_reset = 1;
1164 wakeup(&sc->sc_kthread);
1165}
1166
1167void
1168pgt_async_update(struct pgt_softc *sc)
1169{
1170 if (sc->sc_flags & SC_DYING)
1171 return;
1172 sc->sc_kthread.sck_update = 1;
1173 wakeup(&sc->sc_kthread);
1174}
1175
1176int
1177pgt_intr(void *arg)
1178{
1179 struct pgt_softc *sc;
1180 struct ifnet *ifp;
1181 u_int32_t reg;
1182
1183 sc = arg;
1184 ifp = &sc->sc_ic.ic_if;
1185
1186 /*
1187 * Here the Linux driver ands in the value of the INT_EN register,
1188 * and masks off everything but the documented interrupt bits. Why?
1189 *
1190 * Unknown bit 0x4000 is set upon initialization, 0x8000000 some
1191 * other times.
1192 */
1193 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON &&
1194 sc->sc_flags & SC_POWERSAVE) {
1195 /*
1196 * Don't try handling the interrupt in sleep mode.
1197 */
1198 reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
1199 if (reg & PGT_CTRL_STAT_SLEEPMODE)
1200 return (0);
1201 }
1202 reg = pgt_read_4(sc, PGT_REG_INT_STAT);
1203 if (reg == 0)
1204 return (0); /* This interrupt is not from us */
1205
1206 pgt_write_4_flush(sc, PGT_REG_INT_ACK, reg);
1207 if (reg & PGT_INT_STAT_INIT)
1208 pgt_init_intr(sc);
1209 if (reg & PGT_INT_STAT_UPDATE) {
1210 pgt_update_intr(sc, 0);
1211 /*
1212 * If we got an update, it's not really asleep.
1213 */
1214 sc->sc_flags &= ~SC_POWERSAVE;
1215 /*
1216 * Pretend I have any idea what the documentation
1217 * would say, and just give it a shot sending an
1218 * "update" after acknowledging the interrupt
1219 * bits and writing out the new control block.
1220 */
1221 pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
1222 DELAY(PGT_WRITEIO_DELAY);
1223 }
1224 if (reg & PGT_INT_STAT_SLEEP && !(reg & PGT_INT_STAT_WAKEUP))
1225 pgt_sleep_intr(sc);
1226 if (reg & PGT_INT_STAT_WAKEUP)
1227 pgt_wakeup_intr(sc);
1228
1229 if (sc->sc_flags & SC_INTR_RESET) {
1230 sc->sc_flags &= ~SC_INTR_RESET;
1231 pgt_async_reset(sc);
1232 }
1233
1234 if (reg & ~PGT_INT_STAT_SOURCES && sc->sc_debug & SC_DEBUG_UNEXPECTED) {
1235 DPRINTF(("%s: unknown interrupt bits %#x (stat %#x)\n",
1236 sc->sc_dev.dv_xname,
1237 reg & ~PGT_INT_STAT_SOURCES,
1238 pgt_read_4(sc, PGT_REG_CTRL_STAT)));
1239 }
1240
1241 if (!ifq_empty(&ifp->if_snd))
1242 pgt_start(ifp);
1243
1244 return (1);
1245}
1246
1247void
1248pgt_txdone(struct pgt_softc *sc, enum pgt_queue pq)
1249{
1250 struct pgt_desc *pd;
1251
1252 pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]);
1253 TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
1254 sc->sc_dirtyq_count[pq]--;
1255 TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
1256 sc->sc_freeq_count[pq]++;
1257 bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
1258 pd->pd_dmam->dm_mapsize,
1259 BUS_DMASYNC_POSTREAD);
1260 /* Management frames want completion information. */
1261 if (sc->sc_debug & SC_DEBUG_QUEUES) {
1262 DPRINTF(("%s: queue: tx %u <- [%u]\n",
1263 sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
1264 if (sc->sc_debug & SC_DEBUG_MGMT && pgt_queue_is_mgmt(pq)) {
1265 struct pgt_mgmt_frame *pmf;
1266
1267 pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
1268 DPRINTF(("%s: queue: txmgmt %p <- "
1269 "(ver %u, op %u, flags %#x)\n",
1270 sc->sc_dev.dv_xname,
1271 pd, pmf->pmf_version, pmf->pmf_operation,
1272 pmf->pmf_flags));
1273 }
1274 }
1275 pgt_unload_tx_desc_frag(sc, pd);
1276}
1277
1278void
1279pgt_rxdone(struct pgt_softc *sc, enum pgt_queue pq)
1280{
1281 struct pgt_desc *pd;
1282
1283 pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
1284 TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
1285 sc->sc_freeq_count[pq]--;
1286 TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link);
1287 sc->sc_dirtyq_count[pq]++;
1288 bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
1289 pd->pd_dmam->dm_mapsize,
1290 BUS_DMASYNC_POSTREAD);
1291 if (sc->sc_debug & SC_DEBUG_QUEUES)
1292 DPRINTF(("%s: queue: rx %u <- [%u]\n",
1293 sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
1294 if (sc->sc_debug & SC_DEBUG_UNEXPECTED &&
1295 pd->pd_fragp->pf_flags & ~htole16(PF_FLAG_MF))
1296 DPRINTF(("%s: unknown flags on rx [%u]: %#x\n",
1297 sc->sc_dev.dv_xname, pq, letoh16(pd->pd_fragp->pf_flags)));
1298}
1299
1300/*
1301 * Traps are generally used for the firmware to report changes in state
1302 * back to the host. Mostly this processes changes in link state, but
1303 * it needs to also be used to initiate WPA and other authentication
1304 * schemes in terms of client (station) or server (access point).
1305 */
1306void
1307pgt_trap_received(struct pgt_softc *sc, uint32_t oid, void *trapdata,
1308 size_t size)
1309{
1310 struct pgt_async_trap *pa;
1311 struct mbuf *m;
1312 char *p;
1313 size_t total;
1314
1315 if (sc->sc_flags & SC_DYING)
1316 return;
1317
1318 total = sizeof(oid) + size + sizeof(struct pgt_async_trap);
1319 if (total > MLEN) {
1320 MGETHDR(m, M_DONTWAIT, MT_DATA);
1321 if (m == NULL)
1322 return;
1323 MCLGET(m, M_DONTWAIT);
1324 if (!(m->m_flags & M_EXT)) {
1325 m_freem(m);
1326 m = NULL;
1327 }
1328 } else
1329 m = m_get(M_DONTWAIT, MT_DATA);
1330
1331 if (m == NULL)
1332 return;
1333 else
1334 m->m_len = total;
1335
1336 pa = mtod(m, struct pgt_async_trap *);
1337 p = mtod(m, char *) + sizeof(*pa);
1338 *(uint32_t *)p = oid;
1339 p += sizeof(uint32_t);
1340 memcpy(p, trapdata, size);
1341 pa->pa_mbuf = m;
1342
1343 TAILQ_INSERT_TAIL(&sc->sc_kthread.sck_traps, pa, pa_link);
1344 wakeup(&sc->sc_kthread);
1345}
1346
1347/*
1348 * Process a completed management response (all requests should be
1349 * responded to, quickly) or an event (trap).
1350 */
1351void
1352pgt_mgmtrx_completion(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
1353{
1354 struct pgt_desc *pd;
1355 struct pgt_mgmt_frame *pmf;
1356 uint32_t oid, size;
1357
1358 pd = TAILQ_FIRST(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX]);
1359 TAILQ_REMOVE(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX], pd, pd_link);
1360 sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX]--;
1361 TAILQ_INSERT_TAIL(&sc->sc_freeq[PGT_QUEUE_MGMT_RX],
1362 pd, pd_link);
1363 sc->sc_freeq_count[PGT_QUEUE_MGMT_RX]++;
1364 if (letoh16(pd->pd_fragp->pf_size)((__uint16_t)(pd->pd_fragp->pf_size)) < sizeof(*pmf)) {
1365 if (sc->sc_debug & SC_DEBUG_UNEXPECTED0x00000004)
1366 DPRINTF(("%s: mgmt desc too small: %u\n",
1367 sc->sc_dev.dv_xname,
1368 letoh16(pd->pd_fragp->pf_size)));
1369 goto out_nopmd;
1370 }
1371 pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
1372 if (pmf->pmf_version != PMF_VER0x01) {
1373 if (sc->sc_debug & SC_DEBUG_UNEXPECTED0x00000004)
1374 DPRINTF(("%s: unknown mgmt version %u\n",
1375 sc->sc_dev.dv_xname, pmf->pmf_version));
1376 goto out_nopmd;
1377 }
1378 if (pmf->pmf_device != PMF_DEV0x00) {
1379 if (sc->sc_debug & SC_DEBUG_UNEXPECTED0x00000004)
1380 DPRINTF(("%s: unknown mgmt dev %u\n",
1381 sc->sc_dev.dv_xname, pmf->pmf_device));
1382 goto out;
1383 }
1384 if (pmf->pmf_flags & ~PMF_FLAG_VALID(0x01 | 0x02)) {
1385 if (sc->sc_debug & SC_DEBUG_UNEXPECTED0x00000004)
1386 DPRINTF(("%s: unknown mgmt flags %x\n",
1387 sc->sc_dev.dv_xname,
1388 pmf->pmf_flags & ~PMF_FLAG_VALID));
1389 goto out;
1390 }
1391 if (pmf->pmf_flags & PMF_FLAG_LE0x02) {
1392 oid = letoh32(pmf->pmf_oid)((__uint32_t)(pmf->pmf_oid));
1393 size = letoh32(pmf->pmf_size)((__uint32_t)(pmf->pmf_size));
1394 } else {
1395 oid = betoh32(pmf->pmf_oid)(__uint32_t)(__builtin_constant_p(pmf->pmf_oid) ? (__uint32_t
)(((__uint32_t)(pmf->pmf_oid) & 0xff) << 24 | ((
__uint32_t)(pmf->pmf_oid) & 0xff00) << 8 | ((__uint32_t
)(pmf->pmf_oid) & 0xff0000) >> 8 | ((__uint32_t)
(pmf->pmf_oid) & 0xff000000) >> 24) : __swap32md
(pmf->pmf_oid))
;
1396 size = betoh32(pmf->pmf_size)(__uint32_t)(__builtin_constant_p(pmf->pmf_size) ? (__uint32_t
)(((__uint32_t)(pmf->pmf_size) & 0xff) << 24 | (
(__uint32_t)(pmf->pmf_size) & 0xff00) << 8 | ((__uint32_t
)(pmf->pmf_size) & 0xff0000) >> 8 | ((__uint32_t
)(pmf->pmf_size) & 0xff000000) >> 24) : __swap32md
(pmf->pmf_size))
;
1397 }
1398 if (pmf->pmf_operation == PMF_OP_TRAP) {
1399 pmd = NULL((void *)0); /* ignored */
1400 DPRINTF(("%s: mgmt trap received (op %u, oid %#x, len %u)\n",
1401 sc->sc_dev.dv_xname,
1402 pmf->pmf_operation, oid, size));
1403 pgt_trap_received(sc, oid, (char *)pmf + sizeof(*pmf),
1404 min(size, PGT_FRAG_SIZE1536 - sizeof(*pmf)));
1405 goto out_nopmd;
1406 }
1407 if (pmd == NULL((void *)0)) {
1408 if (sc->sc_debug & (SC_DEBUG_UNEXPECTED0x00000004 | SC_DEBUG_MGMT0x00000002))
1409 DPRINTF(("%s: spurious mgmt received "
1410 "(op %u, oid %#x, len %u)\n", sc->sc_dev.dv_xname,
1411 pmf->pmf_operation, oid, size));
1412 goto out_nopmd;
1413 }
1414 switch (pmf->pmf_operation) {
1415 case PMF_OP_RESPONSE:
1416 pmd->pmd_error = 0;
1417 break;
1418 case PMF_OP_ERROR:
1419 pmd->pmd_error = EPERM1;
1420 goto out;
1421 default:
1422 if (sc->sc_debug & SC_DEBUG_UNEXPECTED0x00000004)
1423 DPRINTF(("%s: unknown mgmt op %u\n",
1424 sc->sc_dev.dv_xname, pmf->pmf_operation));
1425 pmd->pmd_error = EIO5;
1426 goto out;
1427 }
1428 if (oid != pmd->pmd_oid) {
1429 if (sc->sc_debug & SC_DEBUG_UNEXPECTED0x00000004)
1430 DPRINTF(("%s: mgmt oid changed from %#x -> %#x\n",
1431 sc->sc_dev.dv_xname, pmd->pmd_oid, oid));
1432 pmd->pmd_oid = oid;
1433 }
1434 if (pmd->pmd_recvbuf != NULL((void *)0)) {
1435 if (size > PGT_FRAG_SIZE1536) {
1436 if (sc->sc_debug & SC_DEBUG_UNEXPECTED0x00000004)
1437 DPRINTF(("%s: mgmt oid %#x has bad size %u\n",
1438 sc->sc_dev.dv_xname, oid, size));
1439 pmd->pmd_error = EIO5;
1440 goto out;
1441 }
1442 if (size > pmd->pmd_len)
1443 pmd->pmd_error = ENOMEM12;
1444 else
1445 memcpy(pmd->pmd_recvbuf, (char *)pmf + sizeof(*pmf),__builtin_memcpy((pmd->pmd_recvbuf), ((char *)pmf + sizeof
(*pmf)), (size))
1446 size)__builtin_memcpy((pmd->pmd_recvbuf), ((char *)pmf + sizeof
(*pmf)), (size))
;
1447 pmd->pmd_len = size;
1448 }
1449
1450out:
1451 TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link)do { if (((pmd)->pmd_link.tqe_next) != ((void *)0)) (pmd)->
pmd_link.tqe_next->pmd_link.tqe_prev = (pmd)->pmd_link.
tqe_prev; else (&sc->sc_mgmtinprog)->tqh_last = (pmd
)->pmd_link.tqe_prev; *(pmd)->pmd_link.tqe_prev = (pmd)
->pmd_link.tqe_next; ((pmd)->pmd_link.tqe_prev) = ((void
*)-1); ((pmd)->pmd_link.tqe_next) = ((void *)-1); } while
(0)
;
1452 wakeup_one(pmd)wakeup_n((pmd), 1);
1453 if (sc->sc_debug & SC_DEBUG_MGMT0x00000002)
1454 DPRINTF(("%s: queue: mgmt %p <- (op %u, oid %#x, len %u)\n",
1455 sc->sc_dev.dv_xname, pmd, pmf->pmf_operation,
1456 pmd->pmd_oid, pmd->pmd_len));
1457out_nopmd:
1458 pgt_reinit_rx_desc_frag(sc, pd);
1459}
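/*
 * A note on the two exit labels above: "out" is for frames that were
 * matched to a pending request -- it dequeues pmd from sc_mgmtinprog
 * and wakes the sleeper in pgt_mgmt_request(); "out_nopmd" skips that
 * and only recycles the rx fragment, which is why traps, spurious
 * responses and undersized or unknown-version frames jump there
 * instead.
 */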
1460
1461 /*
1462  * Queue packets for reception and defragmentation.  I don't know
1463  * whether it can happen that the rx queue becomes full enough to
1464  * start, but not finish, queueing a fragmented packet.
1465  */
1466 struct mbuf *
1467 pgt_datarx_completion(struct pgt_softc *sc, enum pgt_queue pq)
1468 {
1469     struct ifnet *ifp;
1470     struct pgt_desc *pd;
1471     struct mbuf *top, **mp, *m;
1472     size_t datalen;
1473     uint16_t morefrags, dataoff;
1474     int tlen = 0;
1475
1476     ifp = &sc->sc_ic.ic_if;
1477     m = NULL;
1478     top = NULL;
1479     mp = &top;
1480
1481     while ((pd = TAILQ_FIRST(&sc->sc_dirtyq[pq])) != NULL) {
1482         TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
1483         sc->sc_dirtyq_count[pq]--;
1484         datalen = letoh16(pd->pd_fragp->pf_size);
1485         dataoff = letoh32(pd->pd_fragp->pf_addr) - pd->pd_dmaaddr;
1486         morefrags = pd->pd_fragp->pf_flags & htole16(PF_FLAG_MF);
1487
1488         if (sc->sc_debug & SC_DEBUG_RXFRAG)
1489             DPRINTF(("%s: rx frag: len %u memoff %u flags %x\n",
1490                 sc->sc_dev.dv_xname, datalen, dataoff,
1491                 pd->pd_fragp->pf_flags));
1492
1493         /* Add the (two+?) bytes for the header. */
1494         if (datalen + dataoff > PGT_FRAG_SIZE) {
1495             if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
1496                 DPRINTF(("%s data rx too big: %u\n",
1497                     sc->sc_dev.dv_xname, datalen));
1498             goto fail;
1499         }
1500
1501         if (m == NULL)
1502             MGETHDR(m, M_DONTWAIT, MT_DATA);
1503         else
1504             m = m_get(M_DONTWAIT, MT_DATA);
1505
1506         if (m == NULL)
1507             goto fail;
1508         if (datalen > MHLEN) {
1509             MCLGET(m, M_DONTWAIT);
1510             if (!(m->m_flags & M_EXT)) {
1511                 m_free(m);
1512                 goto fail;
1513             }
1514         }
1515         bcopy(pd->pd_mem + dataoff, mtod(m, char *), datalen);
1516         m->m_len = datalen;
1517         tlen += datalen;
1518
1519         *mp = m;
1520         mp = &m->m_next;
1521
1522         TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
1523         sc->sc_freeq_count[pq]++;
1524         pgt_reinit_rx_desc_frag(sc, pd);
1525
1526         if (!morefrags)
1527             break;
1528     }
1529
1530     if (top) {
1531         top->m_pkthdr.len = tlen;
1532     }
1533     return (top);
1534
1535 fail:
1536     TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
1537     sc->sc_freeq_count[pq]++;
1538     pgt_reinit_rx_desc_frag(sc, pd);
1539
1540     ifp->if_ierrors++;
1541     m_freem(top);
1542     return (NULL);
1543 }
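/*
 * The reassembly loop above uses the usual tail-pointer idiom to
 * append fragments in O(1): "mp" always addresses the location (first
 * "top", then the previous mbuf's m_next) where the next fragment must
 * be linked.  A minimal standalone sketch of the idiom, with
 * next_fragment() standing in as a hypothetical source:
 *
 *     struct mbuf *top = NULL, **mp = &top, *m;
 *
 *     while ((m = next_fragment()) != NULL) {
 *         *mp = m;                // link after the previous mbuf
 *         mp = &m->m_next;        // future links go after this one
 *     }
 */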
1544
1545 int
1546 pgt_oid_get(struct pgt_softc *sc, enum pgt_oid oid,
1547     void *arg, size_t arglen)
1548 {
1549     struct pgt_mgmt_desc pmd;
1550     int error;
1551
1552     bzero(&pmd, sizeof(pmd));
1553     pmd.pmd_recvbuf = arg;
1554     pmd.pmd_len = arglen;
1555     pmd.pmd_oid = oid;
1556
1557     error = pgt_mgmt_request(sc, &pmd);
1558     if (error == 0)
1559         error = pmd.pmd_error;
1560     if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
1561         DPRINTF(("%s: failure getting oid %#x: %d\n",
1562             sc->sc_dev.dv_xname, oid, error));
1563
1564     return (error);
1565 }
1566
1567 int
1568 pgt_oid_retrieve(struct pgt_softc *sc, enum pgt_oid oid,
1569     void *arg, size_t arglen)
1570 {
1571     struct pgt_mgmt_desc pmd;
1572     int error;
1573
1574     bzero(&pmd, sizeof(pmd));
1575     pmd.pmd_sendbuf = arg;
1576     pmd.pmd_recvbuf = arg;
1577     pmd.pmd_len = arglen;
1578     pmd.pmd_oid = oid;
1579
1580     error = pgt_mgmt_request(sc, &pmd);
1581     if (error == 0)
1582         error = pmd.pmd_error;
1583     if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
1584         DPRINTF(("%s: failure retrieving oid %#x: %d\n",
1585             sc->sc_dev.dv_xname, oid, error));
1586
1587     return (error);
1588 }
1589
1590 int
1591 pgt_oid_set(struct pgt_softc *sc, enum pgt_oid oid,
1592     const void *arg, size_t arglen)
1593 {
1594     struct pgt_mgmt_desc pmd;
1595     int error;
1596
1597     bzero(&pmd, sizeof(pmd));
1598     pmd.pmd_sendbuf = arg;
1599     pmd.pmd_len = arglen;
1600     pmd.pmd_oid = oid;
1601
1602     error = pgt_mgmt_request(sc, &pmd);
1603     if (error == 0)
1604         error = pmd.pmd_error;
1605     if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
1606         DPRINTF(("%s: failure setting oid %#x: %d\n",
1607             sc->sc_dev.dv_xname, oid, error));
1608
1609     return (error);
1610 }
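/*
 * The three wrappers above differ only in which pgt_mgmt_desc buffers
 * they fill in, which pgt_mgmt_request() below turns into the wire
 * operation:
 *
 *     pgt_oid_get:      pmd_recvbuf only            -> PMF_OP_GET
 *     pgt_oid_retrieve: pmd_sendbuf + pmd_recvbuf   -> PMF_OP_GET
 *     pgt_oid_set:      pmd_sendbuf only            -> PMF_OP_SET
 *
 * (pgt_mgmt_request() picks PMF_OP_GET whenever pmd_recvbuf is set and
 * copies the payload down whenever pmd_sendbuf is set, so "retrieve"
 * is a get that also sends data.)
 */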
1611
1612 void
1613 pgt_state_dump(struct pgt_softc *sc)
1614 {
1615     printf("%s: state dump: control 0x%08x interrupt 0x%08x\n",
1616         sc->sc_dev.dv_xname,
1617         pgt_read_4(sc, PGT_REG_CTRL_STAT),
1618         pgt_read_4(sc, PGT_REG_INT_STAT));
1619
1620     printf("%s: state dump: driver curfrag[]\n",
1621         sc->sc_dev.dv_xname);
1622
1623     printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1624         sc->sc_dev.dv_xname,
1625         letoh32(sc->sc_cb->pcb_driver_curfrag[0]),
1626         letoh32(sc->sc_cb->pcb_driver_curfrag[1]),
1627         letoh32(sc->sc_cb->pcb_driver_curfrag[2]),
1628         letoh32(sc->sc_cb->pcb_driver_curfrag[3]),
1629         letoh32(sc->sc_cb->pcb_driver_curfrag[4]),
1630         letoh32(sc->sc_cb->pcb_driver_curfrag[5]));
1631
1632     printf("%s: state dump: device curfrag[]\n",
1633         sc->sc_dev.dv_xname);
1634
1635     printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1636         sc->sc_dev.dv_xname,
1637         letoh32(sc->sc_cb->pcb_device_curfrag[0]),
1638         letoh32(sc->sc_cb->pcb_device_curfrag[1]),
1639         letoh32(sc->sc_cb->pcb_device_curfrag[2]),
1640         letoh32(sc->sc_cb->pcb_device_curfrag[3]),
1641         letoh32(sc->sc_cb->pcb_device_curfrag[4]),
1642         letoh32(sc->sc_cb->pcb_device_curfrag[5]));
1643 }
1644
1645 int
1646 pgt_mgmt_request(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
1647 {
1648     struct pgt_desc *pd;
1649     struct pgt_mgmt_frame *pmf;
1650     int error, i, ret;
1651
1652     if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
1653         return (EIO);
1654     if (pmd->pmd_len > PGT_FRAG_SIZE - sizeof(*pmf))
1655         return (ENOMEM);
1656     pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_MGMT_TX]);
1657     if (pd == NULL)
1658         return (ENOMEM);
1659     error = pgt_load_tx_desc_frag(sc, PGT_QUEUE_MGMT_TX, pd);
1660     if (error)
1661         return (error);
1662     pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
1663     pmf->pmf_version = PMF_VER;
1664     /* "get" and "retrieve" operations look the same */
1665     if (pmd->pmd_recvbuf != NULL)
1666         pmf->pmf_operation = PMF_OP_GET;
1667     else
1668         pmf->pmf_operation = PMF_OP_SET;
1669     pmf->pmf_oid = htobe32(pmd->pmd_oid);
1670     pmf->pmf_device = PMF_DEV;
1671     pmf->pmf_flags = 0;
1672     pmf->pmf_size = htobe32(pmd->pmd_len);
1673     /* "set" and "retrieve" operations both send data */
1674     if (pmd->pmd_sendbuf != NULL)
1675         memcpy(pmf + 1, pmd->pmd_sendbuf, pmd->pmd_len);
1676     else
1677         bzero(pmf + 1, pmd->pmd_len);
1678     pmd->pmd_error = EINPROGRESS;
1679     TAILQ_INSERT_TAIL(&sc->sc_mgmtinprog, pmd, pmd_link);
1680     if (sc->sc_debug & SC_DEBUG_MGMT)
1681         DPRINTF(("%s: queue: mgmt %p -> (op %u, oid %#x, len %u)\n",
1682             sc->sc_dev.dv_xname,
1683             pmd, pmf->pmf_operation,
1684             pmd->pmd_oid, pmd->pmd_len));
1685     pgt_desc_transmit(sc, PGT_QUEUE_MGMT_TX, pd,
1686         sizeof(*pmf) + pmd->pmd_len, 0);
1687     /*
1688      * Try for one second, triggering 10 times.
1689      *
1690      * Do our best to work around seemingly buggy CardBus controllers
1691      * on Soekris 4521 that fail to get interrupts with alarming
1692      * regularity: run as if an interrupt occurred and service every
1693      * queue except for mbuf reception.
1694      */
1695     i = 0;
1696     do {
1697         ret = tsleep_nsec(pmd, 0, "pgtmgm", MSEC_TO_NSEC(100));
1698         if (ret != EWOULDBLOCK)
1699             break;
1700         if (pmd->pmd_error != EINPROGRESS)
1701             break;
1702         if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET)) {
1703             pmd->pmd_error = EIO;
1704             TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
1705             break;
1706         }
1707         if (i != 9)
1708             pgt_maybe_trigger(sc, PGT_QUEUE_MGMT_RX);
1709 #ifdef PGT_BUGGY_INTERRUPT_RECOVERY
1710         pgt_update_intr(sc, 0);
1711 #endif
1712     } while (i++ < 10);
1713
1714     if (pmd->pmd_error == EINPROGRESS) {
1715         printf("%s: timeout waiting for management "
1716             "packet response to %#x\n",
1717             sc->sc_dev.dv_xname, pmd->pmd_oid);
1718         TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
1719         if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
1720             pgt_state_dump(sc);
1721         pgt_async_reset(sc);
1722         error = ETIMEDOUT;
1723     } else
1724         error = 0;
1725
1726     return (error);
1727 }
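/*
 * Timeout arithmetic for the loop above: each tsleep_nsec() waits
 * MSEC_TO_NSEC(100) = 100 ms, and the do/while runs until i++ reaches
 * 10, so a request gets on the order of a second (ten-odd 100 ms naps,
 * with a retrigger between most of them) before the driver declares
 * ETIMEDOUT and schedules an async reset.
 */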
1728
1729 void
1730 pgt_desc_transmit(struct pgt_softc *sc, enum pgt_queue pq, struct pgt_desc *pd,
1731     uint16_t len, int morecoming)
1732 {
1733     TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
1734     sc->sc_freeq_count[pq]--;
1735     TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link);
1736     sc->sc_dirtyq_count[pq]++;
1737     if (sc->sc_debug & SC_DEBUG_QUEUES)
1738         DPRINTF(("%s: queue: tx %u -> [%u]\n", sc->sc_dev.dv_xname,
1739             pd->pd_fragnum, pq));
1740     bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
1741         sc->sc_cbdmam->dm_mapsize,
1742         BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
1743     if (morecoming)
1744         pd->pd_fragp->pf_flags |= htole16(PF_FLAG_MF);
1745     pd->pd_fragp->pf_size = htole16(len);
1746     bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
1747         pd->pd_dmam->dm_mapsize,
1748         BUS_DMASYNC_POSTWRITE);
1749     sc->sc_cb->pcb_driver_curfrag[pq] =
1750         htole32(letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) + 1);
1751     bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
1752         sc->sc_cbdmam->dm_mapsize,
1753         BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
1754     if (!morecoming)
1755         pgt_maybe_trigger(sc, pq);
1756 }
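/*
 * DMA ordering in the function above: the shared control block is
 * synced POSTREAD|PREWRITE before the CPU bumps pcb_driver_curfrag and
 * POSTWRITE|PREREAD afterwards, bracketing a write to memory the
 * device also accesses; the fragment's own map is synced once its
 * pf_flags and pf_size fields have been filled in, before
 * pgt_maybe_trigger() pokes the device.
 */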
1757
1758 void
1759 pgt_maybe_trigger(struct pgt_softc *sc, enum pgt_queue pq)
1760 {
1761     unsigned int tries = 1000000 / PGT_WRITEIO_DELAY; /* one second */
1762     uint32_t reg;
1763
1764     if (sc->sc_debug & SC_DEBUG_TRIGGER)
1765         DPRINTF(("%s: triggered by queue [%u]\n",
1766             sc->sc_dev.dv_xname, pq));
1767     pgt_debug_events(sc, "trig");
1768     if (sc->sc_flags & SC_POWERSAVE) {
1769         /* Magic values ahoy? */
1770         if (pgt_read_4(sc, PGT_REG_INT_STAT) == 0xabadface) {
1771             do {
1772                 reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
1773                 if (!(reg & PGT_CTRL_STAT_SLEEPMODE))
1774                     DELAY(PGT_WRITEIO_DELAY);
1775             } while (tries-- != 0);
1776             if (!(reg & PGT_CTRL_STAT_SLEEPMODE)) {
1777                 if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
1778                     DPRINTF(("%s: timeout triggering from "
1779                         "sleep mode\n",
1780                         sc->sc_dev.dv_xname));
1781                 pgt_async_reset(sc);
1782                 return;
1783             }
1784         }
1785         pgt_write_4_flush(sc, PGT_REG_DEV_INT,
1786             PGT_DEV_INT_WAKEUP);
1787         DELAY(PGT_WRITEIO_DELAY);
1788         /* read the status back in */
1789         (void)pgt_read_4(sc, PGT_REG_CTRL_STAT);
1790         DELAY(PGT_WRITEIO_DELAY);
1791     } else {
1792         pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
1793         DELAY(PGT_WRITEIO_DELAY);
1794     }
1795 }
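/*
 * "One second" above, spelled out: tries starts at
 * 1000000 / PGT_WRITEIO_DELAY = 100000 polls of PGT_REG_CTRL_STAT,
 * with a DELAY(PGT_WRITEIO_DELAY) (10 us) pause on each pass that has
 * not yet seen the sleep-mode bit -- roughly 10^6 us in the worst case
 * before the wait is abandoned and the device is reset.
 */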
1796
1797 struct ieee80211_node *
1798 pgt_ieee80211_node_alloc(struct ieee80211com *ic)
1799 {
1800     struct pgt_ieee80211_node *pin;
1801
1802     pin = malloc(sizeof(*pin), M_DEVBUF, M_NOWAIT | M_ZERO);
1803     if (pin != NULL) {
1804         pin->pin_dot1x_auth = PIN_DOT1X_UNAUTHORIZED;
1805     }
1806     return (struct ieee80211_node *)pin;
1807 }
1808
1809 void
1810 pgt_ieee80211_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni,
1811     int reallynew)
1812 {
1813     ieee80211_ref_node(ni);
1814 }
1815
1816 void
1817 pgt_ieee80211_node_free(struct ieee80211com *ic, struct ieee80211_node *ni)
1818 {
1819     struct pgt_ieee80211_node *pin;
1820
1821     pin = (struct pgt_ieee80211_node *)ni;
1822     free(pin, M_DEVBUF, 0);
1823 }
1824
1825 void
1826 pgt_ieee80211_node_copy(struct ieee80211com *ic, struct ieee80211_node *dst,
1827     const struct ieee80211_node *src)
1828 {
1829     const struct pgt_ieee80211_node *psrc;
1830     struct pgt_ieee80211_node *pdst;
1831
1832     psrc = (const struct pgt_ieee80211_node *)src;
1833     pdst = (struct pgt_ieee80211_node *)dst;
1834     bcopy(psrc, pdst, sizeof(*psrc));
1835 }
1836
1837 int
1838 pgt_ieee80211_send_mgmt(struct ieee80211com *ic, struct ieee80211_node *ni,
1839     int type, int arg1, int arg2)
1840 {
1841     return (EOPNOTSUPP);
1842 }
1843
1844 int
1845 pgt_net_attach(struct pgt_softc *sc)
1846 {
1847     struct ieee80211com *ic = &sc->sc_ic;
1848     struct ifnet *ifp = &ic->ic_if;
1849     struct ieee80211_rateset *rs;
1850     uint8_t rates[IEEE80211_RATE_MAXSIZE];
1851     struct pgt_obj_buffer psbuffer;
1852     struct pgt_obj_frequencies *freqs;
1853     uint32_t phymode, country;
1854     unsigned int chan, i, j, firstchan = -1;
1855     int error;
1856
1857     psbuffer.pob_size = htole32(PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT);
1858     psbuffer.pob_addr = htole32(sc->sc_psmdmam->dm_segs[0].ds_addr);
1859     error = pgt_oid_set(sc, PGT_OID_PSM_BUFFER, &psbuffer, sizeof(psbuffer));
1860     if (error)
1861         return (error);
1862     error = pgt_oid_get(sc, PGT_OID_PHY, &phymode, sizeof(phymode));
1863     if (error)
1864         return (error);
1865     error = pgt_oid_get(sc, PGT_OID_MAC_ADDRESS, ic->ic_myaddr,
1866         sizeof(ic->ic_myaddr));
1867     if (error)
1868         return (error);
1869     error = pgt_oid_get(sc, PGT_OID_COUNTRY, &country, sizeof(country));
1870     if (error)
1871         return (error);
1872
1873     ifp->if_softc = sc;
1874     ifp->if_ioctl = pgt_ioctl;
1875     ifp->if_start = pgt_start;
1876     ifp->if_watchdog = pgt_watchdog;
1877     ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
1878     strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
1879
1880     ifq_init_maxlen(&ifp->if_snd, IFQ_MAXLEN);
1881
1882     /*
1883      * Set channels
1884      *
1885      * Prism hardware likes to report supported frequencies that are
1886      * not actually available for the country of origin.
1887      */
1888     j = sizeof(*freqs) + (IEEE80211_CHAN_MAX + 1) * sizeof(uint16_t);
1889     freqs = malloc(j, M_DEVBUF, M_WAITOK);
1890     error = pgt_oid_get(sc, PGT_OID_SUPPORTED_FREQUENCIES, freqs, j);
1891     if (error) {
1892         free(freqs, M_DEVBUF, 0);
1893         return (error);
1894     }
1895
1896     for (i = 0, j = letoh16(freqs->pof_count); i < j; i++) {
1897         chan = ieee80211_mhz2ieee(letoh16(freqs->pof_freqlist_mhz[i]),
1898             0);
1899
1900         if (chan > IEEE80211_CHAN_MAX) {
1901             printf("%s: reported bogus channel (%uMHz)\n",
1902                 sc->sc_dev.dv_xname, chan);
1903             free(freqs, M_DEVBUF, 0);
1904             return (EIO);
1905         }
1906
1907         if (letoh16(freqs->pof_freqlist_mhz[i]) < 5000) {
1908             if (!(phymode & htole32(PGT_OID_PHY_2400MHZ)))
1909                 continue;
1910             if (country == letoh32(PGT_COUNTRY_USA)) {
1911                 if (chan >= 12 && chan <= 14)
1912                     continue;
1913             }
1914             if (chan <= 14)
1915                 ic->ic_channels[chan].ic_flags |=
1916                     IEEE80211_CHAN_B;
1917             ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_PUREG;
1918         } else {
1919             if (!(phymode & htole32(PGT_OID_PHY_5000MHZ)))
1920                 continue;
1921             ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_A;
1922         }
1923
1924         ic->ic_channels[chan].ic_freq =
1925             letoh16(freqs->pof_freqlist_mhz[i]);
1926
1927         if (firstchan == -1)
1928             firstchan = chan;
1929
1930         DPRINTF(("%s: set channel %d to freq %uMHz\n",
1931             sc->sc_dev.dv_xname, chan,
1932             letoh16(freqs->pof_freqlist_mhz[i])));
1933     }
1934     free(freqs, M_DEVBUF, 0);
1935     if (firstchan == -1) {
1936         printf("%s: no channels found\n", sc->sc_dev.dv_xname);
1937         return (EIO);
1938     }
1939
1940     /*
1941      * Set rates
1942      */
1943     bzero(rates, sizeof(rates));
1944     error = pgt_oid_get(sc, PGT_OID_SUPPORTED_RATES, rates, sizeof(rates));
1945     if (error)
1946         return (error);
1947     for (i = 0; i < sizeof(rates) && rates[i] != 0; i++) {
1948         switch (rates[i]) {
1949         case 2:
1950         case 4:
1951         case 11:
1952         case 22:
1953         case 44: /* maybe */
1954             if (phymode & htole32(PGT_OID_PHY_2400MHZ)) {
1955                 rs = &ic->ic_sup_rates[IEEE80211_MODE_11B];
1956                 rs->rs_rates[rs->rs_nrates++] = rates[i];
1957             }
1958         default:
1959             if (phymode & htole32(PGT_OID_PHY_2400MHZ)) {
1960                 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
1961                 rs->rs_rates[rs->rs_nrates++] = rates[i];
1962             }
1963             if (phymode & htole32(PGT_OID_PHY_5000MHZ)) {
1964                 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
1965                 rs->rs_rates[rs->rs_nrates++] = rates[i];
1966             }
1967             rs = &ic->ic_sup_rates[IEEE80211_MODE_AUTO];
1968             rs->rs_rates[rs->rs_nrates++] = rates[i];
1969         }
1970     }
1971
1972     ic->ic_caps = IEEE80211_C_WEP | IEEE80211_C_PMGT | IEEE80211_C_TXPMGT |
1973         IEEE80211_C_SHSLOT | IEEE80211_C_SHPREAMBLE | IEEE80211_C_MONITOR;
1974 #ifndef IEEE80211_STA_ONLY
1975     ic->ic_caps |= IEEE80211_C_IBSS | IEEE80211_C_HOSTAP;
1976 #endif
1977     ic->ic_opmode = IEEE80211_M_STA;
1978     ic->ic_state = IEEE80211_S_INIT;
1979
1980     if_attach(ifp);
1981     ieee80211_ifattach(ifp);
1982
1983     /* setup post-attach/pre-lateattach vector functions */
1984     sc->sc_newstate = ic->ic_newstate;
1985     ic->ic_newstate = pgt_newstate;
1986     ic->ic_node_alloc = pgt_ieee80211_node_alloc;
1987     ic->ic_newassoc = pgt_ieee80211_newassoc;
1988     ic->ic_node_free = pgt_ieee80211_node_free;
1989     ic->ic_node_copy = pgt_ieee80211_node_copy;
1990     ic->ic_send_mgmt = pgt_ieee80211_send_mgmt;
1991     ic->ic_max_rssi = 255; /* rssi is a u_int8_t */
1992
1993     /* let net80211 handle switching around the media + resetting */
1994     ieee80211_media_init(ifp, pgt_media_change, pgt_media_status);
1995
1996 #if NBPFILTER > 0
1997     bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
1998         sizeof(struct ieee80211_frame) + 64);
1999
2000     sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
2001     sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
2002     sc->sc_rxtap.wr_ihdr.it_present = htole32(PGT_RX_RADIOTAP_PRESENT);
2003
2004     sc->sc_txtap_len = sizeof(sc->sc_txtapu);
2005     sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
2006     sc->sc_txtap.wt_ihdr.it_present = htole32(PGT_TX_RADIOTAP_PRESENT);
2007 #endif
2008     return (0);
2009 }
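/*
 * Sizing note for the power-save buffer handed to the firmware at the
 * top of pgt_net_attach(): PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT
 * = 1536 * 64 = 98304 bytes of DMA memory at sc_psmdmam -- room for
 * 64 full-size fragments, presumably used by the firmware to buffer
 * frames for power-saving stations.
 */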
2010
2011 int
2012 pgt_media_change(struct ifnet *ifp)
2013 {
2014     struct pgt_softc *sc = ifp->if_softc;
2015     int error;
2016
2017     error = ieee80211_media_change(ifp);
2018     if (error == ENETRESET) {
2019         pgt_update_hw_from_sw(sc, 0);
2020         error = 0;
2021     }
2022
2023     return (error);
2024 }
2025
2026 void
2027 pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr)
2028 {
2029     struct pgt_softc *sc = ifp->if_softc;
2030     struct ieee80211com *ic = &sc->sc_ic;
2031     uint32_t rate;
2032     int s;
2033
2034     imr->ifm_status = 0;
2035     imr->ifm_active = IFM_IEEE80211 | IFM_NONE;
2036
2037     if (!(ifp->if_flags & IFF_UP))
2038         return;
2039
2040     s = splnet();
2041
2042     if (ic->ic_fixed_rate != -1) {
2043         rate = ic->ic_sup_rates[ic->ic_curmode].
2044             rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
2045     } else {
2046         if (pgt_oid_get(sc, PGT_OID_LINK_STATE, &rate, sizeof(rate)))
2047             goto out;
2048         rate = letoh32(rate);
2049         if (sc->sc_debug & SC_DEBUG_LINK) {
2050             DPRINTF(("%s: %s: link rate %u\n",
2051                 sc->sc_dev.dv_xname, __func__, rate));
2052         }
2053         if (rate == 0)
2054             goto out;
2055     }
2056
2057     imr->ifm_status = IFM_AVALID;
2058     imr->ifm_active = IFM_IEEE80211;
2059     if (ic->ic_state == IEEE80211_S_RUN)
2060         imr->ifm_status |= IFM_ACTIVE;
2061
2062     imr->ifm_active |= ieee80211_rate2media(ic, rate, ic->ic_curmode);
2063
2064     switch (ic->ic_opmode) {
2065     case IEEE80211_M_STA:
2066         break;
2067 #ifndef IEEE80211_STA_ONLY
2068     case IEEE80211_M_IBSS:
2069         imr->ifm_active |= IFM_IEEE80211_ADHOC;
2070         break;
2071     case IEEE80211_M_AHDEMO:
2072         imr->ifm_active |= IFM_IEEE80211_ADHOC | IFM_FLAG0;
2073         break;
2074     case IEEE80211_M_HOSTAP:
2075         imr->ifm_active |= IFM_IEEE80211_HOSTAP;
2076         break;
2077 #endif
2078     case IEEE80211_M_MONITOR:
2079         imr->ifm_active |= IFM_IEEE80211_MONITOR;
2080         break;
2081     default:
2082         break;
2083     }
2084
2085 out:
2086     splx(s);
2087 }
2088
2089 /*
2090  * Start data frames.  Critical sections surround the boundary of
2091  * management frame transmission / transmission acknowledgement / response
2092  * and data frame transmission / transmission acknowledgement.
2093  */
2094 void
2095 pgt_start(struct ifnet *ifp)
2096 {
2097     struct pgt_softc *sc;
2098     struct ieee80211com *ic;
2099     struct pgt_desc *pd;
2100     struct mbuf *m;
2101     int error;
2102
2103     sc = ifp->if_softc;
2104     ic = &sc->sc_ic;
2105
2106     if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET) ||
2107         !(ifp->if_flags & IFF_RUNNING) ||
2108         ic->ic_state != IEEE80211_S_RUN) {
2109         return;
2110     }
2111
2112     /*
2113      * Management packets should probably be MLME frames
2114      * (i.e. hostap "managed" mode); we don't touch the
2115      * net80211 management queue.
2116      */
2117     for (; sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] <
2118         PGT_QUEUE_FULL_THRESHOLD && !ifq_empty(&ifp->if_snd);) {
2119         pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_DATA_LOW_TX]);
2120         m = ifq_deq_begin(&ifp->if_snd);
2121         if (m == NULL)
2122             break;
2123         if (m->m_pkthdr.len <= PGT_FRAG_SIZE) {
2124             error = pgt_load_tx_desc_frag(sc,
2125                 PGT_QUEUE_DATA_LOW_TX, pd);
2126             if (error) {
2127                 ifq_deq_rollback(&ifp->if_snd, m);
2128                 break;
2129             }
2130             ifq_deq_commit(&ifp->if_snd, m);
2131             m_copydata(m, 0, m->m_pkthdr.len, pd->pd_mem);
2132             pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
2133                 pd, m->m_pkthdr.len, 0);
2134         } else if (m->m_pkthdr.len <= PGT_FRAG_SIZE * 2) {
2135             struct pgt_desc *pd2;
2136
2137             /*
2138              * Transmit a fragmented frame if there is
2139              * not enough room in one fragment; limit
2140              * to two fragments (802.11 itself couldn't
2141              * even support a full two.)
2142              */
2143             if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] + 2 >
2144                 PGT_QUEUE_FULL_THRESHOLD) {
2145                 ifq_deq_rollback(&ifp->if_snd, m);
2146                 break;
2147             }
2148             pd2 = TAILQ_NEXT(pd, pd_link);
2149             error = pgt_load_tx_desc_frag(sc,
2150                 PGT_QUEUE_DATA_LOW_TX, pd);
2151             if (error == 0) {
2152                 error = pgt_load_tx_desc_frag(sc,
2153                     PGT_QUEUE_DATA_LOW_TX, pd2);
2154                 if (error) {
2155                     pgt_unload_tx_desc_frag(sc, pd);
2156                     TAILQ_INSERT_HEAD(&sc->sc_freeq[
2157                         PGT_QUEUE_DATA_LOW_TX], pd,
2158                         pd_link);
2159                 }
2160             }
2161             if (error) {
2162                 ifq_deq_rollback(&ifp->if_snd, m);
2163                 break;
2164             }
2165             ifq_deq_commit(&ifp->if_snd, m);
2166             m_copydata(m, 0, PGT_FRAG_SIZE, pd->pd_mem);
2167             pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
2168                 pd, PGT_FRAG_SIZE, 1);
2169             m_copydata(m, PGT_FRAG_SIZE,
2170                 m->m_pkthdr.len - PGT_FRAG_SIZE, pd2->pd_mem);
2171             pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
2172                 pd2, m->m_pkthdr.len - PGT_FRAG_SIZE, 0);
2173         } else {
2174             ifq_deq_commit(&ifp->if_snd, m);
2175             ifp->if_oerrors++;
2176             m_freem(m);
2177             m = NULL;
2178         }
2179         if (m != NULL) {
2180             struct ieee80211_node *ni;
2181 #if NBPFILTER > 0
2182             if (ifp->if_bpf != NULL)
2183                 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
2184 #endif
2185             ifp->if_timer = 1;
2186             sc->sc_txtimer = 5;
2187             ni = ieee80211_find_txnode(&sc->sc_ic,
2188                 mtod(m, struct ether_header *)->ether_dhost);
2189             if (ni != NULL) {
2190                 ni->ni_inact = 0;
2191                 if (ni != ic->ic_bss)
2192                     ieee80211_release_node(&sc->sc_ic, ni);
2193             }
2194 #if NBPFILTER > 0
2195             if (sc->sc_drvbpf != NULL) {
2196                 struct mbuf mb;
2197                 struct ether_header eh;
2198                 struct pgt_tx_radiotap_hdr *tap = &sc->sc_txtap;
2199
2200                 bcopy(mtod(m, struct ether_header *), &eh,
2201                     sizeof(eh));
2202                 m_adj(m, sizeof(eh));
2203                 m = pgt_ieee80211_encap(sc, &eh, m, NULL);
2204
2205                 tap->wt_flags = 0;
2206                 //tap->wt_rate = rate;
2207                 tap->wt_rate = 0;
2208                 tap->wt_chan_freq =
2209                     htole16(ic->ic_bss->ni_chan->ic_freq);
2210                 tap->wt_chan_flags =
2211                     htole16(ic->ic_bss->ni_chan->ic_flags);
2212
2213                 if (m != NULL) {
2214                     mb.m_data = (caddr_t)tap;
2215                     mb.m_len = sc->sc_txtap_len;
2216                     mb.m_next = m;
2217                     mb.m_nextpkt = NULL;
2218                     mb.m_type = 0;
2219                     mb.m_flags = 0;
2220
2221                     bpf_mtap(sc->sc_drvbpf, &mb,
2222                         BPF_DIRECTION_OUT);
2223                 }
2224             }
2225 #endif
2226             m_freem(m);
2227         }
2228     }
2229 }
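/*
 * TX sizing in pgt_start() above: frames of up to PGT_FRAG_SIZE (1536)
 * bytes use a single descriptor; frames up to 2 * PGT_FRAG_SIZE (3072)
 * bytes are split at the 1536-byte boundary across two descriptors,
 * with the more-fragments flag set on the first; anything larger is
 * dropped and counted in if_oerrors.
 */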
2230
2231 int
2232 pgt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t req)
2233 {
2234     struct pgt_softc *sc = ifp->if_softc;
2235     struct ifreq *ifr;
2236     struct wi_req *wreq;
2237     struct ieee80211_nodereq_all *na;
2238     struct ieee80211com *ic;
2239     struct pgt_obj_bsslist *pob;
2240     struct wi_scan_p2_hdr *p2hdr;
2241     struct wi_scan_res *res;
2242     uint32_t noise;
2243     int maxscan, i, j, s, error = 0;
2244
2245     ic = &sc->sc_ic;
2246     ifr = (struct ifreq *)req;
2247
2248     s = splnet();
2249     switch (cmd) {
2250     case SIOCS80211SCAN:
2251         /*
2252          * This chip always scans as soon as it gets initialized.
2253          */
2254         break;
2255     case SIOCG80211ALLNODES: {
2256         struct ieee80211_nodereq *nr = NULL;
2257         na = (struct ieee80211_nodereq_all *)req;
2258         wreq = malloc(sizeof(*wreq), M_DEVBUF, M_WAITOK | M_ZERO);
2259
2260         maxscan = PGT_OBJ_BSSLIST_NBSS;
2261         pob = malloc(sizeof(*pob) +
2262             sizeof(struct pgt_obj_bss) * maxscan, M_DEVBUF, M_WAITOK);
2263         error = pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise,
2264             sizeof(noise));
2265
2266         if (error == 0) {
2267             noise = letoh32(noise);
2268             error = pgt_oid_get(sc, PGT_OID_BSS_LIST, pob,
2269                 sizeof(*pob) +
2270                 sizeof(struct pgt_obj_bss) * maxscan);
2271         }
2272
2273         if (error == 0) {
2274             maxscan = min(PGT_OBJ_BSSLIST_NBSS,
2275                 letoh32(pob->pob_count));
2276             maxscan = min(maxscan,
2277                 (sizeof(wreq->wi_val) - sizeof(*p2hdr)) /
2278                 WI_PRISM2_RES_SIZE);
2279             p2hdr = (struct wi_scan_p2_hdr *)&wreq->wi_val;
2280             p2hdr->wi_rsvd = 0;
2281             p2hdr->wi_reason = 1;
2282             wreq->wi_len = (maxscan * WI_PRISM2_RES_SIZE) / 2 +
2283                 sizeof(*p2hdr) / 2;
2284             wreq->wi_type = WI_RID_SCAN_RES;
2285         }
2286
2287         for (na->na_nodes = j = i = 0; i < maxscan &&
2288             (na->na_size >= j + sizeof(struct ieee80211_nodereq));
2289             i++) {
2290             /* allocate node space */
2291             if (nr == NULL)
2292                 nr = malloc(sizeof(*nr), M_DEVBUF, M_WAITOK);
2293
2294             /* get next BSS scan result */
2295             res = (struct wi_scan_res *)
2296                 ((char *)&wreq->wi_val + sizeof(*p2hdr) +
2297                 i * WI_PRISM2_RES_SIZE);
2298             pgt_obj_bss2scanres(sc, &pob->pob_bsslist[i],
2299                 res, noise);
2300
2301             /* copy it to node structure for ifconfig to read */
2302             bzero(nr, sizeof(*nr));
2303             IEEE80211_ADDR_COPY(nr->nr_macaddr, res->wi_bssid);
2304             IEEE80211_ADDR_COPY(nr->nr_bssid, res->wi_bssid);
2305             nr->nr_channel = letoh16(res->wi_chan);
2306             nr->nr_chan_flags = IEEE80211_CHAN_B;
2307             nr->nr_rssi = letoh16(res->wi_signal);
2308             nr->nr_max_rssi = 0; /* XXX */
2309             nr->nr_nwid_len = letoh16(res->wi_ssid_len);
2310             bcopy(res->wi_ssid, nr->nr_nwid, nr->nr_nwid_len);
2311             nr->nr_intval = letoh16(res->wi_interval);
2312             nr->nr_capinfo = letoh16(res->wi_capinfo);
2313             nr->nr_txrate = res->wi_rate == WI_WAVELAN_RES_1M ? 2 :
2314                 (res->wi_rate == WI_WAVELAN_RES_2M ? 4 :
2315                 (res->wi_rate == WI_WAVELAN_RES_5M ? 11 :
2316                 (res->wi_rate == WI_WAVELAN_RES_11M ? 22 : 0)));
2317             nr->nr_nrates = 0;
2318             while (res->wi_srates[nr->nr_nrates] != 0) {
2319                 nr->nr_rates[nr->nr_nrates] =
2320                     res->wi_srates[nr->nr_nrates] &
2321                     WI_VAR_SRATES_MASK;
2322                 nr->nr_nrates++;
2323             }
2324             nr->nr_flags = 0;
2325             if (bcmp(nr->nr_macaddr, nr->nr_bssid,
2326                 IEEE80211_ADDR_LEN) == 0)
2327                 nr->nr_flags |= IEEE80211_NODEREQ_AP;
2328             error = copyout(nr, (caddr_t)na->na_node + j,
2329                 sizeof(struct ieee80211_nodereq));
2330             if (error)
2331                 break;
2332
2333             /* point to next node entry */
2334             j += sizeof(struct ieee80211_nodereq);
2335             na->na_nodes++;
2336         }
2337         if (nr)
2338             free(nr, M_DEVBUF, 0);
2339         free(pob, M_DEVBUF, 0);
2340         free(wreq, M_DEVBUF, 0);
2341         break;
2342     }
2343     case SIOCSIFADDR:
2344         ifp->if_flags |= IFF_UP;
2345         /* FALLTHROUGH */
2346     case SIOCSIFFLAGS:
2347         if (ifp->if_flags & IFF_UP) {
2348             if ((ifp->if_flags & IFF_RUNNING) == 0) {
2349                 pgt_init(ifp);
2350                 error = ENETRESET;
2351             }
2352         } else {
2353             if (ifp->if_flags & IFF_RUNNING) {
2354                 pgt_stop(sc, SC_NEEDS_RESET);
2355                 error = ENETRESET;
2356             }
2357         }
2358         break;
2359     case SIOCSIFMTU:
2360         if (ifr->ifr_mtu > PGT_FRAG_SIZE) {
2361             error = EINVAL;
2362             break;
2363         }
2364         /* FALLTHROUGH */
2365     default:
2366         error = ieee80211_ioctl(ifp, cmd, req);
2367         break;
2368     }
2369
2370     if (error == ENETRESET) {
2371         pgt_update_hw_from_sw(sc, 0);
2372         error = 0;
2373     }
2374     splx(s);
2375
2376     return (error);
2377 }
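/*
 * The SIOCG80211ALLNODES case above converts each scan hit twice:
 * firmware pgt_obj_bss -> WaveLAN-style wi_scan_res (via
 * pgt_obj_bss2scanres() below) -> ieee80211_nodereq copied out to
 * userland, so tools like ifconfig see the same node format shared
 * with other net80211 drivers.
 */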
2378
2379 void
2380 pgt_obj_bss2scanres(struct pgt_softc *sc, struct pgt_obj_bss *pob,
2381     struct wi_scan_res *scanres, uint32_t noise)
2382 {
2383     struct ieee80211_rateset *rs;
2384     struct wi_scan_res ap;
2385     unsigned int i, n;
2386
2387     rs = &sc->sc_ic.ic_sup_rates[IEEE80211_MODE_AUTO];
2388     bzero(&ap, sizeof(ap));
2389     ap.wi_chan = ieee80211_mhz2ieee(letoh16(pob->pob_channel), 0);
2390     ap.wi_noise = noise;
2391     ap.wi_signal = letoh16(pob->pob_rssi);
2392     IEEE80211_ADDR_COPY(ap.wi_bssid, pob->pob_address);
2393     ap.wi_interval = letoh16(pob->pob_beacon_period);
2394     ap.wi_capinfo = letoh16(pob->pob_capinfo);
2395     ap.wi_ssid_len = min(sizeof(ap.wi_ssid), pob->pob_ssid.pos_length);
2396     memcpy(ap.wi_ssid, pob->pob_ssid.pos_ssid, ap.wi_ssid_len);
2397     n = 0;
2398     for (i = 0; i < 16; i++) {
2399         if (letoh16(pob->pob_rates) & (1 << i)) {
2400             if (i >= rs->rs_nrates)
2401                 break;
2402             ap.wi_srates[n++] = ap.wi_rate = rs->rs_rates[i];
2403             if (n >= sizeof(ap.wi_srates) / sizeof(ap.wi_srates[0]))
2404                 break;
2405         }
2406     }
2407     memcpy(scanres, &ap, WI_PRISM2_RES_SIZE);
2408 }
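/*
 * pob_rates above is a bitmask over the AUTO-mode rate set: bit i set
 * means rs_rates[i] is supported.  For example, assuming the common
 * 11b set rs_rates = { 2, 4, 11, 22 } (units of 500 kb/s), a mask of
 * 0x000b (bits 0, 1 and 3) would produce wi_srates = { 2, 4, 22 } and
 * leave wi_rate = 22, i.e. 11 Mb/s.
 */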
2409
2410 void
2411 node_mark_active_ap(void *arg, struct ieee80211_node *ni)
2412 {
2413     /*
2414      * HostAP mode lets all nodes stick around unless
2415      * the firmware AP kicks them off.
2416      */
2417     ni->ni_inact = 0;
2418 }
2419
2420 void
2421 node_mark_active_adhoc(void *arg, struct ieee80211_node *ni)
2422 {
2423     struct pgt_ieee80211_node *pin;
2424
2425     /*
2426      * As there is no association in ad-hoc, we let links just
2427      * time out naturally as long as they are not holding any private
2428      * configuration, such as 802.1x authorization.
2429      */
2430     pin = (struct pgt_ieee80211_node *)ni;
2431     if (pin->pin_dot1x_auth == PIN_DOT1X_AUTHORIZED)
2432         pin->pin_node.ni_inact = 0;
2433 }
2434
2435 void
2436 pgt_watchdog(struct ifnet *ifp)
2437 {
2438     struct pgt_softc *sc;
2439
2440     sc = ifp->if_softc;
2441     /*
2442      * Check for timed out transmissions (and make sure to set
2443      * this watchdog to fire again if there is still data in the
2444      * output device queue).
2445      */
2446     if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] != 0) {
2447         int count;
2448
2449         ifp->if_timer = 1;
2450         if (sc->sc_txtimer && --sc->sc_txtimer == 0) {
2451             count = pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
2452             if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
2453                 DPRINTF(("%s: timeout %d data transmissions\n",
2454                     sc->sc_dev.dv_xname, count));
2455         }
2456     }
2457     if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
2458         return;
2459     /*
2460      * If we're going to kick the device out of power-save mode
2461      * just to update the BSSID and such, we should not do it
2462      * very often; we still need to determine how best to do that.
2463      */
2464     if (ifp->if_flags & IFF_RUNNING &&
2465         sc->sc_ic.ic_state != IEEE80211_S_INIT &&
2466         sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR)
2467         pgt_async_update(sc);
2468
2469 #ifndef IEEE80211_STA_ONLY
2470     /*
2471      * As a firmware-based HostAP, we should not time out
2472      * nodes inside the driver in addition to the timeout
2473      * that exists in the firmware.  The only things we
2474      * should have to deal with timing out when doing HostAP
2475      * are the privacy-related ones.
2476      */
2477     switch (sc->sc_ic.ic_opmode) {
2478     case IEEE80211_M_HOSTAP:
2479         ieee80211_iterate_nodes(&sc->sc_ic,
2480             node_mark_active_ap, NULL);
2481         break;
2482     case IEEE80211_M_IBSS:
2483         ieee80211_iterate_nodes(&sc->sc_ic,
2484             node_mark_active_adhoc, NULL);
2485         break;
2486     default:
2487         break;
2488     }
2489 #endif
2490     ieee80211_watchdog(ifp);
2491     ifp->if_timer = 1;
2492 }
2493
2494 int
2495 pgt_init(struct ifnet *ifp)
2496 {
2497     struct pgt_softc *sc = ifp->if_softc;
2498     struct ieee80211com *ic = &sc->sc_ic;
2499
2500     /* set default channel */
2501     ic->ic_bss->ni_chan = ic->ic_ibss_chan;
2502
2503     if (!(sc->sc_flags & (SC_DYING | SC_UNINITIALIZED)))
2504         pgt_update_hw_from_sw(sc,
2505             ic->ic_state != IEEE80211_S_INIT);
2506
2507     ifp->if_flags |= IFF_RUNNING;
2508     ifq_clr_oactive(&ifp->if_snd);
2509
2510     /* Begin background scanning */
2511     ieee80211_new_state(&sc->sc_ic, IEEE80211_S_SCAN, -1);
2512
2513     return (0);
2514 }
2515
2516/*
2517 * After most every configuration change, everything needs to be fully
2518 * reinitialized. For some operations (currently, WEP settings
2519 * in ad-hoc+802.1x mode), the change is "soft" and doesn't remove
2520 * "associations," and allows EAP authorization to occur again.
2521 * If keepassoc is specified, the reset operation should try to go
2522 * back to the BSS had before.
2523 */
2524void
2525pgt_update_hw_from_sw(struct pgt_softc *sc, int keepassoc)
2526{
2527 struct ieee80211com *ic = &sc->sc_ic;
2528 struct arpcom *ac = &ic->ic_ac;
2529 struct ifnet *ifp = &ac->ac_if;
2530 struct pgt_obj_key keyobj;
2531 struct pgt_obj_ssid essid;
2532 uint8_t availrates[IEEE80211_RATE_MAXSIZE15 + 1];
2533 uint32_t mode, bsstype, config, profile, channel, slot, preamble;
2534 uint32_t wep, exunencrypted, wepkey, dot1x, auth, mlme;
2535 unsigned int i;
2536 int success, shouldbeup, s;
2537
2538 config = PGT_CONFIG_MANUAL_RUN0x00000001 | PGT_CONFIG_RX_ANNEX0x00000004;
2539
2540 /*
2541 * Promiscuous mode is currently a no-op since packets transmitted,
2542 * while in promiscuous mode, don't ever seem to go anywhere.
2543 */
2544 shouldbeup = ifp->if_flags & IFF_RUNNING0x40 && ifp->if_flags & IFF_UP0x1;
2545
2546 if (shouldbeup) {
2547 switch (ic->ic_opmode) {
2548 case IEEE80211_M_STA:
2549 if (ifp->if_flags & IFF_PROMISC0x100)
2550 mode = PGT_MODE_CLIENT1; /* what to do? */
2551 else
2552 mode = PGT_MODE_CLIENT1;
2553 bsstype = PGT_BSS_TYPE_STA1;
2554 dot1x = PGT_DOT1X_AUTH_ENABLED1;
2555 break;
2556#ifndef IEEE80211_STA_ONLY
2557 case IEEE80211_M_IBSS:
2558 if (ifp->if_flags & IFF_PROMISC0x100)
2559 mode = PGT_MODE_CLIENT1; /* what to do? */
2560 else
2561 mode = PGT_MODE_CLIENT1;
2562 bsstype = PGT_BSS_TYPE_IBSS2;
2563 dot1x = PGT_DOT1X_AUTH_ENABLED1;
2564 break;
2565 case IEEE80211_M_HOSTAP:
2566 mode = PGT_MODE_AP2;
2567 bsstype = PGT_BSS_TYPE_STA1;
2568 /*
2569 * For IEEE 802.1x, we need to authenticate and
2570 * authorize hosts from here on or they remain
2571 * associated but without the ability to send or
2572 * receive normal traffic to us (courtesy the
2573 * firmware AP implementation).
2574 */
2575 dot1x = PGT_DOT1X_AUTH_ENABLED1;
2576 /*
2577 * WDS mode needs several things to work:
2578 * discovery of exactly how creating the WDS
2579 * links is meant to function, an interface
2580 * for this, and ability to encode or decode
2581 * the WDS frames.
2582 */
2583 if (sc->sc_wds)
2584 config |= PGT_CONFIG_WDS0x00000010;
2585 break;
2586#endif
2587 case IEEE80211_M_MONITOR:
2588 mode = PGT_MODE_PROMISCUOUS0;
2589 bsstype = PGT_BSS_TYPE_ANY3;
2590 dot1x = PGT_DOT1X_AUTH_NONE0;
2591 break;
2592 default:
2593 goto badopmode;
2594 }
2595 } else {
2596badopmode:
2597 mode = PGT_MODE_CLIENT1;
2598 bsstype = PGT_BSS_TYPE_NONE0;
2599 }
2600
2601 DPRINTF(("%s: current mode is ", sc->sc_dev.dv_xname));
2602 switch (ic->ic_curmode) {
2603 case IEEE80211_MODE_11A:
2604 profile = PGT_PROFILE_A_ONLY6;
2605 preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC2;
2606 DPRINTF(("IEEE80211_MODE_11A\n"));
2607 break;
2608 case IEEE80211_MODE_11B:
2609 profile = PGT_PROFILE_B_ONLY0;
2610 preamble = PGT_OID_PREAMBLE_MODE_LONG0;
2611 DPRINTF(("IEEE80211_MODE_11B\n"));
2612 break;
2613 case IEEE80211_MODE_11G:
2614 profile = PGT_PROFILE_G_ONLY3;
2615 preamble = PGT_OID_PREAMBLE_MODE_SHORT1;
2616 DPRINTF(("IEEE80211_MODE_11G\n"));
2617 break;
2618 case IEEE80211_MODE_AUTO:
2619 profile = PGT_PROFILE_MIXED_G_WIFI1;
2620 preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC2;
2621 DPRINTF(("IEEE80211_MODE_AUTO\n"));
2622 break;
2623 default:
2624 panic("unknown mode %d", ic->ic_curmode);
2625 }
2626
2627 switch (sc->sc_80211_ioc_auth) {
2628 case IEEE80211_AUTH_NONE0:
2629 auth = PGT_AUTH_MODE_NONE0;
2630 break;
2631 case IEEE80211_AUTH_OPEN1:
2632 auth = PGT_AUTH_MODE_OPEN1;
2633 break;
2634 default:
2635 auth = PGT_AUTH_MODE_SHARED2;
2636 break;
2637 }
2638
2639 if (sc->sc_ic.ic_flags & IEEE80211_F_WEPON0x00000100) {
2640 wep = 1;
2641 exunencrypted = 1;
2642 } else {
2643 wep = 0;
2644 exunencrypted = 0;
2645 }
2646
2647 mlme = htole32(PGT_MLME_AUTO_LEVEL_AUTO)((__uint32_t)(0));
2648 wep = htole32(wep)((__uint32_t)(wep));
2649 exunencrypted = htole32(exunencrypted)((__uint32_t)(exunencrypted));
2650 profile = htole32(profile)((__uint32_t)(profile));
2651 preamble = htole32(preamble)((__uint32_t)(preamble));
2652 bsstype = htole32(bsstype)((__uint32_t)(bsstype));
2653 config = htole32(config)((__uint32_t)(config));
2654 mode = htole32(mode)((__uint32_t)(mode));
2655
2656 if (!wep || !sc->sc_dot1x)
2657 dot1x = PGT_DOT1X_AUTH_NONE0;
2658 dot1x = htole32(dot1x)((__uint32_t)(dot1x));
2659 auth = htole32(auth)((__uint32_t)(auth));
2660
2661 if (ic->ic_flags & IEEE80211_F_SHSLOT0x00020000)
2662 slot = htole32(PGT_OID_SLOT_MODE_SHORT)((__uint32_t)(1));
2663 else
2664 slot = htole32(PGT_OID_SLOT_MODE_DYNAMIC)((__uint32_t)(2));
2665
2666 if (ic->ic_des_chan == IEEE80211_CHAN_ANYC((struct ieee80211_channel *) ((void *)0))) {
2667 if (keepassoc)
2668 channel = 0;
2669 else
2670 channel = ieee80211_chan2ieee(ic, ic->ic_bss->ni_chan);
2671 } else
2672 channel = ieee80211_chan2ieee(ic, ic->ic_des_chan);
2673
2674 DPRINTF(("%s: set rates", sc->sc_dev.dv_xname));
2675 for (i = 0; i < ic->ic_sup_rates[ic->ic_curmode].rs_nrates; i++) {
2676 availrates[i] = ic->ic_sup_rates[ic->ic_curmode].rs_rates[i];
2677 DPRINTF((" %d", availrates[i]));
2678 }
2679 DPRINTF(("\n"));
2680 availrates[i++] = 0;
2681
2682 essid.pos_length = min(ic->ic_des_esslen, sizeof(essid.pos_ssid));
2683 memcpy(&essid.pos_ssid, ic->ic_des_essid, essid.pos_length)__builtin_memcpy((&essid.pos_ssid), (ic->ic_des_essid)
, (essid.pos_length))
;
2684
2685 s = splnet()splraise(0x4);
2686 for (success = 0; success == 0; success = 1) {
2687 SETOID(PGT_OID_PROFILE, &profile, sizeof(profile)){ if (pgt_oid_set(sc, PGT_OID_PROFILE, &profile, sizeof(profile
)) != 0) break; }
;
2688 SETOID(PGT_OID_CONFIG, &config, sizeof(config)){ if (pgt_oid_set(sc, PGT_OID_CONFIG, &config, sizeof(config
)) != 0) break; }
;
2689 SETOID(PGT_OID_MLME_AUTO_LEVEL, &mlme, sizeof(mlme)){ if (pgt_oid_set(sc, PGT_OID_MLME_AUTO_LEVEL, &mlme, sizeof
(mlme)) != 0) break; }
;
2690
2691 if (!IEEE80211_ADDR_EQ(ic->ic_myaddr, ac->ac_enaddr)(__builtin_memcmp((ic->ic_myaddr), (ac->ac_enaddr), (6)
) == 0)
) {
2692 SETOID(PGT_OID_MAC_ADDRESS, ac->ac_enaddr,{ if (pgt_oid_set(sc, PGT_OID_MAC_ADDRESS, ac->ac_enaddr, sizeof
(ac->ac_enaddr)) != 0) break; }
2693 sizeof(ac->ac_enaddr)){ if (pgt_oid_set(sc, PGT_OID_MAC_ADDRESS, ac->ac_enaddr, sizeof
(ac->ac_enaddr)) != 0) break; }
;
2694 IEEE80211_ADDR_COPY(ic->ic_myaddr, ac->ac_enaddr)__builtin_memcpy((ic->ic_myaddr), (ac->ac_enaddr), (6));
2695 }
2696
2697 SETOID(PGT_OID_MODE, &mode, sizeof(mode)){ if (pgt_oid_set(sc, PGT_OID_MODE, &mode, sizeof(mode)) !=
0) break; }
;
2698 SETOID(PGT_OID_BSS_TYPE, &bsstype, sizeof(bsstype)){ if (pgt_oid_set(sc, PGT_OID_BSS_TYPE, &bsstype, sizeof(
bsstype)) != 0) break; }
;
2699
2700 if (channel != 0 && channel != IEEE80211_CHAN_ANY0xffff)
2701 SETOID(PGT_OID_CHANNEL, &channel, sizeof(channel)){ if (pgt_oid_set(sc, PGT_OID_CHANNEL, &channel, sizeof(channel
)) != 0) break; }
;
2702
2703 if (ic->ic_flags & IEEE80211_F_DESBSSID0x00000800) {
2704 SETOID(PGT_OID_BSSID, ic->ic_des_bssid,{ if (pgt_oid_set(sc, PGT_OID_BSSID, ic->ic_des_bssid, sizeof
(ic->ic_des_bssid)) != 0) break; }
2705 sizeof(ic->ic_des_bssid)){ if (pgt_oid_set(sc, PGT_OID_BSSID, ic->ic_des_bssid, sizeof
(ic->ic_des_bssid)) != 0) break; }
;
2706 } else if (keepassoc) {
2707 SETOID(PGT_OID_BSSID, ic->ic_bss->ni_bssid,{ if (pgt_oid_set(sc, PGT_OID_BSSID, ic->ic_bss->ni_bssid
, sizeof(ic->ic_bss->ni_bssid)) != 0) break; }
2708 sizeof(ic->ic_bss->ni_bssid)){ if (pgt_oid_set(sc, PGT_OID_BSSID, ic->ic_bss->ni_bssid
, sizeof(ic->ic_bss->ni_bssid)) != 0) break; }
;
2709 }
2710
2711 SETOID(PGT_OID_SSID, &essid, sizeof(essid)){ if (pgt_oid_set(sc, PGT_OID_SSID, &essid, sizeof(essid)
) != 0) break; }
;
2712
2713 if (ic->ic_des_esslen > 0)
2714 SETOID(PGT_OID_SSID_OVERRIDE, &essid, sizeof(essid)){ if (pgt_oid_set(sc, PGT_OID_SSID_OVERRIDE, &essid, sizeof
(essid)) != 0) break; }
;
2715
2716 SETOID(PGT_OID_RATES, &availrates, i){ if (pgt_oid_set(sc, PGT_OID_RATES, &availrates, i) != 0
) break; }
;
2717 SETOID(PGT_OID_EXTENDED_RATES, &availrates, i){ if (pgt_oid_set(sc, PGT_OID_EXTENDED_RATES, &availrates
, i) != 0) break; }
;
2718 SETOID(PGT_OID_PREAMBLE_MODE, &preamble, sizeof(preamble)){ if (pgt_oid_set(sc, PGT_OID_PREAMBLE_MODE, &preamble, sizeof
(preamble)) != 0) break; }
;
2719 SETOID(PGT_OID_SLOT_MODE, &slot, sizeof(slot)){ if (pgt_oid_set(sc, PGT_OID_SLOT_MODE, &slot, sizeof(slot
)) != 0) break; }
;
2720 SETOID(PGT_OID_AUTH_MODE, &auth, sizeof(auth)){ if (pgt_oid_set(sc, PGT_OID_AUTH_MODE, &auth, sizeof(auth
)) != 0) break; }
;
2721 SETOID(PGT_OID_EXCLUDE_UNENCRYPTED, &exunencrypted,{ if (pgt_oid_set(sc, PGT_OID_EXCLUDE_UNENCRYPTED, &exunencrypted
, sizeof(exunencrypted)) != 0) break; }
2722 sizeof(exunencrypted)){ if (pgt_oid_set(sc, PGT_OID_EXCLUDE_UNENCRYPTED, &exunencrypted
, sizeof(exunencrypted)) != 0) break; }
;
2723 SETOID(PGT_OID_DOT1X, &dot1x, sizeof(dot1x)){ if (pgt_oid_set(sc, PGT_OID_DOT1X, &dot1x, sizeof(dot1x
)) != 0) break; }
;
2724 SETOID(PGT_OID_PRIVACY_INVOKED, &wep, sizeof(wep)){ if (pgt_oid_set(sc, PGT_OID_PRIVACY_INVOKED, &wep, sizeof
(wep)) != 0) break; }
;
2725 /*
2726 * Setting WEP key(s)
2727 */
2728 if (letoh32(wep) != 0) {
2729 keyobj.pok_type = PGT_OBJ_KEY_TYPE_WEP;
2730 /* key 1 */
2731 keyobj.pok_length = min(sizeof(keyobj.pok_key),
2732 IEEE80211_KEYBUF_SIZE);
2733 keyobj.pok_length = min(keyobj.pok_length,
2734 ic->ic_nw_keys[0].k_len);
2735 bcopy(ic->ic_nw_keys[0].k_key, keyobj.pok_key,
2736 keyobj.pok_length);
2737 SETOID(PGT_OID_DEFAULT_KEY0, &keyobj, sizeof(keyobj));
2738 /* key 2 */
2739 keyobj.pok_length = min(sizeof(keyobj.pok_key),
2740 IEEE80211_KEYBUF_SIZE);
2741 keyobj.pok_length = min(keyobj.pok_length,
2742 ic->ic_nw_keys[1].k_len);
2743 bcopy(ic->ic_nw_keys[1].k_key, keyobj.pok_key,
2744 keyobj.pok_length);
2745 SETOID(PGT_OID_DEFAULT_KEY1, &keyobj, sizeof(keyobj));
2746 /* key 3 */
2747 keyobj.pok_length = min(sizeof(keyobj.pok_key),
2748 IEEE80211_KEYBUF_SIZE);
2749 keyobj.pok_length = min(keyobj.pok_length,
2750 ic->ic_nw_keys[2].k_len);
2751 bcopy(ic->ic_nw_keys[2].k_key, keyobj.pok_key,
2752 keyobj.pok_length);
2753 SETOID(PGT_OID_DEFAULT_KEY2, &keyobj, sizeof(keyobj));
2754 /* key 4 */
2755 keyobj.pok_length = min(sizeof(keyobj.pok_key),
2756 IEEE80211_KEYBUF_SIZE);
2757 keyobj.pok_length = min(keyobj.pok_length,
2758 ic->ic_nw_keys[3].k_len);
2759 bcopy(ic->ic_nw_keys[3].k_key, keyobj.pok_key,
2760 keyobj.pok_length);
2761 SETOID(PGT_OID_DEFAULT_KEY3, &keyobj, sizeof(keyobj));
2762
2763 wepkey = htole32(ic->ic_wep_txkey);
2764 SETOID(PGT_OID_DEFAULT_KEYNUM, &wepkey, sizeof(wepkey));
2765 }
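
The four per-key blocks above differ only in the key index and OID; a
minimal loop sketch, assuming PGT_OID_DEFAULT_KEY0 through
PGT_OID_DEFAULT_KEY3 are consecutive enum values (not verified here)
and using net80211's IEEE80211_WEP_NKID (4):

	int j;	/* hypothetical loop index */

	for (j = 0; j < IEEE80211_WEP_NKID; j++) {
		keyobj.pok_length = min(sizeof(keyobj.pok_key),
		    IEEE80211_KEYBUF_SIZE);
		keyobj.pok_length = min(keyobj.pok_length,
		    ic->ic_nw_keys[j].k_len);
		bcopy(ic->ic_nw_keys[j].k_key, keyobj.pok_key,
		    keyobj.pok_length);
		/* assumes the four DEFAULT_KEY OIDs are consecutive */
		SETOID(PGT_OID_DEFAULT_KEY0 + j, &keyobj, sizeof(keyobj));
	}
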
2766 /* set mode again to commit */
2767 SETOID(PGT_OID_MODE, &mode, sizeof(mode));
2768 }
2769 splx(s);
2770
2771 if (success) {
2772 if (shouldbeup)
2773 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
2774 else
2775 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2776 } else {
2777 printf("%s: problem setting modes\n", sc->sc_dev.dv_xname);
2778 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2779 }
2780}
2781
2782void
2783pgt_hostap_handle_mlme(struct pgt_softc *sc, uint32_t oid,
2784 struct pgt_obj_mlme *mlme)
2785{
2786 struct ieee80211com *ic = &sc->sc_ic;
2787 struct pgt_ieee80211_node *pin;
2788 struct ieee80211_node *ni;
2789
2790 ni = ieee80211_find_node(ic, mlme->pom_address);
2791 pin = (struct pgt_ieee80211_node *)ni;
2792 switch (oid) {
2793 case PGT_OID_DISASSOCIATE:
2794 if (ni != NULL)
2795 ieee80211_release_node(&sc->sc_ic, ni);
2796 break;
2797 case PGT_OID_ASSOCIATE:
2798 if (ni == NULL) {
2799 ni = ieee80211_dup_bss(ic, mlme->pom_address);
2800 if (ni == NULL)
2801 break;
2802 ic->ic_newassoc(ic, ni, 1);
2803 pin = (struct pgt_ieee80211_node *)ni;
2804 }
2805 ni->ni_associd = letoh16(mlme->pom_id);
2806 pin->pin_mlme_state = letoh16(mlme->pom_state);
2807 break;
2808 default:
2809 if (pin != NULL)
2810 pin->pin_mlme_state = letoh16(mlme->pom_state);
2811 break;
2812 }
2813}
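
A sketch of the MLME trap object this handler consumes, inferred from
the letoh16() conversions and ether_sprintf() use above; the real
layout lives in the driver's pgt_obj_mlme definition, so the field
widths and packing here are assumptions:

	struct pgt_obj_mlme_sketch {	/* hypothetical mirror of pgt_obj_mlme */
		uint8_t		pom_address[6];	/* peer MAC address */
		uint16_t	pom_id;		/* association ID (little-endian) */
		uint16_t	pom_state;	/* MLME state (little-endian) */
		uint16_t	pom_code;	/* status/reason code (little-endian) */
	} __packed;
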
2814
2815/*
2816 * Either in response to an event or after a certain amount of time,
2817 * synchronize our idea of the network we're part of from the hardware.
2818 */
2819void
2820pgt_update_sw_from_hw(struct pgt_softc *sc, struct pgt_async_trap *pa,
2821 struct mbuf *args)
2822{
2823 struct ieee80211com *ic = &sc->sc_ic;
2824 struct pgt_obj_ssid ssid;
2825 struct pgt_obj_bss bss;
2826 uint32_t channel, noise, ls;
2827 int error, s;
2828
2829 if (pa != NULL) {
2830 struct pgt_obj_mlme *mlme;
2831 uint32_t oid;
2832
2833 oid = *mtod(args, uint32_t *);
2834 m_adj(args, sizeof(uint32_t));
2835 if (sc->sc_debug & SC_DEBUG_TRAP)
2836 DPRINTF(("%s: trap: oid %#x len %u\n",
2837 sc->sc_dev.dv_xname, oid, args->m_len));
2838 switch (oid) {
2839 case PGT_OID_LINK_STATE:
2840 if (args->m_len < sizeof(uint32_t))
2841 break;
2842 ls = letoh32(*mtod(args, uint32_t *));
2843 if (sc->sc_debug & (SC_DEBUG_TRAP | SC_DEBUG_LINK))
2844 DPRINTF(("%s: %s: link rate %u\n",
2845 sc->sc_dev.dv_xname, __func__, ls));
2846 if (ls)
2847 ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2848 else
2849 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
2850 goto gotlinkstate;
2851 case PGT_OID_DEAUTHENTICATE:
2852 case PGT_OID_AUTHENTICATE:
2853 case PGT_OID_DISASSOCIATE:
2854 case PGT_OID_ASSOCIATE:
2855 if (args->m_len < sizeof(struct pgt_obj_mlme))
2856 break;
2857 mlme = mtod(args, struct pgt_obj_mlme *);
2858 if (sc->sc_debug & SC_DEBUG_TRAP)
2859 DPRINTF(("%s: mlme: address "
2860 "%s id 0x%02x state 0x%02x code 0x%02x\n",
2861 sc->sc_dev.dv_xname,
2862 ether_sprintf(mlme->pom_address),
2863 letoh16(mlme->pom_id),
2864 letoh16(mlme->pom_state),
2865 letoh16(mlme->pom_code)));
2866#ifndef IEEE80211_STA_ONLY
2867 if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2868 pgt_hostap_handle_mlme(sc, oid, mlme);
2869#endif
2870 break;
2871 }
2872 return;
2873 }
2874 if (ic->ic_state == IEEE80211_S_SCAN) {
2875 s = splnet();
2876 error = pgt_oid_get(sc, PGT_OID_LINK_STATE, &ls, sizeof(ls));
2877 splx(s);
2878 if (error)
2879 return;
2880 DPRINTF(("%s: up_sw_from_hw: link %u\n", sc->sc_dev.dv_xname,
2881 htole32(ls)));
2882 if (ls != 0)
2883 ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2884 }
2885
2886gotlinkstate:
2887 s = splnet();
2888 if (pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise, sizeof(noise)) != 0)
2889 goto out;
2890 sc->sc_noise = letoh32(noise);
2891 if (ic->ic_state == IEEE80211_S_RUN) {
2892 if (pgt_oid_get(sc, PGT_OID_CHANNEL, &channel,
2893 sizeof(channel)) != 0)
2894 goto out;
2895 channel = min(letoh32(channel), IEEE80211_CHAN_MAX);
2896 ic->ic_bss->ni_chan = &ic->ic_channels[channel];
2897 if (pgt_oid_get(sc, PGT_OID_BSSID, ic->ic_bss->ni_bssid,
2898 sizeof(ic->ic_bss->ni_bssid)) != 0)
2899 goto out;
2900 IEEE80211_ADDR_COPY(&bss.pob_address, ic->ic_bss->ni_bssid);
2901 error = pgt_oid_retrieve(sc, PGT_OID_BSS_FIND, &bss,
2902 sizeof(bss));
2903 if (error == 0)
2904 ic->ic_bss->ni_rssi = bss.pob_rssi;
2905 else if (error != EPERM)
2906 goto out;
2907 error = pgt_oid_get(sc, PGT_OID_SSID, &ssid, sizeof(ssid));
2908 if (error)
2909 goto out;
2910 ic->ic_bss->ni_esslen = min(ssid.pos_length,
2911 sizeof(ic->ic_bss->ni_essid));
2912 memcpy(ic->ic_bss->ni_essid, ssid.pos_ssid,
2913 ssid.pos_length);
2914 }
2915
2916out:
2917 splx(s);
2918}
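
Each firmware object access in this function follows the same shape:
raise the IPL with splnet(), perform the pgt_oid_get() exchange, drop
the IPL, then convert the result from the device's little-endian
representation. A minimal sketch of that pattern as a standalone
helper (the helper name is hypothetical):

	static int
	pgt_get_u32_oid(struct pgt_softc *sc, uint32_t oid, uint32_t *out)
	{
		uint32_t v;
		int error, s;

		s = splnet();	/* serialize against the interrupt path */
		error = pgt_oid_get(sc, oid, &v, sizeof(v));
		splx(s);
		if (error == 0)
			*out = letoh32(v);	/* device data is little-endian */
		return (error);
	}
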
2919
2920int
2921pgt_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
2922{
2923 struct pgt_softc *sc = ic->ic_if.if_softc;
2924 enum ieee80211_state ostate;
2925
2926 ostate = ic->ic_state;
2927
2928 DPRINTF(("%s: newstate %s -> %s\n", sc->sc_dev.dv_xname,
2929 ieee80211_state_name[ostate], ieee80211_state_name[nstate]));
2930
2931 switch (nstate) {
2932 case IEEE80211_S_INIT:
2933 if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] == 0)
2934 ic->ic_if.if_timer = 0;
2935 ic->ic_mgt_timer = 0;
2936 ic->ic_flags &= ~IEEE80211_F_SIBSS;
2937 ieee80211_free_allnodes(ic, 1);
2938 ieee80211_set_link_state(ic, LINK_STATE_DOWN);
2939 break;
2940 case IEEE80211_S_SCAN:
2941 ic->ic_if.if_timer = 1;
2942 ic->ic_mgt_timer = 0;
2943 ieee80211_node_cleanup(ic, ic->ic_bss);
2944 ieee80211_set_link_state(ic, LINK_STATE_DOWN);
2945#ifndef IEEE80211_STA_ONLY
2946 /* Just use any old channel; we override it anyway. */
2947 if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2948 ieee80211_create_ibss(ic, ic->ic_ibss_chan);
2949#endif
2950 break;
2951 case IEEE80211_S_RUN:
2952 ic->ic_if.if_timer = 1;
2953 break;
2954 default:
2955 break;
2956 }
2957
2958 return (sc->sc_newstate(ic, nstate, arg));
2959}
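
pgt_newstate() ends by chaining to sc->sc_newstate, the usual net80211
wrapper arrangement: the attach path (outside this excerpt) is expected
to save the stack's handler and install the driver's own, roughly:

	sc->sc_newstate = ic->ic_newstate;	/* save net80211's handler */
	ic->ic_newstate = pgt_newstate;		/* driver sees transitions first */
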
2960
2961int
2962pgt_drain_tx_queue(struct pgt_softc *sc, enum pgt_queue pq)
2963{
2964 int wokeup = 0;
2965
2966 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
2967 sc->sc_cbdmam->dm_mapsize,
2968 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
2969 sc->sc_cb->pcb_device_curfrag[pq] =
2970 sc->sc_cb->pcb_driver_curfrag[pq];
2971 bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
2972 sc->sc_cbdmam->dm_mapsize,
2973 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
2974 while (!TAILQ_EMPTY(&sc->sc_dirtyq[pq])) {
2975 struct pgt_desc *pd;
2976
2977 pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]);
2978 TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
2979 sc->sc_dirtyq_count[pq]--;
2980 TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
2981 sc->sc_freeq_count[pq]++;
2982 pgt_unload_tx_desc_frag(sc, pd);
2983 if (sc->sc_debug & SC_DEBUG_QUEUES)
2984 DPRINTF(("%s: queue: tx %u <- [%u] (drained)\n",
2985 sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
2986 wokeup++;
2987 if (pgt_queue_is_data(pq))
2988 sc->sc_ic.ic_if.if_oerrors++;
2989 }
2990
2991 return (wokeup);
2992}
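
The paired bus_dmamap_sync() calls above bracket the CPU's update of
the control block it shares with the device: POSTREAD|PREWRITE before
the CPU touches pcb_device_curfrag, POSTWRITE|PREREAD afterwards, so
both sides observe a coherent view of the DMA memory. Schematically:

	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	/* ... CPU reads and writes the shared sc->sc_cb fields ... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
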
2993
2994int
2995pgt_dma_alloc(struct pgt_softc *sc)
2996{
2997 size_t size;
2998 int i, error, nsegs;
2999
3000 for (i = 0; i < PGT_QUEUE_COUNT; i++) {
3001 TAILQ_INIT(&sc->sc_freeq[i]);
3002 TAILQ_INIT(&sc->sc_dirtyq[i]);
3003 }
3004
3005 /*
3006 * control block
3007 */
3008 size = sizeof(struct pgt_control_block);
3009
3010 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
3011 BUS_DMA_NOWAIT, &sc->sc_cbdmam);
3012 if (error != 0) {
3013 printf("%s: can not create DMA tag for control block\n",
3014 sc->sc_dev.dv_xname);
3015 goto out;
3016 }
3017
3018 error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
3019 0, &sc->sc_cbdmas, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
3020 if (error != 0) {
3021 printf("%s: can not allocate DMA memory for control block\n",
3022 sc->sc_dev.dv_xname);
3023 goto out;
3024 }
3025
3026 error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cbdmas, nsegs,
3027 size, (caddr_t *)&sc->sc_cb, BUS_DMA_NOWAIT);
3028 if (error != 0) {
3029 printf("%s: can not map DMA memory for control block\n",
3030 sc->sc_dev.dv_xname);
3031 goto out;
3032 }
3033
3034 error = bus_dmamap_load(sc->sc_dmat, sc->sc_cbdmam,
3035 sc->sc_cb, size, NULL, BUS_DMA_NOWAIT);
3036 if (error != 0) {
3037 printf("%s: can not load DMA map for control block\n",
3038 sc->sc_dev.dv_xname);
3039 goto out;
3040 }
3041
3042 /*
3043 * powersave
3044 */
3045 size = PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT;
3046
3047 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
3048 BUS_DMA_ALLOCNOW, &sc->sc_psmdmam);
3049 if (error != 0) {
3050 printf("%s: can not create DMA tag for powersave\n",
3051 sc->sc_dev.dv_xname);
3052 goto out;
3053 }
3054
3055 error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
3056 0, &sc->sc_psmdmas, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
3057 if (error != 0) {
3058 printf("%s: can not allocate DMA memory for powersave\n",
3059 sc->sc_dev.dv_xname);
3060 goto out;
3061 }
3062
3063 error = bus_dmamem_map(sc->sc_dmat, &sc->sc_psmdmas, nsegs,
3064 size, (caddr_t *)&sc->sc_psmbuf, BUS_DMA_NOWAIT);
3065 if (error != 0) {
3066 printf("%s: can not map DMA memory for powersave\n",
3067 sc->sc_dev.dv_xname);
3068 goto out;
3069 }
3070
3071 error = bus_dmamap_load(sc->sc_dmat, sc->sc_psmdmam,
3072 sc->sc_psmbuf, size, NULL, BUS_DMA_WAITOK);
3073 if (error != 0) {
3074 printf("%s: can not load DMA map for powersave\n",
3075 sc->sc_dev.dv_xname);
3076 goto out;
3077 }
3078
3079 /*
3080 * fragments
3081 */
3082 error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_RX);
3083 if (error != 0)
3084 goto out;
3085
3086 error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_TX);
3087 if (error != 0)
3088 goto out;
3089
3090 error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
3091 if (error != 0)
3092 goto out;
3093
3094 error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
3095 if (error != 0)
3096 goto out;
3097
3098 error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_RX);
3099 if (error != 0)
3100 goto out;
3101
3102 error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_TX);
3103 if (error != 0)
3104 goto out;
3105
3106out:
3107 if (error) {
3108 printf("%s: error in DMA allocation\n", sc->sc_dev.dv_xname);
3109 pgt_dma_free(sc);
3110 }
3111
3112 return (error);
3113}
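
Both the control block and the powersave buffer above go through the
same four bus_dma(9) steps; condensed into one sketch (error handling
and cleanup omitted, variable names illustrative):

	bus_dma_segment_t seg;
	bus_dmamap_t map;
	caddr_t kva;
	int nsegs;

	/* 1. create a DMA map describing the transfer limits */
	bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &map);
	/* 2. allocate DMA-safe memory */
	bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    &seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	/* 3. map it into kernel virtual address space */
	bus_dmamem_map(sc->sc_dmat, &seg, nsegs, size, &kva,
	    BUS_DMA_NOWAIT);
	/* 4. load the map so the device gets its bus addresses */
	bus_dmamap_load(sc->sc_dmat, map, kva, size, NULL,
	    BUS_DMA_NOWAIT);
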
3114
3115int
3116pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq)
3117{
3118 struct pgt_desc *pd;
3119 size_t i, qsize;
3120 int error, nsegs;
3121
3122 switch (pq) {
3123 case PGT_QUEUE_DATA_LOW_RX:
3124 qsize = PGT_QUEUE_DATA_RX_SIZE;
3125 break;
3126 case PGT_QUEUE_DATA_LOW_TX:
3127 qsize = PGT_QUEUE_DATA_TX_SIZE;
3128 break;
3129 case PGT_QUEUE_DATA_HIGH_RX:
3130 qsize = PGT_QUEUE_DATA_RX_SIZE;
3131 break;
3132 case PGT_QUEUE_DATA_HIGH_TX:
3133 qsize = PGT_QUEUE_DATA_TX_SIZE;
3134 break;
3135 case PGT_QUEUE_MGMT_RX:
3136 qsize = PGT_QUEUE_MGMT_SIZE;
3137 break;
3138 case PGT_QUEUE_MGMT_TX:
3139 qsize = PGT_QUEUE_MGMT_SIZE;
3140 break;
3141 default:
3142 return (EINVAL);
3143 }
3144
3145 for (i = 0; i < qsize; i++) {
3146 pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3147
3148 error = bus_dmamap_create(sc->sc_dmat, PGT_FRAG_SIZE, 1,
3149 PGT_FRAG_SIZE, 0, BUS_DMA_ALLOCNOW, &pd->pd_dmam);
3150 if (error != 0) {
3151 printf("%s: can not create DMA tag for fragment\n",
3152 sc->sc_dev.dv_xname);
3153 free(pd, M_DEVBUF, 0);
3154 break;
3155 }
3156
3157 error = bus_dmamem_alloc(sc->sc_dmat, PGT_FRAG_SIZE, PAGE_SIZE,
3158 0, &pd->pd_dmas, 1, &nsegs, BUS_DMA_WAITOK);
3159 if (error != 0) {
3160 printf("%s: error alloc frag %zu on queue %u\n",
3161 sc->sc_dev.dv_xname, i, pq);
3162 free(pd, M_DEVBUF, 0);
3163 break;
3164 }
3165
3166 error = bus_dmamem_map(sc->sc_dmat, &pd->pd_dmas, nsegs,
3167 PGT_FRAG_SIZE, (caddr_t *)&pd->pd_mem, BUS_DMA_WAITOK);
3168 if (error != 0) {
3169 printf("%s: error map frag %zu on queue %u\n",
3170 sc->sc_dev.dv_xname, i, pq);
3171 free(pd, M_DEVBUF, 0);
3172 break;
3173 }
3174
3175 if (pgt_queue_is_rx(pq)) {
3176 error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam,
3177 pd->pd_mem, PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
3178 if (error != 0) {
3179 printf("%s: error load frag %zu on queue %u\n",
3180 sc->sc_dev.dv_xname, i, pq);
3181 bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas,
3182 nsegs);
3183 free(pd, M_DEVBUF, 0);
3184 break;
3185 }
3186 pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
3187 }
3188 TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
3189 }
3190
3191 return (error);
3192}
3193
3194void
3195pgt_dma_free(struct pgt_softc *sc)
3196{
3197 /*
3198 * fragments
3199 */
3200 if (sc->sc_dmat != NULL) {
3201 pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_RX);
3202 pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_TX);
3203 pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
3204 pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
3205 pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_RX);
3206 pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_TX);
3207 }
3208
3209 /*
3210 * powersave
3211 */
3212 if (sc->sc_psmbuf != NULL) {
3213 bus_dmamap_unload(sc->sc_dmat, sc->sc_psmdmam);
3214 bus_dmamem_free(sc->sc_dmat, &sc->sc_psmdmas, 1);
3215 sc->sc_psmbuf = NULL;
3216 sc->sc_psmdmam = NULL;
3217 }
3218
3219 /*
3220 * control block
3221 */
3222 if (sc->sc_cb != NULL) {
3223 bus_dmamap_unload(sc->sc_dmat, sc->sc_cbdmam);
3224 bus_dmamem_free(sc->sc_dmat, &sc->sc_cbdmas, 1);
3225 sc->sc_cb = NULL;
3226 sc->sc_cbdmam = NULL;
3227 }
3228}
3229
3230void
3231pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq)
3232{
3233 struct pgt_desc *pd;
3234
3235 while (!TAILQ_EMPTY(&sc->sc_freeq[pq])) {
3236 pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
3237 TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
3238 if (pd->pd_dmam != NULL) {
3239 bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
3240 pd->pd_dmam = NULL;
3241 }
3242 bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas, 1);
3243 free(pd, M_DEVBUF, 0);
3244 }
3245}
3246
3247int
3248pgt_activate(struct device *self, int act)
3249{
3250 struct pgt_softc *sc = (struct pgt_softc *)self;
3251 struct ifnet *ifp = &sc->sc_ic.ic_if;
3252
3253 DPRINTF(("%s: %s(%d)\n", sc->sc_dev.dv_xname, __func__, act));
3254
3255 switch (act) {
3256 case DVACT_SUSPEND:
3257 if (ifp->if_flags & IFF_RUNNING) {
3258 pgt_stop(sc, SC_NEEDS_RESET);
3259 pgt_update_hw_from_sw(sc, 0);
3260 }
3261 if (sc->sc_power != NULL)
3262 (*sc->sc_power)(sc, act);
3263 break;
3264 case DVACT_WAKEUP:
3265 pgt_wakeup(sc);
3266 break;
3267 }
3268 return 0;
3269}
3270
3271void
3272pgt_wakeup(struct pgt_softc *sc)
3273{
3274 struct ifnet *ifp = &sc->sc_ic.ic_if;
3275
3276 if (sc->sc_power != NULL)
3277 (*sc->sc_power)(sc, DVACT_RESUME);
3278
3279 pgt_stop(sc, SC_NEEDS_RESET);
3280 pgt_update_hw_from_sw(sc, 0);
3281
3282 if (ifp->if_flags & IFF_UP) {
3283 pgt_init(ifp);
3284 pgt_update_hw_from_sw(sc, 0);
3285 }
3286}