Bug Summary

File: dev/ic/ath.c
Warning: line 872, column 4
Value stored to 'wh' is never read

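The diagnostic comes from the analyzer's dead-store checker (deadcode.DeadStores). In ath_start(), at line 872 of the listing below, 'wh' is assigned from mtod() in the data-frame branch, but that branch never reads it afterwards; 'wh' is only dereferenced in the management-frame branch, which performs its own assignment. The fragment below is a minimal, self-contained sketch of that pattern with purely illustrative names (it is not the driver's code), and the note at the end describes one typical way such a report is silenced; whether the assignment should simply be removed in ath.c is a judgment for the maintainers.

#include <stdio.h>

struct frame {
	int subtype;
};

static void
handle(struct frame *f, int is_data)
{
	struct frame *wh;

	if (is_data) {
		/* deadcode.DeadStores: value stored to 'wh' is never read */
		wh = f;
	} else {
		/* here the store is consumed, so no warning */
		wh = f;
		printf("mgmt subtype %d\n", wh->subtype);
	}
}

int
main(void)
{
	struct frame f = { 0x50 };

	handle(&f, 1);	/* triggers the dead-store report */
	handle(&f, 0);	/* clean */
	return 0;
}

A common fix for this class of warning is to delete the unused assignment, or to declare the pointer inside the branch that actually reads it; either way the generated code is unchanged because the stored value was never consumed.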
Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ath.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/ic/ath.c
1/* $OpenBSD: ath.c,v 1.122 2020/10/11 07:05:28 mpi Exp $ */
2/* $NetBSD: ath.c,v 1.37 2004/08/18 21:59:39 dyoung Exp $ */
3
4/*-
5 * Copyright (c) 2002-2004 Sam Leffler, Errno Consulting
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer,
13 * without modification.
14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
16 * redistribution must be conditioned upon including a substantially
17 * similar Disclaimer requirement for further binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * NO WARRANTY
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
26 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
27 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
28 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
31 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33 * THE POSSIBILITY OF SUCH DAMAGES.
34 */
35
36/*
37 * Driver for the Atheros Wireless LAN controller.
38 *
39 * This software is derived from work of Atsushi Onoe; his contribution
40 * is greatly appreciated. It has been modified for OpenBSD to use an
41 * open source HAL instead of the original binary-only HAL.
42 */
43
44#include "bpfilter.h"
45
46#include <sys/param.h>
47#include <sys/systm.h>
48#include <sys/mbuf.h>
49#include <sys/malloc.h>
50#include <sys/lock.h>
51#include <sys/kernel.h>
52#include <sys/socket.h>
53#include <sys/sockio.h>
54#include <sys/device.h>
55#include <sys/errno.h>
56#include <sys/timeout.h>
57#include <sys/gpio.h>
58#include <sys/endian.h>
59
60#include <machine/bus.h>
61
62#include <net/if.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65#if NBPFILTER1 > 0
66#include <net/bpf.h>
67#endif
68#include <netinet/in.h>
69#include <netinet/if_ether.h>
70
71#include <net80211/ieee80211_var.h>
72#include <net80211/ieee80211_rssadapt.h>
73
74#include <dev/pci/pcidevs.h>
75#include <dev/gpio/gpiovar.h>
76
77#include <dev/ic/athvar.h>
78
79int ath_init(struct ifnet *);
80int ath_init1(struct ath_softc *);
81int ath_intr1(struct ath_softc *);
82void ath_stop(struct ifnet *);
83void ath_start(struct ifnet *);
84void ath_reset(struct ath_softc *, int);
85int ath_media_change(struct ifnet *);
86void ath_watchdog(struct ifnet *);
87int ath_ioctl(struct ifnet *, u_long, caddr_t);
88void ath_fatal_proc(void *, int);
89void ath_rxorn_proc(void *, int);
90void ath_bmiss_proc(void *, int);
91int ath_initkeytable(struct ath_softc *);
92void ath_mcastfilter_accum(caddr_t, u_int32_t (*)[2]);
93void ath_mcastfilter_compute(struct ath_softc *, u_int32_t (*)[2]);
94u_int32_t ath_calcrxfilter(struct ath_softc *);
95void ath_mode_init(struct ath_softc *);
96#ifndef IEEE80211_STA_ONLY
97int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
98void ath_beacon_proc(void *, int);
99void ath_beacon_free(struct ath_softc *);
100#endif
101void ath_beacon_config(struct ath_softc *);
102int ath_desc_alloc(struct ath_softc *);
103void ath_desc_free(struct ath_softc *);
104struct ieee80211_node *ath_node_alloc(struct ieee80211com *);
105struct mbuf *ath_getmbuf(int, int, u_int);
106void ath_node_free(struct ieee80211com *, struct ieee80211_node *);
107void ath_node_copy(struct ieee80211com *,
108 struct ieee80211_node *, const struct ieee80211_node *);
109u_int8_t ath_node_getrssi(struct ieee80211com *,
110 const struct ieee80211_node *);
111int ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
112void ath_rx_proc(void *, int);
113int ath_tx_start(struct ath_softc *, struct ieee80211_node *,
114 struct ath_buf *, struct mbuf *);
115void ath_tx_proc(void *, int);
116int ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
117void ath_draintxq(struct ath_softc *);
118void ath_stoprecv(struct ath_softc *);
119int ath_startrecv(struct ath_softc *);
120void ath_next_scan(void *);
121int ath_set_slot_time(struct ath_softc *);
122void ath_calibrate(void *);
123void ath_ledstate(struct ath_softc *, enum ieee80211_state);
124int ath_newstate(struct ieee80211com *, enum ieee80211_state, int);
125void ath_newassoc(struct ieee80211com *,
126 struct ieee80211_node *, int);
127int ath_getchannels(struct ath_softc *, HAL_BOOL outdoor,
128 HAL_BOOL xchanmode);
129int ath_rate_setup(struct ath_softc *sc, u_int mode);
130void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
131void ath_rssadapt_updatenode(void *, struct ieee80211_node *);
132void ath_rssadapt_updatestats(void *);
133#ifndef IEEE80211_STA_ONLY
134void ath_recv_mgmt(struct ieee80211com *, struct mbuf *,
135 struct ieee80211_node *, struct ieee80211_rxinfo *, int);
136#endif
137void ath_disable(struct ath_softc *);
138
139int ath_gpio_attach(struct ath_softc *, u_int16_t);
140int ath_gpio_pin_read(void *, int);
141void ath_gpio_pin_write(void *, int, int);
142void ath_gpio_pin_ctl(void *, int, int);
143
144#ifdef AR_DEBUG
145void ath_printrxbuf(struct ath_buf *, int);
146void ath_printtxbuf(struct ath_buf *, int);
147int ath_debug = 0;
148#endif
149
150int ath_dwelltime = 200; /* 5 channels/second */
151int ath_calinterval = 30; /* calibrate every 30 secs */
152int ath_outdoor = AH_TRUE; /* outdoor operation */
153int ath_xchanmode = AH_TRUE; /* enable extended channels */
154int ath_softcrypto = 1; /* 1=enable software crypto */
155
156struct cfdriver ath_cd = {
157 NULL((void *)0), "ath", DV_IFNET
158};
159
160int
161ath_activate(struct device *self, int act)
162{
163 struct ath_softc *sc = (struct ath_softc *)self;
164 struct ifnet *ifp = &sc->sc_ic.ic_ific_ac.ac_if;
165
166 switch (act) {
167 case DVACT_SUSPEND3:
168 if (ifp->if_flags & IFF_RUNNING0x40) {
169 ath_stop(ifp);
170 if (sc->sc_power != NULL((void *)0))
171 (*sc->sc_power)(sc, act);
172 }
173 break;
174 case DVACT_RESUME4:
175 if (ifp->if_flags & IFF_UP0x1) {
176 ath_init(ifp);
177 if (ifp->if_flags & IFF_RUNNING0x40)
178 ath_start(ifp);
179 }
180 break;
181 }
182 return 0;
183}
184
185int
186ath_enable(struct ath_softc *sc)
187{
188 if (ATH_IS_ENABLED(sc)((sc)->sc_flags & 0x0002) == 0) {
189 if (sc->sc_enable != NULL((void *)0) && (*sc->sc_enable)(sc) != 0) {
190 printf("%s: device enable failed\n",
191 sc->sc_dev.dv_xname);
192 return (EIO5);
193 }
194 sc->sc_flags |= ATH_ENABLED0x0002;
195 }
196 return (0);
197}
198
199void
200ath_disable(struct ath_softc *sc)
201{
202 if (!ATH_IS_ENABLED(sc)((sc)->sc_flags & 0x0002))
203 return;
204 if (sc->sc_disable != NULL((void *)0))
205 (*sc->sc_disable)(sc);
206 sc->sc_flags &= ~ATH_ENABLED0x0002;
207}
208
209int
210ath_attach(u_int16_t devid, struct ath_softc *sc)
211{
212 struct ieee80211com *ic = &sc->sc_ic;
213 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
214 struct ath_hal *ah;
215 HAL_STATUS status;
216 HAL_TXQ_INFO qinfo;
217 int error = 0, i;
218
219 DPRINTF(ATH_DEBUG_ANY, ("%s: devid 0x%x\n", __func__, devid));
220
221 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ16);
222 sc->sc_flags &= ~ATH_ATTACHED0x0001; /* make sure that it's not attached */
223
224 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
225 sc->sc_pcie, &status);
226 if (ah == NULL((void *)0)) {
227 printf("%s: unable to attach hardware; HAL status %d\n",
228 ifp->if_xname, status);
229 error = ENXIO6;
230 goto bad;
231 }
232 if (ah->ah_abi != HAL_ABI_VERSION0x04090901) {
233 printf("%s: HAL ABI mismatch detected (0x%x != 0x%x)\n",
234 ifp->if_xname, ah->ah_abi, HAL_ABI_VERSION0x04090901);
235 error = ENXIO6;
236 goto bad;
237 }
238
239 if (ah->ah_single_chip == AH_TRUE) {
240 printf("%s: AR%s %u.%u phy %u.%u rf %u.%u", ifp->if_xname,
241 ar5k_printver(AR5K_VERSION_DEV, devid),
242 ah->ah_mac_version, ah->ah_mac_revision,
243 ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf,
244 ah->ah_radio_5ghz_revision >> 4,
245 ah->ah_radio_5ghz_revision & 0xf);
246 } else {
247 printf("%s: AR%s %u.%u phy %u.%u", ifp->if_xname,
248 ar5k_printver(AR5K_VERSION_VER, ah->ah_mac_srev),
249 ah->ah_mac_version, ah->ah_mac_revision,
250 ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf);
251 printf(" rf%s %u.%u",
252 ar5k_printver(AR5K_VERSION_RAD, ah->ah_radio_5ghz_revision),
253 ah->ah_radio_5ghz_revision >> 4,
254 ah->ah_radio_5ghz_revision & 0xf);
255 if (ah->ah_radio_2ghz_revision != 0) {
256 printf(" rf%s %u.%u",
257 ar5k_printver(AR5K_VERSION_RAD,
258 ah->ah_radio_2ghz_revision),
259 ah->ah_radio_2ghz_revision >> 4,
260 ah->ah_radio_2ghz_revision & 0xf);
261 }
262 }
263 if (ah->ah_ee_versionah_capabilities.cap_eeprom.ee_version == AR5K_EEPROM_VERSION_4_70x3007)
264 printf(" eeprom 4.7");
265 else
266 printf(" eeprom %1x.%1x", ah->ah_ee_versionah_capabilities.cap_eeprom.ee_version >> 12,
267 ah->ah_ee_versionah_capabilities.cap_eeprom.ee_version & 0xff);
268
269#if 0
270 if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_UNSUPP0xff ||
271 ah->ah_radio_2ghz_revision >= AR5K_SREV_RAD_UNSUPP0xff) {
272 printf(": RF radio not supported\n");
273 error = EOPNOTSUPP45;
274 goto bad;
275 }
276#endif
277
278 sc->sc_ah = ah;
279 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */
280
281 /*
282 * Get regulation domain either stored in the EEPROM or defined
283 * as the default value. Some devices are known to have broken
284 * regulation domain values in their EEPROM.
285 */
286 ath_hal_get_regdomain(ah, &ah->ah_regdomain)(*(&ah->ah_capabilities.cap_regdomain.reg_current) = (
ah)->ah_get_regdomain(ah))
;
287
288 /*
289 * Construct channel list based on the current regulation domain.
290 */
291 error = ath_getchannels(sc, ath_outdoor, ath_xchanmode);
292 if (error != 0)
293 goto bad;
294
295 /*
296 * Setup rate tables for all potential media types.
297 */
298 ath_rate_setup(sc, IEEE80211_MODE_11A);
299 ath_rate_setup(sc, IEEE80211_MODE_11B);
300 ath_rate_setup(sc, IEEE80211_MODE_11G);
301
302 error = ath_desc_alloc(sc);
303 if (error != 0) {
304 printf(": failed to allocate descriptors: %d\n", error);
305 goto bad;
306 }
307 timeout_set(&sc->sc_scan_to, ath_next_scan, sc);
308 timeout_set(&sc->sc_cal_to, ath_calibrate, sc);
309 timeout_set(&sc->sc_rssadapt_to, ath_rssadapt_updatestats, sc);
310
311#ifdef __FreeBSD__
312 ATH_TXBUF_LOCK_INIT(sc)mtx_init;
313 ATH_TXQ_LOCK_INIT(sc)mtx_init;
314#endif
315
316 ATH_TASK_INIT(&sc->sc_txtask, ath_tx_proc, sc)do { (&sc->sc_txtask)->t_func = (ath_tx_proc); (&
sc->sc_txtask)->t_context = (sc); } while (0)
;
317 ATH_TASK_INIT(&sc->sc_rxtask, ath_rx_proc, sc)do { (&sc->sc_rxtask)->t_func = (ath_rx_proc); (&
sc->sc_rxtask)->t_context = (sc); } while (0)
;
318 ATH_TASK_INIT(&sc->sc_rxorntask, ath_rxorn_proc, sc)do { (&sc->sc_rxorntask)->t_func = (ath_rxorn_proc)
; (&sc->sc_rxorntask)->t_context = (sc); } while (0
)
;
319 ATH_TASK_INIT(&sc->sc_fataltask, ath_fatal_proc, sc)do { (&sc->sc_fataltask)->t_func = (ath_fatal_proc)
; (&sc->sc_fataltask)->t_context = (sc); } while (0
)
;
320 ATH_TASK_INIT(&sc->sc_bmisstask, ath_bmiss_proc, sc)do { (&sc->sc_bmisstask)->t_func = (ath_bmiss_proc)
; (&sc->sc_bmisstask)->t_context = (sc); } while (0
)
;
321#ifndef IEEE80211_STA_ONLY
322 ATH_TASK_INIT(&sc->sc_swbatask, ath_beacon_proc, sc)do { (&sc->sc_swbatask)->t_func = (ath_beacon_proc)
; (&sc->sc_swbatask)->t_context = (sc); } while (0)
;
323#endif
324
325 /*
326 * For now just pre-allocate one data queue and one
327 * beacon queue. Note that the HAL handles resetting
328 * them at the needed time. Eventually we'll want to
329 * allocate more tx queues for splitting management
330 * frames and for QOS support.
331 */
332 sc->sc_bhalq = ath_hal_setup_tx_queue(ah, HAL_TX_QUEUE_BEACON, NULL)((*(ah)->ah_setup_tx_queue)((ah), (HAL_TX_QUEUE_BEACON), (
((void *)0))))
;
333 if (sc->sc_bhalq == (u_int) -1) {
334 printf(": unable to setup a beacon xmit queue!\n");
335 goto bad2;
336 }
337
338 for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) {
339 bzero(&qinfo, sizeof(qinfo))__builtin_bzero((&qinfo), (sizeof(qinfo)));
340 qinfo.tqi_type = HAL_TX_QUEUE_DATA;
341 qinfo.tqi_subtype = i; /* should be mapped to WME types */
342 sc->sc_txhalq[i] = ath_hal_setup_tx_queue(ah,((*(ah)->ah_setup_tx_queue)((ah), (HAL_TX_QUEUE_DATA), (&
qinfo)))
343 HAL_TX_QUEUE_DATA, &qinfo)((*(ah)->ah_setup_tx_queue)((ah), (HAL_TX_QUEUE_DATA), (&
qinfo)))
;
344 if (sc->sc_txhalq[i] == (u_int) -1) {
345 printf(": unable to setup a data xmit queue %u!\n", i);
346 goto bad2;
347 }
348 }
349
350 ifp->if_softc = sc;
351 ifp->if_flags = IFF_SIMPLEX0x800 | IFF_BROADCAST0x2 | IFF_MULTICAST0x8000;
352 ifp->if_start = ath_start;
353 ifp->if_watchdog = ath_watchdog;
354 ifp->if_ioctl = ath_ioctl;
355#ifndef __OpenBSD__1
356 ifp->if_stop = ath_stop; /* XXX */
357#endif
358 ifq_set_maxlen(&ifp->if_snd, ATH_TXBUF * ATH_TXDESC)((&ifp->if_snd)->ifq_maxlen = (60 * 8));
359
360 ic->ic_softcic_ac.ac_if.if_softc = sc;
361 ic->ic_newassoc = ath_newassoc;
362 /* XXX not right but it's not used anywhere important */
363 ic->ic_phytype = IEEE80211_T_OFDM;
364 ic->ic_opmode = IEEE80211_M_STA;
365 ic->ic_caps = IEEE80211_C_WEP0x00000001 /* wep supported */
366 | IEEE80211_C_PMGT0x00000004 /* power management */
367#ifndef IEEE80211_STA_ONLY
368 | IEEE80211_C_IBSS0x00000002 /* ibss, nee adhoc, mode */
369 | IEEE80211_C_HOSTAP0x00000008 /* hostap mode */
370#endif
371 | IEEE80211_C_MONITOR0x00000200 /* monitor mode */
372 | IEEE80211_C_SHSLOT0x00000080 /* short slot time supported */
373 | IEEE80211_C_SHPREAMBLE0x00000100; /* short preamble supported */
374 if (ath_softcrypto)
375 ic->ic_caps |= IEEE80211_C_RSN0x00001000; /* wpa/rsn supported */
376
377 /*
378 * Not all chips have the VEOL support we want to use with
379 * IBSS beacon; check here for it.
380 */
381 sc->sc_veol = ath_hal_has_veol(ah)((*(ah)->ah_has_veol)((ah)));
382
383 /* get mac address from hardware */
384 ath_hal_get_lladdr(ah, ic->ic_myaddr)((*(ah)->ah_get_lladdr)((ah), (ic->ic_myaddr)));
385
386 if_attach(ifp);
387
388 /* call MI attach routine. */
389 ieee80211_ifattach(ifp);
390
391 /* override default methods */
392 ic->ic_node_alloc = ath_node_alloc;
393 sc->sc_node_free = ic->ic_node_free;
394 ic->ic_node_free = ath_node_free;
395 sc->sc_node_copy = ic->ic_node_copy;
396 ic->ic_node_copy = ath_node_copy;
397 ic->ic_node_getrssi = ath_node_getrssi;
398 sc->sc_newstate = ic->ic_newstate;
399 ic->ic_newstate = ath_newstate;
400#ifndef IEEE80211_STA_ONLY
401 sc->sc_recv_mgmt = ic->ic_recv_mgmt;
402 ic->ic_recv_mgmt = ath_recv_mgmt;
403#endif
404 ic->ic_max_rssi = AR5K_MAX_RSSI64;
405 bcopy(etherbroadcastaddr, sc->sc_broadcast_addr, IEEE80211_ADDR_LEN6);
406
407 /* complete initialization */
408 ieee80211_media_init(ifp, ath_media_change, ieee80211_media_status);
409
410#if NBPFILTER1 > 0
411 bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO127,
412 sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN64);
413
414 sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
415 bzero(&sc->sc_rxtapu, sc->sc_rxtap_len)__builtin_bzero((&sc->sc_rxtapu), (sc->sc_rxtap_len
))
;
416 sc->sc_rxtapsc_rxtapu.th.wr_ihdr.it_len = htole16(sc->sc_rxtap_len)((__uint16_t)(sc->sc_rxtap_len));
417 sc->sc_rxtapsc_rxtapu.th.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT)((__uint32_t)(( (1 << IEEE80211_RADIOTAP_FLAGS) | (1 <<
IEEE80211_RADIOTAP_RATE) | (1 << IEEE80211_RADIOTAP_CHANNEL
) | (1 << IEEE80211_RADIOTAP_ANTENNA) | (1 << IEEE80211_RADIOTAP_RSSI
) | 0)))
;
418
419 sc->sc_txtap_len = sizeof(sc->sc_txtapu);
420 bzero(&sc->sc_txtapu, sc->sc_txtap_len)__builtin_bzero((&sc->sc_txtapu), (sc->sc_txtap_len
))
;
421 sc->sc_txtapsc_txtapu.th.wt_ihdr.it_len = htole16(sc->sc_txtap_len)((__uint16_t)(sc->sc_txtap_len));
422 sc->sc_txtapsc_txtapu.th.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT)((__uint32_t)(( (1 << IEEE80211_RADIOTAP_FLAGS) | (1 <<
IEEE80211_RADIOTAP_RATE) | (1 << IEEE80211_RADIOTAP_CHANNEL
) | (1 << IEEE80211_RADIOTAP_DBM_TX_POWER) | (1 <<
IEEE80211_RADIOTAP_ANTENNA) | 0)))
;
423#endif
424
425 sc->sc_flags |= ATH_ATTACHED0x0001;
426
427 /*
428 * Print regulation domain and the mac address. The regulation domain
429 * will be marked with a * if the EEPROM value has been overwritten.
430 */
431 printf(", %s%s, address %s\n",
432 ieee80211_regdomain2name(ah->ah_regdomainah_capabilities.cap_regdomain.reg_current),
433 ah->ah_regdomainah_capabilities.cap_regdomain.reg_current != ah->ah_regdomain_hwah_capabilities.cap_regdomain.reg_hw ? "*" : "",
434 ether_sprintf(ic->ic_myaddr));
435
436 if (ath_gpio_attach(sc, devid) == 0)
437 sc->sc_flags |= ATH_GPIO0x0004;
438
439 return 0;
440bad2:
441 ath_desc_free(sc);
442bad:
443 if (ah)
444 ath_hal_detach(ah)((*(ah)->ah_detach)(ah));
445 sc->sc_invalid = 1;
446 return error;
447}
448
449int
450ath_detach(struct ath_softc *sc, int flags)
451{
452 struct ifnet *ifp = &sc->sc_ic.ic_ific_ac.ac_if;
453 int s;
454
455 if ((sc->sc_flags & ATH_ATTACHED0x0001) == 0)
456 return (0);
457
458 config_detach_children(&sc->sc_dev, flags);
459
460 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags %x\n", __func__, ifp->if_flags));
461
462 timeout_del(&sc->sc_scan_to);
463 timeout_del(&sc->sc_cal_to);
464 timeout_del(&sc->sc_rssadapt_to);
465
466 s = splnet()splraise(0x7);
467 ath_stop(ifp);
468 ath_desc_free(sc);
469 ath_hal_detach(sc->sc_ah)((*(sc->sc_ah)->ah_detach)(sc->sc_ah));
470
471 ieee80211_ifdetach(ifp);
472 if_detach(ifp);
473
474 splx(s)spllower(s);
475#ifdef __FreeBSD__
476 ATH_TXBUF_LOCK_DESTROY(sc)mtx_destroy(&(sc)->sc_txbuflock);
477 ATH_TXQ_LOCK_DESTROY(sc)mtx_destroy(&(sc)->sc_txqlock);
478#endif
479
480 return 0;
481}
482
483int
484ath_intr(void *arg)
485{
486 return ath_intr1((struct ath_softc *)arg);
487}
488
489int
490ath_intr1(struct ath_softc *sc)
491{
492 struct ieee80211com *ic = &sc->sc_ic;
493 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
494 struct ath_hal *ah = sc->sc_ah;
495 HAL_INT status;
496
497 if (sc->sc_invalid) {
498 /*
499 * The hardware is not ready/present, don't touch anything.
500 * Note this can happen early on if the IRQ is shared.
501 */
502 DPRINTF(ATH_DEBUG_ANY, ("%s: invalid; ignored\n", __func__));
503 return 0;
504 }
505 if (!ath_hal_is_intr_pending(ah)((*(ah)->ah_is_intr_pending)((ah)))) /* shared irq, not for us */
506 return 0;
507 if ((ifp->if_flags & (IFF_RUNNING0x40|IFF_UP0x1)) != (IFF_RUNNING0x40|IFF_UP0x1)) {
508 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n",
509 __func__, ifp->if_flags));
510 ath_hal_get_isr(ah, &status)((*(ah)->ah_get_isr)((ah), (&status))); /* clear ISR */
511 ath_hal_set_intr(ah, 0)((*(ah)->ah_set_intr)((ah), (0))); /* disable further intr's */
512 return 1; /* XXX */
513 }
514 ath_hal_get_isr(ah, &status)((*(ah)->ah_get_isr)((ah), (&status))); /* NB: clears ISR too */
515 DPRINTF(ATH_DEBUG_INTR, ("%s: status 0x%x\n", __func__, status));
516 status &= sc->sc_imask; /* discard unasked for bits */
517 if (status & HAL_INT_FATAL0x40000000) {
518 sc->sc_stats.ast_hardware++;
519 ath_hal_set_intr(ah, 0)((*(ah)->ah_set_intr)((ah), (0))); /* disable intr's until reset */
520 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_fataltask)((*(&sc->sc_fataltask)->t_func)((&sc->sc_fataltask
)->t_context, 1))
;
521 } else if (status & HAL_INT_RXORN0x00000020) {
522 sc->sc_stats.ast_rxorn++;
523 ath_hal_set_intr(ah, 0)((*(ah)->ah_set_intr)((ah), (0))); /* disable intr's until reset */
524 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxorntask)((*(&sc->sc_rxorntask)->t_func)((&sc->sc_rxorntask
)->t_context, 1))
;
525 } else if (status & HAL_INT_MIB0x00001000) {
526 DPRINTF(ATH_DEBUG_INTR,
527 ("%s: resetting MIB counters\n", __func__));
528 sc->sc_stats.ast_mib++;
529 ath_hal_update_mib_counters(ah, &sc->sc_mib_stats)((*(ah)->ah_update_mib_counters)((ah), (&sc->sc_mib_stats
)))
;
530 } else {
531 if (status & HAL_INT_RXEOL0x00000010) {
532 /*
533 * NB: the hardware should re-read the link when
534 * RXE bit is written, but it doesn't work at
535 * least on older hardware revs.
536 */
537 sc->sc_stats.ast_rxeol++;
538 sc->sc_rxlink = NULL((void *)0);
539 }
540 if (status & HAL_INT_TXURN0x00000800) {
541 sc->sc_stats.ast_txurn++;
542 /* bump tx trigger level */
543 ath_hal_update_tx_triglevel(ah, AH_TRUE)((*(ah)->ah_update_tx_triglevel)((ah), (AH_TRUE)));
544 }
545 if (status & HAL_INT_RX0x00000001)
546 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxtask)((*(&sc->sc_rxtask)->t_func)((&sc->sc_rxtask
)->t_context, 1))
;
547 if (status & HAL_INT_TX0x00000040)
548 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_txtask)((*(&sc->sc_txtask)->t_func)((&sc->sc_txtask
)->t_context, 1))
;
549 if (status & HAL_INT_SWBA0x00010000)
550 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_swbatask)((*(&sc->sc_swbatask)->t_func)((&sc->sc_swbatask
)->t_context, 1))
;
551 if (status & HAL_INT_BMISS0x00040000) {
552 sc->sc_stats.ast_bmiss++;
553 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_bmisstask)((*(&sc->sc_bmisstask)->t_func)((&sc->sc_bmisstask
)->t_context, 1))
;
554 }
555 }
556 return 1;
557}
558
559void
560ath_fatal_proc(void *arg, int pending)
561{
562 struct ath_softc *sc = arg;
563 struct ieee80211com *ic = &sc->sc_ic;
564 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
565
566 if (ifp->if_flags & IFF_DEBUG0x4)
567 printf("%s: hardware error; resetting\n", ifp->if_xname);
568 ath_reset(sc, 1);
569}
570
571void
572ath_rxorn_proc(void *arg, int pending)
573{
574 struct ath_softc *sc = arg;
575 struct ieee80211com *ic = &sc->sc_ic;
576 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
577
578 if (ifp->if_flags & IFF_DEBUG0x4)
579 printf("%s: rx FIFO overrun; resetting\n", ifp->if_xname);
580 ath_reset(sc, 1);
581}
582
583void
584ath_bmiss_proc(void *arg, int pending)
585{
586 struct ath_softc *sc = arg;
587 struct ieee80211com *ic = &sc->sc_ic;
588
589 DPRINTF(ATH_DEBUG_ANY, ("%s: pending %u\n", __func__, pending));
590 if (ic->ic_opmode != IEEE80211_M_STA)
591 return;
592 if (ic->ic_state == IEEE80211_S_RUN) {
593 /*
594 * Rather than go directly to scan state, try to
595 * reassociate first. If that fails then the state
596 * machine will drop us into scanning after timing
597 * out waiting for a probe response.
598 */
599 ieee80211_new_state(ic, IEEE80211_S_ASSOC, -1)(((ic)->ic_newstate)((ic), (IEEE80211_S_ASSOC), (-1)));
600 }
601}
602
603int
604ath_init(struct ifnet *ifp)
605{
606 return ath_init1((struct ath_softc *)ifp->if_softc);
607}
608
609int
610ath_init1(struct ath_softc *sc)
611{
612 struct ieee80211com *ic = &sc->sc_ic;
613 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
614 struct ieee80211_node *ni;
615 enum ieee80211_phymode mode;
616 struct ath_hal *ah = sc->sc_ah;
617 HAL_STATUS status;
618 HAL_CHANNEL hchan;
619 int error = 0, s;
620
621 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n",
622 __func__, ifp->if_flags));
623
624 if ((error = ath_enable(sc)) != 0)
625 return error;
626
627 s = splnet()splraise(0x7);
628 /*
629 * Stop anything previously setup. This is safe
630 * whether this is the first time through or not.
631 */
632 ath_stop(ifp);
633
634 /*
635 * Reset the link layer address to the latest value.
636 */
637 IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl))__builtin_memcpy((ic->ic_myaddr), (((caddr_t)((ifp->if_sadl
)->sdl_data + (ifp->if_sadl)->sdl_nlen))), (6))
;
638 ath_hal_set_lladdr(ah, ic->ic_myaddr)((*(ah)->ah_set_lladdr)((ah), (ic->ic_myaddr)));
639
640 /*
641 * The basic interface to setting the hardware in a good
642 * state is ``reset''. On return the hardware is known to
643 * be powered up and with interrupts disabled. This must
644 * be followed by initialization of the appropriate bits
645 * and then setup of the interrupt mask.
646 */
647 hchan.channel = ic->ic_ibss_chan->ic_freq;
648 hchan.channelFlags = ic->ic_ibss_chan->ic_flags;
649 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, &status)((*(ah)->ah_reset)((ah), (ic->ic_opmode), (&hchan),
(AH_TRUE), (&status)))
) {
650 printf("%s: unable to reset hardware; hal status %u\n",
651 ifp->if_xname, status);
652 error = EIO5;
653 goto done;
654 }
655 ath_set_slot_time(sc);
656
657 if ((error = ath_initkeytable(sc)) != 0) {
658 printf("%s: unable to reset the key cache\n",
659 ifp->if_xname);
660 goto done;
661 }
662
663 if ((error = ath_startrecv(sc)) != 0) {
664 printf("%s: unable to start recv logic\n", ifp->if_xname);
665 goto done;
666 }
667
668 /*
669 * Enable interrupts.
670 */
671 sc->sc_imask = HAL_INT_RX0x00000001 | HAL_INT_TX0x00000040
672 | HAL_INT_RXEOL0x00000010 | HAL_INT_RXORN0x00000020
673 | HAL_INT_FATAL0x40000000 | HAL_INT_GLOBAL0x80000000;
674#ifndef IEEE80211_STA_ONLY
675 if (ic->ic_opmode == IEEE80211_M_HOSTAP)
676 sc->sc_imask |= HAL_INT_MIB0x00001000;
677#endif
678 ath_hal_set_intr(ah, sc->sc_imask)((*(ah)->ah_set_intr)((ah), (sc->sc_imask)));
679
680 ifp->if_flags |= IFF_RUNNING0x40;
681 ic->ic_state = IEEE80211_S_INIT;
682
683 /*
684 * The hardware should be ready to go now so it's safe
685 * to kick the 802.11 state machine as it's likely to
686 * immediately call back to us to send mgmt frames.
687 */
688 ni = ic->ic_bss;
689 ni->ni_chan = ic->ic_ibss_chan;
690 mode = ieee80211_chan2mode(ic, ni->ni_chan);
691 if (mode != sc->sc_curmode)
692 ath_setcurmode(sc, mode);
693 if (ic->ic_opmode != IEEE80211_M_MONITOR) {
694 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1)(((ic)->ic_newstate)((ic), (IEEE80211_S_SCAN), (-1)));
695 } else {
696 ieee80211_new_state(ic, IEEE80211_S_RUN, -1)(((ic)->ic_newstate)((ic), (IEEE80211_S_RUN), (-1)));
697 }
698done:
699 splx(s)spllower(s);
700 return error;
701}
702
703void
704ath_stop(struct ifnet *ifp)
705{
706 struct ieee80211com *ic = (struct ieee80211com *) ifp;
707 struct ath_softc *sc = ifp->if_softc;
708 struct ath_hal *ah = sc->sc_ah;
709 int s;
710
711 DPRINTF(ATH_DEBUG_ANY, ("%s: invalid %u if_flags 0x%x\n",
712 __func__, sc->sc_invalid, ifp->if_flags));
713
714 s = splnet()splraise(0x7);
715 if (ifp->if_flags & IFF_RUNNING0x40) {
716 /*
717 * Shutdown the hardware and driver:
718 * disable interrupts
719 * turn off timers
720 * clear transmit machinery
721 * clear receive machinery
722 * drain and release tx queues
723 * reclaim beacon resources
724 * reset 802.11 state machine
725 * power down hardware
726 *
727 * Note that some of this work is not possible if the
728 * hardware is gone (invalid).
729 */
730 ifp->if_flags &= ~IFF_RUNNING0x40;
731 ifp->if_timer = 0;
732 if (!sc->sc_invalid)
733 ath_hal_set_intr(ah, 0)((*(ah)->ah_set_intr)((ah), (0)));
734 ath_draintxq(sc);
735 if (!sc->sc_invalid) {
736 ath_stoprecv(sc);
737 } else {
738 sc->sc_rxlink = NULL((void *)0);
739 }
740 ifq_purge(&ifp->if_snd);
741#ifndef IEEE80211_STA_ONLY
742 ath_beacon_free(sc);
743#endif
744 ieee80211_new_state(ic, IEEE80211_S_INIT, -1)(((ic)->ic_newstate)((ic), (IEEE80211_S_INIT), (-1)));
745 if (!sc->sc_invalid) {
746 ath_hal_set_power(ah, HAL_PM_FULL_SLEEP, 0)((*(ah)->ah_set_power)((ah), (HAL_PM_FULL_SLEEP), AH_TRUE,
(0)))
;
747 }
748 ath_disable(sc);
749 }
750 splx(s)spllower(s);
751}
752
753/*
754 * Reset the hardware w/o losing operational state. This is
755 * basically a more efficient way of doing ath_stop, ath_init,
756 * followed by state transitions to the current 802.11
757 * operational state. Used to recover from errors rx overrun
758 * and to reset the hardware when rf gain settings must be reset.
759 */
760void
761ath_reset(struct ath_softc *sc, int full)
762{
763 struct ieee80211com *ic = &sc->sc_ic;
764 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
765 struct ath_hal *ah = sc->sc_ah;
766 struct ieee80211_channel *c;
767 HAL_STATUS status;
768 HAL_CHANNEL hchan;
769
770 /*
771 * Convert to a HAL channel description.
772 */
773 c = ic->ic_ibss_chan;
774 hchan.channel = c->ic_freq;
775 hchan.channelFlags = c->ic_flags;
776
777 ath_hal_set_intr(ah, 0)((*(ah)->ah_set_intr)((ah), (0))); /* disable interrupts */
778 ath_draintxq(sc); /* stop xmit side */
779 ath_stoprecv(sc); /* stop recv side */
780 /* NB: indicate channel change so we do a full reset */
781 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan,((*(ah)->ah_reset)((ah), (ic->ic_opmode), (&hchan),
(full ? AH_TRUE : AH_FALSE), (&status)))
782 full ? AH_TRUE : AH_FALSE, &status)((*(ah)->ah_reset)((ah), (ic->ic_opmode), (&hchan),
(full ? AH_TRUE : AH_FALSE), (&status)))
) {
783 printf("%s: %s: unable to reset hardware; hal status %u\n",
784 ifp->if_xname, __func__, status);
785 }
786 ath_set_slot_time(sc);
787 /* In case channel changed, save as a node channel */
788 ic->ic_bss->ni_chan = ic->ic_ibss_chan;
789 ath_hal_set_intr(ah, sc->sc_imask)((*(ah)->ah_set_intr)((ah), (sc->sc_imask)));
790 if (ath_startrecv(sc) != 0) /* restart recv */
791 printf("%s: %s: unable to start recv logic\n", ifp->if_xname,
792 __func__);
793 ath_start(ifp); /* restart xmit */
794 if (ic->ic_state == IEEE80211_S_RUN)
795 ath_beacon_config(sc); /* restart beacons */
796}
797
798void
799ath_start(struct ifnet *ifp)
800{
801 struct ath_softc *sc = ifp->if_softc;
802 struct ath_hal *ah = sc->sc_ah;
803 struct ieee80211com *ic = &sc->sc_ic;
804 struct ieee80211_node *ni;
805 struct ath_buf *bf;
806 struct mbuf *m;
807 struct ieee80211_frame *wh;
808 int s;
809
810 if (!(ifp->if_flags & IFF_RUNNING0x40) || ifq_is_oactive(&ifp->if_snd) ||
811 sc->sc_invalid)
812 return;
813 for (;;) {
814 /*
815 * Grab a TX buffer and associated resources.
816 */
817 s = splnet()splraise(0x7);
818 bf = TAILQ_FIRST(&sc->sc_txbuf)((&sc->sc_txbuf)->tqh_first);
819 if (bf != NULL((void *)0))
820 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list)do { if (((bf)->bf_list.tqe_next) != ((void *)0)) (bf)->
bf_list.tqe_next->bf_list.tqe_prev = (bf)->bf_list.tqe_prev
; else (&sc->sc_txbuf)->tqh_last = (bf)->bf_list
.tqe_prev; *(bf)->bf_list.tqe_prev = (bf)->bf_list.tqe_next
; ((bf)->bf_list.tqe_prev) = ((void *)-1); ((bf)->bf_list
.tqe_next) = ((void *)-1); } while (0)
;
821 splx(s)spllower(s);
822 if (bf == NULL((void *)0)) {
823 DPRINTF(ATH_DEBUG_ANY, ("%s: out of xmit buffers\n",
824 __func__));
825 sc->sc_stats.ast_tx_qstop++;
826 ifq_set_oactive(&ifp->if_snd);
827 break;
828 }
829 /*
830 * Poll the management queue for frames; they
831 * have priority over normal data frames.
832 */
833 m = mq_dequeue(&ic->ic_mgtq);
834 if (m == NULL((void *)0)) {
835 /*
836 * No data frames go out unless we're associated.
837 */
838 if (ic->ic_state != IEEE80211_S_RUN) {
839 DPRINTF(ATH_DEBUG_ANY,
840 ("%s: ignore data packet, state %u\n",
841 __func__, ic->ic_state));
842 sc->sc_stats.ast_tx_discard++;
843 s = splnet()splraise(0x7);
844 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list)do { (bf)->bf_list.tqe_next = ((void *)0); (bf)->bf_list
.tqe_prev = (&sc->sc_txbuf)->tqh_last; *(&sc->
sc_txbuf)->tqh_last = (bf); (&sc->sc_txbuf)->tqh_last
= &(bf)->bf_list.tqe_next; } while (0)
;
845 splx(s)spllower(s);
846 break;
847 }
848 m = ifq_dequeue(&ifp->if_snd);
849 if (m == NULL((void *)0)) {
850 s = splnet()splraise(0x7);
851 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list)do { (bf)->bf_list.tqe_next = ((void *)0); (bf)->bf_list
.tqe_prev = (&sc->sc_txbuf)->tqh_last; *(&sc->
sc_txbuf)->tqh_last = (bf); (&sc->sc_txbuf)->tqh_last
= &(bf)->bf_list.tqe_next; } while (0)
;
852 splx(s)spllower(s);
853 break;
854 }
855
856#if NBPFILTER1 > 0
857 if (ifp->if_bpf)
858 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT(1 << 1));
859#endif
860
861 /*
862 * Encapsulate the packet in prep for transmission.
863 */
864 m = ieee80211_encap(ifp, m, &ni);
865 if (m == NULL((void *)0)) {
866 DPRINTF(ATH_DEBUG_ANY,
867 ("%s: encapsulation failure\n",
868 __func__));
869 sc->sc_stats.ast_tx_encap++;
870 goto bad;
871 }
872 wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
Value stored to 'wh' is never read
873 } else {
874 ni = m->m_pkthdrM_dat.MH.MH_pkthdr.ph_cookie;
875
876 wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
877 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK0xf0) ==
878 IEEE80211_FC0_SUBTYPE_PROBE_RESP0x50) {
879 /* fill time stamp */
880 u_int64_t tsf;
881 u_int32_t *tstamp;
882
883 tsf = ath_hal_get_tsf64(ah)((*(ah)->ah_get_tsf64)((ah)));
884 /* XXX: adjust 100us delay to xmit */
885 tsf += 100;
886 tstamp = (u_int32_t *)&wh[1];
887 tstamp[0] = htole32(tsf & 0xffffffff)((__uint32_t)(tsf & 0xffffffff));
888 tstamp[1] = htole32(tsf >> 32)((__uint32_t)(tsf >> 32));
889 }
890 sc->sc_stats.ast_tx_mgmt++;
891 }
892
893 if (ath_tx_start(sc, ni, bf, m)) {
894 bad:
895 s = splnet()splraise(0x7);
896 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list)do { (bf)->bf_list.tqe_next = ((void *)0); (bf)->bf_list
.tqe_prev = (&sc->sc_txbuf)->tqh_last; *(&sc->
sc_txbuf)->tqh_last = (bf); (&sc->sc_txbuf)->tqh_last
= &(bf)->bf_list.tqe_next; } while (0)
;
897 splx(s)spllower(s);
898 ifp->if_oerrorsif_data.ifi_oerrors++;
899 if (ni != NULL((void *)0))
900 ieee80211_release_node(ic, ni);
901 continue;
902 }
903
904 sc->sc_tx_timer = 5;
905 ifp->if_timer = 1;
906 }
907}
908
909int
910ath_media_change(struct ifnet *ifp)
911{
912 int error;
913
914 error = ieee80211_media_change(ifp);
915 if (error == ENETRESET52) {
916 if ((ifp->if_flags & (IFF_RUNNING0x40|IFF_UP0x1)) ==
917 (IFF_RUNNING0x40|IFF_UP0x1))
918 ath_init(ifp); /* XXX lose error */
919 error = 0;
920 }
921 return error;
922}
923
924void
925ath_watchdog(struct ifnet *ifp)
926{
927 struct ath_softc *sc = ifp->if_softc;
928
929 ifp->if_timer = 0;
930 if ((ifp->if_flags & IFF_RUNNING0x40) == 0 || sc->sc_invalid)
931 return;
932 if (sc->sc_tx_timer) {
933 if (--sc->sc_tx_timer == 0) {
934 printf("%s: device timeout\n", ifp->if_xname);
935 ath_reset(sc, 1);
936 ifp->if_oerrorsif_data.ifi_oerrors++;
937 sc->sc_stats.ast_watchdog++;
938 return;
939 }
940 ifp->if_timer = 1;
941 }
942
943 ieee80211_watchdog(ifp);
944}
945
946int
947ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
948{
949 struct ath_softc *sc = ifp->if_softc;
950 struct ieee80211com *ic = &sc->sc_ic;
951 struct ifreq *ifr = (struct ifreq *)data;
952 int error = 0, s;
953
954 s = splnet()splraise(0x7);
955 switch (cmd) {
956 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
957 ifp->if_flags |= IFF_UP0x1;
958 /* FALLTHROUGH */
959 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
960 if (ifp->if_flags & IFF_UP0x1) {
961 if (ifp->if_flags & IFF_RUNNING0x40) {
962 /*
963 * To avoid rescanning another access point,
964 * do not call ath_init() here. Instead,
965 * only reflect promisc mode settings.
966 */
967 ath_mode_init(sc);
968 } else {
969 /*
970 * Beware of being called during detach to
971 * reset promiscuous mode. In that case we
972 * will still be marked UP but not RUNNING.
973 * However trying to re-init the interface
974 * is the wrong thing to do as we've already
975 * torn down much of our state. There's
976 * probably a better way to deal with this.
977 */
978 if (!sc->sc_invalid)
979 ath_init(ifp); /* XXX lose error */
980 }
981 } else
982 ath_stop(ifp);
983 break;
984 case SIOCADDMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((49)))
:
985 case SIOCDELMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((50)))
:
986#ifdef __FreeBSD__
987 /*
988 * The upper layer has already installed/removed
989 * the multicast address(es), just recalculate the
990 * multicast filter for the card.
991 */
992 if (ifp->if_flags & IFF_RUNNING0x40)
993 ath_mode_init(sc);
994#endif
995 error = (cmd == SIOCADDMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((49)))
) ?
996 ether_addmulti(ifr, &sc->sc_ic.ic_ac) :
997 ether_delmulti(ifr, &sc->sc_ic.ic_ac);
998 if (error == ENETRESET52) {
999 if (ifp->if_flags & IFF_RUNNING0x40)
1000 ath_mode_init(sc);
1001 error = 0;
1002 }
1003 break;
1004 case SIOCGATHSTATS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((137)))
:
1005 error = copyout(&sc->sc_stats,
1006 ifr->ifr_dataifr_ifru.ifru_data, sizeof (sc->sc_stats));
1007 break;
1008 default:
1009 error = ieee80211_ioctl(ifp, cmd, data);
1010 if (error == ENETRESET52) {
1011 if ((ifp->if_flags & (IFF_RUNNING0x40|IFF_UP0x1)) ==
1012 (IFF_RUNNING0x40|IFF_UP0x1)) {
1013 if (ic->ic_opmode != IEEE80211_M_MONITOR)
1014 ath_init(ifp); /* XXX lose error */
1015 else
1016 ath_reset(sc, 1);
1017 }
1018 error = 0;
1019 }
1020 break;
1021 }
1022 splx(s)spllower(s);
1023 return error;
1024}
1025
1026/*
1027 * Fill the hardware key cache with key entries.
1028 */
1029int
1030ath_initkeytable(struct ath_softc *sc)
1031{
1032 struct ieee80211com *ic = &sc->sc_ic;
1033 struct ath_hal *ah = sc->sc_ah;
1034 int i;
1035
1036 if (ath_softcrypto) {
1037 /*
1038 * Disable the hardware crypto engine and reset the key cache
1039 * to allow software crypto operation for WEP/RSN/WPA2
1040 */
1041 if (ic->ic_flags & (IEEE80211_F_WEPON0x00000100|IEEE80211_F_RSNON0x00200000))
1042 (void)ath_hal_softcrypto(ah, AH_TRUE)((*(ah)->ah_softcrypto)((ah), (AH_TRUE)));
1043 else
1044 (void)ath_hal_softcrypto(ah, AH_FALSE)((*(ah)->ah_softcrypto)((ah), (AH_FALSE)));
1045 return (0);
1046 }
1047
1048 /* WEP is disabled, we only support WEP in hardware yet */
1049 if ((ic->ic_flags & IEEE80211_F_WEPON0x00000100) == 0)
1050 return (0);
1051
1052 /*
1053 * Setup the hardware after reset: the key cache is filled as
1054 * needed and the receive engine is set going. Frame transmit
1055 * is handled entirely in the frame output path; there's nothing
1056 * to do here except setup the interrupt mask.
1057 */
1058
1059 /* XXX maybe should reset all keys when !WEPON */
1060 for (i = 0; i < IEEE80211_WEP_NKID4; i++) {
1061 struct ieee80211_key *k = &ic->ic_nw_keys[i];
1062 if (k->k_len == 0)
1063 ath_hal_reset_key(ah, i)((*(ah)->ah_reset_key)((ah), (i)));
1064 else {
1065 HAL_KEYVAL hk;
1066
1067 bzero(&hk, sizeof(hk))__builtin_bzero((&hk), (sizeof(hk)));
1068 /*
1069 * Pad the key to a supported key length. It
1070 * is always a good idea to use full-length
1071 * keys without padded zeros but this seems
1072 * to be the default behaviour used by many
1073 * implementations.
1074 */
1075 if (k->k_cipher == IEEE80211_CIPHER_WEP40)
1076 hk.wk_len = AR5K_KEYVAL_LENGTH_405;
1077 else if (k->k_cipher == IEEE80211_CIPHER_WEP104)
1078 hk.wk_len = AR5K_KEYVAL_LENGTH_10413;
1079 else
1080 return (EINVAL22);
1081 bcopy(k->k_key, hk.wk_key, hk.wk_len);
1082
1083 if (ath_hal_set_key(ah, i, &hk)((*(ah)->ah_set_key)((ah), (i), (&hk), ((void *)0), AH_FALSE
))
!= AH_TRUE)
1084 return (EINVAL22);
1085 }
1086 }
1087
1088 return (0);
1089}
1090
1091void
1092ath_mcastfilter_accum(caddr_t dl, u_int32_t (*mfilt)[2])
1093{
1094 u_int32_t val;
1095 u_int8_t pos;
1096
1097 val = LE_READ_4(dl + 0)((u_int32_t) ((((u_int8_t *)(dl + 0))[0] ) | (((u_int8_t *)(dl
+ 0))[1] << 8) | (((u_int8_t *)(dl + 0))[2] << 16
) | (((u_int8_t *)(dl + 0))[3] << 24)))
;
1098 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
1099 val = LE_READ_4(dl + 3)((u_int32_t) ((((u_int8_t *)(dl + 3))[0] ) | (((u_int8_t *)(dl
+ 3))[1] << 8) | (((u_int8_t *)(dl + 3))[2] << 16
) | (((u_int8_t *)(dl + 3))[3] << 24)))
;
1100 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
1101 pos &= 0x3f;
1102 (*mfilt)[pos / 32] |= (1 << (pos % 32));
1103}
1104
1105void
1106ath_mcastfilter_compute(struct ath_softc *sc, u_int32_t (*mfilt)[2])
1107{
1108 struct arpcom *ac = &sc->sc_ic.ic_ac;
1109 struct ifnet *ifp = &sc->sc_ic.ic_ific_ac.ac_if;
1110 struct ether_multi *enm;
1111 struct ether_multistep estep;
1112
1113 if (ac->ac_multirangecnt > 0) {
1114 /* XXX Punt on ranges. */
1115 (*mfilt)[0] = (*mfilt)[1] = ~((u_int32_t)0);
1116 ifp->if_flags |= IFF_ALLMULTI0x200;
1117 return;
1118 }
1119
1120 ETHER_FIRST_MULTI(estep, ac, enm)do { (estep).e_enm = ((&(ac)->ac_multiaddrs)->lh_first
); do { if ((((enm)) = ((estep)).e_enm) != ((void *)0)) ((estep
)).e_enm = ((((enm)))->enm_list.le_next); } while ( 0); } while
( 0)
;
1121 while (enm != NULL((void *)0)) {
1122 ath_mcastfilter_accum(enm->enm_addrlo, mfilt);
1123 ETHER_NEXT_MULTI(estep, enm)do { if (((enm) = (estep).e_enm) != ((void *)0)) (estep).e_enm
= (((enm))->enm_list.le_next); } while ( 0)
;
1124 }
1125 ifp->if_flags &= ~IFF_ALLMULTI0x200;
1126}
1127
1128/*
1129 * Calculate the receive filter according to the
1130 * operating mode and state:
1131 *
1132 * o always accept unicast, broadcast, and multicast traffic
1133 * o maintain current state of phy error reception
1134 * o probe request frames are accepted only when operating in
1135 * hostap, adhoc, or monitor modes
1136 * o enable promiscuous mode according to the interface state
1137 * o accept beacons:
1138 * - when operating in adhoc mode so the 802.11 layer creates
1139 * node table entries for peers,
1140 * - when operating in station mode for collecting rssi data when
1141 * the station is otherwise quiet, or
1142 * - when scanning
1143 */
1144u_int32_t
1145ath_calcrxfilter(struct ath_softc *sc)
1146{
1147 struct ieee80211com *ic = &sc->sc_ic;
1148 struct ath_hal *ah = sc->sc_ah;
1149 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
1150 u_int32_t rfilt;
1151
1152 rfilt = (ath_hal_get_rx_filter(ah)((*(ah)->ah_get_rx_filter)((ah))) & HAL_RX_FILTER_PHYERR0x00000100)
1153 | HAL_RX_FILTER_UCAST0x00000001 | HAL_RX_FILTER_BCAST0x00000004 | HAL_RX_FILTER_MCAST0x00000002;
1154 if (ic->ic_opmode != IEEE80211_M_STA)
1155 rfilt |= HAL_RX_FILTER_PROBEREQ0x00000080;
1156#ifndef IEEE80211_STA_ONLY
1157 if (ic->ic_opmode != IEEE80211_M_AHDEMO)
1158#endif
1159 rfilt |= HAL_RX_FILTER_BEACON0x00000010;
1160 if (ifp->if_flags & IFF_PROMISC0x100)
1161 rfilt |= HAL_RX_FILTER_PROM0x00000020;
1162 return rfilt;
1163}
1164
1165void
1166ath_mode_init(struct ath_softc *sc)
1167{
1168 struct ath_hal *ah = sc->sc_ah;
1169 u_int32_t rfilt, mfilt[2];
1170
1171 /* configure rx filter */
1172 rfilt = ath_calcrxfilter(sc);
1173 ath_hal_set_rx_filter(ah, rfilt)((*(ah)->ah_set_rx_filter)((ah), (rfilt)));
1174
1175 /* configure operational mode */
1176 ath_hal_set_opmode(ah)((*(ah)->ah_set_opmode)((ah)));
1177
1178 /* calculate and install multicast filter */
1179 mfilt[0] = mfilt[1] = 0;
1180 ath_mcastfilter_compute(sc, &mfilt);
1181 ath_hal_set_mcast_filter(ah, mfilt[0], mfilt[1])((*(ah)->ah_set_mcast_filter)((ah), (mfilt[0]), (mfilt[1])
))
;
1182 DPRINTF(ATH_DEBUG_MODE, ("%s: RX filter 0x%x, MC filter %08x:%08x\n",
1183 __func__, rfilt, mfilt[0], mfilt[1]));
1184}
1185
1186struct mbuf *
1187ath_getmbuf(int flags, int type, u_int pktlen)
1188{
1189 struct mbuf *m;
1190
1191 KASSERT(pktlen <= MCLBYTES, ("802.11 packet too large: %u", pktlen))if (!(pktlen <= (1 << 11))) panic ("802.11 packet too large: %u"
, pktlen)
;
1192#ifdef __FreeBSD__
1193 if (pktlen <= MHLEN((256 - sizeof(struct m_hdr)) - sizeof(struct pkthdr))) {
1194 MGETHDR(m, flags, type)m = m_gethdr((flags), (type));
1195 } else {
1196 m = m_getcl(flags, type, M_PKTHDR0x0002);
1197 }
1198#else
1199 MGETHDR(m, flags, type)m = m_gethdr((flags), (type));
1200 if (m != NULL((void *)0) && pktlen > MHLEN((256 - sizeof(struct m_hdr)) - sizeof(struct pkthdr))) {
1201 MCLGET(m, flags)(void) m_clget((m), (flags), (1 << 11));
1202 if ((m->m_flagsm_hdr.mh_flags & M_EXT0x0001) == 0) {
1203 m_free(m);
1204 m = NULL((void *)0);
1205 }
1206 }
1207#endif
1208 return m;
1209}
1210
1211#ifndef IEEE80211_STA_ONLY
1212int
1213ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
1214{
1215 struct ieee80211com *ic = &sc->sc_ic;
1216 struct ath_hal *ah = sc->sc_ah;
1217 struct ath_buf *bf;
1218 struct ath_desc *ds;
1219 struct mbuf *m;
1220 int error;
1221 u_int8_t rate;
1222 const HAL_RATE_TABLE *rt;
1223 u_int flags = 0;
1224
1225 bf = sc->sc_bcbuf;
1226 if (bf->bf_m != NULL((void *)0)) {
1227 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (bf->
bf_dmamap))
;
1228 m_freem(bf->bf_m);
1229 bf->bf_m = NULL((void *)0);
1230 bf->bf_node = NULL((void *)0);
1231 }
1232 /*
1233 * NB: the beacon data buffer must be 32-bit aligned;
1234 * we assume the mbuf routines will return us something
1235 * with this alignment (perhaps should assert).
1236 */
1237 m = ieee80211_beacon_alloc(ic, ni);
1238 if (m == NULL((void *)0)) {
1239 DPRINTF(ATH_DEBUG_BEACON, ("%s: cannot get mbuf/cluster\n",
1240 __func__));
1241 sc->sc_stats.ast_be_nombuf++;
1242 return ENOMEM12;
1243 }
1244
1245 DPRINTF(ATH_DEBUG_BEACON, ("%s: m %p len %u\n", __func__, m, m->m_len));
1246 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m,(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
bf->bf_dmamap), (m), (0x0001))
1247 BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load_mbuf)((sc->sc_dmat), (
bf->bf_dmamap), (m), (0x0001))
;
1248 if (error != 0) {
1249 m_freem(m);
1250 return error;
1251 }
1252 KASSERT(bf->bf_nseg == 1,if (!(bf->bf_dmamap->dm_nsegs == 1)) panic ("%s: multi-segment packet; nseg %u"
, __func__, bf->bf_dmamap->dm_nsegs)
1253 ("%s: multi-segment packet; nseg %u", __func__, bf->bf_nseg))if (!(bf->bf_dmamap->dm_nsegs == 1)) panic ("%s: multi-segment packet; nseg %u"
, __func__, bf->bf_dmamap->dm_nsegs)
;
1254 bf->bf_m = m;
1255
1256 /* setup descriptors */
1257 ds = bf->bf_desc;
1258 bzero(ds, sizeof(struct ath_desc))__builtin_bzero((ds), (sizeof(struct ath_desc)));
1259
1260 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol) {
1261 ds->ds_link = bf->bf_daddr; /* link to self */
1262 flags |= HAL_TXDESC_VEOL0x0020;
1263 } else {
1264 ds->ds_link = 0;
1265 }
1266 ds->ds_data = bf->bf_segsbf_dmamap->dm_segs[0].ds_addr;
1267
1268 DPRINTF(ATH_DEBUG_ANY, ("%s: segaddr %p seglen %u\n", __func__,
1269 (caddr_t)bf->bf_segs[0].ds_addr, (u_int)bf->bf_segs[0].ds_len));
1270
1271 /*
1272 * Calculate rate code.
1273 * XXX everything at min xmit rate
1274 */
1275 rt = sc->sc_currates;
1276 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode))if (!(rt != ((void *)0))) panic ("no rate table, mode %u", sc
->sc_curmode)
;
1277 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE0x00040000) {
1278 rate = rt->info[0].rateCode | rt->info[0].shortPreamble;
1279 } else {
1280 rate = rt->info[0].rateCode;
1281 }
1282
1283 flags = HAL_TXDESC_NOACK0x0002;
1284 if (ic->ic_opmode == IEEE80211_M_IBSS)
1285 flags |= HAL_TXDESC_VEOL0x0020;
1286
1287 if (!ath_hal_setup_tx_desc(ah, ds((*(ah)->ah_setup_tx_desc)((ah), (ds), (m->M_dat.MH.MH_pkthdr
.len + 4), (sizeof(struct ieee80211_frame)), (HAL_PKT_TYPE_BEACON
), (60), (rate), (1), (((u_int32_t) - 1)), (0), (flags), (0),
(0)))
1288 , m->m_pkthdr.len + IEEE80211_CRC_LEN /* packet length */((*(ah)->ah_setup_tx_desc)((ah), (ds), (m->M_dat.MH.MH_pkthdr
.len + 4), (sizeof(struct ieee80211_frame)), (HAL_PKT_TYPE_BEACON
), (60), (rate), (1), (((u_int32_t) - 1)), (0), (flags), (0),
(0)))
1289 , sizeof(struct ieee80211_frame) /* header length */((*(ah)->ah_setup_tx_desc)((ah), (ds), (m->M_dat.MH.MH_pkthdr
.len + 4), (sizeof(struct ieee80211_frame)), (HAL_PKT_TYPE_BEACON
), (60), (rate), (1), (((u_int32_t) - 1)), (0), (flags), (0),
(0)))
1290 , HAL_PKT_TYPE_BEACON /* Atheros packet type */((*(ah)->ah_setup_tx_desc)((ah), (ds), (m->M_dat.MH.MH_pkthdr
.len + 4), (sizeof(struct ieee80211_frame)), (HAL_PKT_TYPE_BEACON
), (60), (rate), (1), (((u_int32_t) - 1)), (0), (flags), (0),
(0)))
1291 , 60 /* txpower XXX */((*(ah)->ah_setup_tx_desc)((ah), (ds), (m->M_dat.MH.MH_pkthdr
.len + 4), (sizeof(struct ieee80211_frame)), (HAL_PKT_TYPE_BEACON
), (60), (rate), (1), (((u_int32_t) - 1)), (0), (flags), (0),
(0)))
1292 , rate, 1 /* series 0 rate/tries */((*(ah)->ah_setup_tx_desc)((ah), (ds), (m->M_dat.MH.MH_pkthdr
.len + 4), (sizeof(struct ieee80211_frame)), (HAL_PKT_TYPE_BEACON
), (60), (rate), (1), (((u_int32_t) - 1)), (0), (flags), (0),
(0)))
1293 , HAL_TXKEYIX_INVALID /* no encryption */((*(ah)->ah_setup_tx_desc)((ah), (ds), (m->M_dat.MH.MH_pkthdr
.len + 4), (sizeof(struct ieee80211_frame)), (HAL_PKT_TYPE_BEACON
), (60), (rate), (1), (((u_int32_t) - 1)), (0), (flags), (0),
(0)))
1294 , 0 /* antenna mode */((*(ah)->ah_setup_tx_desc)((ah), (ds), (m->M_dat.MH.MH_pkthdr
.len + 4), (sizeof(struct ieee80211_frame)), (HAL_PKT_TYPE_BEACON
), (60), (rate), (1), (((u_int32_t) - 1)), (0), (flags), (0),
(0)))
1295 , flags /* no ack for beacons */((*(ah)->ah_setup_tx_desc)((ah), (ds), (m->M_dat.MH.MH_pkthdr
.len + 4), (sizeof(struct ieee80211_frame)), (HAL_PKT_TYPE_BEACON
), (60), (rate), (1), (((u_int32_t) - 1)), (0), (flags), (0),
(0)))
1296 , 0 /* rts/cts rate */((*(ah)->ah_setup_tx_desc)((ah), (ds), (m->M_dat.MH.MH_pkthdr
.len + 4), (sizeof(struct ieee80211_frame)), (HAL_PKT_TYPE_BEACON
), (60), (rate), (1), (((u_int32_t) - 1)), (0), (flags), (0),
(0)))
1297 , 0 /* rts/cts duration */((*(ah)->ah_setup_tx_desc)((ah), (ds), (m->M_dat.MH.MH_pkthdr
.len + 4), (sizeof(struct ieee80211_frame)), (HAL_PKT_TYPE_BEACON
), (60), (rate), (1), (((u_int32_t) - 1)), (0), (flags), (0),
(0)))
1298 )((*(ah)->ah_setup_tx_desc)((ah), (ds), (m->M_dat.MH.MH_pkthdr
.len + 4), (sizeof(struct ieee80211_frame)), (HAL_PKT_TYPE_BEACON
), (60), (rate), (1), (((u_int32_t) - 1)), (0), (flags), (0),
(0)))
) {
1299 printf("%s: ath_hal_setup_tx_desc failed\n", __func__);
1300 return -1;
1301 }
1302 /* NB: beacon's BufLen must be a multiple of 4 bytes */
1303 /* XXX verify mbuf data area covers this roundup */
1304 if (!ath_hal_fill_tx_desc(ah, ds((*(ah)->ah_fill_tx_desc)((ah), (ds), (((((bf->bf_dmamap
->dm_segs[0].ds_len)+((4)-1))/(4))*(4))), (AH_TRUE), (AH_TRUE
)))
1305 , roundup(bf->bf_segs[0].ds_len, 4) /* buffer length */((*(ah)->ah_fill_tx_desc)((ah), (ds), (((((bf->bf_dmamap
->dm_segs[0].ds_len)+((4)-1))/(4))*(4))), (AH_TRUE), (AH_TRUE
)))
1306 , AH_TRUE /* first segment */((*(ah)->ah_fill_tx_desc)((ah), (ds), (((((bf->bf_dmamap
->dm_segs[0].ds_len)+((4)-1))/(4))*(4))), (AH_TRUE), (AH_TRUE
)))
1307 , AH_TRUE /* last segment */((*(ah)->ah_fill_tx_desc)((ah), (ds), (((((bf->bf_dmamap
->dm_segs[0].ds_len)+((4)-1))/(4))*(4))), (AH_TRUE), (AH_TRUE
)))
1308 )((*(ah)->ah_fill_tx_desc)((ah), (ds), (((((bf->bf_dmamap
->dm_segs[0].ds_len)+((4)-1))/(4))*(4))), (AH_TRUE), (AH_TRUE
)))
) {
1309 printf("%s: ath_hal_fill_tx_desc failed\n", __func__);
1310 return -1;
1311 }
1312
1313 /* XXX it is not appropriate to bus_dmamap_sync? -dcy */
1314
1315 return 0;
1316}
1317
1318void
1319ath_beacon_proc(void *arg, int pending)
1320{
1321 struct ath_softc *sc = arg;
1322 struct ieee80211com *ic = &sc->sc_ic;
1323 struct ath_buf *bf = sc->sc_bcbuf;
1324 struct ath_hal *ah = sc->sc_ah;
1325
1326 DPRINTF(ATH_DEBUG_BEACON_PROC, ("%s: pending %u\n", __func__, pending));
1327 if (ic->ic_opmode == IEEE80211_M_STA ||
1328 bf == NULL((void *)0) || bf->bf_m == NULL((void *)0)) {
1329 DPRINTF(ATH_DEBUG_ANY, ("%s: ic_flags=%x bf=%p bf_m=%p\n",
1330 __func__, ic->ic_flags, bf, bf ? bf->bf_m : NULL));
1331 return;
1332 }
1333 /* TODO: update beacon to reflect PS poll state */
1334 if (!ath_hal_stop_tx_dma(ah, sc->sc_bhalq)((*(ah)->ah_stop_tx_dma)((ah), (sc->sc_bhalq)))) {
1335 DPRINTF(ATH_DEBUG_ANY, ("%s: beacon queue %u did not stop?\n",
1336 __func__, sc->sc_bhalq));
1337 }
1338 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (bf->
bf_dmamap), (0), (bf->bf_dmamap->dm_mapsize), (0x04))
1339 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (bf->
bf_dmamap), (0), (bf->bf_dmamap->dm_mapsize), (0x04))
;
1340
1341 ath_hal_put_tx_buf(ah, sc->sc_bhalq, bf->bf_daddr)((*(ah)->ah_put_tx_buf)((ah), (sc->sc_bhalq), (bf->bf_daddr
)))
;
1342 ath_hal_tx_start(ah, sc->sc_bhalq)((*(ah)->ah_tx_start)((ah), (sc->sc_bhalq)));
1343 DPRINTF(ATH_DEBUG_BEACON_PROC,
1344 ("%s: TXDP%u = %p (%p)\n", __func__,
1345 sc->sc_bhalq, (caddr_t)bf->bf_daddr, bf->bf_desc));
1346}
1347
1348void
1349ath_beacon_free(struct ath_softc *sc)
1350{
1351 struct ath_buf *bf = sc->sc_bcbuf;
1352
1353 if (bf->bf_m != NULL((void *)0)) {
1354 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (bf->
bf_dmamap))
;
1355 m_freem(bf->bf_m);
1356 bf->bf_m = NULL((void *)0);
1357 bf->bf_node = NULL((void *)0);
1358 }
1359}
1360#endif /* IEEE80211_STA_ONLY */
1361
1362/*
1363 * Configure the beacon and sleep timers.
1364 *
1365 * When operating as an AP this resets the TSF and sets
1366 * up the hardware to notify us when we need to issue beacons.
1367 *
1368 * When operating in station mode this sets up the beacon
1369 * timers according to the timestamp of the last received
1370 * beacon and the current TSF, configures PCF and DTIM
1371 * handling, programs the sleep registers so the hardware
1372 * will wake up in time to receive beacons, and configures
1373 * the beacon miss handling so we'll receive a BMISS
1374 * interrupt when we stop seeing beacons from the AP
1375 * we've associated with.
1376 */
1377void
1378ath_beacon_config(struct ath_softc *sc)
1379{
1380#define MS_TO_TU(x)	(((x) * 1000) / 1024)
1381 struct ath_hal *ah = sc->sc_ah;
1382 struct ieee80211com *ic = &sc->sc_ic;
1383 struct ieee80211_node *ni = ic->ic_bss;
1384 u_int32_t nexttbtt, intval;
1385
1386	nexttbtt = (LE_READ_4(ni->ni_tstamp + 4) << 22) |
1387	    (LE_READ_4(ni->ni_tstamp) >> 10);
1388	intval = MAX(1, ni->ni_intval) & HAL_BEACON_PERIOD;
1389 if (nexttbtt == 0) { /* e.g. for ap mode */
1390 nexttbtt = intval;
1391 } else if (intval) {
1392		nexttbtt = roundup(nexttbtt, intval);
1393 }
1394 DPRINTF(ATH_DEBUG_BEACON, ("%s: intval %u nexttbtt %u\n",
1395 __func__, ni->ni_intval, nexttbtt));
1396 if (ic->ic_opmode == IEEE80211_M_STA) {
1397 HAL_BEACON_STATE bs;
1398
1399 /* NB: no PCF support right now */
1400		bzero(&bs, sizeof(bs));
1401		bs.bs_intval = intval;
1402		bs.bs_nexttbtt = nexttbtt;
1403		bs.bs_dtimperiod = bs.bs_intval;
1404		bs.bs_nextdtim = nexttbtt;
1405 /*
1406 * Calculate the number of consecutive beacons to miss
1407 * before taking a BMISS interrupt.
1408 * Note that we clamp the result to at most 7 beacons.
1409 */
1410		bs.bs_bmissthreshold = ic->ic_bmissthres;
1411		if (bs.bs_bmissthreshold > 7) {
1412			bs.bs_bmissthreshold = 7;
1413		} else if (bs.bs_bmissthreshold <= 0) {
1414			bs.bs_bmissthreshold = 1;
1415 }
1416
1417 /*
1418 * Calculate sleep duration. The configuration is
1419		 * given in ms. We ensure a multiple of the beacon
1420		 * period is used. Also, if the sleep duration is
1421		 * greater than the DTIM period then it makes sense
1422 * to make it a multiple of that.
1423 *
1424 * XXX fixed at 100ms
1425 */
1426		bs.bs_sleepduration =
1427		    roundup(MS_TO_TU(100), bs.bs_intval);
1428		if (bs.bs_sleepduration > bs.bs_dtimperiod) {
1429			bs.bs_sleepduration =
1430			    roundup(bs.bs_sleepduration, bs.bs_dtimperiod);
1431 }
1432
1433 DPRINTF(ATH_DEBUG_BEACON,
1434 ("%s: intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u"
1435 " sleep %u\n"
1436 , __func__
1437 , bs.bs_intval
1438 , bs.bs_nexttbtt
1439 , bs.bs_dtimperiod
1440 , bs.bs_nextdtim
1441 , bs.bs_bmissthreshold
1442 , bs.bs_sleepduration
1443 ));
1444		ath_hal_set_intr(ah, 0);
1445		ath_hal_set_beacon_timers(ah, &bs, 0/*XXX*/, 0, 0);
1446		sc->sc_imask |= HAL_INT_BMISS;
1447		ath_hal_set_intr(ah, sc->sc_imask);
1448 }
1449#ifndef IEEE80211_STA_ONLY
1450 else {
1451		ath_hal_set_intr(ah, 0);
1452		if (nexttbtt == intval)
1453			intval |= HAL_BEACON_RESET_TSF;
1454 if (ic->ic_opmode == IEEE80211_M_IBSS) {
1455 /*
1456 * In IBSS mode enable the beacon timers but only
1457 * enable SWBA interrupts if we need to manually
1458 * prepare beacon frames. Otherwise we use a
1459 * self-linked tx descriptor and let the hardware
1460 * deal with things.
1461 */
1462			intval |= HAL_BEACON_ENA;
1463			if (!sc->sc_veol)
1464				sc->sc_imask |= HAL_INT_SWBA;
1465 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
1466 /*
1467 * In AP mode we enable the beacon timers and
1468 * SWBA interrupts to prepare beacon frames.
1469 */
1470			intval |= HAL_BEACON_ENA;
1471			sc->sc_imask |= HAL_INT_SWBA;	/* beacon prepare */
1472 }
1473		ath_hal_init_beacon(ah, nexttbtt, intval);
1474		ath_hal_set_intr(ah, sc->sc_imask);
1475 /*
1476		 * When using a self-linked beacon descriptor in IBSS
1477 * mode load it once here.
1478 */
1479 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol)
1480 ath_beacon_proc(sc, 0);
1481 }
1482#endif
1483}
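
For reference, a minimal, self-contained sketch (not driver code; the values are made up) of the timer arithmetic used above: a TU is 1024 microseconds, so MS_TO_TU(100) is about 97 TU, and the next TBTT and the sleep duration are rounded up to multiples of the beacon interval.

#include <stdio.h>

#define MS_TO_TU(x)	(((x) * 1000) / 1024)
#define ROUNDUP(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	unsigned int intval = 100;	/* beacon interval in TU */
	unsigned int tsf_tu = 123456;	/* hypothetical TSF converted to TU */
	unsigned int nexttbtt = ROUNDUP(tsf_tu, intval);
	unsigned int sleep = ROUNDUP(MS_TO_TU(100), intval);

	/* prints "nexttbtt 123500 TU, sleep 100 TU" */
	printf("nexttbtt %u TU, sleep %u TU\n", nexttbtt, sleep);
	return 0;
}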
1484
1485int
1486ath_desc_alloc(struct ath_softc *sc)
1487{
1488 int i, bsize, error = -1;
1489 struct ath_desc *ds;
1490 struct ath_buf *bf;
1491
1492 /* allocate descriptors */
1493 sc->sc_desc_len = sizeof(struct ath_desc) *
1494	    (ATH_TXBUF * ATH_TXDESC + ATH_RXBUF + 1);
1495	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_desc_len, PAGE_SIZE,
1496	    0, &sc->sc_dseg, 1, &sc->sc_dnseg, 0)) != 0) {
1497 printf("%s: unable to allocate control data, error = %d\n",
1498 sc->sc_dev.dv_xname, error);
1499 goto fail0;
1500 }
1501
1502	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg,
1503	    sc->sc_desc_len, (caddr_t *)&sc->sc_desc, BUS_DMA_COHERENT)) != 0) {
1504 printf("%s: unable to map control data, error = %d\n",
1505 sc->sc_dev.dv_xname, error);
1506 goto fail1;
1507 }
1508
1509	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_desc_len, 1,
1510	    sc->sc_desc_len, 0, 0, &sc->sc_ddmamap)) != 0) {
1511 printf("%s: unable to create control data DMA map, "
1512 "error = %d\n", sc->sc_dev.dv_xname, error);
1513 goto fail2;
1514 }
1515
1516	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_ddmamap, sc->sc_desc,
1517	    sc->sc_desc_len, NULL, 0)) != 0) {
1518 printf("%s: unable to load control data DMA map, error = %d\n",
1519 sc->sc_dev.dv_xname, error);
1520 goto fail3;
1521 }
1522
1523 ds = sc->sc_desc;
1524 sc->sc_desc_paddr = sc->sc_ddmamap->dm_segs[0].ds_addr;
1525
1526 DPRINTF(ATH_DEBUG_XMIT_DESC|ATH_DEBUG_RECV_DESC,
1527 ("ath_desc_alloc: DMA map: %p (%lu) -> %p (%lu)\n",
1528 ds, (u_long)sc->sc_desc_len,
1529 (caddr_t) sc->sc_desc_paddr, /*XXX*/ (u_long) sc->sc_desc_len));
1530
1531 /* allocate buffers */
1532	bsize = sizeof(struct ath_buf) * (ATH_TXBUF + ATH_RXBUF + 1);
1533	bf = malloc(bsize, M_DEVBUF, M_NOWAIT | M_ZERO);
1534	if (bf == NULL) {
1535 printf("%s: unable to allocate Tx/Rx buffers\n",
1536 sc->sc_dev.dv_xname);
1537		error = ENOMEM;
1538 goto fail3;
1539 }
1540 sc->sc_bufptr = bf;
1541
1542	TAILQ_INIT(&sc->sc_rxbuf);
1543	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++) {
1544 bf->bf_desc = ds;
1545 bf->bf_daddr = sc->sc_desc_paddr +
1546 ((caddr_t)ds - (caddr_t)sc->sc_desc);
1547		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1548		    MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) {
1549 printf("%s: unable to create Rx dmamap, error = %d\n",
1550 sc->sc_dev.dv_xname, error);
1551 goto fail4;
1552 }
1553		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
1554 }
1555
1556	TAILQ_INIT(&sc->sc_txbuf);
1557	for (i = 0; i < ATH_TXBUF; i++, bf++, ds += ATH_TXDESC) {
1558 bf->bf_desc = ds;
1559 bf->bf_daddr = sc->sc_desc_paddr +
1560 ((caddr_t)ds - (caddr_t)sc->sc_desc);
1561		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1562		    ATH_TXDESC, MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) {
1563 printf("%s: unable to create Tx dmamap, error = %d\n",
1564 sc->sc_dev.dv_xname, error);
1565 goto fail5;
1566 }
1567		TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
1568	}
1569	TAILQ_INIT(&sc->sc_txq);
1570
1571 /* beacon buffer */
1572 bf->bf_desc = ds;
1573 bf->bf_daddr = sc->sc_desc_paddr + ((caddr_t)ds - (caddr_t)sc->sc_desc);
1574	if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 0,
1575	    &bf->bf_dmamap)) != 0) {
1576 printf("%s: unable to create beacon dmamap, error = %d\n",
1577 sc->sc_dev.dv_xname, error);
1578 goto fail5;
1579 }
1580 sc->sc_bcbuf = bf;
1581 return 0;
1582
1583fail5:
1584	for (i = ATH_RXBUF; i < ATH_RXBUF + ATH_TXBUF; i++) {
1585		if (sc->sc_bufptr[i].bf_dmamap == NULL)
1586			continue;
1587		bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap);
1588 }
1589fail4:
1590	for (i = 0; i < ATH_RXBUF; i++) {
1591		if (sc->sc_bufptr[i].bf_dmamap == NULL)
1592			continue;
1593		bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap);
1594 }
1595fail3:
1596	bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap);
1597 fail2:
1598	bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap);
1599	sc->sc_ddmamap = NULL;
1600 fail1:
1601	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_desc, sc->sc_desc_len);
1602 fail0:
1603	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg);
1604 return error;
1605}
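
As a rough illustration (hypothetical addresses, not driver code), each ath_buf's bus address above is simply the physical base of the single descriptor allocation plus the byte offset of that buffer's first descriptor within it:

#include <stdio.h>
#include <stdint.h>

struct fake_desc { uint32_t word[8]; };	/* stand-in for struct ath_desc */

int
main(void)
{
	/* one block: ATH_TXBUF*ATH_TXDESC tx + ATH_RXBUF rx + 1 beacon */
	static struct fake_desc descs[60 * 8 + 40 + 1];
	uint64_t paddr_base = 0x10000000ULL;	/* pretend dm_segs[0].ds_addr */
	struct fake_desc *ds = &descs[40];	/* e.g. the first tx descriptor */
	uint64_t daddr = paddr_base + (uint64_t)((char *)ds - (char *)descs);

	printf("descriptor %td maps to bus address 0x%llx\n",
	    ds - descs, (unsigned long long)daddr);
	return 0;
}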
1606
1607void
1608ath_desc_free(struct ath_softc *sc)
1609{
1610 struct ath_buf *bf;
1611
1612	bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap);
1613	bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap);
1614	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg);
1615
1616	TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) {
1617		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
1618		bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
1619		m_freem(bf->bf_m);
1620	}
1621	TAILQ_FOREACH(bf, &sc->sc_txbuf, bf_list)
1622		bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
1623	TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
1624		if (bf->bf_m) {
1625			bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
1626			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
1627			m_freem(bf->bf_m);
1628			bf->bf_m = NULL;
1629		}
1630	}
1631	if (sc->sc_bcbuf != NULL) {
1632		bus_dmamap_unload(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap);
1633		bus_dmamap_destroy(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap);
1634		sc->sc_bcbuf = NULL;
1635	}
1636
1637	TAILQ_INIT(&sc->sc_rxbuf);
1638	TAILQ_INIT(&sc->sc_txbuf);
1639	TAILQ_INIT(&sc->sc_txq);
1640	free(sc->sc_bufptr, M_DEVBUF, 0);
1641	sc->sc_bufptr = NULL;
1642}
1643
1644struct ieee80211_node *
1645ath_node_alloc(struct ieee80211com *ic)
1646{
1647 struct ath_node *an;
1648
1649	an = malloc(sizeof(*an), M_DEVBUF, M_NOWAIT | M_ZERO);
1650	if (an) {
1651		int i;
1652		for (i = 0; i < ATH_RHIST_SIZE; i++)
1653			an->an_rx_hist[i].arh_ticks = ATH_RHIST_NOTIME;
1654		an->an_rx_hist_next = ATH_RHIST_SIZE-1;
1655		return &an->an_node;
1656	} else
1657		return NULL;
1658}
1659
1660void
1661ath_node_free(struct ieee80211com *ic, struct ieee80211_node *ni)
1662{
1663	struct ath_softc *sc = ic->ic_if.if_softc;
1664	struct ath_buf *bf;
1665
1666	TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) {
1667		if (bf->bf_node == ni)
1668			bf->bf_node = NULL;
1669 }
1670 (*sc->sc_node_free)(ic, ni);
1671}
1672
1673void
1674ath_node_copy(struct ieee80211com *ic,
1675 struct ieee80211_node *dst, const struct ieee80211_node *src)
1676{
1677	struct ath_softc *sc = ic->ic_if.if_softc;
1678
1679 bcopy(&src[1], &dst[1],
1680 sizeof(struct ath_node) - sizeof(struct ieee80211_node));
1681 (*sc->sc_node_copy)(ic, dst, src);
1682}
1683
1684u_int8_t
1685ath_node_getrssi(struct ieee80211com *ic, const struct ieee80211_node *ni)
1686{
1687	const struct ath_node *an = ATH_NODE(ni);
1688 int i, now, nsamples, rssi;
1689
1690 /*
1691 * Calculate the average over the last second of sampled data.
1692 */
1693	now = ATH_TICKS();
1694	nsamples = 0;
1695	rssi = 0;
1696	i = an->an_rx_hist_next;
1697	do {
1698		const struct ath_recv_hist *rh = &an->an_rx_hist[i];
1699		if (rh->arh_ticks == ATH_RHIST_NOTIME)
1700			goto done;
1701		if (now - rh->arh_ticks > hz)
1702			goto done;
1703		rssi += rh->arh_rssi;
1704		nsamples++;
1705		if (i == 0) {
1706			i = ATH_RHIST_SIZE-1;
1707 } else {
1708 i--;
1709 }
1710 } while (i != an->an_rx_hist_next);
1711done:
1712 /*
1713 * Return either the average or the last known
1714 * value if there is no recent data.
1715 */
1716 return (nsamples ? rssi / nsamples : an->an_rx_hist[i].arh_rssi);
1717}
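
A condensed, standalone sketch (not driver code; the helper name and sample values are made up) of the same scheme: walk the ring backwards from the newest sample, stop at unused or stale entries, and fall back to the last known value when nothing recent was found.

#include <stdio.h>

#define RHIST_SIZE	16
#define RHIST_NOTIME	(~0U)

struct sample {
	unsigned int ticks;
	unsigned int rssi;
};

static unsigned int
avg_recent_rssi(const struct sample *h, int newest, unsigned int now,
    unsigned int maxage)
{
	unsigned int sum = 0, n = 0;
	int i = newest;

	do {
		if (h[i].ticks == RHIST_NOTIME || now - h[i].ticks > maxage)
			break;
		sum += h[i].rssi;
		n++;
		i = (i == 0) ? RHIST_SIZE - 1 : i - 1;
	} while (i != newest);

	return n ? sum / n : h[i].rssi;
}

int
main(void)
{
	struct sample h[RHIST_SIZE];

	for (int i = 0; i < RHIST_SIZE; i++)
		h[i].ticks = RHIST_NOTIME;	/* unused slots, as in the driver */
	h[0].ticks = 95;  h[0].rssi = 20;
	h[1].ticks = 98;  h[1].rssi = 30;
	h[2].ticks = 100; h[2].rssi = 40;
	/* prints 30: the three samples within the last 100 ticks average to 30 */
	printf("%u\n", avg_recent_rssi(h, 2, 100, 100));
	return 0;
}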
1718
1719int
1720ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
1721{
1722 struct ath_hal *ah = sc->sc_ah;
1723 int error;
1724 struct mbuf *m;
1725 struct ath_desc *ds;
1726
1727 m = bf->bf_m;
1728	if (m == NULL) {
1729 /*
1730 * NB: by assigning a page to the rx dma buffer we
1731 * implicitly satisfy the Atheros requirement that
1732 * this buffer be cache-line-aligned and sized to be
1733		 * a multiple of the cache line size. Not doing this
1734 * causes weird stuff to happen (for the 5210 at least).
1735 */
1736		m = ath_getmbuf(M_DONTWAIT, MT_DATA, MCLBYTES);
1737		if (m == NULL) {
1738			DPRINTF(ATH_DEBUG_ANY,
1739			    ("%s: no mbuf/cluster\n", __func__));
1740			sc->sc_stats.ast_rx_nombuf++;
1741			return ENOMEM;
1742		}
1743		bf->bf_m = m;
1744		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
1745
1746		error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m,
1747		    BUS_DMA_NOWAIT);
1748 if (error != 0) {
1749 DPRINTF(ATH_DEBUG_ANY,
1750 ("%s: ath_bus_dmamap_load_mbuf failed;"
1751 " error %d\n", __func__, error));
1752 sc->sc_stats.ast_rx_busdma++;
1753 return error;
1754 }
1755		KASSERT(bf->bf_nseg == 1,
1756		    ("ath_rxbuf_init: multi-segment packet; nseg %u",
1757		    bf->bf_nseg));
1758	}
1759	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
1760	    bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1761
1762 /*
1763 * Setup descriptors. For receive we always terminate
1764 * the descriptor list with a self-linked entry so we'll
1765 * not get overrun under high load (as can happen with a
1766 * 5212 when ANI processing enables PHY errors).
1767 *
1768	 * To ensure the last descriptor is self-linked we create
1769 * each descriptor as self-linked and add it to the end. As
1770 * each additional descriptor is added the previous self-linked
1771 * entry is ``fixed'' naturally. This should be safe even
1772 * if DMA is happening. When processing RX interrupts we
1773 * never remove/process the last, self-linked, entry on the
1774	 * descriptor list. This ensures the hardware always has
1775 * someplace to write a new frame.
1776 */
1777 ds = bf->bf_desc;
1778	bzero(ds, sizeof(struct ath_desc));
1779 #ifndef IEEE80211_STA_ONLY
1780	if (sc->sc_ic.ic_opmode != IEEE80211_M_HOSTAP)
1781		ds->ds_link = bf->bf_daddr;	/* link to self */
1782 #endif
1783	ds->ds_data = bf->bf_segs[0].ds_addr;
1784	ath_hal_setup_rx_desc(ah, ds
1785	    , m->m_len	/* buffer size */
1786	    , 0
1787	);
1788
1789	if (sc->sc_rxlink != NULL)
1790 *sc->sc_rxlink = bf->bf_daddr;
1791 sc->sc_rxlink = &ds->ds_link;
1792 return 0;
1793}
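
An illustrative sketch (not driver code; names are invented) of the self-linked descriptor trick described above: each new RX descriptor is created pointing at itself, and appending it after the previous tail "un-links" that tail, so the last descriptor in the chain always points at itself and the DMA engine never runs off the end of the list.

#include <stdio.h>
#include <stdint.h>

struct desc {
	uint32_t link;			/* bus address of the next descriptor */
};

static uint32_t *tail_link;		/* plays the role of sc_rxlink */

static void
append_desc(struct desc *d, uint32_t paddr)
{
	d->link = paddr;		/* new descriptor links to itself */
	if (tail_link != NULL)
		*tail_link = paddr;	/* previous tail now points at it */
	tail_link = &d->link;
}

int
main(void)
{
	struct desc d[3];

	for (int i = 0; i < 3; i++)
		append_desc(&d[i], 0x1000 + i * 0x10);
	/* prints 0x1010, 0x1020, 0x1020: only the final entry is self-linked */
	for (int i = 0; i < 3; i++)
		printf("desc %d -> 0x%x\n", i, d[i].link);
	return 0;
}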
1794
1795void
1796ath_rx_proc(void *arg, int npending)
1797{
1798	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1799#define PA2DESC(_sc, _pa) \
1800 ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \
1801 ((_pa) - (_sc)->sc_desc_paddr)))
1802 struct ath_softc *sc = arg;
1803 struct ath_buf *bf;
1804 struct ieee80211com *ic = &sc->sc_ic;
1805	struct ifnet *ifp = &ic->ic_if;
1806 struct ath_hal *ah = sc->sc_ah;
1807 struct ath_desc *ds;
1808 struct mbuf *m;
1809 struct ieee80211_frame *wh;
1810 struct ieee80211_frame whbuf;
1811 struct ieee80211_rxinfo rxi;
1812 struct ieee80211_node *ni;
1813 struct ath_node *an;
1814 struct ath_recv_hist *rh;
1815 int len;
1816 u_int phyerr;
1817 HAL_STATUS status;
1818
1819 DPRINTF(ATH_DEBUG_RX_PROC, ("%s: pending %u\n", __func__, npending));
1820 do {
1821		bf = TAILQ_FIRST(&sc->sc_rxbuf);
1822		if (bf == NULL) {	/* NB: shouldn't happen */
1823 printf("%s: ath_rx_proc: no buffer!\n", ifp->if_xname);
1824 break;
1825 }
1826 ds = bf->bf_desc;
1827 if (ds->ds_link == bf->bf_daddr) {
1828 /* NB: never process the self-linked entry at the end */
1829 break;
1830 }
1831 m = bf->bf_m;
1832		if (m == NULL) {	/* NB: shouldn't happen */
1833 printf("%s: ath_rx_proc: no mbuf!\n", ifp->if_xname);
1834 continue;
1835 }
1836 /* XXX sync descriptor memory */
1837 /*
1838 * Must provide the virtual address of the current
1839 * descriptor, the physical address, and the virtual
1840 * address of the next descriptor in the h/w chain.
1841 * This allows the HAL to look ahead to see if the
1842 * hardware is done with a descriptor by checking the
1843 * done bit in the following descriptor and the address
1844 * of the current descriptor the DMA engine is working
1845 * on. All this is necessary because of our use of
1846 * a self-linked list to avoid rx overruns.
1847 */
1848		status = ath_hal_proc_rx_desc(ah, ds,
1849		    bf->bf_daddr, PA2DESC(sc, ds->ds_link));
1850 #ifdef AR_DEBUG
1851		if (ath_debug & ATH_DEBUG_RECV_DESC)
1852			ath_printrxbuf(bf, status == HAL_OK);
1853 #endif
1854		if (status == HAL_EINPROGRESS)
1855			break;
1856		TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
1857
1858		if (ds->ds_rxstat.rs_more) {
1859 /*
1860 * Frame spans multiple descriptors; this
1861 * cannot happen yet as we don't support
1862 * jumbograms. If not in monitor mode,
1863 * discard the frame.
1864 */
1865
1866 /*
1867 * Enable this if you want to see error
1868 * frames in Monitor mode.
1869 */
1870#ifdef ERROR_FRAMES
1871 if (ic->ic_opmode != IEEE80211_M_MONITOR) {
1872 /* XXX statistic */
1873 goto rx_next;
1874 }
1875#endif
1876 /* fall thru for monitor mode handling... */
1877
1878		} else if (ds->ds_rxstat.rs_status != 0) {
1879			if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC)
1880				sc->sc_stats.ast_rx_crcerr++;
1881			if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO)
1882				sc->sc_stats.ast_rx_fifoerr++;
1883			if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT)
1884				sc->sc_stats.ast_rx_badcrypt++;
1885			if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) {
1886				sc->sc_stats.ast_rx_phyerr++;
1887				phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
1888				sc->sc_stats.ast_rx_phy[phyerr]++;
1889			}
1890
1891			/*
1892			 * reject error frames, we normally don't want
1893			 * to see them in monitor mode.
1894			 */
1895			if ((ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) ||
1896			    (ds->ds_rxstat.rs_status & HAL_RXERR_PHY))
1897				goto rx_next;
1898
1899			/*
1900			 * In monitor mode, allow through packets that
1901			 * cannot be decrypted
1902			 */
1903			if ((ds->ds_rxstat.rs_status & ~HAL_RXERR_DECRYPT) ||
1904			    sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR)
1905				goto rx_next;
1906		}
1907
1908		len = ds->ds_rxstat.rs_datalen;
1909		if (len < IEEE80211_MIN_LEN) {
1910 DPRINTF(ATH_DEBUG_RECV, ("%s: short packet %d\n",
1911 __func__, len));
1912 sc->sc_stats.ast_rx_tooshort++;
1913 goto rx_next;
1914 }
1915
1916		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
1917		    bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1918
1919		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
1920		bf->bf_m = NULL;
1921		m->m_pkthdr.len = m->m_len = len;
1922
1923 #if NBPFILTER > 0
1924		if (sc->sc_drvbpf) {
1925			sc->sc_rxtap.wr_flags = IEEE80211_RADIOTAP_F_FCS;
1926			sc->sc_rxtap.wr_rate =
1927			    sc->sc_hwmap[ds->ds_rxstat.rs_rate] &
1928			    IEEE80211_RATE_VAL;
1929			sc->sc_rxtap.wr_antenna = ds->ds_rxstat.rs_antenna;
1930			sc->sc_rxtap.wr_rssi = ds->ds_rxstat.rs_rssi;
1931			sc->sc_rxtap.wr_max_rssi = ic->ic_max_rssi;
1932
1933			bpf_mtap_hdr(sc->sc_drvbpf, &sc->sc_rxtap,
1934			    sc->sc_rxtap_len, m, BPF_DIRECTION_IN);
1935		}
1936#endif
1937		m_adj(m, -IEEE80211_CRC_LEN);
1938		wh = mtod(m, struct ieee80211_frame *);
1939		rxi.rxi_flags = 0;
1940		if (!ath_softcrypto && (wh->i_fc[1] & IEEE80211_FC1_WEP)) {
1941 /*
1942 * WEP is decrypted by hardware. Clear WEP bit
1943 * and trim WEP header for ieee80211_inputm().
1944 */
1945			wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
1946			bcopy(wh, &whbuf, sizeof(whbuf));
1947			m_adj(m, IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN);
1948			wh = mtod(m, struct ieee80211_frame *);
1949			bcopy(&whbuf, wh, sizeof(whbuf));
1950			/*
1951			 * Also trim WEP ICV from the tail.
1952			 */
1953			m_adj(m, -IEEE80211_WEP_CRCLEN);
1954			/*
1955			 * The header has probably moved.
1956			 */
1957			wh = mtod(m, struct ieee80211_frame *);
1958
1959			rxi.rxi_flags |= IEEE80211_RXI_HWDEC;
1960 }
1961
1962 /*
1963 * Locate the node for sender, track state, and
1964 * then pass this node (referenced) up to the 802.11
1965 * layer for its use.
1966 */
1967 ni = ieee80211_find_rxnode(ic, wh);
1968
1969 /*
1970 * Record driver-specific state.
1971 */
1972		an = ATH_NODE(ni);
1973		if (++(an->an_rx_hist_next) == ATH_RHIST_SIZE)
1974			an->an_rx_hist_next = 0;
1975		rh = &an->an_rx_hist[an->an_rx_hist_next];
1976		rh->arh_ticks = ATH_TICKS();
1977		rh->arh_rssi = ds->ds_rxstat.rs_rssi;
1978		rh->arh_antenna = ds->ds_rxstat.rs_antenna;
1979
1980 /*
1981 * Send frame up for processing.
1982 */
1983		rxi.rxi_rssi = ds->ds_rxstat.rs_rssi;
1984		rxi.rxi_tstamp = ds->ds_rxstat.rs_tstamp;
1985 ieee80211_inputm(ifp, m, ni, &rxi, &ml);
1986
1987 /* Handle the rate adaption */
1988 ieee80211_rssadapt_input(ic, ni, &an->an_rssadapt,
1989		    ds->ds_rxstat.rs_rssi);
1990
1991 /*
1992 * The frame may have caused the node to be marked for
1993 * reclamation (e.g. in response to a DEAUTH message)
1994 * so use release_node here instead of unref_node.
1995 */
1996 ieee80211_release_node(ic, ni);
1997
1998 rx_next:
1999		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2000 } while (ath_rxbuf_init(sc, bf) == 0);
2001
2002 if_input(ifp, &ml);
2003
2004	ath_hal_set_rx_signal(ah);	/* rx signal state monitoring */
2005	ath_hal_start_rx(ah);		/* in case of RXEOL */
2006#undef PA2DESC
2007}
2008
2009/*
2010 * XXX Size of an ACK control frame in bytes.
2011 */
2012#define IEEE80211_ACK_SIZE	(2+2+IEEE80211_ADDR_LEN+4)
2013
2014int
2015ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
2016 struct ath_buf *bf, struct mbuf *m0)
2017{
2018 struct ieee80211com *ic = &sc->sc_ic;
2019 struct ath_hal *ah = sc->sc_ah;
2020	struct ifnet *ifp = &sc->sc_ic.ic_if;
2021 int i, error, iswep, hdrlen, pktlen, len, s, tries;
2022 u_int8_t rix, cix, txrate, ctsrate;
2023 struct ath_desc *ds;
2024 struct ieee80211_frame *wh;
2025 struct ieee80211_key *k;
2026 u_int32_t iv;
2027 u_int8_t *ivp;
2028 u_int8_t hdrbuf[sizeof(struct ieee80211_frame) +
2029	    IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN];
2030 u_int subtype, flags, ctsduration, antenna;
2031 HAL_PKT_TYPE atype;
2032 const HAL_RATE_TABLE *rt;
2033 HAL_BOOL shortPreamble;
2034 struct ath_node *an;
2035 u_int8_t hwqueue = HAL_TX_QUEUE_ID_DATA_MIN;
2036
2037	wh = mtod(m0, struct ieee80211_frame *);
2038	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
2039	hdrlen = sizeof(struct ieee80211_frame);
2040	pktlen = m0->m_pkthdr.len;
2041
2042 if (ath_softcrypto && iswep) {
2043 k = ieee80211_get_txkey(ic, wh, ni);
2044		if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL)
2045			return ENOMEM;
2046		wh = mtod(m0, struct ieee80211_frame *);
2047
2048		/* reset len in case we got a new mbuf */
2049		pktlen = m0->m_pkthdr.len;
2050	} else if (!ath_softcrypto && iswep) {
2051		bcopy(mtod(m0, caddr_t), hdrbuf, hdrlen);
2052		m_adj(m0, hdrlen);
2053		M_PREPEND(m0, sizeof(hdrbuf), M_DONTWAIT);
2054		if (m0 == NULL) {
2055			sc->sc_stats.ast_tx_nombuf++;
2056			return ENOMEM;
2057		}
2058		ivp = hdrbuf + hdrlen;
2059		wh = mtod(m0, struct ieee80211_frame *);
2060 /*
2061 * XXX
2062 * IV must not duplicate during the lifetime of the key.
2063 * But no mechanism to renew keys is defined in IEEE 802.11
2064 * for WEP. And the IV may be duplicated at other stations
2065 * because the session key itself is shared. So we use a
2066 * pseudo random IV for now, though it is not the right way.
2067 *
2068 * NB: Rather than use a strictly random IV we select a
2069 * random one to start and then increment the value for
2070 * each frame. This is an explicit tradeoff between
2071 * overhead and security. Given the basic insecurity of
2072 * WEP this seems worthwhile.
2073 */
2074
2075 /*
2076 * Skip 'bad' IVs from Fluhrer/Mantin/Shamir:
2077 * (B, 255, N) with 3 <= B < 16 and 0 <= N <= 255
2078 */
2079 iv = ic->ic_iv;
2080 if ((iv & 0xff00) == 0xff00) {
2081 int B = (iv & 0xff0000) >> 16;
2082 if (3 <= B && B < 16)
2083 iv = (B+1) << 16;
2084 }
2085 ic->ic_iv = iv + 1;
2086
2087 /*
2088 * NB: Preserve byte order of IV for packet
2089 * sniffers; it doesn't matter otherwise.
2090 */
2091 #if BYTE_ORDER == BIG_ENDIAN
2092 ivp[0] = iv >> 0;
2093 ivp[1] = iv >> 8;
2094 ivp[2] = iv >> 16;
2095#else
2096 ivp[2] = iv >> 0;
2097 ivp[1] = iv >> 8;
2098 ivp[0] = iv >> 16;
2099#endif
2100		ivp[3] = ic->ic_wep_txkey << 6;	/* Key ID and pad */
2101		bcopy(hdrbuf, mtod(m0, caddr_t), sizeof(hdrbuf));
2102 /*
2103 * The length of hdrlen and pktlen must be increased for WEP
2104 */
2105		len = IEEE80211_WEP_IVLEN +
2106		    IEEE80211_WEP_KIDLEN +
2107		    IEEE80211_WEP_CRCLEN;
2108 hdrlen += len;
2109 pktlen += len;
2110 }
2111	pktlen += IEEE80211_CRC_LEN;
2112
2113 /*
2114 * Load the DMA map so any coalescing is done. This
2115 * also calculates the number of descriptors we need.
2116 */
2117	error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0,
2118	    BUS_DMA_NOWAIT);
2119 /*
2120 * Discard null packets and check for packets that
2121 * require too many TX descriptors. We try to convert
2122 * the latter to a cluster.
2123 */
2124	if (error == EFBIG) {		/* too many desc's, linearize */
2125		sc->sc_stats.ast_tx_linear++;
2126		if (m_defrag(m0, M_DONTWAIT)) {
2127			sc->sc_stats.ast_tx_nomcl++;
2128			m_freem(m0);
2129			return ENOMEM;
2130 }
2131		error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0,
2132		    BUS_DMA_NOWAIT);
2133		if (error != 0) {
2134			sc->sc_stats.ast_tx_busdma++;
2135			m_freem(m0);
2136			return error;
2137		}
2138		KASSERT(bf->bf_nseg == 1,
2139		    ("ath_tx_start: packet not one segment; nseg %u",
2140		    bf->bf_nseg));
2141 } else if (error != 0) {
2142 sc->sc_stats.ast_tx_busdma++;
2143 m_freem(m0);
2144 return error;
2145	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
2146		sc->sc_stats.ast_tx_nodata++;
2147		m_freem(m0);
2148		return EIO;
2149 }
2150 DPRINTF(ATH_DEBUG_XMIT, ("%s: m %p len %u\n", __func__, m0, pktlen));
2151	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
2152	    bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
2153	bf->bf_m = m0;
2154	bf->bf_node = ni;	/* NB: held reference */
2155	an = ATH_NODE(ni);
2156
2157 /* setup descriptors */
2158 ds = bf->bf_desc;
2159 rt = sc->sc_currates;
2160	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
2161
2162 /*
2163 * Calculate Atheros packet type from IEEE80211 packet header
2164 * and setup for rate calculations.
2165 */
2166	bf->bf_id.id_node = NULL;
2167	atype = HAL_PKT_TYPE_NORMAL;	/* default */
2168	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
2169	case IEEE80211_FC0_TYPE_MGT:
2170		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2171		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) {
2172			atype = HAL_PKT_TYPE_BEACON;
2173		} else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
2174			atype = HAL_PKT_TYPE_PROBE_RESP;
2175		} else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) {
2176			atype = HAL_PKT_TYPE_ATIM;
2177		}
2178		rix = 0;	/* XXX lowest rate */
2179		break;
2180	case IEEE80211_FC0_TYPE_CTL:
2181		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2182		if (subtype == IEEE80211_FC0_SUBTYPE_PS_POLL)
2183			atype = HAL_PKT_TYPE_PSPOLL;
2184		rix = 0;	/* XXX lowest rate */
2185 break;
2186 default:
2187 /* remember link conditions for rate adaptation algorithm */
2188 if (ic->ic_fixed_rate == -1) {
2189			bf->bf_id.id_len = m0->m_pkthdr.len;
2190 bf->bf_id.id_rateidx = ni->ni_txrate;
2191 bf->bf_id.id_node = ni;
2192 bf->bf_id.id_rssi = ath_node_getrssi(ic, ni);
2193 }
2194		ni->ni_txrate = ieee80211_rssadapt_choose(&an->an_rssadapt,
2195		    &ni->ni_rates, wh, m0->m_pkthdr.len, ic->ic_fixed_rate,
2196		    ifp->if_xname, 0);
2197		rix = sc->sc_rixmap[ni->ni_rates.rs_rates[ni->ni_txrate] &
2198		    IEEE80211_RATE_VAL];
2199 if (rix == 0xff) {
2200 printf("%s: bogus xmit rate 0x%x (idx 0x%x)\n",
2201 ifp->if_xname, ni->ni_rates.rs_rates[ni->ni_txrate],
2202 ni->ni_txrate);
2203 sc->sc_stats.ast_tx_badrate++;
2204 m_freem(m0);
2205			return EIO;
2206 }
2207 break;
2208 }
2209
2210 /*
2211 * NB: the 802.11 layer marks whether or not we should
2212 * use short preamble based on the current mode and
2213 * negotiated parameters.
2214 */
2215	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
2216	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
2217 txrate = rt->info[rix].rateCode | rt->info[rix].shortPreamble;
2218 shortPreamble = AH_TRUE;
2219 sc->sc_stats.ast_tx_shortpre++;
2220 } else {
2221 txrate = rt->info[rix].rateCode;
2222 shortPreamble = AH_FALSE;
2223 }
2224
2225 /*
2226 * Calculate miscellaneous flags.
2227 */
2228	flags = HAL_TXDESC_CLRDMASK;	/* XXX needed for wep errors */
2229	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2230		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
2231		sc->sc_stats.ast_tx_noack++;
2232	} else if (pktlen > ic->ic_rtsthreshold) {
2233		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
2234 sc->sc_stats.ast_tx_rts++;
2235 }
2236
2237 /*
2238 * Calculate duration. This logically belongs in the 802.11
2239 * layer but it lacks sufficient information to calculate it.
2240 */
2241	if ((flags & HAL_TXDESC_NOACK) == 0 &&
2242	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
2243		u_int16_t dur;
2244		/*
2245		 * XXX not right with fragmentation.
2246		 */
2247		dur = ath_hal_computetxtime(ah, rt, IEEE80211_ACK_SIZE,
2248		    rix, shortPreamble);
2249		*((u_int16_t*) wh->i_dur) = htole16(dur);
2250 }
2251
2252 /*
2253 * Calculate RTS/CTS rate and duration if needed.
2254 */
2255 ctsduration = 0;
2256	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
2257 /*
2258 * CTS transmit rate is derived from the transmit rate
2259 * by looking in the h/w rate table. We must also factor
2260 * in whether or not a short preamble is to be used.
2261 */
2262 cix = rt->info[rix].controlRate;
2263 ctsrate = rt->info[cix].rateCode;
2264 if (shortPreamble)
2265 ctsrate |= rt->info[cix].shortPreamble;
2266 /*
2267 * Compute the transmit duration based on the size
2268 * of an ACK frame. We call into the HAL to do the
2269 * computation since it depends on the characteristics
2270 * of the actual PHY being used.
2271 */
2272		if (flags & HAL_TXDESC_RTSENA) {	/* SIFS + CTS */
2273			ctsduration += ath_hal_computetxtime(ah,
2274			    rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
2275 }
2276 /* SIFS + data */
2277 ctsduration += ath_hal_computetxtime(ah,
2278 rt, pktlen, rix, shortPreamble);
2279		if ((flags & HAL_TXDESC_NOACK) == 0) {	/* SIFS + ACK */
2280			ctsduration += ath_hal_computetxtime(ah,
2281			    rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
2282 }
2283 } else
2284 ctsrate = 0;
2285
2286 /*
2287 * For now use the antenna on which the last good
2288 * frame was received on. We assume this field is
2289 * initialized to 0 which gives us ``auto'' or the
2290 * ``default'' antenna.
2291 */
2292 if (an->an_tx_antenna) {
2293 antenna = an->an_tx_antenna;
2294 } else {
2295 antenna = an->an_rx_hist[an->an_rx_hist_next].arh_antenna;
2296 }
2297
2298 #if NBPFILTER > 0
2299	if (ic->ic_rawbpf)
2300		bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT);
2301
2302 if (sc->sc_drvbpf) {
2303		sc->sc_txtap.wt_flags = 0;
2304		if (shortPreamble)
2305			sc->sc_txtap.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2306		if (!ath_softcrypto && iswep)
2307			sc->sc_txtap.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2308		sc->sc_txtap.wt_rate = ni->ni_rates.rs_rates[ni->ni_txrate] &
2309		    IEEE80211_RATE_VAL;
2310		sc->sc_txtap.wt_txpower = 30;
2311		sc->sc_txtap.wt_antenna = antenna;
2312
2313		bpf_mtap_hdr(sc->sc_drvbpf, &sc->sc_txtap, sc->sc_txtap_len,
2314		    m0, BPF_DIRECTION_OUT);
2315 }
2316#endif
2317
2318 /*
2319 * Formulate first tx descriptor with tx controls.
2320 */
2321	tries = IEEE80211_IS_MULTICAST(wh->i_addr1) ? 1 : 15;
2322	/* XXX check return value? */
2323	ath_hal_setup_tx_desc(ah, ds
2324	    , pktlen		/* packet length */
2325	    , hdrlen		/* header length */
2326	    , atype		/* Atheros packet type */
2327	    , 60		/* txpower XXX */
2328	    , txrate, tries	/* series 0 rate/tries */
2329	    , iswep ? sc->sc_ic.ic_wep_txkey : HAL_TXKEYIX_INVALID
2330	    , antenna		/* antenna mode */
2331	    , flags		/* flags */
2332	    , ctsrate		/* rts/cts rate */
2333	    , ctsduration	/* rts/cts duration */
2334	);
2335#ifdef notyet
2336 ath_hal_setup_xtx_desc(ah, ds
2337 , AH_FALSE /* short preamble */
2338 , 0, 0 /* series 1 rate/tries */
2339 , 0, 0 /* series 2 rate/tries */
2340 , 0, 0 /* series 3 rate/tries */
2341 );
2342#endif
2343 /*
2344 * Fillin the remainder of the descriptor info.
2345 */
2346	for (i = 0; i < bf->bf_nseg; i++, ds++) {
2347		ds->ds_data = bf->bf_segs[i].ds_addr;
2348		if (i == bf->bf_nseg - 1) {
2349			ds->ds_link = 0;
2350		} else {
2351			ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1);
2352		}
2353		ath_hal_fill_tx_desc(ah, ds
2354		    , bf->bf_segs[i].ds_len	/* segment length */
2355		    , i == 0			/* first segment */
2356		    , i == bf->bf_nseg - 1	/* last segment */
2357		);
2358 DPRINTF(ATH_DEBUG_XMIT,
2359 ("%s: %d: %08x %08x %08x %08x %08x %08x\n",
2360 __func__, i, ds->ds_link, ds->ds_data,
2361 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]));
2362 }
2363
2364 /*
2365 * Insert the frame on the outbound list and
2366 * pass it on to the hardware.
2367 */
2368	s = splnet();
2369	TAILQ_INSERT_TAIL(&sc->sc_txq, bf, bf_list);
2370	if (sc->sc_txlink == NULL) {
2371		ath_hal_put_tx_buf(ah, sc->sc_txhalq[hwqueue], bf->bf_daddr);
2372		DPRINTF(ATH_DEBUG_XMIT, ("%s: TXDP0 = %p (%p)\n", __func__,
2373		    (caddr_t)bf->bf_daddr, bf->bf_desc));
2374	} else {
2375		*sc->sc_txlink = bf->bf_daddr;
2376		DPRINTF(ATH_DEBUG_XMIT, ("%s: link(%p)=%p (%p)\n", __func__,
2377		    sc->sc_txlink, (caddr_t)bf->bf_daddr, bf->bf_desc));
2378	}
2379	sc->sc_txlink = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
2380	splx(s);
2381
2382	ath_hal_tx_start(ah, sc->sc_txhalq[hwqueue]);
2383 return 0;
2384}
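
A standalone sketch (not driver code; the helper name is made up) of the weak-IV skip used above: IVs of the form (B, 255, N) with 3 <= B < 16 are the classes targeted by the Fluhrer/Mantin/Shamir attack, so when the low half of the counter reaches 0xff00 it jumps past the weak block before incrementing.

#include <stdio.h>
#include <stdint.h>

static uint32_t
next_wep_iv(uint32_t iv)
{
	if ((iv & 0xff00) == 0xff00) {
		int B = (iv & 0xff0000) >> 16;
		if (B >= 3 && B < 16)
			iv = (B + 1) << 16;	/* skip the whole weak class */
	}
	return iv + 1;
}

int
main(void)
{
	uint32_t iv = 0x03ff00;		/* would start a weak (B=3) class */

	/* prints 040001, 040002, 040003: the 0x03ffxx range is never used */
	for (int i = 0; i < 3; i++) {
		iv = next_wep_iv(iv);
		printf("iv %06x\n", iv);
	}
	return 0;
}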
2385
2386void
2387ath_tx_proc(void *arg, int npending)
2388{
2389 struct ath_softc *sc = arg;
2390 struct ath_hal *ah = sc->sc_ah;
2391 struct ath_buf *bf;
2392 struct ieee80211com *ic = &sc->sc_ic;
2393	struct ifnet *ifp = &ic->ic_if;
2394 struct ath_desc *ds;
2395 struct ieee80211_node *ni;
2396 struct ath_node *an;
2397 int sr, lr, s;
2398 HAL_STATUS status;
2399
2400 for (;;) {
2401		s = splnet();
2402		bf = TAILQ_FIRST(&sc->sc_txq);
2403		if (bf == NULL) {
2404			sc->sc_txlink = NULL;
2405			splx(s);
2406			break;
2407		}
2408		/* only the last descriptor is needed */
2409		ds = &bf->bf_desc[bf->bf_nseg - 1];
2410		status = ath_hal_proc_tx_desc(ah, ds);
2411#ifdef AR_DEBUG
2412 if (ath_debug & ATH_DEBUG_XMIT_DESC)
2413			ath_printtxbuf(bf, status == HAL_OK);
2414#endif
2415		if (status == HAL_EINPROGRESS) {
2416			splx(s);
2417			break;
2418		}
2419		TAILQ_REMOVE(&sc->sc_txq, bf, bf_list);
2420		splx(s);
2421
2422 ni = bf->bf_node;
2423		if (ni != NULL) {
2424			an = (struct ath_node *) ni;
2425			if (ds->ds_txstat.ts_status == 0) {
2426				if (bf->bf_id.id_node != NULL)
2427					ieee80211_rssadapt_raise_rate(ic,
2428					    &an->an_rssadapt, &bf->bf_id);
2429				an->an_tx_antenna = ds->ds_txstat.ts_antenna;
2430			} else {
2431				if (bf->bf_id.id_node != NULL)
2432					ieee80211_rssadapt_lower_rate(ic, ni,
2433					    &an->an_rssadapt, &bf->bf_id);
2434				if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY)
2435					sc->sc_stats.ast_tx_xretries++;
2436				if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO)
2437					sc->sc_stats.ast_tx_fifoerr++;
2438				if (ds->ds_txstat.ts_status & HAL_TXERR_FILT)
2439					sc->sc_stats.ast_tx_filtered++;
2440				an->an_tx_antenna = 0;	/* invalidate */
2441			}
2442			sr = ds->ds_txstat.ts_shortretry;
2443			lr = ds->ds_txstat.ts_longretry;
2444 sc->sc_stats.ast_tx_shortretry += sr;
2445 sc->sc_stats.ast_tx_longretry += lr;
2446 /*
2447 * Reclaim reference to node.
2448 *
2449 * NB: the node may be reclaimed here if, for example
2450 * this is a DEAUTH message that was sent and the
2451 * node was timed out due to inactivity.
2452 */
2453 ieee80211_release_node(ic, ni);
2454 }
2455		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
2456		    bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2457		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2458		m_freem(bf->bf_m);
2459		bf->bf_m = NULL;
2460		bf->bf_node = NULL;
2461
2462		s = splnet();
2463		TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
2464		splx(s);
2465 }
2466 ifq_clr_oactive(&ifp->if_snd);
2467 sc->sc_tx_timer = 0;
2468
2469 ath_start(ifp);
2470}
2471
2472/*
2473 * Drain the transmit queue and reclaim resources.
2474 */
2475void
2476ath_draintxq(struct ath_softc *sc)
2477{
2478 struct ath_hal *ah = sc->sc_ah;
2479 struct ieee80211com *ic = &sc->sc_ic;
2480	struct ifnet *ifp = &ic->ic_if;
2481 struct ieee80211_node *ni;
2482 struct ath_buf *bf;
2483 int s, i;
2484
2485 /* XXX return value */
2486 if (!sc->sc_invalid) {
2487 for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) {
2488 /* don't touch the hardware if marked invalid */
2489			(void) ath_hal_stop_tx_dma(ah, sc->sc_txhalq[i]);
2490 DPRINTF(ATH_DEBUG_RESET,
2491 ("%s: tx queue %d (%p), link %p\n", __func__, i,
2492 (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah,
2493 sc->sc_txhalq[i]), sc->sc_txlink));
2494 }
2495		(void) ath_hal_stop_tx_dma(ah, sc->sc_bhalq);
2496 DPRINTF(ATH_DEBUG_RESET,
2497 ("%s: beacon queue (%p)\n", __func__,
2498 (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah, sc->sc_bhalq)));
2499 }
2500 for (;;) {
2501		s = splnet();
2502		bf = TAILQ_FIRST(&sc->sc_txq);
2503		if (bf == NULL) {
2504			sc->sc_txlink = NULL;
2505			splx(s);
2506			break;
2507		}
2508		TAILQ_REMOVE(&sc->sc_txq, bf, bf_list);
2509		splx(s);
2510#ifdef AR_DEBUG
2511 if (ath_debug & ATH_DEBUG_RESET) {
2512 ath_printtxbuf(bf,
2513			    ath_hal_proc_tx_desc(ah, bf->bf_desc) == HAL_OK);
2514		}
2515 #endif /* AR_DEBUG */
2516		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2517		m_freem(bf->bf_m);
2518		bf->bf_m = NULL;
2519		ni = bf->bf_node;
2520		bf->bf_node = NULL;
2521		s = splnet();
2522		if (ni != NULL) {
2523 /*
2524 * Reclaim node reference.
2525 */
2526 ieee80211_release_node(ic, ni);
2527 }
2528		TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
2529		splx(s);
2530 }
2531 ifq_clr_oactive(&ifp->if_snd);
2532 sc->sc_tx_timer = 0;
2533}
2534
2535/*
2536 * Disable the receive h/w in preparation for a reset.
2537 */
2538void
2539ath_stoprecv(struct ath_softc *sc)
2540{
2541#define PA2DESC(_sc, _pa) \
2542 ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \
2543 ((_pa) - (_sc)->sc_desc_paddr)))
2544 struct ath_hal *ah = sc->sc_ah;
2545
2546	ath_hal_stop_pcu_recv(ah);	/* disable PCU */
2547	ath_hal_set_rx_filter(ah, 0);	/* clear recv filter */
2548	ath_hal_stop_rx_dma(ah);	/* disable DMA engine */
2549#ifdef AR_DEBUG
2550 if (ath_debug & ATH_DEBUG_RESET) {
2551 struct ath_buf *bf;
2552
2553 printf("%s: rx queue %p, link %p\n", __func__,
2554		    (caddr_t)(u_intptr_t)ath_hal_get_rx_buf(ah), sc->sc_rxlink);
2555		TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
2556 struct ath_desc *ds = bf->bf_desc;
2557			if (ath_hal_proc_rx_desc(ah, ds, bf->bf_daddr,
2558			    PA2DESC(sc, ds->ds_link)) == HAL_OK)
2559 ath_printrxbuf(bf, 1);
2560 }
2561 }
2562#endif
2563	sc->sc_rxlink = NULL;		/* just in case */
2564#undef PA2DESC
2565}
2566
2567/*
2568 * Enable the receive h/w following a reset.
2569 */
2570int
2571ath_startrecv(struct ath_softc *sc)
2572{
2573 struct ath_hal *ah = sc->sc_ah;
2574 struct ath_buf *bf;
2575
2576	sc->sc_rxlink = NULL;
2577	TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
2578 int error = ath_rxbuf_init(sc, bf);
2579 if (error != 0) {
2580 DPRINTF(ATH_DEBUG_RECV,
2581 ("%s: ath_rxbuf_init failed %d\n",
2582 __func__, error));
2583 return error;
2584 }
2585 }
2586
2587	bf = TAILQ_FIRST(&sc->sc_rxbuf);
2588	ath_hal_put_rx_buf(ah, bf->bf_daddr);
2589	ath_hal_start_rx(ah);		/* enable recv descriptors */
2590	ath_mode_init(sc);		/* set filters, etc. */
2591	ath_hal_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */
2592 return 0;
2593}
2594
2595/*
2596 * Set/change channels. If the channel is really being changed,
2597 * it's done by resetting the chip. To accomplish this we must
2598 * first clean up any pending DMA, then restart things much as
2599 * ath_init does.
2600 */
2601int
2602ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
2603{
2604 struct ath_hal *ah = sc->sc_ah;
2605 struct ieee80211com *ic = &sc->sc_ic;
2606	struct ifnet *ifp = &ic->ic_if;
2607
2608 DPRINTF(ATH_DEBUG_ANY, ("%s: %u (%u MHz) -> %u (%u MHz)\n", __func__,
2609 ieee80211_chan2ieee(ic, ic->ic_ibss_chan),
2610 ic->ic_ibss_chan->ic_freq,
2611 ieee80211_chan2ieee(ic, chan), chan->ic_freq));
2612 if (chan != ic->ic_ibss_chan) {
2613 HAL_STATUS status;
2614 HAL_CHANNEL hchan;
2615 enum ieee80211_phymode mode;
2616
2617 /*
2618 * To switch channels clear any pending DMA operations;
2619 * wait long enough for the RX fifo to drain, reset the
2620 * hardware at the new frequency, and then re-enable
2621 * the relevant bits of the h/w.
2622 */
2623		ath_hal_set_intr(ah, 0);	/* disable interrupts */
2624 ath_draintxq(sc); /* clear pending tx frames */
2625 ath_stoprecv(sc); /* turn off frame recv */
2626 /*
2627 * Convert to a HAL channel description.
2628 */
2629 hchan.channel = chan->ic_freq;
2630 hchan.channelFlags = chan->ic_flags;
2631		if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE,
2632		    &status)) {
2633 printf("%s: ath_chan_set: unable to reset "
2634 "channel %u (%u MHz)\n", ifp->if_xname,
2635 ieee80211_chan2ieee(ic, chan), chan->ic_freq);
2636			return EIO;
2637 }
2638 ath_set_slot_time(sc);
2639 /*
2640 * Re-enable rx framework.
2641 */
2642 if (ath_startrecv(sc) != 0) {
2643 printf("%s: ath_chan_set: unable to restart recv "
2644 "logic\n", ifp->if_xname);
2645			return EIO;
2646 }
2647
2648#if NBPFILTER1 > 0
2649 /*
2650 * Update BPF state.
2651 */
2652		sc->sc_txtap.wt_chan_freq = sc->sc_rxtap.wr_chan_freq =
2653		    htole16(chan->ic_freq);
2654		sc->sc_txtap.wt_chan_flags = sc->sc_rxtap.wr_chan_flags =
2655		    htole16(chan->ic_flags);
2656#endif
2657
2658 /*
2659 * Change channels and update the h/w rate map
2660 * if we're switching; e.g. 11a to 11b/g.
2661 */
2662 ic->ic_ibss_chan = chan;
2663 mode = ieee80211_chan2mode(ic, chan);
2664 if (mode != sc->sc_curmode)
2665 ath_setcurmode(sc, mode);
2666
2667 /*
2668 * Re-enable interrupts.
2669 */
2670 ath_hal_set_intr(ah, sc->sc_imask);
2671 }
2672 return 0;
2673}
2674
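/*
 * Timer callback: step an active scan to the next channel.
 */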
2675void
2676ath_next_scan(void *arg)
2677{
2678 struct ath_softc *sc = arg;
2679 struct ieee80211com *ic = &sc->sc_ic;
2680 struct ifnet *ifp = &ic->ic_if;
2681 int s;
2682
2683 /* don't call ath_start w/o network interrupts blocked */
2684 s = splnet();
2685
2686 if (ic->ic_state == IEEE80211_S_SCAN)
2687 ieee80211_next_scan(ifp);
2688 splx(s);
2689}
2690
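/*
 * Program the 9us short slot time when the current BSS uses it.
 */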
2691int
2692ath_set_slot_time(struct ath_softc *sc)
2693{
2694 struct ath_hal *ah = sc->sc_ah;
2695 struct ieee80211com *ic = &sc->sc_ic;
2696
2697 if (ic->ic_flags & IEEE80211_F_SHSLOT)
2698 return (ath_hal_set_slot_time(ah, HAL_SLOT_TIME_9));
2699
2700 return (0);
2701}
2702
2703/*
2704 * Periodically recalibrate the PHY to account
2705 * for temperature/environment changes.
2706 */
2707void
2708ath_calibrate(void *arg)
2709{
2710 struct ath_softc *sc = arg;
2711 struct ath_hal *ah = sc->sc_ah;
2712 struct ieee80211com *ic = &sc->sc_ic;
2713 struct ieee80211_channel *c;
2714 HAL_CHANNEL hchan;
2715 int s;
2716
2717 sc->sc_stats.ast_per_cal++;
2718
2719 /*
2720 * Convert to a HAL channel description.
2721 */
2722 c = ic->ic_ibss_chan;
2723 hchan.channel = c->ic_freq;
2724 hchan.channelFlags = c->ic_flags;
2725
2726 s = splnet();
2727 DPRINTF(ATH_DEBUG_CALIBRATE,
2728 ("%s: channel %u/%x\n", __func__, c->ic_freq, c->ic_flags));
2729
2730 if (ath_hal_get_rf_gain(ah) == HAL_RFGAIN_NEED_CHANGE) {
2731 /*
2732 * Rfgain is out of bounds, reset the chip
2733 * to load new gain values.
2734 */
2735 sc->sc_stats.ast_per_rfgain++;
2736 ath_reset(sc, 1);
2737 }
2738 if (!ath_hal_calibrate(ah, &hchan)) {
2739 DPRINTF(ATH_DEBUG_ANY,
2740 ("%s: calibration of channel %u failed\n",
2741 __func__, c->ic_freq));
2742 sc->sc_stats.ast_per_calfail++;
2743 }
2744 timeout_add_sec(&sc->sc_cal_to, ath_calinterval);
2745 splx(s);
2746}
2747
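/*
 * Map the 802.11 state onto the hardware LED state and, when a
 * GPIO-driven soft LED is present, drive it as well.
 */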
2748void
2749ath_ledstate(struct ath_softc *sc, enum ieee80211_state state)
2750{
2751 HAL_LED_STATE led = HAL_LED_INIT;
2752 u_int32_t softled = AR5K_SOFTLED_OFF;
2753
2754 switch (state) {
2755 case IEEE80211_S_INIT:
2756 break;
2757 case IEEE80211_S_SCAN:
2758 led = HAL_LED_SCAN;
2759 break;
2760 case IEEE80211_S_AUTH:
2761 led = HAL_LED_AUTH;
2762 break;
2763 case IEEE80211_S_ASSOC:
2764 led = HAL_LED_ASSOC;
2765 softled = AR5K_SOFTLED_ON;
2766 break;
2767 case IEEE80211_S_RUN:
2768 led = HAL_LED_RUN;
2769 softled = AR5K_SOFTLED_ON;
2770 break;
2771 }
2772
2773 ath_hal_set_ledstate(sc->sc_ah, led);
2774 if (sc->sc_softled) {
2775 ath_hal_set_gpio_output(sc->sc_ah, AR5K_SOFTLED_PIN);
2776 ath_hal_set_gpio(sc->sc_ah, AR5K_SOFTLED_PIN, softled);
2777 }
2778}
2779
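/*
 * 802.11 state transition handler: program the channel, RX filter,
 * BSSID and beacon/calibration timers for the new state, then let
 * the net80211 layer complete the transition.
 */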
2780int
2781ath_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
2782{
2783 struct ifnet *ifp = &ic->ic_if;
2784 struct ath_softc *sc = ifp->if_softc;
2785 struct ath_hal *ah = sc->sc_ah;
2786 struct ieee80211_node *ni;
2787 const u_int8_t *bssid;
2788 int error, i;
2789
2790 u_int32_t rfilt;
2791
2792 DPRINTF(ATH_DEBUG_ANY, ("%s: %s -> %s\n", __func__,
2793 ieee80211_state_name[ic->ic_state],
2794 ieee80211_state_name[nstate]));
2795
2796 timeout_del(&sc->sc_scan_to);
2797 timeout_del(&sc->sc_cal_to);
2798 ath_ledstate(sc, nstate);
2799
2800 if (nstate == IEEE80211_S_INIT) {
2801 timeout_del(&sc->sc_rssadapt_to);
2802 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
2803 ath_hal_set_intr(ah, sc->sc_imask);
2804 return (*sc->sc_newstate)(ic, nstate, arg);
2805 }
2806 ni = ic->ic_bss;
2807 error = ath_chan_set(sc, ni->ni_chan);
2808 if (error != 0)
2809 goto bad;
2810 rfilt = ath_calcrxfilter(sc);
2811 if (nstate == IEEE80211_S_SCAN ||
2812 ic->ic_opmode == IEEE80211_M_MONITOR) {
2813 bssid = sc->sc_broadcast_addr;
2814 } else {
2815 bssid = ni->ni_bssid;
2816 }
2817 ath_hal_set_rx_filter(ah, rfilt);
2818 DPRINTF(ATH_DEBUG_ANY, ("%s: RX filter 0x%x bssid %s\n",
2819 __func__, rfilt, ether_sprintf((u_char*)bssid)));
2820
2821 if (nstate == IEEE80211_S_RUN && ic->ic_opmode == IEEE80211_M_STA) {
2822 ath_hal_set_associd(ah, bssid, ni->ni_associd);
2823 } else {
2824 ath_hal_set_associd(ah, bssid, 0);
2825 }
2826
2827 if (!ath_softcrypto && (ic->ic_flags & IEEE80211_F_WEPON)) {
2828 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
2829 if (ath_hal_is_key_valid(ah, i))
2830 ath_hal_set_key_lladdr(ah, i, bssid);
2831 }
2832 }
2833
2834 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
2835 /* nothing to do */
2836 } else if (nstate == IEEE80211_S_RUN) {
2837 DPRINTF(ATH_DEBUG_ANY, ("%s(RUN): "
2838 "ic_flags=0x%08x iv=%d bssid=%s "
2839 "capinfo=0x%04x chan=%d\n",
2840 __func__,
2841 ic->ic_flags,
2842 ni->ni_intval,
2843 ether_sprintf(ni->ni_bssid),
2844 ni->ni_capinfo,
2845 ieee80211_chan2ieee(ic, ni->ni_chan)));
2846
2847 /*
2848 * Allocate and setup the beacon frame for AP or adhoc mode.
2849 */
2850#ifndef IEEE80211_STA_ONLY
2851 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2852 ic->ic_opmode == IEEE80211_M_IBSS) {
2853 error = ath_beacon_alloc(sc, ni);
2854 if (error != 0)
2855 goto bad;
2856 }
2857#endif
2858 /*
2859 * Configure the beacon and sleep timers.
2860 */
2861 ath_beacon_config(sc);
2862 } else {
2863 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
2864 ath_hal_set_intr(ah, sc->sc_imask);
2865 }
2866
2867 /*
2868 * Invoke the parent method to complete the work.
2869 */
2870 error = (*sc->sc_newstate)(ic, nstate, arg);
2871
2872 if (nstate == IEEE80211_S_RUN) {
2873 /* start periodic recalibration timer */
2874 timeout_add_sec(&sc->sc_cal_to, ath_calinterval);
2875
2876 if (ic->ic_opmode != IEEE80211_M_MONITOR)
2877 timeout_add_msec(&sc->sc_rssadapt_to, 100);
2878 } else if (nstate == IEEE80211_S_SCAN) {
2879 /* start ap/neighbor scan timer */
2880 timeout_add_msec(&sc->sc_scan_to, ath_dwelltime);
2881 }
2882bad:
2883 return error;
2884}
2885
2886#ifndef IEEE80211_STA_ONLY
2887void
2888ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m,
2889 struct ieee80211_node *ni, struct ieee80211_rxinfo *rxi, int subtype)
2890{
2891 struct ath_softc *sc = (struct ath_softc*)ic->ic_softc;
2892 struct ath_hal *ah = sc->sc_ah;
2893
2894 (*sc->sc_recv_mgmt)(ic, m, ni, rxi, subtype);
2895
2896 switch (subtype) {
2897 case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
2898 case IEEE80211_FC0_SUBTYPE_BEACON:
2899 if (ic->ic_opmode != IEEE80211_M_IBSS ||
2900 ic->ic_state != IEEE80211_S_RUN)
2901 break;
2902 if (ieee80211_ibss_merge(ic, ni, ath_hal_get_tsf64(ah)) ==
2903 ENETRESET)
2904 ath_hal_set_associd(ah, ic->ic_bss->ni_bssid, 0);
2905 break;
2906 default:
2907 break;
2908 }
2909 return;
2910}
2911#endif
2912
2913/*
2914 * Setup driver-specific state for a newly associated node.
2915 * Note that we're also called on a re-associate; the isnew
2916 * param tells us whether this is the first association or not.
2917 */
2918void
2919ath_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew)
2920{
2921 if (ic->ic_opmode == IEEE80211_M_MONITOR)
2922 return;
2923}
2924
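/*
 * Fetch the channel list from the HAL and fill in the net80211
 * channel table, merging flags for overlapping channels.
 */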
2925int
2926ath_getchannels(struct ath_softc *sc, HAL_BOOL outdoor, HAL_BOOL xchanmode)
2927{
2928 struct ieee80211com *ic = &sc->sc_ic;
2929 struct ifnet *ifp = &ic->ic_if;
2930 struct ath_hal *ah = sc->sc_ah;
2931 HAL_CHANNEL *chans;
2932 int i, ix, nchan;
2933
2934 sc->sc_nchan = 0;
2935 chans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL),
2936 M_TEMP, M_NOWAIT);
2937 if (chans == NULL) {
2938 printf("%s: unable to allocate channel table\n", ifp->if_xname);
2939 return ENOMEM;
2940 }
2941 if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan,
2942 HAL_MODE_ALL, outdoor, xchanmode)) {
2943 printf("%s: unable to collect channel list from hal\n",
2944 ifp->if_xname);
2945 free(chans, M_TEMP, 0);
2946 return EINVAL;
2947 }
2948
2949 /*
2950 * Convert HAL channels to ieee80211 ones and insert
2951 * them in the table according to their channel number.
2952 */
2953 for (i = 0; i < nchan; i++) {
2954 HAL_CHANNEL *c = &chans[i];
2955 ix = ieee80211_mhz2ieee(c->channel, c->channelFlags);
2956 if (ix > IEEE80211_CHAN_MAX) {
2957 printf("%s: bad hal channel %u (%u/%x) ignored\n",
2958 ifp->if_xname, ix, c->channel, c->channelFlags);
2959 continue;
2960 }
2961 DPRINTF(ATH_DEBUG_ANY,
2962 ("%s: HAL channel %d/%d freq %d flags %#04x idx %d\n",
2963 sc->sc_dev.dv_xname, i, nchan, c->channel, c->channelFlags,
2964 ix));
2965 /* NB: flags are known to be compatible */
2966 if (ic->ic_channels[ix].ic_freq == 0) {
2967 ic->ic_channels[ix].ic_freq = c->channel;
2968 ic->ic_channels[ix].ic_flags = c->channelFlags;
2969 } else {
2970 /* channels overlap; e.g. 11g and 11b */
2971 ic->ic_channels[ix].ic_flags |= c->channelFlags;
2972 }
2973 /* count valid channels */
2974 sc->sc_nchan++;
2975 }
2976 free(chans, M_TEMP, 0);
2977
2978 if (sc->sc_nchan < 1) {
2979 printf("%s: no valid channels for regdomain %s(%u)\n",
2980 ifp->if_xname, ieee80211_regdomain2name(ah->ah_regdomain),
2981 ah->ah_regdomain);
2982 return ENOENT;
2983 }
2984
2985 /* set an initial channel */
2986 ic->ic_ibss_chan = &ic->ic_channels[0];
2987
2988 return 0;
2989}
2990
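/*
 * Copy the HAL rate table for the given phy mode into the
 * net80211 supported-rates set.
 */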
2991int
2992ath_rate_setup(struct ath_softc *sc, u_int mode)
2993{
2994 struct ath_hal *ah = sc->sc_ah;
2995 struct ieee80211com *ic = &sc->sc_ic;
2996 const HAL_RATE_TABLE *rt;
2997 struct ieee80211_rateset *rs;
2998 int i, maxrates;
2999
3000 switch (mode) {
3001 case IEEE80211_MODE_11A:
3002 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11A);
3003 break;
3004 case IEEE80211_MODE_11B:
3005 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11B);
3006 break;
3007 case IEEE80211_MODE_11G:
3008 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11G);
3009 break;
3010 default:
3011 DPRINTF(ATH_DEBUG_ANY,
3012 ("%s: invalid mode %u\n", __func__, mode));
3013 return 0;
3014 }
3015 rt = sc->sc_rates[mode];
3016 if (rt == NULL)
3017 return 0;
3018 if (rt->rateCount > IEEE80211_RATE_MAXSIZE) {
3019 DPRINTF(ATH_DEBUG_ANY,
3020 ("%s: rate table too small (%u > %u)\n",
3021 __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE));
3022 maxrates = IEEE80211_RATE_MAXSIZE;
3023 } else {
3024 maxrates = rt->rateCount;
3025 }
3026 rs = &ic->ic_sup_rates[mode];
3027 for (i = 0; i < maxrates; i++)
3028 rs->rs_rates[i] = rt->info[i].dot11Rate;
3029 rs->rs_nrates = maxrates;
3030 return 1;
3031}
3032
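/*
 * Switch the current phy mode: rebuild the rate-code/index maps
 * and clamp the bss node's tx rate to the new table.
 */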
3033void
3034ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
3035{
3036 const HAL_RATE_TABLE *rt;
3037 struct ieee80211com *ic = &sc->sc_ic;
3038 struct ieee80211_node *ni;
3039 int i;
3040
3041 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
3042 rt = sc->sc_rates[mode];
3043 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
3044 for (i = 0; i < rt->rateCount; i++)
3045 sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i;
3046 bzero(sc->sc_hwmap, sizeof(sc->sc_hwmap));
3047 for (i = 0; i < 32; i++)
3048 sc->sc_hwmap[i] = rt->info[rt->rateCodeToIndex[i]].dot11Rate;
3049 sc->sc_currates = rt;
3050 sc->sc_curmode = mode;
3051 ni = ic->ic_bss;
3052 ni->ni_rates.rs_nrates = sc->sc_currates->rateCount;
3053 if (ni->ni_txrate >= ni->ni_rates.rs_nrates)
3054 ni->ni_txrate = 0;
3055}
3056
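/*
 * Periodic RSSI rate-adaptation statistics update.
 */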
3057void
3058ath_rssadapt_updatenode(void *arg, struct ieee80211_node *ni)
3059{
3060 struct ath_node *an = ATH_NODE(ni);
3061
3062 ieee80211_rssadapt_updatestats(&an->an_rssadapt);
3063}
3064
3065void
3066ath_rssadapt_updatestats(void *arg)
3067{
3068 struct ath_softc *sc = (struct ath_softc *)arg;
3069 struct ieee80211com *ic = &sc->sc_ic;
3070
3071 if (ic->ic_opmode == IEEE80211_M_STA) {
3072 ath_rssadapt_updatenode(arg, ic->ic_bss);
3073 } else {
3074 ieee80211_iterate_nodes(ic, ath_rssadapt_updatenode, arg);
3075 }
3076
3077 timeout_add_msec(&sc->sc_rssadapt_to, 100);
3078}
3079
3080#ifdef AR_DEBUG
3081void
3082ath_printrxbuf(struct ath_buf *bf, int done)
3083{
3084 struct ath_desc *ds;
3085 int i;
3086
3087 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
3088 printf("R%d (%p %p) %08x %08x %08x %08x %08x %08x %c\n",
3089 i, ds, (struct ath_desc *)bf->bf_daddr + i,
3090 ds->ds_link, ds->ds_data,
3091 ds->ds_ctl0, ds->ds_ctl1,
3092 ds->ds_hw[0], ds->ds_hw[1],
3093 !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!');
3094 }
3095}
3096
3097void
3098ath_printtxbuf(struct ath_buf *bf, int done)
3099{
3100 struct ath_desc *ds;
3101 int i;
3102
3103 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
3104 printf("T%d (%p %p) "
3105 "%08x %08x %08x %08x %08x %08x %08x %08x %c\n",
3106 i, ds, (struct ath_desc *)bf->bf_daddr + i,
3107 ds->ds_link, ds->ds_data,
3108 ds->ds_ctl0, ds->ds_ctl1,
3109 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3],
3110 !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!');
3111 }
3112}
3113#endif /* AR_DEBUG */
3114
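/*
 * Initialize the chip's GPIO pins and the optional software LED.
 */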
3115int
3116ath_gpio_attach(struct ath_softc *sc, u_int16_t devid)
3117{
3118 struct ath_hal *ah = sc->sc_ah;
3119 struct gpiobus_attach_args gba;
3120 int i;
3121
3122 if (ah->ah_gpio_npins < 1)
3123 return 0;
3124
3125 /* Initialize gpio pins array */
3126 for (i = 0; i < ah->ah_gpio_npins && i < AR5K_MAX_GPIO; i++) {
3127 sc->sc_gpio_pins[i].pin_num = i;
3128 sc->sc_gpio_pins[i].pin_caps = GPIO_PIN_INPUT |
3129 GPIO_PIN_OUTPUT;
3130
3131 /* Set pin mode to input */
3132 ath_hal_set_gpio_input(ah, i);
3133 sc->sc_gpio_pins[i].pin_flags = GPIO_PIN_INPUT;
3134
3135 /* Get pin input */
3136 sc->sc_gpio_pins[i].pin_state = ath_hal_get_gpio(ah, i) ?
3137 GPIO_PIN_HIGH : GPIO_PIN_LOW;
3138 }
3139
3140 /* Enable GPIO-controlled software LED if available */
3141 if ((ah->ah_version == AR5K_AR5211) ||
3142 (devid == PCI_PRODUCT_ATHEROS_AR5212_IBM)) {
3143 sc->sc_softled = 1;
3144 ath_hal_set_gpio_output(ah, AR5K_SOFTLED_PIN);
3145 ath_hal_set_gpio(ah, AR5K_SOFTLED_PIN, AR5K_SOFTLED_OFF);
3146 }
3147
3148 /* Create gpio controller tag */
3149 sc->sc_gpio_gc.gp_cookie = sc;
3150 sc->sc_gpio_gc.gp_pin_read = ath_gpio_pin_read;
3151 sc->sc_gpio_gc.gp_pin_write = ath_gpio_pin_write;
3152 sc->sc_gpio_gc.gp_pin_ctl = ath_gpio_pin_ctl;
3153
3154 gba.gba_name = "gpio";
3155 gba.gba_gc = &sc->sc_gpio_gc;
3156 gba.gba_pins = sc->sc_gpio_pins;
3157 gba.gba_npins = ah->ah_gpio_npins;
3158
3159#ifdef notyet
3160#if NGPIO > 0
3161 if (config_found(&sc->sc_dev, &gba, gpiobus_print) == NULL)
3162 return (ENODEV);
3163#endif
3164#endif
3165
3166 return (0);
3167}
3168
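/*
 * gpio(4) controller methods backed by the HAL GPIO routines.
 */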
3169int
3170ath_gpio_pin_read(void *arg, int pin)
3171{
3172 struct ath_softc *sc = arg;
3173 struct ath_hal *ah = sc->sc_ah;
3174 return (ath_hal_get_gpio(ah, pin) ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
3175}
3176
3177void
3178ath_gpio_pin_write(void *arg, int pin, int value)
3179{
3180 struct ath_softc *sc = arg;
3181 struct ath_hal *ah = sc->sc_ah;
3182 ath_hal_set_gpio(ah, pin, value ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
3183}
3184
3185void
3186ath_gpio_pin_ctl(void *arg, int pin, int flags)
3187{
3188 struct ath_softc *sc = arg;
3189 struct ath_hal *ah = sc->sc_ah;
3190
3191 if (flags & GPIO_PIN_INPUT) {
3192 ath_hal_set_gpio_input(ah, pin);
3193 } else if (flags & GPIO_PIN_OUTPUT) {
3194 ath_hal_set_gpio_output(ah, pin);
3195 }
3196}