Bug Summary

File: dev/ic/ath.c
Warning: line 1233, column 3
Value stored to 'flags' is never read
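
The store flagged above is in ath_beacon_alloc(): 'flags' picks up HAL_TXDESC_VEOL at line 1233 inside the self-linked-descriptor branch, but the variable is unconditionally reassigned to HAL_TXDESC_NOACK at line 1254 before it is ever read, and VEOL is added back at line 1256 for IBSS mode. A condensed sketch of that path follows (comments here are editorial, not from the source; one possible fix, not taken from upstream, is simply to delete the assignment at line 1233):

    u_int flags = 0;                            /* line 1194 */
    ...
    if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol) {
        ds->ds_link = bf->bf_daddr;             /* link to self */
        flags |= HAL_TXDESC_VEOL;               /* line 1233: value never read */
    } else {
        ds->ds_link = 0;
    }
    ...
    flags = HAL_TXDESC_NOACK;                   /* line 1254: overwrites the earlier value */
    if (ic->ic_opmode == IEEE80211_M_IBSS)
        flags |= HAL_TXDESC_VEOL;               /* line 1256: VEOL is re-added here */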

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ath.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/ic/ath.c
1/* $OpenBSD: ath.c,v 1.125 2023/11/10 15:51:20 bluhm Exp $ */
2/* $NetBSD: ath.c,v 1.37 2004/08/18 21:59:39 dyoung Exp $ */
3
4/*-
5 * Copyright (c) 2002-2004 Sam Leffler, Errno Consulting
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer,
13 * without modification.
14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
16 * redistribution must be conditioned upon including a substantially
17 * similar Disclaimer requirement for further binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * NO WARRANTY
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
26 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
27 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
28 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
31 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33 * THE POSSIBILITY OF SUCH DAMAGES.
34 */
35
36/*
37 * Driver for the Atheros Wireless LAN controller.
38 *
39 * This software is derived from work of Atsushi Onoe; his contribution
40 * is greatly appreciated. It has been modified for OpenBSD to use an
41 * open source HAL instead of the original binary-only HAL.
42 */
43
44#include "bpfilter.h"
45
46#include <sys/param.h>
47#include <sys/systm.h>
48#include <sys/mbuf.h>
49#include <sys/malloc.h>
50#include <sys/lock.h>
51#include <sys/kernel.h>
52#include <sys/socket.h>
53#include <sys/sockio.h>
54#include <sys/device.h>
55#include <sys/errno.h>
56#include <sys/timeout.h>
57#include <sys/gpio.h>
58#include <sys/endian.h>
59
60#include <machine/bus.h>
61
62#include <net/if.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65#if NBPFILTER1 > 0
66#include <net/bpf.h>
67#endif
68#include <netinet/in.h>
69#include <netinet/if_ether.h>
70
71#include <net80211/ieee80211_var.h>
72#include <net80211/ieee80211_rssadapt.h>
73
74#include <dev/pci/pcidevs.h>
75#include <dev/gpio/gpiovar.h>
76
77#include <dev/ic/athvar.h>
78
79int ath_init(struct ifnet *);
80int ath_init1(struct ath_softc *);
81int ath_intr1(struct ath_softc *);
82void ath_stop(struct ifnet *);
83void ath_start(struct ifnet *);
84void ath_reset(struct ath_softc *, int);
85int ath_media_change(struct ifnet *);
86void ath_watchdog(struct ifnet *);
87int ath_ioctl(struct ifnet *, u_long, caddr_t);
88void ath_fatal_proc(void *, int);
89void ath_rxorn_proc(void *, int);
90void ath_bmiss_proc(void *, int);
91int ath_initkeytable(struct ath_softc *);
92void ath_mcastfilter_accum(caddr_t, u_int32_t (*)[2]);
93void ath_mcastfilter_compute(struct ath_softc *, u_int32_t (*)[2]);
94u_int32_t ath_calcrxfilter(struct ath_softc *);
95void ath_mode_init(struct ath_softc *);
96#ifndef IEEE80211_STA_ONLY
97int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
98void ath_beacon_proc(void *, int);
99void ath_beacon_free(struct ath_softc *);
100#endif
101void ath_beacon_config(struct ath_softc *);
102int ath_desc_alloc(struct ath_softc *);
103void ath_desc_free(struct ath_softc *);
104struct ieee80211_node *ath_node_alloc(struct ieee80211com *);
105struct mbuf *ath_getmbuf(int, int, u_int);
106void ath_node_free(struct ieee80211com *, struct ieee80211_node *);
107void ath_node_copy(struct ieee80211com *,
108 struct ieee80211_node *, const struct ieee80211_node *);
109u_int8_t ath_node_getrssi(struct ieee80211com *,
110 const struct ieee80211_node *);
111int ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
112void ath_rx_proc(void *, int);
113int ath_tx_start(struct ath_softc *, struct ieee80211_node *,
114 struct ath_buf *, struct mbuf *);
115void ath_tx_proc(void *, int);
116int ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
117void ath_draintxq(struct ath_softc *);
118void ath_stoprecv(struct ath_softc *);
119int ath_startrecv(struct ath_softc *);
120void ath_next_scan(void *);
121int ath_set_slot_time(struct ath_softc *);
122void ath_calibrate(void *);
123void ath_ledstate(struct ath_softc *, enum ieee80211_state);
124int ath_newstate(struct ieee80211com *, enum ieee80211_state, int);
125void ath_newassoc(struct ieee80211com *,
126 struct ieee80211_node *, int);
127int ath_getchannels(struct ath_softc *, HAL_BOOL outdoor,
128 HAL_BOOL xchanmode);
129int ath_rate_setup(struct ath_softc *sc, u_int mode);
130void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
131void ath_rssadapt_updatenode(void *, struct ieee80211_node *);
132void ath_rssadapt_updatestats(void *);
133#ifndef IEEE80211_STA_ONLY
134void ath_recv_mgmt(struct ieee80211com *, struct mbuf *,
135 struct ieee80211_node *, struct ieee80211_rxinfo *, int);
136#endif
137void ath_disable(struct ath_softc *);
138
139int ath_gpio_attach(struct ath_softc *, u_int16_t);
140int ath_gpio_pin_read(void *, int);
141void ath_gpio_pin_write(void *, int, int);
142void ath_gpio_pin_ctl(void *, int, int);
143
144#ifdef AR_DEBUG
145void ath_printrxbuf(struct ath_buf *, int);
146void ath_printtxbuf(struct ath_buf *, int);
147int ath_debug = 0;
148#endif
149
150int ath_dwelltime = 200; /* 5 channels/second */
151int ath_calinterval = 30; /* calibrate every 30 secs */
152int ath_outdoor = AH_TRUE; /* outdoor operation */
153int ath_xchanmode = AH_TRUE; /* enable extended channels */
154int ath_softcrypto = 1; /* 1=enable software crypto */
155
156struct cfdriver ath_cd = {
157 NULL((void *)0), "ath", DV_IFNET
158};
159
160int
161ath_activate(struct device *self, int act)
162{
163 struct ath_softc *sc = (struct ath_softc *)self;
164 struct ifnet *ifp = &sc->sc_ic.ic_ific_ac.ac_if;
165
166 switch (act) {
167 case DVACT_SUSPEND3:
168 if (ifp->if_flags & IFF_RUNNING0x40) {
169 ath_stop(ifp);
170 if (sc->sc_power != NULL((void *)0))
171 (*sc->sc_power)(sc, act);
172 }
173 break;
174 case DVACT_RESUME4:
175 if (ifp->if_flags & IFF_UP0x1) {
176 ath_init(ifp);
177 if (ifp->if_flags & IFF_RUNNING0x40)
178 ath_start(ifp);
179 }
180 break;
181 }
182 return 0;
183}
184
185int
186ath_enable(struct ath_softc *sc)
187{
188 if (ATH_IS_ENABLED(sc)((sc)->sc_flags & 0x0002) == 0) {
189 if (sc->sc_enable != NULL((void *)0) && (*sc->sc_enable)(sc) != 0) {
190 printf("%s: device enable failed\n",
191 sc->sc_dev.dv_xname);
192 return (EIO5);
193 }
194 sc->sc_flags |= ATH_ENABLED0x0002;
195 }
196 return (0);
197}
198
199void
200ath_disable(struct ath_softc *sc)
201{
202 if (!ATH_IS_ENABLED(sc)((sc)->sc_flags & 0x0002))
203 return;
204 if (sc->sc_disable != NULL((void *)0))
205 (*sc->sc_disable)(sc);
206 sc->sc_flags &= ~ATH_ENABLED0x0002;
207}
208
209int
210ath_attach(u_int16_t devid, struct ath_softc *sc)
211{
212 struct ieee80211com *ic = &sc->sc_ic;
213 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
214 struct ath_hal *ah;
215 HAL_STATUS status;
216 HAL_TXQ_INFO qinfo;
217 int error = 0, i;
218
219 DPRINTF(ATH_DEBUG_ANY, ("%s: devid 0x%x\n", __func__, devid));
220
221 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ16);
222 sc->sc_flags &= ~ATH_ATTACHED0x0001; /* make sure that it's not attached */
223
224 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
225 sc->sc_pcie, &status);
226 if (ah == NULL((void *)0)) {
227 printf("%s: unable to attach hardware; HAL status %d\n",
228 ifp->if_xname, status);
229 error = ENXIO6;
230 goto bad;
231 }
232 if (ah->ah_abi != HAL_ABI_VERSION0x04090901) {
233 printf("%s: HAL ABI mismatch detected (0x%x != 0x%x)\n",
234 ifp->if_xname, ah->ah_abi, HAL_ABI_VERSION0x04090901);
235 error = ENXIO6;
236 goto bad;
237 }
238
239 if (ah->ah_single_chip == AH_TRUE) {
240 printf("%s: AR%s %u.%u phy %u.%u rf %u.%u", ifp->if_xname,
241 ar5k_printver(AR5K_VERSION_DEV, devid),
242 ah->ah_mac_version, ah->ah_mac_revision,
243 ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf,
244 ah->ah_radio_5ghz_revision >> 4,
245 ah->ah_radio_5ghz_revision & 0xf);
246 } else {
247 printf("%s: AR%s %u.%u phy %u.%u", ifp->if_xname,
248 ar5k_printver(AR5K_VERSION_VER, ah->ah_mac_srev),
249 ah->ah_mac_version, ah->ah_mac_revision,
250 ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf);
251 printf(" rf%s %u.%u",
252 ar5k_printver(AR5K_VERSION_RAD, ah->ah_radio_5ghz_revision),
253 ah->ah_radio_5ghz_revision >> 4,
254 ah->ah_radio_5ghz_revision & 0xf);
255 if (ah->ah_radio_2ghz_revision != 0) {
256 printf(" rf%s %u.%u",
257 ar5k_printver(AR5K_VERSION_RAD,
258 ah->ah_radio_2ghz_revision),
259 ah->ah_radio_2ghz_revision >> 4,
260 ah->ah_radio_2ghz_revision & 0xf);
261 }
262 }
263 if (ah->ah_ee_versionah_capabilities.cap_eeprom.ee_version == AR5K_EEPROM_VERSION_4_70x3007)
264 printf(" eeprom 4.7");
265 else
266 printf(" eeprom %1x.%1x", ah->ah_ee_versionah_capabilities.cap_eeprom.ee_version >> 12,
267 ah->ah_ee_versionah_capabilities.cap_eeprom.ee_version & 0xff);
268
269#if 0
270 if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_UNSUPP0xff ||
271 ah->ah_radio_2ghz_revision >= AR5K_SREV_RAD_UNSUPP0xff) {
272 printf(": RF radio not supported\n");
273 error = EOPNOTSUPP45;
274 goto bad;
275 }
276#endif
277
278 sc->sc_ah = ah;
279 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */
280
281 /*
282 * Get regulation domain either stored in the EEPROM or defined
283 * as the default value. Some devices are known to have broken
284 * regulation domain values in their EEPROM.
285 */
286 ath_hal_get_regdomain(ah, &ah->ah_regdomain)(*(&ah->ah_capabilities.cap_regdomain.reg_current) = (
ah)->ah_get_regdomain(ah))
;
287
288 /*
289 * Construct channel list based on the current regulation domain.
290 */
291 error = ath_getchannels(sc, ath_outdoor, ath_xchanmode);
292 if (error != 0)
293 goto bad;
294
295 /*
296 * Setup rate tables for all potential media types.
297 */
298 ath_rate_setup(sc, IEEE80211_MODE_11A);
299 ath_rate_setup(sc, IEEE80211_MODE_11B);
300 ath_rate_setup(sc, IEEE80211_MODE_11G);
301
302 error = ath_desc_alloc(sc);
303 if (error != 0) {
304 printf(": failed to allocate descriptors: %d\n", error);
305 goto bad;
306 }
307 timeout_set(&sc->sc_scan_to, ath_next_scan, sc);
308 timeout_set(&sc->sc_cal_to, ath_calibrate, sc);
309 timeout_set(&sc->sc_rssadapt_to, ath_rssadapt_updatestats, sc);
310
311 ATH_TASK_INIT(&sc->sc_txtask, ath_tx_proc, sc)do { (&sc->sc_txtask)->t_func = (ath_tx_proc); (&
sc->sc_txtask)->t_context = (sc); } while (0)
;
312 ATH_TASK_INIT(&sc->sc_rxtask, ath_rx_proc, sc)do { (&sc->sc_rxtask)->t_func = (ath_rx_proc); (&
sc->sc_rxtask)->t_context = (sc); } while (0)
;
313 ATH_TASK_INIT(&sc->sc_rxorntask, ath_rxorn_proc, sc)do { (&sc->sc_rxorntask)->t_func = (ath_rxorn_proc)
; (&sc->sc_rxorntask)->t_context = (sc); } while (0
)
;
314 ATH_TASK_INIT(&sc->sc_fataltask, ath_fatal_proc, sc)do { (&sc->sc_fataltask)->t_func = (ath_fatal_proc)
; (&sc->sc_fataltask)->t_context = (sc); } while (0
)
;
315 ATH_TASK_INIT(&sc->sc_bmisstask, ath_bmiss_proc, sc)do { (&sc->sc_bmisstask)->t_func = (ath_bmiss_proc)
; (&sc->sc_bmisstask)->t_context = (sc); } while (0
)
;
316#ifndef IEEE80211_STA_ONLY
317 ATH_TASK_INIT(&sc->sc_swbatask, ath_beacon_proc, sc)do { (&sc->sc_swbatask)->t_func = (ath_beacon_proc)
; (&sc->sc_swbatask)->t_context = (sc); } while (0)
;
318#endif
319
320 /*
321 * For now just pre-allocate one data queue and one
322 * beacon queue. Note that the HAL handles resetting
323 * them at the needed time. Eventually we'll want to
324 * allocate more tx queues for splitting management
325 * frames and for QOS support.
326 */
327 sc->sc_bhalq = ath_hal_setup_tx_queue(ah, HAL_TX_QUEUE_BEACON, NULL)((*(ah)->ah_setup_tx_queue)((ah), (HAL_TX_QUEUE_BEACON), (
((void *)0))))
;
328 if (sc->sc_bhalq == (u_int) -1) {
329 printf(": unable to setup a beacon xmit queue!\n");
330 goto bad2;
331 }
332
333 for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) {
334 bzero(&qinfo, sizeof(qinfo))__builtin_bzero((&qinfo), (sizeof(qinfo)));
335 qinfo.tqi_type = HAL_TX_QUEUE_DATA;
336 qinfo.tqi_subtype = i; /* should be mapped to WME types */
337 sc->sc_txhalq[i] = ath_hal_setup_tx_queue(ah,((*(ah)->ah_setup_tx_queue)((ah), (HAL_TX_QUEUE_DATA), (&
qinfo)))
338 HAL_TX_QUEUE_DATA, &qinfo)((*(ah)->ah_setup_tx_queue)((ah), (HAL_TX_QUEUE_DATA), (&
qinfo)))
;
339 if (sc->sc_txhalq[i] == (u_int) -1) {
340 printf(": unable to setup a data xmit queue %u!\n", i);
341 goto bad2;
342 }
343 }
344
345 ifp->if_softc = sc;
346 ifp->if_flags = IFF_SIMPLEX0x800 | IFF_BROADCAST0x2 | IFF_MULTICAST0x8000;
347 ifp->if_start = ath_start;
348 ifp->if_watchdog = ath_watchdog;
349 ifp->if_ioctl = ath_ioctl;
350 ifq_init_maxlen(&ifp->if_snd, ATH_TXBUF60 * ATH_TXDESC8);
351
352 ic->ic_softcic_ac.ac_if.if_softc = sc;
353 ic->ic_newassoc = ath_newassoc;
354 /* XXX not right but it's not used anywhere important */
355 ic->ic_phytype = IEEE80211_T_OFDM;
356 ic->ic_opmode = IEEE80211_M_STA;
357 ic->ic_caps = IEEE80211_C_WEP0x00000001 /* wep supported */
358 | IEEE80211_C_PMGT0x00000004 /* power management */
359#ifndef IEEE80211_STA_ONLY
360 | IEEE80211_C_IBSS0x00000002 /* ibss, nee adhoc, mode */
361 | IEEE80211_C_HOSTAP0x00000008 /* hostap mode */
362#endif
363 | IEEE80211_C_MONITOR0x00000200 /* monitor mode */
364 | IEEE80211_C_SHSLOT0x00000080 /* short slot time supported */
365 | IEEE80211_C_SHPREAMBLE0x00000100; /* short preamble supported */
366 if (ath_softcrypto)
367 ic->ic_caps |= IEEE80211_C_RSN0x00001000; /* wpa/rsn supported */
368
369 /*
370 * Not all chips have the VEOL support we want to use with
371 * IBSS beacon; check here for it.
372 */
373 sc->sc_veol = ath_hal_has_veol(ah)((*(ah)->ah_has_veol)((ah)));
374
375 /* get mac address from hardware */
376 ath_hal_get_lladdr(ah, ic->ic_myaddr)((*(ah)->ah_get_lladdr)((ah), (ic->ic_myaddr)));
377
378 if_attach(ifp);
379
380 /* call MI attach routine. */
381 ieee80211_ifattach(ifp);
382
383 /* override default methods */
384 ic->ic_node_alloc = ath_node_alloc;
385 sc->sc_node_free = ic->ic_node_free;
386 ic->ic_node_free = ath_node_free;
387 sc->sc_node_copy = ic->ic_node_copy;
388 ic->ic_node_copy = ath_node_copy;
389 ic->ic_node_getrssi = ath_node_getrssi;
390 sc->sc_newstate = ic->ic_newstate;
391 ic->ic_newstate = ath_newstate;
392#ifndef IEEE80211_STA_ONLY
393 sc->sc_recv_mgmt = ic->ic_recv_mgmt;
394 ic->ic_recv_mgmt = ath_recv_mgmt;
395#endif
396 ic->ic_max_rssi = AR5K_MAX_RSSI64;
397 bcopy(etherbroadcastaddr, sc->sc_broadcast_addr, IEEE80211_ADDR_LEN6);
398
399 /* complete initialization */
400 ieee80211_media_init(ifp, ath_media_change, ieee80211_media_status);
401
402#if NBPFILTER1 > 0
403 bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO127,
404 sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN64);
405
406 sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
407 bzero(&sc->sc_rxtapu, sc->sc_rxtap_len)__builtin_bzero((&sc->sc_rxtapu), (sc->sc_rxtap_len
))
;
408 sc->sc_rxtapsc_rxtapu.th.wr_ihdr.it_len = htole16(sc->sc_rxtap_len)((__uint16_t)(sc->sc_rxtap_len));
409 sc->sc_rxtapsc_rxtapu.th.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT)((__uint32_t)(( (1 << IEEE80211_RADIOTAP_FLAGS) | (1 <<
IEEE80211_RADIOTAP_RATE) | (1 << IEEE80211_RADIOTAP_CHANNEL
) | (1 << IEEE80211_RADIOTAP_ANTENNA) | (1 << IEEE80211_RADIOTAP_RSSI
) | 0)))
;
410
411 sc->sc_txtap_len = sizeof(sc->sc_txtapu);
412 bzero(&sc->sc_txtapu, sc->sc_txtap_len)__builtin_bzero((&sc->sc_txtapu), (sc->sc_txtap_len
))
;
413 sc->sc_txtapsc_txtapu.th.wt_ihdr.it_len = htole16(sc->sc_txtap_len)((__uint16_t)(sc->sc_txtap_len));
414 sc->sc_txtapsc_txtapu.th.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT)((__uint32_t)(( (1 << IEEE80211_RADIOTAP_FLAGS) | (1 <<
IEEE80211_RADIOTAP_RATE) | (1 << IEEE80211_RADIOTAP_CHANNEL
) | (1 << IEEE80211_RADIOTAP_DBM_TX_POWER) | (1 <<
IEEE80211_RADIOTAP_ANTENNA) | 0)))
;
415#endif
416
417 sc->sc_flags |= ATH_ATTACHED0x0001;
418
419 /*
420 * Print regulation domain and the mac address. The regulation domain
421 * will be marked with a * if the EEPROM value has been overwritten.
422 */
423 printf(", %s%s, address %s\n",
424 ieee80211_regdomain2name(ah->ah_regdomainah_capabilities.cap_regdomain.reg_current),
425 ah->ah_regdomainah_capabilities.cap_regdomain.reg_current != ah->ah_regdomain_hwah_capabilities.cap_regdomain.reg_hw ? "*" : "",
426 ether_sprintf(ic->ic_myaddr));
427
428 if (ath_gpio_attach(sc, devid) == 0)
429 sc->sc_flags |= ATH_GPIO0x0004;
430
431 return 0;
432bad2:
433 ath_desc_free(sc);
434bad:
435 if (ah)
436 ath_hal_detach(ah)((*(ah)->ah_detach)(ah));
437 sc->sc_invalid = 1;
438 return error;
439}
440
441int
442ath_detach(struct ath_softc *sc, int flags)
443{
444 struct ifnet *ifp = &sc->sc_ic.ic_ific_ac.ac_if;
445 int s;
446
447 if ((sc->sc_flags & ATH_ATTACHED0x0001) == 0)
448 return (0);
449
450 config_detach_children(&sc->sc_dev, flags);
451
452 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags %x\n", __func__, ifp->if_flags));
453
454 timeout_del(&sc->sc_scan_to);
455 timeout_del(&sc->sc_cal_to);
456 timeout_del(&sc->sc_rssadapt_to);
457
458 s = splnet()splraise(0x4);
459 ath_stop(ifp);
460 ath_desc_free(sc);
461 ath_hal_detach(sc->sc_ah)((*(sc->sc_ah)->ah_detach)(sc->sc_ah));
462
463 ieee80211_ifdetach(ifp);
464 if_detach(ifp);
465
466 splx(s)spllower(s);
467
468 return 0;
469}
470
471int
472ath_intr(void *arg)
473{
474 return ath_intr1((struct ath_softc *)arg);
475}
476
477int
478ath_intr1(struct ath_softc *sc)
479{
480 struct ieee80211com *ic = &sc->sc_ic;
481 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
482 struct ath_hal *ah = sc->sc_ah;
483 HAL_INT status;
484
485 if (sc->sc_invalid) {
486 /*
487 * The hardware is not ready/present, don't touch anything.
488 * Note this can happen early on if the IRQ is shared.
489 */
490 DPRINTF(ATH_DEBUG_ANY, ("%s: invalid; ignored\n", __func__));
491 return 0;
492 }
493 if (!ath_hal_is_intr_pending(ah)((*(ah)->ah_is_intr_pending)((ah)))) /* shared irq, not for us */
494 return 0;
495 if ((ifp->if_flags & (IFF_RUNNING0x40|IFF_UP0x1)) != (IFF_RUNNING0x40|IFF_UP0x1)) {
496 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n",
497 __func__, ifp->if_flags));
498 ath_hal_get_isr(ah, &status)((*(ah)->ah_get_isr)((ah), (&status))); /* clear ISR */
499 ath_hal_set_intr(ah, 0)((*(ah)->ah_set_intr)((ah), (0))); /* disable further intr's */
500 return 1; /* XXX */
501 }
502 ath_hal_get_isr(ah, &status)((*(ah)->ah_get_isr)((ah), (&status))); /* NB: clears ISR too */
503 DPRINTF(ATH_DEBUG_INTR, ("%s: status 0x%x\n", __func__, status));
504 status &= sc->sc_imask; /* discard unasked for bits */
505 if (status & HAL_INT_FATAL0x40000000) {
506 sc->sc_stats.ast_hardware++;
507 ath_hal_set_intr(ah, 0)((*(ah)->ah_set_intr)((ah), (0))); /* disable intr's until reset */
508 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_fataltask)((*(&sc->sc_fataltask)->t_func)((&sc->sc_fataltask
)->t_context, 1))
;
509 } else if (status & HAL_INT_RXORN0x00000020) {
510 sc->sc_stats.ast_rxorn++;
511 ath_hal_set_intr(ah, 0)((*(ah)->ah_set_intr)((ah), (0))); /* disable intr's until reset */
512 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxorntask)((*(&sc->sc_rxorntask)->t_func)((&sc->sc_rxorntask
)->t_context, 1))
;
513 } else if (status & HAL_INT_MIB0x00001000) {
514 DPRINTF(ATH_DEBUG_INTR,
515 ("%s: resetting MIB counters\n", __func__));
516 sc->sc_stats.ast_mib++;
517 ath_hal_update_mib_counters(ah, &sc->sc_mib_stats)((*(ah)->ah_update_mib_counters)((ah), (&sc->sc_mib_stats
)))
;
518 } else {
519 if (status & HAL_INT_RXEOL0x00000010) {
520 /*
521 * NB: the hardware should re-read the link when
522 * RXE bit is written, but it doesn't work at
523 * least on older hardware revs.
524 */
525 sc->sc_stats.ast_rxeol++;
526 sc->sc_rxlink = NULL((void *)0);
527 }
528 if (status & HAL_INT_TXURN0x00000800) {
529 sc->sc_stats.ast_txurn++;
530 /* bump tx trigger level */
531 ath_hal_update_tx_triglevel(ah, AH_TRUE)((*(ah)->ah_update_tx_triglevel)((ah), (AH_TRUE)));
532 }
533 if (status & HAL_INT_RX0x00000001)
534 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxtask)((*(&sc->sc_rxtask)->t_func)((&sc->sc_rxtask
)->t_context, 1))
;
535 if (status & HAL_INT_TX0x00000040)
536 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_txtask)((*(&sc->sc_txtask)->t_func)((&sc->sc_txtask
)->t_context, 1))
;
537 if (status & HAL_INT_SWBA0x00010000)
538 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_swbatask)((*(&sc->sc_swbatask)->t_func)((&sc->sc_swbatask
)->t_context, 1))
;
539 if (status & HAL_INT_BMISS0x00040000) {
540 sc->sc_stats.ast_bmiss++;
541 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_bmisstask)((*(&sc->sc_bmisstask)->t_func)((&sc->sc_bmisstask
)->t_context, 1))
;
542 }
543 }
544 return 1;
545}
546
547void
548ath_fatal_proc(void *arg, int pending)
549{
550 struct ath_softc *sc = arg;
551 struct ieee80211com *ic = &sc->sc_ic;
552 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
553
554 if (ifp->if_flags & IFF_DEBUG0x4)
555 printf("%s: hardware error; resetting\n", ifp->if_xname);
556 ath_reset(sc, 1);
557}
558
559void
560ath_rxorn_proc(void *arg, int pending)
561{
562 struct ath_softc *sc = arg;
563 struct ieee80211com *ic = &sc->sc_ic;
564 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
565
566 if (ifp->if_flags & IFF_DEBUG0x4)
567 printf("%s: rx FIFO overrun; resetting\n", ifp->if_xname);
568 ath_reset(sc, 1);
569}
570
571void
572ath_bmiss_proc(void *arg, int pending)
573{
574 struct ath_softc *sc = arg;
575 struct ieee80211com *ic = &sc->sc_ic;
576
577 DPRINTF(ATH_DEBUG_ANY, ("%s: pending %u\n", __func__, pending));
578 if (ic->ic_opmode != IEEE80211_M_STA)
579 return;
580 if (ic->ic_state == IEEE80211_S_RUN) {
581 /*
582 * Rather than go directly to scan state, try to
583 * reassociate first. If that fails then the state
584 * machine will drop us into scanning after timing
585 * out waiting for a probe response.
586 */
587 ieee80211_new_state(ic, IEEE80211_S_ASSOC, -1)(((ic)->ic_newstate)((ic), (IEEE80211_S_ASSOC), (-1)));
588 }
589}
590
591int
592ath_init(struct ifnet *ifp)
593{
594 return ath_init1((struct ath_softc *)ifp->if_softc);
595}
596
597int
598ath_init1(struct ath_softc *sc)
599{
600 struct ieee80211com *ic = &sc->sc_ic;
601 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
602 struct ieee80211_node *ni;
603 enum ieee80211_phymode mode;
604 struct ath_hal *ah = sc->sc_ah;
605 HAL_STATUS status;
606 HAL_CHANNEL hchan;
607 int error = 0, s;
608
609 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n",
610 __func__, ifp->if_flags));
611
612 if ((error = ath_enable(sc)) != 0)
613 return error;
614
615 s = splnet()splraise(0x4);
616 /*
617 * Stop anything previously setup. This is safe
618 * whether this is the first time through or not.
619 */
620 ath_stop(ifp);
621
622 /*
623 * Reset the link layer address to the latest value.
624 */
625 IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl))__builtin_memcpy((ic->ic_myaddr), (((caddr_t)((ifp->if_sadl
)->sdl_data + (ifp->if_sadl)->sdl_nlen))), (6))
;
626 ath_hal_set_lladdr(ah, ic->ic_myaddr)((*(ah)->ah_set_lladdr)((ah), (ic->ic_myaddr)));
627
628 /*
629 * The basic interface to setting the hardware in a good
630 * state is ``reset''. On return the hardware is known to
631 * be powered up and with interrupts disabled. This must
632 * be followed by initialization of the appropriate bits
633 * and then setup of the interrupt mask.
634 */
635 hchan.channel = ic->ic_ibss_chan->ic_freq;
636 hchan.channelFlags = ic->ic_ibss_chan->ic_flags;
637 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, &status)((*(ah)->ah_reset)((ah), (ic->ic_opmode), (&hchan),
(AH_TRUE), (&status)))
) {
638 printf("%s: unable to reset hardware; hal status %u\n",
639 ifp->if_xname, status);
640 error = EIO5;
641 goto done;
642 }
643 ath_set_slot_time(sc);
644
645 if ((error = ath_initkeytable(sc)) != 0) {
646 printf("%s: unable to reset the key cache\n",
647 ifp->if_xname);
648 goto done;
649 }
650
651 if ((error = ath_startrecv(sc)) != 0) {
652 printf("%s: unable to start recv logic\n", ifp->if_xname);
653 goto done;
654 }
655
656 /*
657 * Enable interrupts.
658 */
659 sc->sc_imask = HAL_INT_RX0x00000001 | HAL_INT_TX0x00000040
660 | HAL_INT_RXEOL0x00000010 | HAL_INT_RXORN0x00000020
661 | HAL_INT_FATAL0x40000000 | HAL_INT_GLOBAL0x80000000;
662#ifndef IEEE80211_STA_ONLY
663 if (ic->ic_opmode == IEEE80211_M_HOSTAP)
664 sc->sc_imask |= HAL_INT_MIB0x00001000;
665#endif
666 ath_hal_set_intr(ah, sc->sc_imask)((*(ah)->ah_set_intr)((ah), (sc->sc_imask)));
667
668 ifp->if_flags |= IFF_RUNNING0x40;
669 ic->ic_state = IEEE80211_S_INIT;
670
671 /*
672 * The hardware should be ready to go now so it's safe
673 * to kick the 802.11 state machine as it's likely to
674 * immediately call back to us to send mgmt frames.
675 */
676 ni = ic->ic_bss;
677 ni->ni_chan = ic->ic_ibss_chan;
678 mode = ieee80211_chan2mode(ic, ni->ni_chan);
679 if (mode != sc->sc_curmode)
680 ath_setcurmode(sc, mode);
681 if (ic->ic_opmode != IEEE80211_M_MONITOR) {
682 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1)(((ic)->ic_newstate)((ic), (IEEE80211_S_SCAN), (-1)));
683 } else {
684 ieee80211_new_state(ic, IEEE80211_S_RUN, -1)(((ic)->ic_newstate)((ic), (IEEE80211_S_RUN), (-1)));
685 }
686done:
687 splx(s)spllower(s);
688 return error;
689}
690
691void
692ath_stop(struct ifnet *ifp)
693{
694 struct ieee80211com *ic = (struct ieee80211com *) ifp;
695 struct ath_softc *sc = ifp->if_softc;
696 struct ath_hal *ah = sc->sc_ah;
697 int s;
698
699 DPRINTF(ATH_DEBUG_ANY, ("%s: invalid %u if_flags 0x%x\n",
700 __func__, sc->sc_invalid, ifp->if_flags));
701
702 s = splnet()splraise(0x4);
703 if (ifp->if_flags & IFF_RUNNING0x40) {
704 /*
705 * Shutdown the hardware and driver:
706 * disable interrupts
707 * turn off timers
708 * clear transmit machinery
709 * clear receive machinery
710 * drain and release tx queues
711 * reclaim beacon resources
712 * reset 802.11 state machine
713 * power down hardware
714 *
715 * Note that some of this work is not possible if the
716 * hardware is gone (invalid).
717 */
718 ifp->if_flags &= ~IFF_RUNNING0x40;
719 ifp->if_timer = 0;
720 if (!sc->sc_invalid)
721 ath_hal_set_intr(ah, 0)((*(ah)->ah_set_intr)((ah), (0)));
722 ath_draintxq(sc);
723 if (!sc->sc_invalid) {
724 ath_stoprecv(sc);
725 } else {
726 sc->sc_rxlink = NULL((void *)0);
727 }
728 ifq_purge(&ifp->if_snd);
729#ifndef IEEE80211_STA_ONLY
730 ath_beacon_free(sc);
731#endif
732 ieee80211_new_state(ic, IEEE80211_S_INIT, -1)(((ic)->ic_newstate)((ic), (IEEE80211_S_INIT), (-1)));
733 if (!sc->sc_invalid) {
734 ath_hal_set_power(ah, HAL_PM_FULL_SLEEP, 0)((*(ah)->ah_set_power)((ah), (HAL_PM_FULL_SLEEP), AH_TRUE,
(0)))
;
735 }
736 ath_disable(sc);
737 }
738 splx(s)spllower(s);
739}
740
741/*
742 * Reset the hardware w/o losing operational state. This is
743 * basically a more efficient way of doing ath_stop, ath_init,
744 * followed by state transitions to the current 802.11
745 * operational state. Used to recover from errors rx overrun
746 * and to reset the hardware when rf gain settings must be reset.
747 */
748void
749ath_reset(struct ath_softc *sc, int full)
750{
751 struct ieee80211com *ic = &sc->sc_ic;
752 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
753 struct ath_hal *ah = sc->sc_ah;
754 struct ieee80211_channel *c;
755 HAL_STATUS status;
756 HAL_CHANNEL hchan;
757
758 /*
759 * Convert to a HAL channel description.
760 */
761 c = ic->ic_ibss_chan;
762 hchan.channel = c->ic_freq;
763 hchan.channelFlags = c->ic_flags;
764
765 ath_hal_set_intr(ah, 0)((*(ah)->ah_set_intr)((ah), (0))); /* disable interrupts */
766 ath_draintxq(sc); /* stop xmit side */
767 ath_stoprecv(sc); /* stop recv side */
768 /* NB: indicate channel change so we do a full reset */
769 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan,((*(ah)->ah_reset)((ah), (ic->ic_opmode), (&hchan),
(full ? AH_TRUE : AH_FALSE), (&status)))
770 full ? AH_TRUE : AH_FALSE, &status)((*(ah)->ah_reset)((ah), (ic->ic_opmode), (&hchan),
(full ? AH_TRUE : AH_FALSE), (&status)))
) {
771 printf("%s: %s: unable to reset hardware; hal status %u\n",
772 ifp->if_xname, __func__, status);
773 }
774 ath_set_slot_time(sc);
775 /* In case channel changed, save as a node channel */
776 ic->ic_bss->ni_chan = ic->ic_ibss_chan;
777 ath_hal_set_intr(ah, sc->sc_imask)((*(ah)->ah_set_intr)((ah), (sc->sc_imask)));
778 if (ath_startrecv(sc) != 0) /* restart recv */
779 printf("%s: %s: unable to start recv logic\n", ifp->if_xname,
780 __func__);
781 ath_start(ifp); /* restart xmit */
782 if (ic->ic_state == IEEE80211_S_RUN)
783 ath_beacon_config(sc); /* restart beacons */
784}
785
786void
787ath_start(struct ifnet *ifp)
788{
789 struct ath_softc *sc = ifp->if_softc;
790 struct ath_hal *ah = sc->sc_ah;
791 struct ieee80211com *ic = &sc->sc_ic;
792 struct ieee80211_node *ni;
793 struct ath_buf *bf;
794 struct mbuf *m;
795 struct ieee80211_frame *wh;
796 int s;
797
798 if (!(ifp->if_flags & IFF_RUNNING0x40) || ifq_is_oactive(&ifp->if_snd) ||
799 sc->sc_invalid)
800 return;
801 for (;;) {
802 /*
803 * Grab a TX buffer and associated resources.
804 */
805 s = splnet()splraise(0x4);
806 bf = TAILQ_FIRST(&sc->sc_txbuf)((&sc->sc_txbuf)->tqh_first);
807 if (bf != NULL((void *)0))
808 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list)do { if (((bf)->bf_list.tqe_next) != ((void *)0)) (bf)->
bf_list.tqe_next->bf_list.tqe_prev = (bf)->bf_list.tqe_prev
; else (&sc->sc_txbuf)->tqh_last = (bf)->bf_list
.tqe_prev; *(bf)->bf_list.tqe_prev = (bf)->bf_list.tqe_next
; ((bf)->bf_list.tqe_prev) = ((void *)-1); ((bf)->bf_list
.tqe_next) = ((void *)-1); } while (0)
;
809 splx(s)spllower(s);
810 if (bf == NULL((void *)0)) {
811 DPRINTF(ATH_DEBUG_ANY, ("%s: out of xmit buffers\n",
812 __func__));
813 sc->sc_stats.ast_tx_qstop++;
814 ifq_set_oactive(&ifp->if_snd);
815 break;
816 }
817 /*
818 * Poll the management queue for frames; they
819 * have priority over normal data frames.
820 */
821 m = mq_dequeue(&ic->ic_mgtq);
822 if (m == NULL((void *)0)) {
823 /*
824 * No data frames go out unless we're associated.
825 */
826 if (ic->ic_state != IEEE80211_S_RUN) {
827 DPRINTF(ATH_DEBUG_ANY,
828 ("%s: ignore data packet, state %u\n",
829 __func__, ic->ic_state));
830 sc->sc_stats.ast_tx_discard++;
831 s = splnet()splraise(0x4);
832 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list)do { (bf)->bf_list.tqe_next = ((void *)0); (bf)->bf_list
.tqe_prev = (&sc->sc_txbuf)->tqh_last; *(&sc->
sc_txbuf)->tqh_last = (bf); (&sc->sc_txbuf)->tqh_last
= &(bf)->bf_list.tqe_next; } while (0)
;
833 splx(s)spllower(s);
834 break;
835 }
836 m = ifq_dequeue(&ifp->if_snd);
837 if (m == NULL((void *)0)) {
838 s = splnet()splraise(0x4);
839 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list)do { (bf)->bf_list.tqe_next = ((void *)0); (bf)->bf_list
.tqe_prev = (&sc->sc_txbuf)->tqh_last; *(&sc->
sc_txbuf)->tqh_last = (bf); (&sc->sc_txbuf)->tqh_last
= &(bf)->bf_list.tqe_next; } while (0)
;
840 splx(s)spllower(s);
841 break;
842 }
843
844#if NBPFILTER1 > 0
845 if (ifp->if_bpf)
846 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT(1 << 1));
847#endif
848
849 /*
850 * Encapsulate the packet in prep for transmission.
851 */
852 m = ieee80211_encap(ifp, m, &ni);
853 if (m == NULL((void *)0)) {
854 DPRINTF(ATH_DEBUG_ANY,
855 ("%s: encapsulation failure\n",
856 __func__));
857 sc->sc_stats.ast_tx_encap++;
858 goto bad;
859 }
860 wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
861 } else {
862 ni = m->m_pkthdrM_dat.MH.MH_pkthdr.ph_cookie;
863
864 wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
865 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK0xf0) ==
866 IEEE80211_FC0_SUBTYPE_PROBE_RESP0x50) {
867 /* fill time stamp */
868 u_int64_t tsf;
869 u_int32_t *tstamp;
870
871 tsf = ath_hal_get_tsf64(ah)((*(ah)->ah_get_tsf64)((ah)));
872 /* XXX: adjust 100us delay to xmit */
873 tsf += 100;
874 tstamp = (u_int32_t *)&wh[1];
875 tstamp[0] = htole32(tsf & 0xffffffff)((__uint32_t)(tsf & 0xffffffff));
876 tstamp[1] = htole32(tsf >> 32)((__uint32_t)(tsf >> 32));
877 }
878 sc->sc_stats.ast_tx_mgmt++;
879 }
880
881 if (ath_tx_start(sc, ni, bf, m)) {
882 bad:
883 s = splnet()splraise(0x4);
884 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list)do { (bf)->bf_list.tqe_next = ((void *)0); (bf)->bf_list
.tqe_prev = (&sc->sc_txbuf)->tqh_last; *(&sc->
sc_txbuf)->tqh_last = (bf); (&sc->sc_txbuf)->tqh_last
= &(bf)->bf_list.tqe_next; } while (0)
;
885 splx(s)spllower(s);
886 ifp->if_oerrorsif_data.ifi_oerrors++;
887 if (ni != NULL((void *)0))
888 ieee80211_release_node(ic, ni);
889 continue;
890 }
891
892 sc->sc_tx_timer = 5;
893 ifp->if_timer = 1;
894 }
895}
896
897int
898ath_media_change(struct ifnet *ifp)
899{
900 int error;
901
902 error = ieee80211_media_change(ifp);
903 if (error == ENETRESET52) {
904 if ((ifp->if_flags & (IFF_RUNNING0x40|IFF_UP0x1)) ==
905 (IFF_RUNNING0x40|IFF_UP0x1))
906 ath_init(ifp); /* XXX lose error */
907 error = 0;
908 }
909 return error;
910}
911
912void
913ath_watchdog(struct ifnet *ifp)
914{
915 struct ath_softc *sc = ifp->if_softc;
916
917 ifp->if_timer = 0;
918 if ((ifp->if_flags & IFF_RUNNING0x40) == 0 || sc->sc_invalid)
919 return;
920 if (sc->sc_tx_timer) {
921 if (--sc->sc_tx_timer == 0) {
922 printf("%s: device timeout\n", ifp->if_xname);
923 ath_reset(sc, 1);
924 ifp->if_oerrorsif_data.ifi_oerrors++;
925 sc->sc_stats.ast_watchdog++;
926 return;
927 }
928 ifp->if_timer = 1;
929 }
930
931 ieee80211_watchdog(ifp);
932}
933
934int
935ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
936{
937 struct ath_softc *sc = ifp->if_softc;
938 struct ieee80211com *ic = &sc->sc_ic;
939 struct ifreq *ifr = (struct ifreq *)data;
940 int error = 0, s;
941
942 s = splnet()splraise(0x4);
943 switch (cmd) {
944 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
945 ifp->if_flags |= IFF_UP0x1;
946 /* FALLTHROUGH */
947 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
948 if (ifp->if_flags & IFF_UP0x1) {
949 if (ifp->if_flags & IFF_RUNNING0x40) {
950 /*
951 * To avoid rescanning another access point,
952 * do not call ath_init() here. Instead,
953 * only reflect promisc mode settings.
954 */
955 ath_mode_init(sc);
956 } else {
957 /*
958 * Beware of being called during detach to
959 * reset promiscuous mode. In that case we
960 * will still be marked UP but not RUNNING.
961 * However trying to re-init the interface
962 * is the wrong thing to do as we've already
963 * torn down much of our state. There's
964 * probably a better way to deal with this.
965 */
966 if (!sc->sc_invalid)
967 ath_init(ifp); /* XXX lose error */
968 }
969 } else
970 ath_stop(ifp);
971 break;
972 case SIOCADDMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((49)))
:
973 case SIOCDELMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((50)))
:
974 error = (cmd == SIOCADDMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((49)))
) ?
975 ether_addmulti(ifr, &sc->sc_ic.ic_ac) :
976 ether_delmulti(ifr, &sc->sc_ic.ic_ac);
977 if (error == ENETRESET52) {
978 if (ifp->if_flags & IFF_RUNNING0x40)
979 ath_mode_init(sc);
980 error = 0;
981 }
982 break;
983 case SIOCGATHSTATS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((137)))
:
984 error = copyout(&sc->sc_stats,
985 ifr->ifr_dataifr_ifru.ifru_data, sizeof (sc->sc_stats));
986 break;
987 default:
988 error = ieee80211_ioctl(ifp, cmd, data);
989 if (error == ENETRESET52) {
990 if ((ifp->if_flags & (IFF_RUNNING0x40|IFF_UP0x1)) ==
991 (IFF_RUNNING0x40|IFF_UP0x1)) {
992 if (ic->ic_opmode != IEEE80211_M_MONITOR)
993 ath_init(ifp); /* XXX lose error */
994 else
995 ath_reset(sc, 1);
996 }
997 error = 0;
998 }
999 break;
1000 }
1001 splx(s)spllower(s);
1002 return error;
1003}
1004
1005/*
1006 * Fill the hardware key cache with key entries.
1007 */
1008int
1009ath_initkeytable(struct ath_softc *sc)
1010{
1011 struct ieee80211com *ic = &sc->sc_ic;
1012 struct ath_hal *ah = sc->sc_ah;
1013 int i;
1014
1015 if (ath_softcrypto) {
1016 /*
1017 * Disable the hardware crypto engine and reset the key cache
1018 * to allow software crypto operation for WEP/RSN/WPA2
1019 */
1020 if (ic->ic_flags & (IEEE80211_F_WEPON0x00000100|IEEE80211_F_RSNON0x00200000))
1021 (void)ath_hal_softcrypto(ah, AH_TRUE)((*(ah)->ah_softcrypto)((ah), (AH_TRUE)));
1022 else
1023 (void)ath_hal_softcrypto(ah, AH_FALSE)((*(ah)->ah_softcrypto)((ah), (AH_FALSE)));
1024 return (0);
1025 }
1026
1027 /* WEP is disabled, we only support WEP in hardware yet */
1028 if ((ic->ic_flags & IEEE80211_F_WEPON0x00000100) == 0)
1029 return (0);
1030
1031 /*
1032 * Setup the hardware after reset: the key cache is filled as
1033 * needed and the receive engine is set going. Frame transmit
1034 * is handled entirely in the frame output path; there's nothing
1035 * to do here except setup the interrupt mask.
1036 */
1037
1038 /* XXX maybe should reset all keys when !WEPON */
1039 for (i = 0; i < IEEE80211_WEP_NKID4; i++) {
1040 struct ieee80211_key *k = &ic->ic_nw_keys[i];
1041 if (k->k_len == 0)
1042 ath_hal_reset_key(ah, i)((*(ah)->ah_reset_key)((ah), (i)));
1043 else {
1044 HAL_KEYVAL hk;
1045
1046 bzero(&hk, sizeof(hk))__builtin_bzero((&hk), (sizeof(hk)));
1047 /*
1048 * Pad the key to a supported key length. It
1049 * is always a good idea to use full-length
1050 * keys without padded zeros but this seems
1051 * to be the default behaviour used by many
1052 * implementations.
1053 */
1054 if (k->k_cipher == IEEE80211_CIPHER_WEP40)
1055 hk.wk_len = AR5K_KEYVAL_LENGTH_405;
1056 else if (k->k_cipher == IEEE80211_CIPHER_WEP104)
1057 hk.wk_len = AR5K_KEYVAL_LENGTH_10413;
1058 else
1059 return (EINVAL22);
1060 bcopy(k->k_key, hk.wk_key, hk.wk_len);
1061
1062 if (ath_hal_set_key(ah, i, &hk)((*(ah)->ah_set_key)((ah), (i), (&hk), ((void *)0), AH_FALSE
))
!= AH_TRUE)
1063 return (EINVAL22);
1064 }
1065 }
1066
1067 return (0);
1068}
1069
1070void
1071ath_mcastfilter_accum(caddr_t dl, u_int32_t (*mfilt)[2])
1072{
1073 u_int32_t val;
1074 u_int8_t pos;
1075
1076 val = LE_READ_4(dl + 0)((u_int32_t) ((((u_int8_t *)(dl + 0))[0] ) | (((u_int8_t *)(dl
+ 0))[1] << 8) | (((u_int8_t *)(dl + 0))[2] << 16
) | (((u_int8_t *)(dl + 0))[3] << 24)))
;
1077 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
1078 val = LE_READ_4(dl + 3)((u_int32_t) ((((u_int8_t *)(dl + 3))[0] ) | (((u_int8_t *)(dl
+ 3))[1] << 8) | (((u_int8_t *)(dl + 3))[2] << 16
) | (((u_int8_t *)(dl + 3))[3] << 24)))
;
1079 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
1080 pos &= 0x3f;
1081 (*mfilt)[pos / 32] |= (1 << (pos % 32));
1082}
1083
1084void
1085ath_mcastfilter_compute(struct ath_softc *sc, u_int32_t (*mfilt)[2])
1086{
1087 struct arpcom *ac = &sc->sc_ic.ic_ac;
1088 struct ifnet *ifp = &sc->sc_ic.ic_ific_ac.ac_if;
1089 struct ether_multi *enm;
1090 struct ether_multistep estep;
1091
1092 if (ac->ac_multirangecnt > 0) {
1093 /* XXX Punt on ranges. */
1094 (*mfilt)[0] = (*mfilt)[1] = ~((u_int32_t)0);
1095 ifp->if_flags |= IFF_ALLMULTI0x200;
1096 return;
1097 }
1098
1099 ETHER_FIRST_MULTI(estep, ac, enm)do { (estep).e_enm = ((&(ac)->ac_multiaddrs)->lh_first
); do { if ((((enm)) = ((estep)).e_enm) != ((void *)0)) ((estep
)).e_enm = ((((enm)))->enm_list.le_next); } while ( 0); } while
( 0)
;
1100 while (enm != NULL((void *)0)) {
1101 ath_mcastfilter_accum(enm->enm_addrlo, mfilt);
1102 ETHER_NEXT_MULTI(estep, enm)do { if (((enm) = (estep).e_enm) != ((void *)0)) (estep).e_enm
= (((enm))->enm_list.le_next); } while ( 0)
;
1103 }
1104 ifp->if_flags &= ~IFF_ALLMULTI0x200;
1105}
1106
1107/*
1108 * Calculate the receive filter according to the
1109 * operating mode and state:
1110 *
1111 * o always accept unicast, broadcast, and multicast traffic
1112 * o maintain current state of phy error reception
1113 * o probe request frames are accepted only when operating in
1114 * hostap, adhoc, or monitor modes
1115 * o enable promiscuous mode according to the interface state
1116 * o accept beacons:
1117 * - when operating in adhoc mode so the 802.11 layer creates
1118 * node table entries for peers,
1119 * - when operating in station mode for collecting rssi data when
1120 * the station is otherwise quiet, or
1121 * - when scanning
1122 */
1123u_int32_t
1124ath_calcrxfilter(struct ath_softc *sc)
1125{
1126 struct ieee80211com *ic = &sc->sc_ic;
1127 struct ath_hal *ah = sc->sc_ah;
1128 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
1129 u_int32_t rfilt;
1130
1131 rfilt = (ath_hal_get_rx_filter(ah)((*(ah)->ah_get_rx_filter)((ah))) & HAL_RX_FILTER_PHYERR0x00000100)
1132 | HAL_RX_FILTER_UCAST0x00000001 | HAL_RX_FILTER_BCAST0x00000004 | HAL_RX_FILTER_MCAST0x00000002;
1133 if (ic->ic_opmode != IEEE80211_M_STA)
1134 rfilt |= HAL_RX_FILTER_PROBEREQ0x00000080;
1135#ifndef IEEE80211_STA_ONLY
1136 if (ic->ic_opmode != IEEE80211_M_AHDEMO)
1137#endif
1138 rfilt |= HAL_RX_FILTER_BEACON0x00000010;
1139 if (ifp->if_flags & IFF_PROMISC0x100)
1140 rfilt |= HAL_RX_FILTER_PROM0x00000020;
1141 return rfilt;
1142}
1143
1144void
1145ath_mode_init(struct ath_softc *sc)
1146{
1147 struct ath_hal *ah = sc->sc_ah;
1148 u_int32_t rfilt, mfilt[2];
1149
1150 /* configure rx filter */
1151 rfilt = ath_calcrxfilter(sc);
1152 ath_hal_set_rx_filter(ah, rfilt)((*(ah)->ah_set_rx_filter)((ah), (rfilt)));
1153
1154 /* configure operational mode */
1155 ath_hal_set_opmode(ah)((*(ah)->ah_set_opmode)((ah)));
1156
1157 /* calculate and install multicast filter */
1158 mfilt[0] = mfilt[1] = 0;
1159 ath_mcastfilter_compute(sc, &mfilt);
1160 ath_hal_set_mcast_filter(ah, mfilt[0], mfilt[1])((*(ah)->ah_set_mcast_filter)((ah), (mfilt[0]), (mfilt[1])
))
;
1161 DPRINTF(ATH_DEBUG_MODE, ("%s: RX filter 0x%x, MC filter %08x:%08x\n",
1162 __func__, rfilt, mfilt[0], mfilt[1]));
1163}
1164
1165struct mbuf *
1166ath_getmbuf(int flags, int type, u_int pktlen)
1167{
1168 struct mbuf *m;
1169
1170 KASSERT(pktlen <= MCLBYTES, ("802.11 packet too large: %u", pktlen));
1171 MGETHDR(m, flags, type);
1172 if (m != NULL && pktlen > MHLEN) {
1173 MCLGET(m, flags);
1174 if ((m->m_flags & M_EXT) == 0) {
1175 m_free(m);
1176 m = NULL;
1177 }
1178 }
1179 return m;
1180}
1181
1182#ifndef IEEE80211_STA_ONLY
1183int
1184ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
1185{
1186 struct ieee80211com *ic = &sc->sc_ic;
1187 struct ath_hal *ah = sc->sc_ah;
1188 struct ath_buf *bf;
1189 struct ath_desc *ds;
1190 struct mbuf *m;
1191 int error;
1192 u_int8_t rate;
1193 const HAL_RATE_TABLE *rt;
1194 u_int flags = 0;
1195
1196 bf = sc->sc_bcbuf;
1197 if (bf->bf_m != NULL) {
1198 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
1199 m_freem(bf->bf_m);
1200 bf->bf_m = NULL;
1201 bf->bf_node = NULL;
1202 }
1203 /*
1204 * NB: the beacon data buffer must be 32-bit aligned;
1205 * we assume the mbuf routines will return us something
1206 * with this alignment (perhaps should assert).
1207 */
1208 m = ieee80211_beacon_alloc(ic, ni);
1209 if (m == NULL) {
1210 DPRINTF(ATH_DEBUG_BEACON, ("%s: cannot get mbuf/cluster\n",
1211 __func__));
1212 sc->sc_stats.ast_be_nombuf++;
1213 return ENOMEM;
1214 }
1215
1216 DPRINTF(ATH_DEBUG_BEACON, ("%s: m %p len %u\n", __func__, m, m->m_len));
1217 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m,
1218 BUS_DMA_NOWAIT);
1219 if (error != 0) {
1220 m_freem(m);
1221 return error;
1222 }
1223 KASSERT(bf->bf_nseg == 1,
1224 ("%s: multi-segment packet; nseg %u", __func__, bf->bf_nseg));
1225 bf->bf_m = m;
1226
1227 /* setup descriptors */
1228 ds = bf->bf_desc;
1229 bzero(ds, sizeof(struct ath_desc));
1230
1231 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol) {
1232 ds->ds_link = bf->bf_daddr; /* link to self */
1233 flags |= HAL_TXDESC_VEOL;
Value stored to 'flags' is never read
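(The value assigned here is overwritten unconditionally by 'flags = HAL_TXDESC_NOACK' at line 1254 before any use, and HAL_TXDESC_VEOL is added back at line 1256 for IBSS mode, so dropping the assignment at line 1233 would not change behaviour.)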
1234 } else {
1235 ds->ds_link = 0;
1236 }
1237 ds->ds_data = bf->bf_segs[0].ds_addr;
1238
1239 DPRINTF(ATH_DEBUG_ANY, ("%s: segaddr %p seglen %u\n", __func__,
1240 (caddr_t)bf->bf_segs[0].ds_addr, (u_int)bf->bf_segs[0].ds_len));
1241
1242 /*
1243 * Calculate rate code.
1244 * XXX everything at min xmit rate
1245 */
1246 rt = sc->sc_currates;
1247 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
1248 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
1249 rate = rt->info[0].rateCode | rt->info[0].shortPreamble;
1250 } else {
1251 rate = rt->info[0].rateCode;
1252 }
1253
1254 flags = HAL_TXDESC_NOACK;
1255 if (ic->ic_opmode == IEEE80211_M_IBSS)
1256 flags |= HAL_TXDESC_VEOL;
1257
1258 if (!ath_hal_setup_tx_desc(ah, ds
1259 , m->m_pkthdr.len + IEEE80211_CRC_LEN /* packet length */
1260 , sizeof(struct ieee80211_frame) /* header length */
1261 , HAL_PKT_TYPE_BEACON /* Atheros packet type */
1262 , 60 /* txpower XXX */
1263 , rate, 1 /* series 0 rate/tries */
1264 , HAL_TXKEYIX_INVALID /* no encryption */
1265 , 0 /* antenna mode */
1266 , flags /* no ack for beacons */
1267 , 0 /* rts/cts rate */
1268 , 0 /* rts/cts duration */
1269 )) {
1270 printf("%s: ath_hal_setup_tx_desc failed\n", __func__);
1271 return -1;
1272 }
1273 /* NB: beacon's BufLen must be a multiple of 4 bytes */
1274 /* XXX verify mbuf data area covers this roundup */
1275 if (!ath_hal_fill_tx_desc(ah, ds
1276 , roundup(bf->bf_segs[0].ds_len, 4) /* buffer length */
1277 , AH_TRUE /* first segment */
1278 , AH_TRUE /* last segment */
1279 )) {
1280 printf("%s: ath_hal_fill_tx_desc failed\n", __func__);
1281 return -1;
1282 }
1283
1284 /* XXX it is not appropriate to bus_dmamap_sync? -dcy */
1285
1286 return 0;
1287}
1288
1289void
1290ath_beacon_proc(void *arg, int pending)
1291{
1292 struct ath_softc *sc = arg;
1293 struct ieee80211com *ic = &sc->sc_ic;
1294 struct ath_buf *bf = sc->sc_bcbuf;
1295 struct ath_hal *ah = sc->sc_ah;
1296
1297 DPRINTF(ATH_DEBUG_BEACON_PROC, ("%s: pending %u\n", __func__, pending));
1298 if (ic->ic_opmode == IEEE80211_M_STA ||
1299 bf == NULL || bf->bf_m == NULL) {
1300 DPRINTF(ATH_DEBUG_ANY, ("%s: ic_flags=%x bf=%p bf_m=%p\n",
1301 __func__, ic->ic_flags, bf, bf ? bf->bf_m : NULL));
1302 return;
1303 }
1304 /* TODO: update beacon to reflect PS poll state */
1305 if (!ath_hal_stop_tx_dma(ah, sc->sc_bhalq)) {
1306 DPRINTF(ATH_DEBUG_ANY, ("%s: beacon queue %u did not stop?\n",
1307 __func__, sc->sc_bhalq));
1308 }
1309 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
1310 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1311
1312 ath_hal_put_tx_buf(ah, sc->sc_bhalq, bf->bf_daddr);
1313 ath_hal_tx_start(ah, sc->sc_bhalq);
1314 DPRINTF(ATH_DEBUG_BEACON_PROC,
1315 ("%s: TXDP%u = %p (%p)\n", __func__,
1316 sc->sc_bhalq, (caddr_t)bf->bf_daddr, bf->bf_desc));
1317}
1318
1319void
1320ath_beacon_free(struct ath_softc *sc)
1321{
1322 struct ath_buf *bf = sc->sc_bcbuf;
1323
1324 if (bf->bf_m != NULL) {
1325 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
1326 m_freem(bf->bf_m);
1327 bf->bf_m = NULL;
1328 bf->bf_node = NULL;
1329 }
1330}
1331#endif /* IEEE80211_STA_ONLY */
1332
1333/*
1334 * Configure the beacon and sleep timers.
1335 *
1336 * When operating as an AP this resets the TSF and sets
1337 * up the hardware to notify us when we need to issue beacons.
1338 *
1339 * When operating in station mode this sets up the beacon
1340 * timers according to the timestamp of the last received
1341 * beacon and the current TSF, configures PCF and DTIM
1342 * handling, programs the sleep registers so the hardware
1343 * will wakeup in time to receive beacons, and configures
1344 * the beacon miss handling so we'll receive a BMISS
1345 * interrupt when we stop seeing beacons from the AP
1346 * we've associated with.
1347 */
1348void
1349ath_beacon_config(struct ath_softc *sc)
1350{
1351#define MS_TO_TU(x)(((x) * 1000) / 1024) (((x) * 1000) / 1024)
1352 struct ath_hal *ah = sc->sc_ah;
1353 struct ieee80211com *ic = &sc->sc_ic;
1354 struct ieee80211_node *ni = ic->ic_bss;
1355 u_int32_t nexttbtt, intval;
1356
1357      nexttbtt = (LE_READ_4(ni->ni_tstamp + 4) << 22) |
1358          (LE_READ_4(ni->ni_tstamp) >> 10);
1359      intval = MAX(1, ni->ni_intval) & HAL_BEACON_PERIOD;
1360      if (nexttbtt == 0) {    /* e.g. for ap mode */
1361          nexttbtt = intval;
1362      } else if (intval) {
1363          nexttbtt = roundup(nexttbtt, intval);
1364      }
1365      DPRINTF(ATH_DEBUG_BEACON, ("%s: intval %u nexttbtt %u\n",
1366          __func__, ni->ni_intval, nexttbtt));
1367      if (ic->ic_opmode == IEEE80211_M_STA) {
1368          HAL_BEACON_STATE bs;
1369
1370          /* NB: no PCF support right now */
1371          bzero(&bs, sizeof(bs));
1372          bs.bs_intval = intval;
1373          bs.bs_nexttbtt = nexttbtt;
1374          bs.bs_dtimperiod = bs.bs_intval;
1375          bs.bs_nextdtim = nexttbtt;
1376          /*
1377           * Calculate the number of consecutive beacons to miss
1378           * before taking a BMISS interrupt.
1379           * Note that we clamp the result to at most 7 beacons.
1380           */
1381          bs.bs_bmissthreshold = ic->ic_bmissthres;
1382          if (bs.bs_bmissthreshold > 7) {
1383              bs.bs_bmissthreshold = 7;
1384          } else if (bs.bs_bmissthreshold <= 0) {
1385              bs.bs_bmissthreshold = 1;
1386          }
1387
1388          /*
1389           * Calculate sleep duration. The configuration is
1390           * given in ms. We insure a multiple of the beacon
1391           * period is used. Also, if the sleep duration is
1392           * greater than the DTIM period then it makes senses
1393           * to make it a multiple of that.
1394           *
1395           * XXX fixed at 100ms
1396           */
1397          bs.bs_sleepduration =
1398              roundup(MS_TO_TU(100), bs.bs_intval);
1399          if (bs.bs_sleepduration > bs.bs_dtimperiod) {
1400              bs.bs_sleepduration =
1401                  roundup(bs.bs_sleepduration, bs.bs_dtimperiod);
1402          }
1403
1404          DPRINTF(ATH_DEBUG_BEACON,
1405              ("%s: intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u"
1406              " sleep %u\n"
1407              , __func__
1408              , bs.bs_intval
1409              , bs.bs_nexttbtt
1410              , bs.bs_dtimperiod
1411              , bs.bs_nextdtim
1412              , bs.bs_bmissthreshold
1413              , bs.bs_sleepduration
1414              ));
1415          ath_hal_set_intr(ah, 0);
1416          ath_hal_set_beacon_timers(ah, &bs, 0/*XXX*/, 0, 0);
1417          sc->sc_imask |= HAL_INT_BMISS;
1418          ath_hal_set_intr(ah, sc->sc_imask);
1419      }
1420  #ifndef IEEE80211_STA_ONLY
1421      else {
1422          ath_hal_set_intr(ah, 0);
1423          if (nexttbtt == intval)
1424              intval |= HAL_BEACON_RESET_TSF;
1425          if (ic->ic_opmode == IEEE80211_M_IBSS) {
1426              /*
1427               * In IBSS mode enable the beacon timers but only
1428               * enable SWBA interrupts if we need to manually
1429               * prepare beacon frames. Otherwise we use a
1430               * self-linked tx descriptor and let the hardware
1431               * deal with things.
1432               */
1433              intval |= HAL_BEACON_ENA;
1434              if (!sc->sc_veol)
1435                  sc->sc_imask |= HAL_INT_SWBA;
1436          } else if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
1437              /*
1438               * In AP mode we enable the beacon timers and
1439               * SWBA interrupts to prepare beacon frames.
1440               */
1441              intval |= HAL_BEACON_ENA;
1442              sc->sc_imask |= HAL_INT_SWBA;    /* beacon prepare */
1443          }
1444          ath_hal_init_beacon(ah, nexttbtt, intval);
1445          ath_hal_set_intr(ah, sc->sc_imask);
1446 /*
1447 * When using a self-linked beacon descriptor in IBBS
1448 * mode load it once here.
1449 */
1450 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol)
1451 ath_beacon_proc(sc, 0);
1452 }
1453#endif
1454}
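
The sleep-duration math above is easier to check with concrete numbers. The following standalone sketch (user-space C, not driver code; the 100 TU beacon interval is an assumed example value) reproduces the MS_TO_TU and roundup macros and shows a 100 ms sleep request becoming 97 TU and then being rounded up to the next beacon boundary:

/*
 * Standalone sketch of the TU arithmetic used in ath_beacon_config(),
 * with a hypothetical beacon interval of 100 TU.
 */
#include <stdio.h>

#define MS_TO_TU(x)    (((x) * 1000) / 1024)
#define roundup(x, y)  ((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
    unsigned intval = 100;    /* assumed beacon interval, in TU */
    unsigned sleepdur = roundup(MS_TO_TU(100), intval);

    /* 100 ms -> 97 TU, rounded up to the next beacon boundary -> 100 TU */
    printf("MS_TO_TU(100) = %u TU, sleep duration = %u TU\n",
        MS_TO_TU(100), sleepdur);
    return 0;
}

Rounding the sleep duration to a multiple of the beacon period keeps hardware wakeups aligned with TBTT, which is why the driver rounds rather than truncates.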
1455
1456int
1457ath_desc_alloc(struct ath_softc *sc)
1458{
1459 int i, bsize, error = -1;
1460 struct ath_desc *ds;
1461 struct ath_buf *bf;
1462
1463 /* allocate descriptors */
1464      sc->sc_desc_len = sizeof(struct ath_desc) *
1465          (ATH_TXBUF * ATH_TXDESC + ATH_RXBUF + 1);
1466      if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_desc_len, PAGE_SIZE,
1467          0, &sc->sc_dseg, 1, &sc->sc_dnseg, 0)) != 0) {
1468          printf("%s: unable to allocate control data, error = %d\n",
1469              sc->sc_dev.dv_xname, error);
1470          goto fail0;
1471      }
1472
1473      if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg,
1474          sc->sc_desc_len, (caddr_t *)&sc->sc_desc, BUS_DMA_COHERENT)) != 0) {
1475          printf("%s: unable to map control data, error = %d\n",
1476              sc->sc_dev.dv_xname, error);
1477          goto fail1;
1478      }
1479
1480      if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_desc_len, 1,
1481          sc->sc_desc_len, 0, 0, &sc->sc_ddmamap)) != 0) {
1482          printf("%s: unable to create control data DMA map, "
1483              "error = %d\n", sc->sc_dev.dv_xname, error);
1484          goto fail2;
1485      }
1486
1487      if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_ddmamap, sc->sc_desc,
1488          sc->sc_desc_len, NULL, 0)) != 0) {
1489          printf("%s: unable to load control data DMA map, error = %d\n",
1490              sc->sc_dev.dv_xname, error);
1491          goto fail3;
1492      }
1493
1494      ds = sc->sc_desc;
1495      sc->sc_desc_paddr = sc->sc_ddmamap->dm_segs[0].ds_addr;
1496
1497      DPRINTF(ATH_DEBUG_XMIT_DESC|ATH_DEBUG_RECV_DESC,
1498          ("ath_desc_alloc: DMA map: %p (%lu) -> %p (%lu)\n",
1499          ds, (u_long)sc->sc_desc_len,
1500          (caddr_t) sc->sc_desc_paddr, /*XXX*/ (u_long) sc->sc_desc_len));
1501
1502      /* allocate buffers */
1503      bsize = sizeof(struct ath_buf) * (ATH_TXBUF + ATH_RXBUF + 1);
1504      bf = malloc(bsize, M_DEVBUF, M_NOWAIT | M_ZERO);
1505      if (bf == NULL) {
1506          printf("%s: unable to allocate Tx/Rx buffers\n",
1507              sc->sc_dev.dv_xname);
1508          error = ENOMEM;
1509          goto fail3;
1510      }
1511      sc->sc_bufptr = bf;
1512
1513      TAILQ_INIT(&sc->sc_rxbuf);
1514      for (i = 0; i < ATH_RXBUF; i++, bf++, ds++) {
1515          bf->bf_desc = ds;
1516          bf->bf_daddr = sc->sc_desc_paddr +
1517              ((caddr_t)ds - (caddr_t)sc->sc_desc);
1518          if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1519              MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) {
1520              printf("%s: unable to create Rx dmamap, error = %d\n",
1521                  sc->sc_dev.dv_xname, error);
1522              goto fail4;
1523          }
1524          TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
1525      }
1526
1527      TAILQ_INIT(&sc->sc_txbuf);
1528      for (i = 0; i < ATH_TXBUF; i++, bf++, ds += ATH_TXDESC) {
1529          bf->bf_desc = ds;
1530          bf->bf_daddr = sc->sc_desc_paddr +
1531              ((caddr_t)ds - (caddr_t)sc->sc_desc);
1532          if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1533              ATH_TXDESC, MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) {
1534              printf("%s: unable to create Tx dmamap, error = %d\n",
1535                  sc->sc_dev.dv_xname, error);
1536              goto fail5;
1537          }
1538          TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
1539      }
1540      TAILQ_INIT(&sc->sc_txq);
1541
1542      /* beacon buffer */
1543      bf->bf_desc = ds;
1544      bf->bf_daddr = sc->sc_desc_paddr + ((caddr_t)ds - (caddr_t)sc->sc_desc);
1545      if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 0,
1546          &bf->bf_dmamap)) != 0) {
1547          printf("%s: unable to create beacon dmamap, error = %d\n",
1548              sc->sc_dev.dv_xname, error);
1549          goto fail5;
1550      }
1551      sc->sc_bcbuf = bf;
1552      return 0;
1553
1554  fail5:
1555      for (i = ATH_RXBUF; i < ATH_RXBUF + ATH_TXBUF; i++) {
1556          if (sc->sc_bufptr[i].bf_dmamap == NULL)
1557              continue;
1558          bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap);
1559      }
1560  fail4:
1561      for (i = 0; i < ATH_RXBUF; i++) {
1562          if (sc->sc_bufptr[i].bf_dmamap == NULL)
1563              continue;
1564          bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap);
1565      }
1566  fail3:
1567      bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap);
1568  fail2:
1569      bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap);
1570      sc->sc_ddmamap = NULL;
1571  fail1:
1572      bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_desc, sc->sc_desc_len);
1573  fail0:
1574      bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg);
1575 return error;
1576}
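
The per-buffer DMA address computed in the loops above is the descriptor block's bus address plus the byte offset of the descriptor within the mapped virtual block. Below is a standalone sketch of just that arithmetic, with made-up sizes and a made-up base address (struct fake_desc merely stands in for struct ath_desc):

/*
 * Standalone sketch of the bf_daddr computation in ath_desc_alloc().
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct fake_desc { uint32_t word[4]; };    /* stand-in for struct ath_desc */

int
main(void)
{
    int ntx = 60, ntxdesc = 8, nrx = 40;
    size_t desc_len = sizeof(struct fake_desc) * (ntx * ntxdesc + nrx + 1);
    struct fake_desc *base = calloc(1, desc_len);
    uint64_t paddr = 0x1000000;            /* pretend bus/physical base */
    struct fake_desc *ds;
    uint64_t daddr;

    if (base == NULL)
        return 1;
    ds = base + nrx;                       /* e.g. the first tx descriptor */
    daddr = paddr + ((char *)ds - (char *)base);
    printf("desc block %zu bytes, desc #%d at bus address 0x%llx\n",
        desc_len, nrx, (unsigned long long)daddr);
    free(base);
    return 0;
}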
1577
1578void
1579ath_desc_free(struct ath_softc *sc)
1580{
1581 struct ath_buf *bf;
1582
1583      bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap);
1584      bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap);
1585      bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg);
1586
1587      TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) {
1588          bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
1589          bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
1590          m_freem(bf->bf_m);
1591      }
1592      TAILQ_FOREACH(bf, &sc->sc_txbuf, bf_list)
1593          bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
1594      TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
1595          if (bf->bf_m) {
1596              bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
1597              bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
1598              m_freem(bf->bf_m);
1599              bf->bf_m = NULL;
1600          }
1601      }
1602      if (sc->sc_bcbuf != NULL) {
1603          bus_dmamap_unload(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap);
1604          bus_dmamap_destroy(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap);
1605          sc->sc_bcbuf = NULL;
1606      }
1607
1608      TAILQ_INIT(&sc->sc_rxbuf);
1609      TAILQ_INIT(&sc->sc_txbuf);
1610      TAILQ_INIT(&sc->sc_txq);
1611      free(sc->sc_bufptr, M_DEVBUF, 0);
1612      sc->sc_bufptr = NULL;
1613}
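
The TAILQ_* macros that the analyzer expands inline throughout ath_desc_alloc() and ath_desc_free() are the standard <sys/queue.h> tail-queue operations. A minimal standalone example of the same free-list/active-queue pattern (the toy struct buf here is not struct ath_buf):

/*
 * Minimal <sys/queue.h> sketch of the sc_rxbuf/sc_txbuf/sc_txq bookkeeping:
 * buffers move between tail queues with TAILQ_INSERT_TAIL/TAILQ_REMOVE and
 * are walked with TAILQ_FOREACH.
 */
#include <sys/queue.h>
#include <stdio.h>

struct buf {
    int              id;
    TAILQ_ENTRY(buf) link;
};
TAILQ_HEAD(bufq, buf);

int
main(void)
{
    struct bufq freeq, activeq;
    struct buf bufs[4], *bp;
    int i;

    TAILQ_INIT(&freeq);
    TAILQ_INIT(&activeq);
    for (i = 0; i < 4; i++) {
        bufs[i].id = i;
        TAILQ_INSERT_TAIL(&freeq, &bufs[i], link);
    }

    /* take a buffer off the free list and put it on the active queue */
    bp = TAILQ_FIRST(&freeq);
    TAILQ_REMOVE(&freeq, bp, link);
    TAILQ_INSERT_TAIL(&activeq, bp, link);

    TAILQ_FOREACH(bp, &freeq, link)
        printf("free buf %d\n", bp->id);
    return 0;
}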
1614
1615struct ieee80211_node *
1616ath_node_alloc(struct ieee80211com *ic)
1617{
1618 struct ath_node *an;
1619
1620 an = malloc(sizeof(*an), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
1621 if (an) {
1622 int i;
1623 for (i = 0; i < ATH_RHIST_SIZE16; i++)
1624 an->an_rx_hist[i].arh_ticks = ATH_RHIST_NOTIME(~0);
1625 an->an_rx_hist_next = ATH_RHIST_SIZE16-1;
1626 return &an->an_node;
1627 } else
1628 return NULL((void *)0);
1629}
1630
1631void
1632ath_node_free(struct ieee80211com *ic, struct ieee80211_node *ni)
1633{
1634      struct ath_softc *sc = ic->ic_if.if_softc;
1635      struct ath_buf *bf;
1636
1637      TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) {
1638          if (bf->bf_node == ni)
1639              bf->bf_node = NULL;
1640 }
1641 (*sc->sc_node_free)(ic, ni);
1642}
1643
1644void
1645ath_node_copy(struct ieee80211com *ic,
1646 struct ieee80211_node *dst, const struct ieee80211_node *src)
1647{
1648 struct ath_softc *sc = ic->ic_ific_ac.ac_if.if_softc;
1649
1650 bcopy(&src[1], &dst[1],
1651 sizeof(struct ath_node) - sizeof(struct ieee80211_node));
1652 (*sc->sc_node_copy)(ic, dst, src);
1653}
1654
1655u_int8_t
1656ath_node_getrssi(struct ieee80211com *ic, const struct ieee80211_node *ni)
1657{
1658 const struct ath_node *an = ATH_NODE(ni)((struct ath_node *)(ni));
1659 int i, now, nsamples, rssi;
1660
1661 /*
1662 * Calculate the average over the last second of sampled data.
1663 */
1664 now = ATH_TICKS()(ticks);
1665 nsamples = 0;
1666 rssi = 0;
1667 i = an->an_rx_hist_next;
1668 do {
1669 const struct ath_recv_hist *rh = &an->an_rx_hist[i];
1670 if (rh->arh_ticks == ATH_RHIST_NOTIME(~0))
1671 goto done;
1672 if (now - rh->arh_ticks > hz)
1673 goto done;
1674 rssi += rh->arh_rssi;
1675 nsamples++;
1676 if (i == 0) {
1677 i = ATH_RHIST_SIZE16-1;
1678 } else {
1679 i--;
1680 }
1681 } while (i != an->an_rx_hist_next);
1682done:
1683 /*
1684 * Return either the average or the last known
1685 * value if there is no recent data.
1686 */
1687 return (nsamples ? rssi / nsamples : an->an_rx_hist[i].arh_rssi);
1688}
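
ath_node_getrssi() averages a small ring of timestamped RSSI samples, walking backwards from the most recent entry and stopping at the first unused or stale slot. A standalone sketch of the same loop with invented values (hz, the tick stamps and RSSI numbers are examples only):

/*
 * Standalone sketch of the one-second RSSI ring-buffer average.
 */
#include <stdio.h>

#define RHIST_SIZE 16
#define NOTIME     (~0U)

struct sample { unsigned ticks; int rssi; };

static int
avg_rssi(const struct sample *h, int next, unsigned now, unsigned hz)
{
    int i = next, nsamples = 0, rssi = 0;

    do {
        if (h[i].ticks == NOTIME || now - h[i].ticks > hz)
            break;                          /* unused or older than 1 s */
        rssi += h[i].rssi;
        nsamples++;
        i = (i == 0) ? RHIST_SIZE - 1 : i - 1;
    } while (i != next);
    /* average, or the last known value if no recent data */
    return nsamples ? rssi / nsamples : h[i].rssi;
}

int
main(void)
{
    struct sample hist[RHIST_SIZE];
    unsigned hz = 100, now = 1000;
    int i;

    for (i = 0; i < RHIST_SIZE; i++) {
        hist[i].ticks = NOTIME;
        hist[i].rssi = 0;
    }
    hist[3].ticks = 990; hist[3].rssi = 20;  /* fresh sample */
    hist[4].ticks = 995; hist[4].rssi = 30;  /* most recent sample */
    printf("avg rssi = %d\n", avg_rssi(hist, 4, now, hz));
    return 0;
}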
1689
1690int
1691ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
1692{
1693 struct ath_hal *ah = sc->sc_ah;
1694 int error;
1695 struct mbuf *m;
1696 struct ath_desc *ds;
1697
1698 m = bf->bf_m;
1699 if (m == NULL((void *)0)) {
1700 /*
1701 * NB: by assigning a page to the rx dma buffer we
1702 * implicitly satisfy the Atheros requirement that
1703 * this buffer be cache-line-aligned and sized to be
1704 * multiple of the cache line size. Not doing this
1705 * causes weird stuff to happen (for the 5210 at least).
1706 */
1707 m = ath_getmbuf(M_DONTWAIT0x0002, MT_DATA1, MCLBYTES(1 << 11));
1708 if (m == NULL((void *)0)) {
1709 DPRINTF(ATH_DEBUG_ANY,
1710 ("%s: no mbuf/cluster\n", __func__));
1711 sc->sc_stats.ast_rx_nombuf++;
1712 return ENOMEM12;
1713 }
1714 bf->bf_m = m;
1715 m->m_pkthdrM_dat.MH.MH_pkthdr.len = m->m_lenm_hdr.mh_len = m->m_extM_dat.MH.MH_dat.MH_ext.ext_size;
1716
1717          error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m,
1718              BUS_DMA_NOWAIT);
1719          if (error != 0) {
1720              DPRINTF(ATH_DEBUG_ANY,
1721                  ("%s: ath_bus_dmamap_load_mbuf failed;"
1722                  " error %d\n", __func__, error));
1723              sc->sc_stats.ast_rx_busdma++;
1724              return error;
1725          }
1726          KASSERT(bf->bf_nseg == 1,
1727              ("ath_rxbuf_init: multi-segment packet; nseg %u",
1728              bf->bf_nseg));
1729      }
1730      bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
1731          bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1732
1733 /*
1734 * Setup descriptors. For receive we always terminate
1735 * the descriptor list with a self-linked entry so we'll
1736 * not get overrun under high load (as can happen with a
1737 * 5212 when ANI processing enables PHY errors).
1738 *
1739 * To insure the last descriptor is self-linked we create
1740 * each descriptor as self-linked and add it to the end. As
1741 * each additional descriptor is added the previous self-linked
1742 * entry is ``fixed'' naturally. This should be safe even
1743 * if DMA is happening. When processing RX interrupts we
1744 * never remove/process the last, self-linked, entry on the
1745 * descriptor list. This insures the hardware always has
1746 * someplace to write a new frame.
1747 */
1748 ds = bf->bf_desc;
1749 bzero(ds, sizeof(struct ath_desc))__builtin_bzero((ds), (sizeof(struct ath_desc)));
1750#ifndef IEEE80211_STA_ONLY
1751 if (sc->sc_ic.ic_opmode != IEEE80211_M_HOSTAP)
1752 ds->ds_link = bf->bf_daddr; /* link to self */
1753#endif
1754      ds->ds_data = bf->bf_segs[0].ds_addr;
1755      ath_hal_setup_rx_desc(ah, ds
1756          , m->m_len    /* buffer size */
1757          , 0
1758      );
1759
1760 if (sc->sc_rxlink != NULL((void *)0))
1761 *sc->sc_rxlink = bf->bf_daddr;
1762 sc->sc_rxlink = &ds->ds_link;
1763 return 0;
1764}
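
The self-linked receive descriptor scheme described in the comment above can be illustrated without hardware: each new descriptor is created pointing at itself and appended at the tail, which retroactively fixes the previous tail's self-link, so the last descriptor always points at itself and the DMA engine always has somewhere to write. A standalone sketch using array indices in place of bus addresses:

/*
 * Standalone sketch of the self-linked RX descriptor chain.
 */
#include <stdio.h>

#define NDESC 4

struct rxdesc { int link; };    /* index of the next descriptor */

int
main(void)
{
    struct rxdesc ring[NDESC];
    int *rxlink = NULL;         /* plays the role of sc->sc_rxlink */
    int i;

    for (i = 0; i < NDESC; i++) {
        ring[i].link = i;       /* link to self */
        if (rxlink != NULL)
            *rxlink = i;        /* previous tail now points here */
        rxlink = &ring[i].link;
    }

    for (i = 0; i < NDESC; i++)
        printf("desc %d -> %d%s\n", i, ring[i].link,
            ring[i].link == i ? " (self-linked tail)" : "");
    return 0;
}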
1765
1766void
1767ath_rx_proc(void *arg, int npending)
1768{
1769 struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 };
1770#define PA2DESC(_sc, _pa) \
1771 ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \
1772 ((_pa) - (_sc)->sc_desc_paddr)))
1773 struct ath_softc *sc = arg;
1774 struct ath_buf *bf;
1775 struct ieee80211com *ic = &sc->sc_ic;
1776 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
1777 struct ath_hal *ah = sc->sc_ah;
1778 struct ath_desc *ds;
1779 struct mbuf *m;
1780 struct ieee80211_frame *wh;
1781 struct ieee80211_frame whbuf;
1782 struct ieee80211_rxinfo rxi;
1783 struct ieee80211_node *ni;
1784 struct ath_node *an;
1785 struct ath_recv_hist *rh;
1786 int len;
1787 u_int phyerr;
1788 HAL_STATUS status;
1789
1790 DPRINTF(ATH_DEBUG_RX_PROC, ("%s: pending %u\n", __func__, npending));
1791 do {
1792 bf = TAILQ_FIRST(&sc->sc_rxbuf)((&sc->sc_rxbuf)->tqh_first);
1793 if (bf == NULL((void *)0)) { /* NB: shouldn't happen */
1794 printf("%s: ath_rx_proc: no buffer!\n", ifp->if_xname);
1795 break;
1796 }
1797 ds = bf->bf_desc;
1798 if (ds->ds_link == bf->bf_daddr) {
1799 /* NB: never process the self-linked entry at the end */
1800 break;
1801 }
1802 m = bf->bf_m;
1803 if (m == NULL((void *)0)) { /* NB: shouldn't happen */
1804 printf("%s: ath_rx_proc: no mbuf!\n", ifp->if_xname);
1805 continue;
1806 }
1807 /* XXX sync descriptor memory */
1808 /*
1809 * Must provide the virtual address of the current
1810 * descriptor, the physical address, and the virtual
1811 * address of the next descriptor in the h/w chain.
1812 * This allows the HAL to look ahead to see if the
1813 * hardware is done with a descriptor by checking the
1814 * done bit in the following descriptor and the address
1815 * of the current descriptor the DMA engine is working
1816 * on. All this is necessary because of our use of
1817 * a self-linked list to avoid rx overruns.
1818 */
1819          status = ath_hal_proc_rx_desc(ah, ds,
1820              bf->bf_daddr, PA2DESC(sc, ds->ds_link));
1821#ifdef AR_DEBUG
1822 if (ath_debug & ATH_DEBUG_RECV_DESC)
1823 ath_printrxbuf(bf, status == HAL_OK0);
1824#endif
1825 if (status == HAL_EINPROGRESS36)
1826 break;
1827          TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
1828
1829 if (ds->ds_rxstatds_us.rx.rs_more) {
1830 /*
1831 * Frame spans multiple descriptors; this
1832 * cannot happen yet as we don't support
1833 * jumbograms. If not in monitor mode,
1834 * discard the frame.
1835 */
1836
1837 /*
1838 * Enable this if you want to see error
1839 * frames in Monitor mode.
1840 */
1841#ifdef ERROR_FRAMES
1842 if (ic->ic_opmode != IEEE80211_M_MONITOR) {
1843 /* XXX statistic */
1844 goto rx_next;
1845 }
1846#endif
1847 /* fall thru for monitor mode handling... */
1848
1849 } else if (ds->ds_rxstatds_us.rx.rs_status != 0) {
1850 if (ds->ds_rxstatds_us.rx.rs_status & HAL_RXERR_CRC0x01)
1851 sc->sc_stats.ast_rx_crcerr++;
1852 if (ds->ds_rxstatds_us.rx.rs_status & HAL_RXERR_FIFO0x04)
1853 sc->sc_stats.ast_rx_fifoerr++;
1854 if (ds->ds_rxstatds_us.rx.rs_status & HAL_RXERR_DECRYPT0x08)
1855 sc->sc_stats.ast_rx_badcrypt++;
1856 if (ds->ds_rxstatds_us.rx.rs_status & HAL_RXERR_PHY0x02) {
1857 sc->sc_stats.ast_rx_phyerr++;
1858 phyerr = ds->ds_rxstatds_us.rx.rs_phyerr & 0x1f;
1859 sc->sc_stats.ast_rx_phy[phyerr]++;
1860 }
1861
1862 /*
1863 * reject error frames, we normally don't want
1864 * to see them in monitor mode.
1865 */
1866 if ((ds->ds_rxstatds_us.rx.rs_status & HAL_RXERR_DECRYPT0x08 ) ||
1867 (ds->ds_rxstatds_us.rx.rs_status & HAL_RXERR_PHY0x02))
1868 goto rx_next;
1869
1870 /*
1871 * In monitor mode, allow through packets that
1872 * cannot be decrypted
1873 */
1874 if ((ds->ds_rxstatds_us.rx.rs_status & ~HAL_RXERR_DECRYPT0x08) ||
1875 sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR)
1876 goto rx_next;
1877 }
1878
1879 len = ds->ds_rxstatds_us.rx.rs_datalen;
1880 if (len < IEEE80211_MIN_LEN(sizeof(struct ieee80211_frame_min) + 4)) {
1881 DPRINTF(ATH_DEBUG_RECV, ("%s: short packet %d\n",
1882 __func__, len));
1883 sc->sc_stats.ast_rx_tooshort++;
1884 goto rx_next;
1885 }
1886
1887          bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
1888              bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1889
1890          bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
1891 bf->bf_m = NULL((void *)0);
1892 m->m_pkthdrM_dat.MH.MH_pkthdr.len = m->m_lenm_hdr.mh_len = len;
1893
1894#if NBPFILTER1 > 0
1895 if (sc->sc_drvbpf) {
1896 sc->sc_rxtapsc_rxtapu.th.wr_flags = IEEE80211_RADIOTAP_F_FCS0x10;
1897 sc->sc_rxtapsc_rxtapu.th.wr_rate =
1898 sc->sc_hwmap[ds->ds_rxstatds_us.rx.rs_rate] &
1899 IEEE80211_RATE_VAL0x7f;
1900 sc->sc_rxtapsc_rxtapu.th.wr_antenna = ds->ds_rxstatds_us.rx.rs_antenna;
1901 sc->sc_rxtapsc_rxtapu.th.wr_rssi = ds->ds_rxstatds_us.rx.rs_rssi;
1902 sc->sc_rxtapsc_rxtapu.th.wr_max_rssi = ic->ic_max_rssi;
1903
1904 bpf_mtap_hdr(sc->sc_drvbpf, &sc->sc_rxtapsc_rxtapu.th,
1905 sc->sc_rxtap_len, m, BPF_DIRECTION_IN(1 << 0));
1906 }
1907#endif
1908 m_adj(m, -IEEE80211_CRC_LEN4);
1909 wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
1910 memset(&rxi, 0, sizeof(rxi))__builtin_memset((&rxi), (0), (sizeof(rxi)));
1911 if (!ath_softcrypto && (wh->i_fc[1] & IEEE80211_FC1_WEP0x40)) {
1912 /*
1913 * WEP is decrypted by hardware. Clear WEP bit
1914 * and trim WEP header for ieee80211_inputm().
1915 */
1916 wh->i_fc[1] &= ~IEEE80211_FC1_WEP0x40;
1917 bcopy(wh, &whbuf, sizeof(whbuf));
1918 m_adj(m, IEEE80211_WEP_IVLEN3 + IEEE80211_WEP_KIDLEN1);
1919 wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
1920 bcopy(&whbuf, wh, sizeof(whbuf));
1921 /*
1922 * Also trim WEP ICV from the tail.
1923 */
1924 m_adj(m, -IEEE80211_WEP_CRCLEN4);
1925 /*
1926 * The header has probably moved.
1927 */
1928 wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
1929
1930 rxi.rxi_flags |= IEEE80211_RXI_HWDEC0x00000001;
1931 }
1932
1933 /*
1934 * Locate the node for sender, track state, and
1935 * then pass this node (referenced) up to the 802.11
1936 * layer for its use.
1937 */
1938 ni = ieee80211_find_rxnode(ic, wh);
1939
1940 /*
1941 * Record driver-specific state.
1942 */
1943 an = ATH_NODE(ni)((struct ath_node *)(ni));
1944 if (++(an->an_rx_hist_next) == ATH_RHIST_SIZE16)
1945 an->an_rx_hist_next = 0;
1946 rh = &an->an_rx_hist[an->an_rx_hist_next];
1947 rh->arh_ticks = ATH_TICKS()(ticks);
1948 rh->arh_rssi = ds->ds_rxstatds_us.rx.rs_rssi;
1949 rh->arh_antenna = ds->ds_rxstatds_us.rx.rs_antenna;
1950
1951 /*
1952 * Send frame up for processing.
1953 */
1954 rxi.rxi_rssi = ds->ds_rxstatds_us.rx.rs_rssi;
1955 rxi.rxi_tstamp = ds->ds_rxstatds_us.rx.rs_tstamp;
1956 ieee80211_inputm(ifp, m, ni, &rxi, &ml);
1957
1958 /* Handle the rate adaption */
1959 ieee80211_rssadapt_input(ic, ni, &an->an_rssadapt,
1960 ds->ds_rxstatds_us.rx.rs_rssi);
1961
1962 /*
1963 * The frame may have caused the node to be marked for
1964 * reclamation (e.g. in response to a DEAUTH message)
1965 * so use release_node here instead of unref_node.
1966 */
1967 ieee80211_release_node(ic, ni);
1968
1969 rx_next:
1970          TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
1971 } while (ath_rxbuf_init(sc, bf) == 0);
1972
1973 if_input(ifp, &ml);
1974
1975 ath_hal_set_rx_signal(ah)((*(ah)->ah_set_rx_signal)((ah))); /* rx signal state monitoring */
1976 ath_hal_start_rx(ah)((*(ah)->ah_start_rx)((ah))); /* in case of RXEOL */
1977#undef PA2DESC
1978}
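
For frames the hardware already decrypted, the code above drops the 4-byte IV/key-ID field by saving the 802.11 header, trimming the front of the mbuf, and copying the header back, then trims the 4-byte ICV from the tail. A rough standalone sketch of the same byte shuffling on a flat buffer (plain arrays instead of mbufs; the 24-byte header and 10-byte payload are assumptions for the example):

/*
 * Standalone sketch of WEP IV/KID and ICV removal for a hardware-decrypted
 * frame: shift the header forward over the IV/KID, then drop the ICV.
 */
#include <stdio.h>
#include <string.h>

#define HDRLEN     24    /* sizeof(struct ieee80211_frame) */
#define IV_KID_LEN  4    /* IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN */
#define ICV_LEN     4    /* IEEE80211_WEP_CRCLEN */

static size_t
strip_wep(unsigned char *frame, size_t len)
{
    /* move the header forward over the IV/KID bytes ... */
    memmove(frame + IV_KID_LEN, frame, HDRLEN);
    /* ... shift everything back to the start of the buffer ... */
    memmove(frame, frame + IV_KID_LEN, len - IV_KID_LEN);
    /* ... and account for the trimmed IV/KID and tail ICV */
    return len - IV_KID_LEN - ICV_LEN;
}

int
main(void)
{
    unsigned char frame[64];
    size_t len = HDRLEN + IV_KID_LEN + 10 + ICV_LEN, newlen;

    memset(frame, 0xaa, sizeof(frame));
    newlen = strip_wep(frame, len);
    printf("frame shrank from %zu to %zu bytes\n", len, newlen);
    return 0;
}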
1979
1980/*
1981 * XXX Size of an ACK control frame in bytes.
1982 */
1983  #define IEEE80211_ACK_SIZE    (2+2+IEEE80211_ADDR_LEN+4)
1984
1985int
1986ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
1987 struct ath_buf *bf, struct mbuf *m0)
1988{
1989 struct ieee80211com *ic = &sc->sc_ic;
1990 struct ath_hal *ah = sc->sc_ah;
1991 struct ifnet *ifp = &sc->sc_ic.ic_ific_ac.ac_if;
1992 int i, error, iswep, hdrlen, pktlen, len, s, tries;
1993 u_int8_t rix, cix, txrate, ctsrate;
1994 struct ath_desc *ds;
1995 struct ieee80211_frame *wh;
1996 struct ieee80211_key *k;
1997 u_int32_t iv;
1998 u_int8_t *ivp;
1999 u_int8_t hdrbuf[sizeof(struct ieee80211_frame) +
2000 IEEE80211_WEP_IVLEN3 + IEEE80211_WEP_KIDLEN1];
2001 u_int subtype, flags, ctsduration, antenna;
2002 HAL_PKT_TYPE atype;
2003 const HAL_RATE_TABLE *rt;
2004 HAL_BOOL shortPreamble;
2005 struct ath_node *an;
2006 u_int8_t hwqueue = HAL_TX_QUEUE_ID_DATA_MIN;
2007
2008 wh = mtod(m0, struct ieee80211_frame *)((struct ieee80211_frame *)((m0)->m_hdr.mh_data));
2009 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED0x40;
2010 hdrlen = sizeof(struct ieee80211_frame);
2011 pktlen = m0->m_pkthdrM_dat.MH.MH_pkthdr.len;
2012
2013 if (ath_softcrypto && iswep) {
2014 k = ieee80211_get_txkey(ic, wh, ni);
2015 if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL((void *)0))
2016 return ENOMEM12;
2017 wh = mtod(m0, struct ieee80211_frame *)((struct ieee80211_frame *)((m0)->m_hdr.mh_data));
2018
2019 /* reset len in case we got a new mbuf */
2020 pktlen = m0->m_pkthdrM_dat.MH.MH_pkthdr.len;
2021 } else if (!ath_softcrypto && iswep) {
2022 bcopy(mtod(m0, caddr_t)((caddr_t)((m0)->m_hdr.mh_data)), hdrbuf, hdrlen);
2023 m_adj(m0, hdrlen);
2024 M_PREPEND(m0, sizeof(hdrbuf), M_DONTWAIT)(m0) = m_prepend((m0), (sizeof(hdrbuf)), (0x0002));
2025 if (m0 == NULL((void *)0)) {
2026 sc->sc_stats.ast_tx_nombuf++;
2027 return ENOMEM12;
2028 }
2029 ivp = hdrbuf + hdrlen;
2030 wh = mtod(m0, struct ieee80211_frame *)((struct ieee80211_frame *)((m0)->m_hdr.mh_data));
2031 /*
2032 * XXX
2033 * IV must not duplicate during the lifetime of the key.
2034 * But no mechanism to renew keys is defined in IEEE 802.11
2035 * for WEP. And the IV may be duplicated at other stations
2036 * because the session key itself is shared. So we use a
2037 * pseudo random IV for now, though it is not the right way.
2038 *
2039 * NB: Rather than use a strictly random IV we select a
2040 * random one to start and then increment the value for
2041 * each frame. This is an explicit tradeoff between
2042 * overhead and security. Given the basic insecurity of
2043 * WEP this seems worthwhile.
2044 */
2045
2046 /*
2047 * Skip 'bad' IVs from Fluhrer/Mantin/Shamir:
2048 * (B, 255, N) with 3 <= B < 16 and 0 <= N <= 255
2049 */
2050 iv = ic->ic_iv;
2051 if ((iv & 0xff00) == 0xff00) {
2052 int B = (iv & 0xff0000) >> 16;
2053 if (3 <= B && B < 16)
2054 iv = (B+1) << 16;
2055 }
2056 ic->ic_iv = iv + 1;
2057
2058 /*
2059 * NB: Preserve byte order of IV for packet
2060 * sniffers; it doesn't matter otherwise.
2061 */
2062#if BYTE_ORDER1234 == BIG_ENDIAN4321
2063 ivp[0] = iv >> 0;
2064 ivp[1] = iv >> 8;
2065 ivp[2] = iv >> 16;
2066#else
2067 ivp[2] = iv >> 0;
2068 ivp[1] = iv >> 8;
2069 ivp[0] = iv >> 16;
2070#endif
2071 ivp[3] = ic->ic_wep_txkeyic_def_txkey << 6; /* Key ID and pad */
2072 bcopy(hdrbuf, mtod(m0, caddr_t)((caddr_t)((m0)->m_hdr.mh_data)), sizeof(hdrbuf));
2073 /*
2074 * The length of hdrlen and pktlen must be increased for WEP
2075 */
2076 len = IEEE80211_WEP_IVLEN3 +
2077 IEEE80211_WEP_KIDLEN1 +
2078 IEEE80211_WEP_CRCLEN4;
2079 hdrlen += len;
2080 pktlen += len;
2081 }
2082 pktlen += IEEE80211_CRC_LEN4;
2083
2084 /*
2085 * Load the DMA map so any coalescing is done. This
2086 * also calculates the number of descriptors we need.
2087 */
2088      error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0,
2089          BUS_DMA_NOWAIT);
2090 /*
2091 * Discard null packets and check for packets that
2092 * require too many TX descriptors. We try to convert
2093 * the latter to a cluster.
2094 */
2095 if (error == EFBIG27) { /* too many desc's, linearize */
2096 sc->sc_stats.ast_tx_linear++;
2097 if (m_defrag(m0, M_DONTWAIT0x0002)) {
2098 sc->sc_stats.ast_tx_nomcl++;
2099 m_freem(m0);
2100 return ENOMEM12;
2101 }
2102          error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0,
2103              BUS_DMA_NOWAIT);
2104 if (error != 0) {
2105 sc->sc_stats.ast_tx_busdma++;
2106 m_freem(m0);
2107 return error;
2108 }
2109          KASSERT(bf->bf_nseg == 1,
2110              ("ath_tx_start: packet not one segment; nseg %u",
2111              bf->bf_nseg));
2112 } else if (error != 0) {
2113 sc->sc_stats.ast_tx_busdma++;
2114 m_freem(m0);
2115 return error;
2116 } else if (bf->bf_nsegbf_dmamap->dm_nsegs == 0) { /* null packet, discard */
2117 sc->sc_stats.ast_tx_nodata++;
2118 m_freem(m0);
2119 return EIO5;
2120 }
2121 DPRINTF(ATH_DEBUG_XMIT, ("%s: m %p len %u\n", __func__, m0, pktlen));
2122      bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
2123          bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
2124 bf->bf_m = m0;
2125 bf->bf_node = ni; /* NB: held reference */
2126 an = ATH_NODE(ni)((struct ath_node *)(ni));
2127
2128 /* setup descriptors */
2129 ds = bf->bf_desc;
2130 rt = sc->sc_currates;
2131      KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
2132
2133 /*
2134 * Calculate Atheros packet type from IEEE80211 packet header
2135 * and setup for rate calculations.
2136 */
2137 bf->bf_id.id_node = NULL((void *)0);
2138 atype = HAL_PKT_TYPE_NORMAL; /* default */
2139 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK0x0c) {
2140 case IEEE80211_FC0_TYPE_MGT0x00:
2141 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK0xf0;
2142 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON0x80) {
2143 atype = HAL_PKT_TYPE_BEACON;
2144 } else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP0x50) {
2145 atype = HAL_PKT_TYPE_PROBE_RESP;
2146 } else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM0x90) {
2147 atype = HAL_PKT_TYPE_ATIM;
2148 }
2149 rix = 0; /* XXX lowest rate */
2150 break;
2151 case IEEE80211_FC0_TYPE_CTL0x04:
2152 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK0xf0;
2153 if (subtype == IEEE80211_FC0_SUBTYPE_PS_POLL0xa0)
2154 atype = HAL_PKT_TYPE_PSPOLL;
2155 rix = 0; /* XXX lowest rate */
2156 break;
2157 default:
2158 /* remember link conditions for rate adaptation algorithm */
2159 if (ic->ic_fixed_rate == -1) {
2160 bf->bf_id.id_len = m0->m_pkthdrM_dat.MH.MH_pkthdr.len;
2161 bf->bf_id.id_rateidx = ni->ni_txrate;
2162 bf->bf_id.id_node = ni;
2163 bf->bf_id.id_rssi = ath_node_getrssi(ic, ni);
2164 }
2165 ni->ni_txrate = ieee80211_rssadapt_choose(&an->an_rssadapt,
2166 &ni->ni_rates, wh, m0->m_pkthdrM_dat.MH.MH_pkthdr.len, ic->ic_fixed_rate,
2167 ifp->if_xname, 0);
2168 rix = sc->sc_rixmap[ni->ni_rates.rs_rates[ni->ni_txrate] &
2169 IEEE80211_RATE_VAL0x7f];
2170 if (rix == 0xff) {
2171 printf("%s: bogus xmit rate 0x%x (idx 0x%x)\n",
2172 ifp->if_xname, ni->ni_rates.rs_rates[ni->ni_txrate],
2173 ni->ni_txrate);
2174 sc->sc_stats.ast_tx_badrate++;
2175 m_freem(m0);
2176 return EIO5;
2177 }
2178 break;
2179 }
2180
2181 /*
2182 * NB: the 802.11 layer marks whether or not we should
2183 * use short preamble based on the current mode and
2184 * negotiated parameters.
2185 */
2186 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE0x00040000) &&
2187 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE0x0020)) {
2188 txrate = rt->info[rix].rateCode | rt->info[rix].shortPreamble;
2189 shortPreamble = AH_TRUE;
2190 sc->sc_stats.ast_tx_shortpre++;
2191 } else {
2192 txrate = rt->info[rix].rateCode;
2193 shortPreamble = AH_FALSE;
2194 }
2195
2196 /*
2197 * Calculate miscellaneous flags.
2198 */
2199 flags = HAL_TXDESC_CLRDMASK0x0001; /* XXX needed for wep errors */
2200 if (IEEE80211_IS_MULTICAST(wh->i_addr1)(*(wh->i_addr1) & 0x01)) {
2201 flags |= HAL_TXDESC_NOACK0x0002; /* no ack on broad/multicast */
2202 sc->sc_stats.ast_tx_noack++;
2203 } else if (pktlen > ic->ic_rtsthreshold) {
2204 flags |= HAL_TXDESC_RTSENA0x0004; /* RTS based on frame length */
2205 sc->sc_stats.ast_tx_rts++;
2206 }
2207
2208 /*
2209 * Calculate duration. This logically belongs in the 802.11
2210 * layer but it lacks sufficient information to calculate it.
2211 */
2212 if ((flags & HAL_TXDESC_NOACK0x0002) == 0 &&
2213 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK0x0c) != IEEE80211_FC0_TYPE_CTL0x04) {
2214 u_int16_t dur;
2215 /*
2216 * XXX not right with fragmentation.
2217 */
2218 dur = ath_hal_computetxtime(ah, rt, IEEE80211_ACK_SIZE(2+2+6 +4),
2219 rix, shortPreamble);
2220 *((u_int16_t*) wh->i_dur) = htole16(dur)((__uint16_t)(dur));
2221 }
2222
2223 /*
2224 * Calculate RTS/CTS rate and duration if needed.
2225 */
2226 ctsduration = 0;
2227 if (flags & (HAL_TXDESC_RTSENA0x0004|HAL_TXDESC_CTSENA0x0008)) {
2228 /*
2229 * CTS transmit rate is derived from the transmit rate
2230 * by looking in the h/w rate table. We must also factor
2231 * in whether or not a short preamble is to be used.
2232 */
2233 cix = rt->info[rix].controlRate;
2234 ctsrate = rt->info[cix].rateCode;
2235 if (shortPreamble)
2236 ctsrate |= rt->info[cix].shortPreamble;
2237 /*
2238 * Compute the transmit duration based on the size
2239 * of an ACK frame. We call into the HAL to do the
2240 * computation since it depends on the characteristics
2241 * of the actual PHY being used.
2242 */
2243 if (flags & HAL_TXDESC_RTSENA0x0004) { /* SIFS + CTS */
2244 ctsduration += ath_hal_computetxtime(ah,
2245 rt, IEEE80211_ACK_SIZE(2+2+6 +4), cix, shortPreamble);
2246 }
2247 /* SIFS + data */
2248 ctsduration += ath_hal_computetxtime(ah,
2249 rt, pktlen, rix, shortPreamble);
2250 if ((flags & HAL_TXDESC_NOACK0x0002) == 0) { /* SIFS + ACK */
2251 ctsduration += ath_hal_computetxtime(ah,
2252 rt, IEEE80211_ACK_SIZE(2+2+6 +4), cix, shortPreamble);
2253 }
2254 } else
2255 ctsrate = 0;
2256
2257 /*
2258 * For now use the antenna on which the last good
2259 * frame was received on. We assume this field is
2260 * initialized to 0 which gives us ``auto'' or the
2261 * ``default'' antenna.
2262 */
2263 if (an->an_tx_antenna) {
2264 antenna = an->an_tx_antenna;
2265 } else {
2266 antenna = an->an_rx_hist[an->an_rx_hist_next].arh_antenna;
2267 }
2268
2269#if NBPFILTER1 > 0
2270 if (ic->ic_rawbpf)
2271 bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT(1 << 1));
2272
2273 if (sc->sc_drvbpf) {
2274 sc->sc_txtapsc_txtapu.th.wt_flags = 0;
2275 if (shortPreamble)
2276 sc->sc_txtapsc_txtapu.th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE0x02;
2277 if (!ath_softcrypto && iswep)
2278 sc->sc_txtapsc_txtapu.th.wt_flags |= IEEE80211_RADIOTAP_F_WEP0x04;
2279 sc->sc_txtapsc_txtapu.th.wt_rate = ni->ni_rates.rs_rates[ni->ni_txrate] &
2280 IEEE80211_RATE_VAL0x7f;
2281 sc->sc_txtapsc_txtapu.th.wt_txpower = 30;
2282 sc->sc_txtapsc_txtapu.th.wt_antenna = antenna;
2283
2284 bpf_mtap_hdr(sc->sc_drvbpf, &sc->sc_txtapsc_txtapu.th, sc->sc_txtap_len,
2285 m0, BPF_DIRECTION_OUT(1 << 1));
2286 }
2287#endif
2288
2289 /*
2290 * Formulate first tx descriptor with tx controls.
2291 */
2292 tries = IEEE80211_IS_MULTICAST(wh->i_addr1)(*(wh->i_addr1) & 0x01) ? 1 : 15;
2293 /* XXX check return value? */
2294      ath_hal_setup_tx_desc(ah, ds
2295          , pktlen        /* packet length */
2296          , hdrlen        /* header length */
2297          , atype         /* Atheros packet type */
2298          , 60            /* txpower XXX */
2299          , txrate, tries /* series 0 rate/tries */
2300          , iswep ? sc->sc_ic.ic_wep_txkey : HAL_TXKEYIX_INVALID
2301          , antenna       /* antenna mode */
2302          , flags         /* flags */
2303          , ctsrate       /* rts/cts rate */
2304          , ctsduration   /* rts/cts duration */
2305      );
2306#ifdef notyet
2307 ath_hal_setup_xtx_desc(ah, ds
2308 , AH_FALSE /* short preamble */
2309 , 0, 0 /* series 1 rate/tries */
2310 , 0, 0 /* series 2 rate/tries */
2311 , 0, 0 /* series 3 rate/tries */
2312 );
2313#endif
2314 /*
2315 * Fillin the remainder of the descriptor info.
2316 */
2317 for (i = 0; i < bf->bf_nsegbf_dmamap->dm_nsegs; i++, ds++) {
2318 ds->ds_data = bf->bf_segsbf_dmamap->dm_segs[i].ds_addr;
2319 if (i == bf->bf_nsegbf_dmamap->dm_nsegs - 1) {
2320 ds->ds_link = 0;
2321 } else {
2322 ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1);
2323 }
2324          ath_hal_fill_tx_desc(ah, ds
2325              , bf->bf_segs[i].ds_len    /* segment length */
2326              , i == 0                   /* first segment */
2327              , i == bf->bf_nseg - 1     /* last segment */
2328          );
2329 DPRINTF(ATH_DEBUG_XMIT,
2330 ("%s: %d: %08x %08x %08x %08x %08x %08x\n",
2331 __func__, i, ds->ds_link, ds->ds_data,
2332 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]));
2333 }
2334
2335 /*
2336 * Insert the frame on the outbound list and
2337 * pass it on to the hardware.
2338 */
2339 s = splnet()splraise(0x4);
2340      TAILQ_INSERT_TAIL(&sc->sc_txq, bf, bf_list);
2341 if (sc->sc_txlink == NULL((void *)0)) {
2342          ath_hal_put_tx_buf(ah, sc->sc_txhalq[hwqueue], bf->bf_daddr);
2343 DPRINTF(ATH_DEBUG_XMIT, ("%s: TXDP0 = %p (%p)\n", __func__,
2344 (caddr_t)bf->bf_daddr, bf->bf_desc));
2345 } else {
2346 *sc->sc_txlink = bf->bf_daddr;
2347 DPRINTF(ATH_DEBUG_XMIT, ("%s: link(%p)=%p (%p)\n", __func__,
2348 sc->sc_txlink, (caddr_t)bf->bf_daddr, bf->bf_desc));
2349 }
2350      sc->sc_txlink = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
2351      splx(s);
2352
2353      ath_hal_tx_start(ah, sc->sc_txhalq[hwqueue]);
2354 return 0;
2355}
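
The IV handling in the software-WEP branch above skips the (B, 255, N) weak-IV class from the Fluhrer/Mantin/Shamir attack before the per-frame counter is advanced. A standalone sketch of just that filter, seeded with an arbitrary weak IV to show the jump:

/*
 * Standalone sketch of the weak-IV skip used when selecting the next WEP IV.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t
next_wep_iv(uint32_t iv)
{
    /* skip (B, 255, N) with 3 <= B < 16 and 0 <= N <= 255 */
    if ((iv & 0xff00) == 0xff00) {
        int B = (iv & 0xff0000) >> 16;

        if (3 <= B && B < 16)
            iv = (B + 1) << 16;    /* jump past the weak class */
    }
    return iv;
}

int
main(void)
{
    uint32_t iv = 0x03ff42;        /* (B=3, 255, 0x42): a weak IV */
    uint32_t use = next_wep_iv(iv);

    printf("candidate IV 0x%06x -> used IV 0x%06x, next counter 0x%06x\n",
        iv, use, use + 1);
    return 0;
}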
2356
2357void
2358ath_tx_proc(void *arg, int npending)
2359{
2360 struct ath_softc *sc = arg;
2361 struct ath_hal *ah = sc->sc_ah;
2362 struct ath_buf *bf;
2363 struct ieee80211com *ic = &sc->sc_ic;
2364 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
2365 struct ath_desc *ds;
2366 struct ieee80211_node *ni;
2367 struct ath_node *an;
2368 int sr, lr, s;
2369 HAL_STATUS status;
2370
2371 for (;;) {
2372 s = splnet()splraise(0x4);
2373 bf = TAILQ_FIRST(&sc->sc_txq)((&sc->sc_txq)->tqh_first);
2374 if (bf == NULL((void *)0)) {
2375 sc->sc_txlink = NULL((void *)0);
2376 splx(s)spllower(s);
2377 break;
2378 }
2379 /* only the last descriptor is needed */
2380 ds = &bf->bf_desc[bf->bf_nsegbf_dmamap->dm_nsegs - 1];
2381 status = ath_hal_proc_tx_desc(ah, ds)((*(ah)->ah_proc_tx_desc)((ah), (ds)));
2382#ifdef AR_DEBUG
2383 if (ath_debug & ATH_DEBUG_XMIT_DESC)
2384 ath_printtxbuf(bf, status == HAL_OK0);
2385#endif
2386 if (status == HAL_EINPROGRESS36) {
2387 splx(s)spllower(s);
2388 break;
2389 }
2390          TAILQ_REMOVE(&sc->sc_txq, bf, bf_list);
2391 splx(s)spllower(s);
2392
2393 ni = bf->bf_node;
2394 if (ni != NULL((void *)0)) {
2395 an = (struct ath_node *) ni;
2396 if (ds->ds_txstatds_us.tx.ts_status == 0) {
2397 if (bf->bf_id.id_node != NULL((void *)0))
2398 ieee80211_rssadapt_raise_rate(ic,
2399 &an->an_rssadapt, &bf->bf_id);
2400 an->an_tx_antenna = ds->ds_txstatds_us.tx.ts_antenna;
2401 } else {
2402 if (bf->bf_id.id_node != NULL((void *)0))
2403 ieee80211_rssadapt_lower_rate(ic, ni,
2404 &an->an_rssadapt, &bf->bf_id);
2405 if (ds->ds_txstatds_us.tx.ts_status & HAL_TXERR_XRETRY0x01)
2406 sc->sc_stats.ast_tx_xretries++;
2407 if (ds->ds_txstatds_us.tx.ts_status & HAL_TXERR_FIFO0x04)
2408 sc->sc_stats.ast_tx_fifoerr++;
2409 if (ds->ds_txstatds_us.tx.ts_status & HAL_TXERR_FILT0x02)
2410 sc->sc_stats.ast_tx_filtered++;
2411 an->an_tx_antenna = 0; /* invalidate */
2412 }
2413 sr = ds->ds_txstatds_us.tx.ts_shortretry;
2414 lr = ds->ds_txstatds_us.tx.ts_longretry;
2415 sc->sc_stats.ast_tx_shortretry += sr;
2416 sc->sc_stats.ast_tx_longretry += lr;
2417 /*
2418 * Reclaim reference to node.
2419 *
2420 * NB: the node may be reclaimed here if, for example
2421 * this is a DEAUTH message that was sent and the
2422 * node was timed out due to inactivity.
2423 */
2424 ieee80211_release_node(ic, ni);
2425 }
2426          bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
2427              bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2428          bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2429 m_freem(bf->bf_m);
2430 bf->bf_m = NULL((void *)0);
2431 bf->bf_node = NULL((void *)0);
2432
2433 s = splnet()splraise(0x4);
2434          TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
2435 splx(s)spllower(s);
2436 }
2437 ifq_clr_oactive(&ifp->if_snd);
2438 sc->sc_tx_timer = 0;
2439
2440 ath_start(ifp);
2441}
2442
2443/*
2444 * Drain the transmit queue and reclaim resources.
2445 */
2446void
2447ath_draintxq(struct ath_softc *sc)
2448{
2449 struct ath_hal *ah = sc->sc_ah;
2450 struct ieee80211com *ic = &sc->sc_ic;
2451 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
2452 struct ieee80211_node *ni;
2453 struct ath_buf *bf;
2454 int s, i;
2455
2456 /* XXX return value */
2457 if (!sc->sc_invalid) {
2458 for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) {
2459 /* don't touch the hardware if marked invalid */
2460 (void) ath_hal_stop_tx_dma(ah, sc->sc_txhalq[i])((*(ah)->ah_stop_tx_dma)((ah), (sc->sc_txhalq[i])));
2461 DPRINTF(ATH_DEBUG_RESET,
2462 ("%s: tx queue %d (%p), link %p\n", __func__, i,
2463 (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah,
2464 sc->sc_txhalq[i]), sc->sc_txlink));
2465 }
2466 (void) ath_hal_stop_tx_dma(ah, sc->sc_bhalq)((*(ah)->ah_stop_tx_dma)((ah), (sc->sc_bhalq)));
2467 DPRINTF(ATH_DEBUG_RESET,
2468 ("%s: beacon queue (%p)\n", __func__,
2469 (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah, sc->sc_bhalq)));
2470 }
2471 for (;;) {
2472 s = splnet()splraise(0x4);
2473 bf = TAILQ_FIRST(&sc->sc_txq)((&sc->sc_txq)->tqh_first);
2474 if (bf == NULL((void *)0)) {
2475 sc->sc_txlink = NULL((void *)0);
2476 splx(s)spllower(s);
2477 break;
2478 }
2479          TAILQ_REMOVE(&sc->sc_txq, bf, bf_list);
2480 splx(s)spllower(s);
2481#ifdef AR_DEBUG
2482 if (ath_debug & ATH_DEBUG_RESET) {
2483 ath_printtxbuf(bf,
2484                  ath_hal_proc_tx_desc(ah, bf->bf_desc) == HAL_OK);
2485          }
2486  #endif /* AR_DEBUG */
2487          bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2488 m_freem(bf->bf_m);
2489 bf->bf_m = NULL((void *)0);
2490 ni = bf->bf_node;
2491 bf->bf_node = NULL((void *)0);
2492 s = splnet()splraise(0x4);
2493 if (ni != NULL((void *)0)) {
2494 /*
2495 * Reclaim node reference.
2496 */
2497 ieee80211_release_node(ic, ni);
2498 }
2499          TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
2500 splx(s)spllower(s);
2501 }
2502 ifq_clr_oactive(&ifp->if_snd);
2503 sc->sc_tx_timer = 0;
2504}
2505
2506/*
2507 * Disable the receive h/w in preparation for a reset.
2508 */
2509void
2510ath_stoprecv(struct ath_softc *sc)
2511{
2512#define PA2DESC(_sc, _pa) \
2513 ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \
2514 ((_pa) - (_sc)->sc_desc_paddr)))
2515 struct ath_hal *ah = sc->sc_ah;
2516
2517 ath_hal_stop_pcu_recv(ah)((*(ah)->ah_stop_pcu_recv)((ah))); /* disable PCU */
2518 ath_hal_set_rx_filter(ah, 0)((*(ah)->ah_set_rx_filter)((ah), (0))); /* clear recv filter */
2519 ath_hal_stop_rx_dma(ah)((*(ah)->ah_stop_rx_dma)((ah))); /* disable DMA engine */
2520#ifdef AR_DEBUG
2521 if (ath_debug & ATH_DEBUG_RESET) {
2522 struct ath_buf *bf;
2523
2524 printf("%s: rx queue %p, link %p\n", __func__,
2525 (caddr_t)(u_intptr_t)ath_hal_get_rx_buf(ah)((*(ah)->ah_get_rx_buf)((ah))), sc->sc_rxlink);
2526          TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
2527              struct ath_desc *ds = bf->bf_desc;
2528              if (ath_hal_proc_rx_desc(ah, ds, bf->bf_daddr,
2529                  PA2DESC(sc, ds->ds_link)) == HAL_OK)
2530 ath_printrxbuf(bf, 1);
2531 }
2532 }
2533#endif
2534 sc->sc_rxlink = NULL((void *)0); /* just in case */
2535#undef PA2DESC
2536}
2537
2538/*
2539 * Enable the receive h/w following a reset.
2540 */
2541int
2542ath_startrecv(struct ath_softc *sc)
2543{
2544 struct ath_hal *ah = sc->sc_ah;
2545 struct ath_buf *bf;
2546
2547 sc->sc_rxlink = NULL((void *)0);
2548      TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
2549 int error = ath_rxbuf_init(sc, bf);
2550 if (error != 0) {
2551 DPRINTF(ATH_DEBUG_RECV,
2552 ("%s: ath_rxbuf_init failed %d\n",
2553 __func__, error));
2554 return error;
2555 }
2556 }
2557
2558 bf = TAILQ_FIRST(&sc->sc_rxbuf)((&sc->sc_rxbuf)->tqh_first);
2559 ath_hal_put_rx_buf(ah, bf->bf_daddr)((*(ah)->ah_put_rx_buf)((ah), (bf->bf_daddr)));
2560 ath_hal_start_rx(ah)((*(ah)->ah_start_rx)((ah))); /* enable recv descriptors */
2561 ath_mode_init(sc); /* set filters, etc. */
2562 ath_hal_start_rx_pcu(ah)((*(ah)->ah_start_rx_pcu)((ah))); /* re-enable PCU/DMA engine */
2563 return 0;
2564}
2565
2566/*
2567 * Set/change channels. If the channel is really being changed,
2568 * it's done by resetting the chip. To accomplish this we must
2569 * first cleanup any pending DMA, then restart stuff after a la
2570 * ath_init.
2571 */
2572int
2573ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
2574{
2575 struct ath_hal *ah = sc->sc_ah;
2576 struct ieee80211com *ic = &sc->sc_ic;
2577 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
2578
2579 DPRINTF(ATH_DEBUG_ANY, ("%s: %u (%u MHz) -> %u (%u MHz)\n", __func__,
2580 ieee80211_chan2ieee(ic, ic->ic_ibss_chan),
2581 ic->ic_ibss_chan->ic_freq,
2582 ieee80211_chan2ieee(ic, chan), chan->ic_freq));
2583 if (chan != ic->ic_ibss_chan) {
2584 HAL_STATUS status;
2585 HAL_CHANNEL hchan;
2586 enum ieee80211_phymode mode;
2587
2588 /*
2589 * To switch channels clear any pending DMA operations;
2590 * wait long enough for the RX fifo to drain, reset the
2591 * hardware at the new frequency, and then re-enable
2592 * the relevant bits of the h/w.
2593 */
2594 ath_hal_set_intr(ah, 0)((*(ah)->ah_set_intr)((ah), (0))); /* disable interrupts */
2595 ath_draintxq(sc); /* clear pending tx frames */
2596 ath_stoprecv(sc); /* turn off frame recv */
2597 /*
2598 * Convert to a HAL channel description.
2599 */
2600 hchan.channel = chan->ic_freq;
2601 hchan.channelFlags = chan->ic_flags;
2602          if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE,
2603              &status)) {
2604 printf("%s: ath_chan_set: unable to reset "
2605 "channel %u (%u MHz)\n", ifp->if_xname,
2606 ieee80211_chan2ieee(ic, chan), chan->ic_freq);
2607 return EIO5;
2608 }
2609 ath_set_slot_time(sc);
2610 /*
2611 * Re-enable rx framework.
2612 */
2613 if (ath_startrecv(sc) != 0) {
2614 printf("%s: ath_chan_set: unable to restart recv "
2615 "logic\n", ifp->if_xname);
2616 return EIO5;
2617 }
2618
2619#if NBPFILTER1 > 0
2620 /*
2621 * Update BPF state.
2622 */
2623 sc->sc_txtapsc_txtapu.th.wt_chan_freq = sc->sc_rxtapsc_rxtapu.th.wr_chan_freq =
2624 htole16(chan->ic_freq)((__uint16_t)(chan->ic_freq));
2625 sc->sc_txtapsc_txtapu.th.wt_chan_flags = sc->sc_rxtapsc_rxtapu.th.wr_chan_flags =
2626 htole16(chan->ic_flags)((__uint16_t)(chan->ic_flags));
2627#endif
2628
2629 /*
2630 * Change channels and update the h/w rate map
2631 * if we're switching; e.g. 11a to 11b/g.
2632 */
2633 ic->ic_ibss_chan = chan;
2634 mode = ieee80211_chan2mode(ic, chan);
2635 if (mode != sc->sc_curmode)
2636 ath_setcurmode(sc, mode);
2637
2638 /*
2639 * Re-enable interrupts.
2640 */
2641 ath_hal_set_intr(ah, sc->sc_imask)((*(ah)->ah_set_intr)((ah), (sc->sc_imask)));
2642 }
2643 return 0;
2644}
2645
2646void
2647ath_next_scan(void *arg)
2648{
2649 struct ath_softc *sc = arg;
2650 struct ieee80211com *ic = &sc->sc_ic;
2651 struct ifnet *ifp = &ic->ic_ific_ac.ac_if;
2652 int s;
2653
2654 /* don't call ath_start w/o network interrupts blocked */
2655 s = splnet();
2656
2657 if (ic->ic_state == IEEE80211_S_SCAN)
2658 ieee80211_next_scan(ifp);
2659 splx(s);
2660}
2661
2662int
2663ath_set_slot_time(struct ath_softc *sc)
2664{
2665 struct ath_hal *ah = sc->sc_ah;
2666 struct ieee80211com *ic = &sc->sc_ic;
2667
2668 if (ic->ic_flags & IEEE80211_F_SHSLOT)
2669 return (ath_hal_set_slot_time(ah, HAL_SLOT_TIME_9));
2670
2671 return (0);
2672}
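/*
 * NB: HAL_SLOT_TIME_9 selects the short (9us) ERP slot time when the
 * BSS advertises short-slot support (IEEE80211_F_SHSLOT); otherwise the
 * slot time already programmed into the hardware (normally the long
 * 20us value) is left unchanged.
 */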
2673
2674/*
2675 * Periodically recalibrate the PHY to account
2676 * for temperature/environment changes.
2677 */
2678void
2679ath_calibrate(void *arg)
2680{
2681 struct ath_softc *sc = arg;
2682 struct ath_hal *ah = sc->sc_ah;
2683 struct ieee80211com *ic = &sc->sc_ic;
2684 struct ieee80211_channel *c;
2685 HAL_CHANNEL hchan;
2686 int s;
2687
2688 sc->sc_stats.ast_per_cal++;
2689
2690 /*
2691 * Convert to a HAL channel description.
2692 */
2693 c = ic->ic_ibss_chan;
2694 hchan.channel = c->ic_freq;
2695 hchan.channelFlags = c->ic_flags;
2696
2697 s = splnet();
2698 DPRINTF(ATH_DEBUG_CALIBRATE,
2699 ("%s: channel %u/%x\n", __func__, c->ic_freq, c->ic_flags));
2700
2701 if (ath_hal_get_rf_gain(ah) == HAL_RFGAIN_NEED_CHANGE) {
2702 /*
2703 * Rfgain is out of bounds, reset the chip
2704 * to load new gain values.
2705 */
2706 sc->sc_stats.ast_per_rfgain++;
2707 ath_reset(sc, 1);
2708 }
2709 if (!ath_hal_calibrate(ah, &hchan)) {
2710 DPRINTF(ATH_DEBUG_ANY,
2711 ("%s: calibration of channel %u failed\n",
2712 __func__, c->ic_freq));
2713 sc->sc_stats.ast_per_calfail++;
2714 }
2715 timeout_add_sec(&sc->sc_cal_to, ath_calinterval);
2716 splx(s);
2717}
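/*
 * NB: ath_calibrate() re-arms its own timeout, so once started from
 * ath_newstate() it repeats every ath_calinterval seconds; an
 * out-of-bounds RF gain forces a full ath_reset() to load fresh gain
 * values before the per-channel calibration is attempted.
 */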
2718
2719void
2720ath_ledstate(struct ath_softc *sc, enum ieee80211_state state)
2721{
2722 HAL_LED_STATE led = HAL_LED_INIT;
2723 u_int32_t softled = AR5K_SOFTLED_OFF;
2724
2725 switch (state) {
2726 case IEEE80211_S_INIT:
2727 break;
2728 case IEEE80211_S_SCAN:
2729 led = HAL_LED_SCAN;
2730 break;
2731 case IEEE80211_S_AUTH:
2732 led = HAL_LED_AUTH;
2733 break;
2734 case IEEE80211_S_ASSOC:
2735 led = HAL_LED_ASSOC;
2736 softled = AR5K_SOFTLED_ON;
2737 break;
2738 case IEEE80211_S_RUN:
2739 led = HAL_LED_RUN;
2740 softled = AR5K_SOFTLED_ON;
2741 break;
2742 }
2743
2744 ath_hal_set_ledstate(sc->sc_ah, led);
2745 if (sc->sc_softled) {
2746 ath_hal_set_gpio_output(sc->sc_ah, AR5K_SOFTLED_PIN);
2747 ath_hal_set_gpio(sc->sc_ah, AR5K_SOFTLED_PIN, softled);
2748 }
2749}
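/*
 * NB: judging by the AR5K_SOFTLED_ON/OFF definitions (0/1), the
 * software LED line appears to be active-low -- the GPIO pin is driven
 * to 0 in the ASSOC and RUN states and back to 1 otherwise.
 */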
2750
2751int
2752ath_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
2753{
2754 struct ifnet *ifp = &ic->ic_if;
2755 struct ath_softc *sc = ifp->if_softc;
2756 struct ath_hal *ah = sc->sc_ah;
2757 struct ieee80211_node *ni;
2758 const u_int8_t *bssid;
2759 int error, i;
2760
2761 u_int32_t rfilt;
2762
2763 DPRINTF(ATH_DEBUG_ANY, ("%s: %s -> %s\n", __func__,
2764 ieee80211_state_name[ic->ic_state],
2765 ieee80211_state_name[nstate]));
2766
2767 timeout_del(&sc->sc_scan_to);
2768 timeout_del(&sc->sc_cal_to);
2769 ath_ledstate(sc, nstate);
2770
2771 if (nstate == IEEE80211_S_INIT) {
2772 timeout_del(&sc->sc_rssadapt_to);
2773 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
2774 ath_hal_set_intr(ah, sc->sc_imask);
2775 return (*sc->sc_newstate)(ic, nstate, arg);
2776 }
2777 ni = ic->ic_bss;
2778 error = ath_chan_set(sc, ni->ni_chan);
2779 if (error != 0)
2780 goto bad;
2781 rfilt = ath_calcrxfilter(sc);
2782 if (nstate == IEEE80211_S_SCAN ||
2783 ic->ic_opmode == IEEE80211_M_MONITOR) {
2784 bssid = sc->sc_broadcast_addr;
2785 } else {
2786 bssid = ni->ni_bssid;
2787 }
2788 ath_hal_set_rx_filter(ah, rfilt);
2789 DPRINTF(ATH_DEBUG_ANY, ("%s: RX filter 0x%x bssid %s\n",
2790 __func__, rfilt, ether_sprintf((u_char*)bssid)));
2791
2792 if (nstate == IEEE80211_S_RUN && ic->ic_opmode == IEEE80211_M_STA) {
2793 ath_hal_set_associd(ah, bssid, ni->ni_associd);
2794 } else {
2795 ath_hal_set_associd(ah, bssid, 0);
2796 }
2797
2798 if (!ath_softcrypto && (ic->ic_flags & IEEE80211_F_WEPON)) {
2799 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
2800 if (ath_hal_is_key_valid(ah, i))
2801 ath_hal_set_key_lladdr(ah, i, bssid);
2802 }
2803 }
2804
2805 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
2806 /* nothing to do */
2807 } else if (nstate == IEEE80211_S_RUN) {
2808 DPRINTF(ATH_DEBUG_ANY, ("%s(RUN): "
2809 "ic_flags=0x%08x iv=%d bssid=%s "
2810 "capinfo=0x%04x chan=%d\n",
2811 __func__,
2812 ic->ic_flags,
2813 ni->ni_intval,
2814 ether_sprintf(ni->ni_bssid),
2815 ni->ni_capinfo,
2816 ieee80211_chan2ieee(ic, ni->ni_chan)));
2817
2818 /*
2819 * Allocate and setup the beacon frame for AP or adhoc mode.
2820 */
2821#ifndef IEEE80211_STA_ONLY
2822 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2823 ic->ic_opmode == IEEE80211_M_IBSS) {
2824 error = ath_beacon_alloc(sc, ni);
2825 if (error != 0)
2826 goto bad;
2827 }
2828#endif
2829 /*
2830 * Configure the beacon and sleep timers.
2831 */
2832 ath_beacon_config(sc);
2833 } else {
2834 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
2835 ath_hal_set_intr(ah, sc->sc_imask);
2836 }
2837
2838 /*
2839 * Invoke the parent method to complete the work.
2840 */
2841 error = (*sc->sc_newstate)(ic, nstate, arg);
2842
2843 if (nstate == IEEE80211_S_RUN) {
2844 /* start periodic recalibration timer */
2845 timeout_add_sec(&sc->sc_cal_to, ath_calinterval);
2846
2847 if (ic->ic_opmode != IEEE80211_M_MONITOR)
2848 timeout_add_msec(&sc->sc_rssadapt_to, 100);
2849 } else if (nstate == IEEE80211_S_SCAN) {
2850 /* start ap/neighbor scan timer */
2851 timeout_add_msec(&sc->sc_scan_to, ath_dwelltime);
2852 }
2853bad:
2854 return error;
2855}
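/*
 * NB: sc_newstate is the net80211 state handler the driver presumably
 * saved at attach time; ath_newstate() wraps it, doing the chip setup
 * first and then letting the stack complete the transition. Note that
 * the success path also falls through the "bad:" label and returns
 * whatever the parent handler returned.
 */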
2856
2857#ifndef IEEE80211_STA_ONLY
2858void
2859ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m,
2860 struct ieee80211_node *ni, struct ieee80211_rxinfo *rxi, int subtype)
2861{
2862 struct ath_softc *sc = (struct ath_softc*)ic->ic_softc;
2863 struct ath_hal *ah = sc->sc_ah;
2864
2865 (*sc->sc_recv_mgmt)(ic, m, ni, rxi, subtype);
2866
2867 switch (subtype) {
2868 case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
2869 case IEEE80211_FC0_SUBTYPE_BEACON:
2870 if (ic->ic_opmode != IEEE80211_M_IBSS ||
2871 ic->ic_state != IEEE80211_S_RUN)
2872 break;
2873 if (ieee80211_ibss_merge(ic, ni, ath_hal_get_tsf64(ah)) ==
2874 ENETRESET)
2875 ath_hal_set_associd(ah, ic->ic_bss->ni_bssid, 0);
2876 break;
2877 default:
2878 break;
2879 }
2880 return;
2881}
2882#endif
2883
2884/*
2885 * Setup driver-specific state for a newly associated node.
2886 * Note that we're also called on re-association; the isnew
2887 * parameter tells us whether this is the first association.
2888 */
2889void
2890ath_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew)
2891{
2892 if (ic->ic_opmode == IEEE80211_M_MONITOR)
2893 return;
2894}
2895
2896int
2897ath_getchannels(struct ath_softc *sc, HAL_BOOL outdoor, HAL_BOOL xchanmode)
2898{
2899 struct ieee80211com *ic = &sc->sc_ic;
2900 struct ifnet *ifp = &ic->ic_if;
2901 struct ath_hal *ah = sc->sc_ah;
2902 HAL_CHANNEL *chans;
2903 int i, ix, nchan;
2904
2905 sc->sc_nchan = 0;
2906 chans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL),
2907 M_TEMP, M_NOWAIT);
2908 if (chans == NULL) {
2909 printf("%s: unable to allocate channel table\n", ifp->if_xname);
2910 return ENOMEM;
2911 }
2912 if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan,
2913 HAL_MODE_ALL, outdoor, xchanmode)) {
2914 printf("%s: unable to collect channel list from hal\n",
2915 ifp->if_xname);
2916 free(chans, M_TEMP, 0);
2917 return EINVAL;
2918 }
2919
2920 /*
2921 * Convert HAL channels to ieee80211 ones and insert
2922 * them in the table according to their channel number.
2923 */
2924 for (i = 0; i < nchan; i++) {
2925 HAL_CHANNEL *c = &chans[i];
2926 ix = ieee80211_mhz2ieee(c->channel, c->channelFlags);
2927 if (ix > IEEE80211_CHAN_MAX) {
2928 printf("%s: bad hal channel %u (%u/%x) ignored\n",
2929 ifp->if_xname, ix, c->channel, c->channelFlags);
2930 continue;
2931 }
2932 DPRINTF(ATH_DEBUG_ANY,
2933 ("%s: HAL channel %d/%d freq %d flags %#04x idx %d\n",
2934 sc->sc_dev.dv_xname, i, nchan, c->channel, c->channelFlags,
2935 ix));
2936 /* NB: flags are known to be compatible */
2937 if (ic->ic_channels[ix].ic_freq == 0) {
2938 ic->ic_channels[ix].ic_freq = c->channel;
2939 ic->ic_channels[ix].ic_flags = c->channelFlags;
2940 } else {
2941 /* channels overlap; e.g. 11g and 11b */
2942 ic->ic_channels[ix].ic_flags |= c->channelFlags;
2943 }
2944 /* count valid channels */
2945 sc->sc_nchan++;
2946 }
2947 free(chans, M_TEMP, 0);
2948
2949 if (sc->sc_nchan < 1) {
2950 printf("%s: no valid channels for regdomain %s(%u)\n",
2951 ifp->if_xname, ieee80211_regdomain2name(ah->ah_regdomain),
2952 ah->ah_regdomain);
2953 return ENOENT;
2954 }
2955
2956 /* set an initial channel */
2957 ic->ic_ibss_chan = &ic->ic_channels[0];
2958
2959 return 0;
2960}
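/*
 * NB: ic_channels[] is indexed by IEEE channel number, so HAL channels
 * that map to the same number (e.g. an 11b and an 11g entry for the
 * same 2.4GHz channel) are merged by OR-ing their flags into a single
 * ieee80211_channel slot.
 */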
2961
2962int
2963ath_rate_setup(struct ath_softc *sc, u_int mode)
2964{
2965 struct ath_hal *ah = sc->sc_ah;
2966 struct ieee80211com *ic = &sc->sc_ic;
2967 const HAL_RATE_TABLE *rt;
2968 struct ieee80211_rateset *rs;
2969 int i, maxrates;
2970
2971 switch (mode) {
2972 case IEEE80211_MODE_11A:
2973 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11A);
2974 break;
2975 case IEEE80211_MODE_11B:
2976 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11B);
2977 break;
2978 case IEEE80211_MODE_11G:
2979 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11G);
2980 break;
2981 default:
2982 DPRINTF(ATH_DEBUG_ANY,
2983 ("%s: invalid mode %u\n", __func__, mode));
2984 return 0;
2985 }
2986 rt = sc->sc_rates[mode];
2987 if (rt == NULL)
2988 return 0;
2989 if (rt->rateCount > IEEE80211_RATE_MAXSIZE) {
2990 DPRINTF(ATH_DEBUG_ANY,
2991 ("%s: rate table too small (%u > %u)\n",
2992 __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE));
2993 maxrates = IEEE80211_RATE_MAXSIZE;
2994 } else {
2995 maxrates = rt->rateCount;
2996 }
2997 rs = &ic->ic_sup_rates[mode];
2998 for (i = 0; i < maxrates; i++)
2999 rs->rs_rates[i] = rt->info[i].dot11Rate;
3000 rs->rs_nrates = maxrates;
3001 return 1;
3002}
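/*
 * Rate encoding, for reference (the usual net80211 convention, assumed
 * here rather than taken from this file): dot11Rate/rs_rates[] values
 * are in 500kb/s units with the high bit marking a basic rate, e.g.
 *
 *	0x96 -> (0x96 & IEEE80211_RATE_VAL) = 22 -> 11Mb/s, basic
 *	0x6c -> 108                              -> 54Mb/s
 *
 * which is why ath_setcurmode() below masks with IEEE80211_RATE_VAL
 * when building sc_rixmap.
 */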
3003
3004void
3005ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
3006{
3007 const HAL_RATE_TABLE *rt;
3008 struct ieee80211com *ic = &sc->sc_ic;
3009 struct ieee80211_node *ni;
3010 int i;
3011
3012 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
3013 rt = sc->sc_rates[mode];
3014 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
3015 for (i = 0; i < rt->rateCount; i++)
3016 sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i;
3017 bzero(sc->sc_hwmap, sizeof(sc->sc_hwmap));
3018 for (i = 0; i < 32; i++)
3019 sc->sc_hwmap[i] = rt->info[rt->rateCodeToIndex[i]].dot11Rate;
3020 sc->sc_currates = rt;
3021 sc->sc_curmode = mode;
3022 ni = ic->ic_bss;
3023 ni->ni_rates.rs_nrates = sc->sc_currates->rateCount;
3024 if (ni->ni_txrate >= ni->ni_rates.rs_nrates)
3025 ni->ni_txrate = 0;
3026}
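/*
 * NB: sc_rixmap and sc_hwmap are the two lookup directions for the
 * current rate table -- sc_rixmap maps a dot11 rate (masked with
 * IEEE80211_RATE_VAL) to its index in the HAL rate table, while
 * sc_hwmap maps a hardware rate code back to the dot11 rate.
 */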
3027
3028void
3029ath_rssadapt_updatenode(void *arg, struct ieee80211_node *ni)
3030{
3031 struct ath_node *an = ATH_NODE(ni);
3032
3033 ieee80211_rssadapt_updatestats(&an->an_rssadapt);
3034}
3035
3036void
3037ath_rssadapt_updatestats(void *arg)
3038{
3039 struct ath_softc *sc = (struct ath_softc *)arg;
3040 struct ieee80211com *ic = &sc->sc_ic;
3041
3042 if (ic->ic_opmode == IEEE80211_M_STA) {
3043 ath_rssadapt_updatenode(arg, ic->ic_bss);
3044 } else {
3045 ieee80211_iterate_nodes(ic, ath_rssadapt_updatenode, arg);
3046 }
3047
3048 timeout_add_msec(&sc->sc_rssadapt_to, 100);
3049}
3050
3051#ifdef AR_DEBUG
3052void
3053ath_printrxbuf(struct ath_buf *bf, int done)
3054{
3055 struct ath_desc *ds;
3056 int i;
3057
3058 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
3059 printf("R%d (%p %p) %08x %08x %08x %08x %08x %08x %c\n",
3060 i, ds, (struct ath_desc *)bf->bf_daddr + i,
3061 ds->ds_link, ds->ds_data,
3062 ds->ds_ctl0, ds->ds_ctl1,
3063 ds->ds_hw[0], ds->ds_hw[1],
3064 !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!');
3065 }
3066}
3067
3068void
3069ath_printtxbuf(struct ath_buf *bf, int done)
3070{
3071 struct ath_desc *ds;
3072 int i;
3073
3074 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
3075 printf("T%d (%p %p) "
3076 "%08x %08x %08x %08x %08x %08x %08x %08x %c\n",
3077 i, ds, (struct ath_desc *)bf->bf_daddr + i,
3078 ds->ds_link, ds->ds_data,
3079 ds->ds_ctl0, ds->ds_ctl1,
3080 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3],
3081 !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!');
3082 }
3083}
3084#endif /* AR_DEBUG */
3085
3086int
3087ath_gpio_attach(struct ath_softc *sc, u_int16_t devid)
3088{
3089 struct ath_hal *ah = sc->sc_ah;
3090 struct gpiobus_attach_args gba;
3091 int i;
3092
3093 if (ah->ah_gpio_npins < 1)
3094 return 0;
3095
3096 /* Initialize gpio pins array */
3097 for (i = 0; i < ah->ah_gpio_npins && i < AR5K_MAX_GPIO; i++) {
3098 sc->sc_gpio_pins[i].pin_num = i;
3099 sc->sc_gpio_pins[i].pin_caps = GPIO_PIN_INPUT |
3100 GPIO_PIN_OUTPUT;
3101
3102 /* Set pin mode to input */
3103 ath_hal_set_gpio_input(ah, i);
3104 sc->sc_gpio_pins[i].pin_flags = GPIO_PIN_INPUT;
3105
3106 /* Get pin input */
3107 sc->sc_gpio_pins[i].pin_state = ath_hal_get_gpio(ah, i) ?
3108 GPIO_PIN_HIGH : GPIO_PIN_LOW;
3109 }
3110
3111 /* Enable GPIO-controlled software LED if available */
3112 if ((ah->ah_version == AR5K_AR5211) ||
3113 (devid == PCI_PRODUCT_ATHEROS_AR5212_IBM)) {
3114 sc->sc_softled = 1;
3115 ath_hal_set_gpio_output(ah, AR5K_SOFTLED_PIN);
3116 ath_hal_set_gpio(ah, AR5K_SOFTLED_PIN, AR5K_SOFTLED_OFF);
3117 }
3118
3119 /* Create gpio controller tag */
3120 sc->sc_gpio_gc.gp_cookie = sc;
3121 sc->sc_gpio_gc.gp_pin_read = ath_gpio_pin_read;
3122 sc->sc_gpio_gc.gp_pin_write = ath_gpio_pin_write;
3123 sc->sc_gpio_gc.gp_pin_ctl = ath_gpio_pin_ctl;
3124
3125 gba.gba_name = "gpio";
3126 gba.gba_gc = &sc->sc_gpio_gc;
3127 gba.gba_pins = sc->sc_gpio_pins;
3128 gba.gba_npins = ah->ah_gpio_npins;
3129
3130#ifdef notyet
3131#if NGPIO > 0
3132 if (config_found(&sc->sc_dev, &gba, gpiobus_print) == NULL)
3133 return (ENODEV);
3134#endif
3135#endif
3136
3137 return (0);
3138}
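/*
 * NB: the gp_pin_read/gp_pin_write/gp_pin_ctl hooks below implement the
 * MI gpio(4) controller interface on top of the HAL GPIO calls; the
 * actual gpiobus attachment via config_found() remains disabled under
 * "#ifdef notyet" above.
 */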
3139
3140int
3141ath_gpio_pin_read(void *arg, int pin)
3142{
3143 struct ath_softc *sc = arg;
3144 struct ath_hal *ah = sc->sc_ah;
3145 return (ath_hal_get_gpio(ah, pin) ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
3146}
3147
3148void
3149ath_gpio_pin_write(void *arg, int pin, int value)
3150{
3151 struct ath_softc *sc = arg;
3152 struct ath_hal *ah = sc->sc_ah;
3153 ath_hal_set_gpio(ah, pin, value ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
3154}
3155
3156void
3157ath_gpio_pin_ctl(void *arg, int pin, int flags)
3158{
3159 struct ath_softc *sc = arg;
3160 struct ath_hal *ah = sc->sc_ah;
3161
3162 if (flags & GPIO_PIN_INPUT) {
3163 ath_hal_set_gpio_input(ah, pin);
3164 } else if (flags & GPIO_PIN_OUTPUT) {
3165 ath_hal_set_gpio_output(ah, pin);
3166 }
3167}