Bug Summary

File: dev/pci/if_iwx.c
Warning: line 4017, column 15
Assigned value is garbage or undefined
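This warning class fires when a value is read from a variable that the analyzer cannot prove was initialized on every path reaching the assignment. Line 4017 itself lies beyond the excerpt below; the following minimal sketch (not taken from the driver) reproduces the shape of code the checker flags, where a loop that can execute zero times leaves a local unset:

    #include <errno.h>

    int
    example(int max_power, int min_power)
    {
            int err;        /* never initialized before the loop */
            int power;

            for (power = max_power; power >= min_power; power--) {
                    err = (power > 20) ? 0 : EINVAL; /* assigned only if the loop runs */
                    if (err == 0)
                            break;
            }

            if (err)        /* reads garbage when max_power < min_power */
                    return err;
            return 0;
    }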

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name if_iwx.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/if_iwx.c
1/* $OpenBSD: if_iwx.c,v 1.133 2022/01/09 05:42:52 jsg Exp $ */
2
3/*
4 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5 * Author: Stefan Sperling <stsp@openbsd.org>
6 * Copyright (c) 2014 Fixup Software Ltd.
7 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*-
23 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24 * which were used as the reference documentation for this implementation.
25 *
26 ******************************************************************************
27 *
28 * This file is provided under a dual BSD/GPLv2 license. When using or
29 * redistributing this file, you may do so under either license.
30 *
31 * GPL LICENSE SUMMARY
32 *
33 * Copyright(c) 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 - 2019 Intel Corporation
35 *
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
39 *
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
43 * General Public License for more details.
44 *
45 * BSD LICENSE
46 *
47 * Copyright(c) 2017 Intel Deutschland GmbH
48 * Copyright(c) 2018 - 2019 Intel Corporation
49 * All rights reserved.
50 *
51 * Redistribution and use in source and binary forms, with or without
52 * modification, are permitted provided that the following conditions
53 * are met:
54 *
55 * * Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * * Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in
59 * the documentation and/or other materials provided with the
60 * distribution.
61 * * Neither the name Intel Corporation nor the names of its
62 * contributors may be used to endorse or promote products derived
63 * from this software without specific prior written permission.
64 *
65 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
66 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
67 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
68 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
69 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
70 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
71 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
72 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
73 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
74 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
75 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
76 *
77 *****************************************************************************
78 */
79
80/*-
81 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
82 *
83 * Permission to use, copy, modify, and distribute this software for any
84 * purpose with or without fee is hereby granted, provided that the above
85 * copyright notice and this permission notice appear in all copies.
86 *
87 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
88 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
89 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
90 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
91 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
92 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
93 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
94 */
95
96#include "bpfilter.h"
97
98#include <sys/param.h>
99#include <sys/conf.h>
100#include <sys/kernel.h>
101#include <sys/malloc.h>
102#include <sys/mbuf.h>
103#include <sys/mutex.h>
104#include <sys/proc.h>
105#include <sys/rwlock.h>
106#include <sys/socket.h>
107#include <sys/sockio.h>
108#include <sys/systm.h>
109#include <sys/endian.h>
110
111#include <sys/refcnt.h>
112#include <sys/task.h>
113#include <machine/bus.h>
114#include <machine/intr.h>
115
116#include <dev/pci/pcireg.h>
117#include <dev/pci/pcivar.h>
118#include <dev/pci/pcidevs.h>
119
120#if NBPFILTER > 0
121#include <net/bpf.h>
122#endif
123#include <net/if.h>
124#include <net/if_dl.h>
125#include <net/if_media.h>
126
127#include <netinet/in.h>
128#include <netinet/if_ether.h>
129
130#include <net80211/ieee80211_var.h>
131#include <net80211/ieee80211_radiotap.h>
132#include <net80211/ieee80211_priv.h> /* for SEQ_LT */
133#undef DPRINTF /* defined in ieee80211_priv.h */
134
135#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
136
137#define IC2IFP(_ic_) (&(_ic_)->ic_if)
138
139#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
140#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
141
142#ifdef IWX_DEBUG
143#define DPRINTF(x) do { if (iwx_debug > 0) printf x; } while (0)
144#define DPRINTFN(n, x) do { if (iwx_debug >= (n)) printf x; } while (0)
145int iwx_debug = 1;
146#else
147#define DPRINTF(x) do { ; } while (0)
148#define DPRINTFN(n, x) do { ; } while (0)
149#endif
150
151#include <dev/pci/if_iwxreg.h>
152#include <dev/pci/if_iwxvar.h>
153
154const uint8_t iwx_nvm_channels_8000[] = {
155 /* 2.4 GHz */
156 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
157 /* 5 GHz */
158 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
159 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
160 149, 153, 157, 161, 165, 169, 173, 177, 181
161};
162
163static const uint8_t iwx_nvm_channels_uhb[] = {
164 /* 2.4 GHz */
165 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
166 /* 5 GHz */
167 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
168 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
169 149, 153, 157, 161, 165, 169, 173, 177, 181,
170 /* 6-7 GHz */
171 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
172 73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
173 133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
174 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
175};
176
177#define IWX_NUM_2GHZ_CHANNELS 14
178
179const struct iwx_rate {
180 uint16_t rate;
181 uint8_t plcp;
182 uint8_t ht_plcp;
183} iwx_rates[] = {
184 /* Legacy */ /* HT */
185 { 2, IWX_RATE_1M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
186 { 4, IWX_RATE_2M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
187 { 11, IWX_RATE_5M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
188 { 22, IWX_RATE_11M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
189 { 12, IWX_RATE_6M_PLCP, IWX_RATE_HT_SISO_MCS_0_PLCP },
190 { 18, IWX_RATE_9M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
191 { 24, IWX_RATE_12M_PLCP, IWX_RATE_HT_SISO_MCS_1_PLCP },
192 { 26, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_8_PLCP },
193 { 36, IWX_RATE_18M_PLCP, IWX_RATE_HT_SISO_MCS_2_PLCP },
194 { 48, IWX_RATE_24M_PLCP, IWX_RATE_HT_SISO_MCS_3_PLCP },
195 { 52, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_9_PLCP },
196 { 72, IWX_RATE_36M_PLCP, IWX_RATE_HT_SISO_MCS_4_PLCP },
197 { 78, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_10_PLCP },
198 { 96, IWX_RATE_48M_PLCP, IWX_RATE_HT_SISO_MCS_5_PLCP },
199 { 104, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_11_PLCP },
200 { 108, IWX_RATE_54M_PLCP, IWX_RATE_HT_SISO_MCS_6_PLCP },
201 { 128, IWX_RATE_INVM_PLCP, IWX_RATE_HT_SISO_MCS_7_PLCP },
202 { 156, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_12_PLCP },
203 { 208, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_13_PLCP },
204 { 234, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_14_PLCP },
205 { 260, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_15_PLCP },
206};
207#define IWX_RIDX_CCK 0
208#define IWX_RIDX_OFDM 4
209#define IWX_RIDX_MAX (nitems(iwx_rates)-1)
210#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
211#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
212#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
213
214/* Convert an MCS index into an iwx_rates[] index. */
215const int iwx_mcs2ridx[] = {
216 IWX_RATE_MCS_0_INDEX,
217 IWX_RATE_MCS_1_INDEX,
218 IWX_RATE_MCS_2_INDEX,
219 IWX_RATE_MCS_3_INDEX,
220 IWX_RATE_MCS_4_INDEX,
221 IWX_RATE_MCS_5_INDEX,
222 IWX_RATE_MCS_6_INDEX,
223 IWX_RATE_MCS_7_INDEX,
224 IWX_RATE_MCS_8_INDEX,
225 IWX_RATE_MCS_9_INDEX,
226 IWX_RATE_MCS_10_INDEX,
227 IWX_RATE_MCS_11_INDEX,
228 IWX_RATE_MCS_12_INDEX,
229 IWX_RATE_MCS_13_INDEX,
230 IWX_RATE_MCS_14_INDEX,
231 IWX_RATE_MCS_15_INDEX,
232};
233
234uint8_t iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
235uint8_t iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
236int iwx_is_mimo_ht_plcp(uint8_t);
237int iwx_is_mimo_mcs(int);
238int iwx_store_cscheme(struct iwx_softc *, uint8_t *, size_t);
239int iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
240int iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
241int iwx_apply_debug_destination(struct iwx_softc *);
242int iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
243void iwx_ctxt_info_free_fw_img(struct iwx_softc *);
244void iwx_ctxt_info_free_paging(struct iwx_softc *);
245int iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
246 struct iwx_context_info_dram *);
247void iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
248int iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
249 uint8_t *, size_t);
250int iwx_set_default_calib(struct iwx_softc *, const void *);
251void iwx_fw_info_free(struct iwx_fw_info *);
252int iwx_read_firmware(struct iwx_softc *);
253uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
254uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
255void iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
256void iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
257int iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
258int iwx_write_mem(struct iwx_softc *, uint32_t, const void *, int);
259int iwx_write_mem32(struct iwx_softc *, uint32_t, uint32_t);
260int iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
261int iwx_nic_lock(struct iwx_softc *);
262void iwx_nic_assert_locked(struct iwx_softc *);
263void iwx_nic_unlock(struct iwx_softc *);
264int iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
265 uint32_t);
266int iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
267int iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
268int iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *, bus_size_t,
269 bus_size_t);
270void iwx_dma_contig_free(struct iwx_dma_info *);
271int iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
272void iwx_disable_rx_dma(struct iwx_softc *);
273void iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
274void iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
275int iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
276void iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
277void iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
278void iwx_enable_rfkill_int(struct iwx_softc *);
279int iwx_check_rfkill(struct iwx_softc *);
280void iwx_enable_interrupts(struct iwx_softc *);
281void iwx_enable_fwload_interrupt(struct iwx_softc *);
282void iwx_restore_interrupts(struct iwx_softc *);
283void iwx_disable_interrupts(struct iwx_softc *);
284void iwx_ict_reset(struct iwx_softc *);
285int iwx_set_hw_ready(struct iwx_softc *);
286int iwx_prepare_card_hw(struct iwx_softc *);
287int iwx_force_power_gating(struct iwx_softc *);
288void iwx_apm_config(struct iwx_softc *);
289int iwx_apm_init(struct iwx_softc *);
290void iwx_apm_stop(struct iwx_softc *);
291int iwx_allow_mcast(struct iwx_softc *);
292void iwx_init_msix_hw(struct iwx_softc *);
293void iwx_conf_msix_hw(struct iwx_softc *, int);
294int iwx_clear_persistence_bit(struct iwx_softc *);
295int iwx_start_hw(struct iwx_softc *);
296void iwx_stop_device(struct iwx_softc *);
297void iwx_nic_config(struct iwx_softc *);
298int iwx_nic_rx_init(struct iwx_softc *);
299int iwx_nic_init(struct iwx_softc *);
300int iwx_enable_txq(struct iwx_softc *, int, int, int, int);
301int iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
302void iwx_post_alive(struct iwx_softc *);
303int iwx_schedule_session_protection(struct iwx_softc *, struct iwx_node *,
304 uint32_t);
305void iwx_init_channel_map(struct iwx_softc *, uint16_t *, uint32_t *, int);
306void iwx_setup_ht_rates(struct iwx_softc *);
307int iwx_mimo_enabled(struct iwx_softc *);
308void iwx_mac_ctxt_task(void *);
309void iwx_phy_ctxt_task(void *);
310void iwx_updatechan(struct ieee80211com *);
311void iwx_updateprot(struct ieee80211com *);
312void iwx_updateslot(struct ieee80211com *);
313void iwx_updateedca(struct ieee80211com *);
314void iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
315 uint16_t);
316void iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
317int iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
318 uint8_t);
319void iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
320 uint8_t);
321int iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
322 uint8_t);
323void iwx_rx_ba_session_expired(void *);
324void iwx_rx_bar_frame_release(struct iwx_softc *, struct iwx_rx_packet *,
325 struct mbuf_list *);
326void iwx_reorder_timer_expired(void *);
327void iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
328 uint16_t, uint16_t, int, int);
329void iwx_sta_tx_agg_start(struct iwx_softc *, struct ieee80211_node *,
330 uint8_t);
331void iwx_ba_task(void *);
332
333int iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
334int iwx_is_valid_mac_addr(const uint8_t *);
335int iwx_nvm_get(struct iwx_softc *);
336int iwx_load_firmware(struct iwx_softc *);
337int iwx_start_fw(struct iwx_softc *);
338int iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
339int iwx_send_phy_cfg_cmd(struct iwx_softc *);
340int iwx_load_ucode_wait_alive(struct iwx_softc *);
341int iwx_send_dqa_cmd(struct iwx_softc *);
342int iwx_run_init_mvm_ucode(struct iwx_softc *, int);
343int iwx_config_ltr(struct iwx_softc *);
344void iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int);
345int iwx_rx_addbuf(struct iwx_softc *, int, int);
346int iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
347void iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
348 struct iwx_rx_data *);
349int iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
350int iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t,
351 struct ieee80211_rxinfo *);
352int iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
353 struct ieee80211_node *, struct ieee80211_rxinfo *);
354void iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t, int, int,
355 uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
356void iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
357void iwx_txd_done(struct iwx_softc *, struct iwx_tx_data *);
358void iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, int);
359void iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
360 struct iwx_rx_data *);
361void iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
362void iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
363 struct iwx_rx_data *);
364int iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
365int iwx_phy_ctxt_cmd_uhb_v3(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
366 uint8_t, uint32_t, uint8_t);
367int iwx_phy_ctxt_cmd_v3(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
368 uint8_t, uint32_t, uint8_t);
369int iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
370 uint8_t, uint32_t, uint32_t, uint8_t);
371int iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
372int iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
373 const void *);
374int iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
375 uint32_t *);
376int iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
377 const void *, uint32_t *);
378void iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
379void iwx_cmd_done(struct iwx_softc *, int, int, int);
380const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *, struct iwx_node *,
381 struct ieee80211_frame *, struct iwx_tx_cmd_gen2 *);
382void iwx_tx_update_byte_tbl(struct iwx_tx_ring *, int, uint16_t, uint16_t);
383int iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *);
384int iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
385int iwx_wait_tx_queues_empty(struct iwx_softc *);
386int iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
387int iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
388int iwx_beacon_filter_send_cmd(struct iwx_softc *,
389 struct iwx_beacon_filter_cmd *);
390int iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *, int);
391void iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
392 struct iwx_mac_power_cmd *);
393int iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
394int iwx_power_update_device(struct iwx_softc *);
395int iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
396int iwx_disable_beacon_filter(struct iwx_softc *);
397int iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
398int iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
399int iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
400int iwx_fill_probe_req(struct iwx_softc *, struct iwx_scan_probe_req *);
401int iwx_config_umac_scan_reduced(struct iwx_softc *);
402uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
403void iwx_scan_umac_dwell_v10(struct iwx_softc *,
404 struct iwx_scan_general_params_v10 *, int);
405void iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
406 struct iwx_scan_general_params_v10 *, uint16_t, int);
407void iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
408 struct iwx_scan_channel_params_v6 *, uint32_t, int, int);
409int iwx_umac_scan_v14(struct iwx_softc *, int);
410void iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
411uint8_t iwx_ridx2rate(struct ieee80211_rateset *, int);
412int iwx_rval2ridx(int);
413void iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *, int *);
414void iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
415 struct iwx_mac_ctx_cmd *, uint32_t);
416void iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
417 struct iwx_mac_data_sta *, int);
418int iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *, uint32_t, int);
419int iwx_clear_statistics(struct iwx_softc *);
420void iwx_add_task(struct iwx_softc *, struct taskq *, struct task *);
421void iwx_del_task(struct iwx_softc *, struct taskq *, struct task *);
422int iwx_scan(struct iwx_softc *);
423int iwx_bgscan(struct ieee80211com *);
424void iwx_bgscan_done(struct ieee80211com *,
425 struct ieee80211_node_switch_bss_arg *, size_t);
426void iwx_bgscan_done_task(void *);
427int iwx_umac_scan_abort(struct iwx_softc *);
428int iwx_scan_abort(struct iwx_softc *);
429int iwx_enable_mgmt_queue(struct iwx_softc *);
430int iwx_rs_rval2idx(uint8_t);
431uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *, int);
432int iwx_rs_init(struct iwx_softc *, struct iwx_node *);
433int iwx_enable_data_tx_queues(struct iwx_softc *);
434int iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
435 struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t);
436int iwx_auth(struct iwx_softc *);
437int iwx_deauth(struct iwx_softc *);
438int iwx_run(struct iwx_softc *);
439int iwx_run_stop(struct iwx_softc *);
440struct ieee80211_node *iwx_node_alloc(struct ieee80211com *);
441int iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
442 struct ieee80211_key *);
443void iwx_setkey_task(void *);
444void iwx_delete_key(struct ieee80211com *,
445 struct ieee80211_node *, struct ieee80211_key *);
446int iwx_media_change(struct ifnet *);
447void iwx_newstate_task(void *);
448int iwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
449void iwx_endscan(struct iwx_softc *);
450void iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
451 struct ieee80211_node *);
452int iwx_sf_config(struct iwx_softc *, int);
453int iwx_send_bt_init_conf(struct iwx_softc *);
454int iwx_send_soc_conf(struct iwx_softc *);
455int iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
456int iwx_send_temp_report_ths_cmd(struct iwx_softc *);
457int iwx_init_hw(struct iwx_softc *);
458int iwx_init(struct ifnet *);
459void iwx_start(struct ifnet *);
460void iwx_stop(struct ifnet *);
461void iwx_watchdog(struct ifnet *);
462int iwx_ioctl(struct ifnet *, u_long, caddr_t);
463const char *iwx_desc_lookup(uint32_t);
464void iwx_nic_error(struct iwx_softc *);
465void iwx_dump_driver_status(struct iwx_softc *);
466void iwx_nic_umac_error(struct iwx_softc *);
467int iwx_detect_duplicate(struct iwx_softc *, struct mbuf *,
468 struct iwx_rx_mpdu_desc *, struct ieee80211_rxinfo *);
469int iwx_is_sn_less(uint16_t, uint16_t, uint16_t);
470void iwx_release_frames(struct iwx_softc *, struct ieee80211_node *,
471 struct iwx_rxba_data *, struct iwx_reorder_buffer *, uint16_t,
472 struct mbuf_list *);
473int iwx_oldsn_workaround(struct iwx_softc *, struct ieee80211_node *,
474 int, struct iwx_reorder_buffer *, uint32_t, uint32_t);
475int iwx_rx_reorder(struct iwx_softc *, struct mbuf *, int,
476 struct iwx_rx_mpdu_desc *, int, int, uint32_t,
477 struct ieee80211_rxinfo *, struct mbuf_list *);
478void iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t,
479 struct mbuf_list *);
480int iwx_rx_pkt_valid(struct iwx_rx_packet *);
481void iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
482 struct mbuf_list *);
483void iwx_notif_intr(struct iwx_softc *);
484int iwx_intr(void *);
485int iwx_intr_msix(void *);
486int iwx_match(struct device *, void *, void *);
487int iwx_preinit(struct iwx_softc *);
488void iwx_attach_hook(struct device *);
489void iwx_attach(struct device *, struct device *, void *);
490void iwx_init_task(void *);
491int iwx_activate(struct device *, int);
492void iwx_resume(struct iwx_softc *);
493int iwx_wakeup(struct iwx_softc *);
494
495#if NBPFILTER > 0
496void iwx_radiotap_attach(struct iwx_softc *);
497#endif
498
499uint8_t
500iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
501{
502 const struct iwx_fw_cmd_version *entry;
503 int i;
504
505 for (i = 0; i < sc->n_cmd_versions; i++) {
506 entry = &sc->cmd_versions[i];
507 if (entry->group == grp && entry->cmd == cmd)
508 return entry->cmd_ver;
509 }
510
511 return IWX_FW_CMD_VER_UNKNOWN;
512}
513
514uint8_t
515iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
516{
517 const struct iwx_fw_cmd_version *entry;
518 int i;
519
520 for (i = 0; i < sc->n_cmd_versions; i++) {
521 entry = &sc->cmd_versions[i];
522 if (entry->group == grp && entry->cmd == cmd)
523 return entry->notif_ver;
524 }
525
526 return IWX_FW_CMD_VER_UNKNOWN;
527}
528
529int
530iwx_is_mimo_ht_plcp(uint8_t ht_plcp)
531{
532 return (ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP &&
533 (ht_plcp & IWX_RATE_HT_MCS_NSS_MSK));
534}
535
536int
537iwx_is_mimo_mcs(int mcs)
538{
539 int ridx = iwx_mcs2ridx[mcs];
540 return iwx_is_mimo_ht_plcp(iwx_rates[ridx].ht_plcp);
541
542}
543
544int
545iwx_store_cscheme(struct iwx_softc *sc, uint8_t *data, size_t dlen)
546{
547 struct iwx_fw_cscheme_list *l = (void *)data;
548
549 if (dlen < sizeof(*l) ||
550 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
551 return EINVAL;
552
553 /* we don't actually store anything for now, always use s/w crypto */
554
555 return 0;
556}
557
558int
559iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
560 const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
561{
562 int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 0);
563 if (err) {
564 printf("%s: could not allocate context info DMA memory\n",
565 DEVNAME(sc));
566 return err;
567 }
568
569 memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
570
571 return 0;
572}
573
574void iwx_ctxt_info_free_paging(struct iwx_softc *sc)
575{
576 struct iwx_self_init_dram *dram = &sc->init_dram;
577 int i;
578
579 if (!dram->paging)
580 return;
581
582 /* free paging */
583 for (i = 0; i < dram->paging_cnt; i++)
584 iwx_dma_contig_free(&dram->paging[i]);
585
586 free(dram->paging, M_DEVBUF, dram->paging_cnt * sizeof(*dram->paging));
587 dram->paging_cnt = 0;
588 dram->paging = NULL;
589}
590
591int
592iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
593{
594 int i = 0;
595
596 while (start < fws->fw_count &&
597 fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
598 fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
599 start++;
600 i++;
601 }
602
603 return i;
604}
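iwx_get_num_sections() counts contiguous sections until it reaches one of the two separator markers. Together with iwx_init_fw_sec() below, it implies the following flat layout of fws->fw_sect[] (a sketch inferred from the code shown here, not from firmware documentation):

    /*
     * fw_sect[0 .. lmac_cnt-1]                  LMAC sections
     * fw_sect[lmac_cnt]                         CPU1/CPU2 separator (devoff 0xFFFFCCCC)
     * fw_sect[lmac_cnt+1 .. lmac_cnt+umac_cnt]  UMAC sections
     * fw_sect[lmac_cnt+umac_cnt+1]              paging separator (devoff 0xAAAABBBB)
     * fw_sect[lmac_cnt+umac_cnt+2 .. ]          paging sections
     */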
605
606int
607iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
608 struct iwx_context_info_dram *ctxt_dram)
609{
610 struct iwx_self_init_dram *dram = &sc->init_dram;
611 int i, ret, fw_cnt = 0;
612
613 KASSERT(dram->paging == NULL);
614
615 dram->lmac_cnt = iwx_get_num_sections(fws, 0);
616 /* add 1 due to separator */
617 dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
618 /* add 2 due to separators */
619 dram->paging_cnt = iwx_get_num_sections(fws,
620 dram->lmac_cnt + dram->umac_cnt + 2);
621
622 dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
623 sizeof(*dram->fw), M_DEVBUF, M_ZERO | M_NOWAIT);
624 if (!dram->fw) {
625 printf("%s: could not allocate memory for firmware sections\n",
626 DEVNAME(sc));
627 return ENOMEM;
628 }
629
630 dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
631 M_DEVBUF, M_ZERO | M_NOWAIT);
632 if (!dram->paging) {
633 printf("%s: could not allocate memory for firmware paging\n",
634 DEVNAME(sc));
635 return ENOMEM;
636 }
637
638 /* initialize lmac sections */
639 for (i = 0; i < dram->lmac_cnt; i++) {
640 ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
641 &dram->fw[fw_cnt]);
642 if (ret)
643 return ret;
644 ctxt_dram->lmac_img[i] =
645 htole64(dram->fw[fw_cnt].paddr);
646 DPRINTF(("%s: firmware LMAC section %d at 0x%llx size %lld\n", __func__, i,
647 (unsigned long long)dram->fw[fw_cnt].paddr,
648 (unsigned long long)dram->fw[fw_cnt].size));
649 fw_cnt++;
650 }
651
652 /* initialize umac sections */
653 for (i = 0; i < dram->umac_cnt; i++) {
654 /* access FW with +1 to make up for lmac separator */
655 ret = iwx_ctxt_info_alloc_dma(sc,
656 &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
657 if (ret)
658 return ret;
659 ctxt_dram->umac_img[i] =
660 htole64(dram->fw[fw_cnt].paddr);
661 DPRINTF(("%s: firmware UMAC section %d at 0x%llx size %lld\n", __func__, i,
662 (unsigned long long)dram->fw[fw_cnt].paddr,
663 (unsigned long long)dram->fw[fw_cnt].size));
664 fw_cnt++;
665 }
666
667 /*
668 * Initialize paging.
669 * Paging memory isn't stored in dram->fw like the umac and lmac - it is
670 * stored separately.
671 * This is because the timing of its release is different -
672 * while fw memory can be released on alive, the paging memory can be
673 * freed only when the device goes down.
674 * Given that, the logic here for accessing the fw image is a bit
675 * different - fw_cnt isn't changing, so the loop counter is added to it.
676 */
677 for (i = 0; i < dram->paging_cnt; i++) {
678 /* access FW with +2 to make up for lmac & umac separators */
679 int fw_idx = fw_cnt + i + 2;
680
681 ret = iwx_ctxt_info_alloc_dma(sc,
682 &fws->fw_sect[fw_idx], &dram->paging[i]);
683 if (ret)
684 return ret;
685
686 ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
687 DPRINTF(("%s: firmware paging section %d at 0x%llx size %lld\n", __func__, i,
688 (unsigned long long)dram->paging[i].paddr,
689 (unsigned long long)dram->paging[i].size));
690 }
691
692 return 0;
693}
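A worked example of the separator arithmetic above, with hypothetical counts lmac_cnt = 2 and umac_cnt = 3:

    /*
     * LMAC loop:   reads fw_sect[0], fw_sect[1]             (fw_cnt ends at 2)
     * UMAC loop:   reads fw_sect[3], fw_sect[4], fw_sect[5] (fw_cnt + 1 skips the
     *              separator at fw_sect[2]; fw_cnt ends at 5)
     * paging loop: reads fw_sect[7], fw_sect[8], ...        (fw_cnt + i + 2 skips the
     *              second separator at fw_sect[6]; fw_cnt stays at 5)
     */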
694
695void
696iwx_fw_version_str(char *buf, size_t bufsize,
697 uint32_t major, uint32_t minor, uint32_t api)
698{
699 /*
700 * Starting with major version 35 the Linux driver prints the minor
701 * version in hexadecimal.
702 */
703 if (major >= 35)
704 snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
705 else
706 snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
707}
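For illustration, the two snprintf formats above produce output like this (made-up version words):

    char buf[32];
    iwx_fw_version_str(buf, sizeof(buf), 67, 0x12345678, 9); /* "67.12345678.9" */
    iwx_fw_version_str(buf, sizeof(buf), 34, 5, 9);          /* "34.5.9" */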
708
709int
710iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
711 uint8_t min_power)
712{
713 struct iwx_dma_info *fw_mon = &sc->fw_mon;
714 uint32_t size = 0;
715 uint8_t power;
716 int err;
717
718 if (fw_mon->size)
719 return 0;
720
721 for (power = max_power; power >= min_power; power--) {
722 size = (1 << power);
723
724 err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
725 if (err)
726 continue;
727
728 DPRINTF(("%s: allocated 0x%08x bytes for firmware monitor.\n",
729 DEVNAME(sc), size));
730 break;
731 }
732
733 if (err) {
734 fw_mon->size = 0;
735 return err;
736 }
737
738 if (power != max_power)
739 DPRINTF(("%s: Sorry - debug buffer is only %luK while you requested %luK\n",
740 DEVNAME(sc), (unsigned long)(1 << (power - 10)),
741 (unsigned long)(1 << (max_power - 10))));
742
743 return 0;
744}
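Note in passing that this function has the very shape the report's warning class targets: err is assigned only inside the loop, so a zero-trip loop (max_power < min_power) would make the read at source line 733 undefined. The caller constrains the arguments (min_power is 11 and max_power at most 26, see iwx_alloc_fw_monitor() below), so that path is not reachable here, but a defensive variant would seed the variable. A minimal sketch, with a hypothetical try_alloc callback standing in for iwx_dma_contig_alloc():

    #include <errno.h>

    int
    alloc_block_sketch(int max_power, int min_power, int (*try_alloc)(int size))
    {
            int err = ENOMEM;       /* seed with a failure code */
            int power;

            for (power = max_power; power >= min_power; power--) {
                    err = try_alloc(1 << power);
                    if (err == 0)
                            break;
            }

            return err;     /* ENOMEM, not garbage, when the loop never runs */
    }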
745
746int
747iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
748{
749 if (!max_power) {
750 /* default max_power is maximum */
751 max_power = 26;
752 } else {
753 max_power += 11;
754 }
755
756 if (max_power > 26) {
757 DPRINTF(("%s: External buffer size for monitor is too big %d, "
758 "check the FW TLV\n", DEVNAME(sc), max_power));
759 return 0;
760 }
761
762 if (sc->fw_mon.size)
763 return 0;
764
765 return iwx_alloc_fw_monitor_block(sc, max_power, 11);
766}
767
768int
769iwx_apply_debug_destination(struct iwx_softc *sc)
770{
771 struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
772 int i, err;
773 uint8_t mon_mode, size_power, base_shift, end_shift;
774 uint32_t base_reg, end_reg;
775
776 dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
777 mon_mode = dest_v1->monitor_mode;
778 size_power = dest_v1->size_power;
779 base_reg = le32toh(dest_v1->base_reg);
780 end_reg = le32toh(dest_v1->end_reg);
781 base_shift = dest_v1->base_shift;
782 end_shift = dest_v1->end_shift;
783
784 DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));
785
786 if (mon_mode == EXTERNAL_MODE) {
787 err = iwx_alloc_fw_monitor(sc, size_power);
788 if (err)
789 return err;
790 }
791
792 if (!iwx_nic_lock(sc))
793 return EBUSY;
794
795 for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
796 uint32_t addr, val;
797 uint8_t op;
798
799 addr = le32toh(dest_v1->reg_ops[i].addr);
800 val = le32toh(dest_v1->reg_ops[i].val);
801 op = dest_v1->reg_ops[i].op;
802
803 DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
804 switch (op) {
805 case CSR_ASSIGN:
806 IWX_WRITE(sc, addr, val);
807 break;
808 case CSR_SETBIT:
809 IWX_SETBITS(sc, addr, (1 << val));
810 break;
811 case CSR_CLEARBIT:
812 IWX_CLRBITS(sc, addr, (1 << val));
813 break;
814 case PRPH_ASSIGN:
815 iwx_write_prph(sc, addr, val);
816 break;
817 case PRPH_SETBIT:
818 err = iwx_set_bits_prph(sc, addr, (1 << val));
819 if (err)
820 return err;
821 break;
822 case PRPH_CLEARBIT:
823 err = iwx_clear_bits_prph(sc, addr, (1 << val));
824 if (err)
825 return err;
826 break;
827 case PRPH_BLOCKBIT:
828 if (iwx_read_prph(sc, addr) & (1 << val))
829 goto monitor;
830 break;
831 default:
832 DPRINTF(("%s: FW debug - unknown OP %d\n",
833 DEVNAME(sc), op));
834 break;
835 }
836 }
837
838monitor:
839 if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
840 iwx_write_prph(sc, le32toh(base_reg),
841 sc->fw_mon.paddr >> base_shift);
842 iwx_write_prph(sc, end_reg,
843 (sc->fw_mon.paddr + sc->fw_mon.size - 256)
844 >> end_shift);
845 }
846
847 iwx_nic_unlock(sc);
848 return 0;
849}
850
851int
852iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
853{
854 struct iwx_context_info *ctxt_info;
855 struct iwx_context_info_rbd_cfg *rx_cfg;
856 uint32_t control_flags = 0, rb_size;
857 uint64_t paddr;
858 int err;
859
860 ctxt_info = sc->ctxt_info_dma.vaddr;
861
862 ctxt_info->version.version = 0;
863 ctxt_info->version.mac_id =
864 htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
865 /* size is in DWs */
866 ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);
867
868 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22560)
869 rb_size = IWX_CTXT_INFO_RB_SIZE_2K;
870 else
871 rb_size = IWX_CTXT_INFO_RB_SIZE_4K;
872
873 KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF);
874 control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
875 (IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
876 IWX_CTXT_INFO_RB_CB_SIZE_POS) |
877 (rb_size << IWX_CTXT_INFO_RB_SIZE_POS);
878 ctxt_info->control.control_flags = htole32(control_flags);
879
880 /* initialize RX default queue */
881 rx_cfg = &ctxt_info->rbd_cfg;
882 rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
883 rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
884 rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);
885
886 /* initialize TX command queue */
887 ctxt_info->hcmd_cfg.cmd_queue_addr =
888 htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
889 ctxt_info->hcmd_cfg.cmd_queue_size =
890 IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
891
892 /* allocate ucode sections in dram and set addresses */
893 err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
894 if (err) {
895 iwx_ctxt_info_free_fw_img(sc);
896 return err;
897 }
898
899 /* Configure debug, if exists */
900 if (sc->sc_fw.dbg_dest_tlv_v1) {
901 err = iwx_apply_debug_destination(sc);
902 if (err) {
903 iwx_ctxt_info_free_fw_img(sc);
904 return err;
905 }
906 }
907
908 /*
909 * Write the context info DMA base address. The device expects a
910 * 64-bit address but a simple bus_space_write_8 to this register
911 * won't work on some devices, such as the AX201.
912 */
913 paddr = sc->ctxt_info_dma.paddr;
914 IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
915 IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);
916
917 /* kick FW self load */
918 if (!iwx_nic_lock(sc)) {
919 iwx_ctxt_info_free_fw_img(sc);
920 return EBUSY;
921 }
922 iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
923 iwx_nic_unlock(sc);
924
925 /* Context info will be released upon alive or failure to get one */
926
927 return 0;
928}
929
930void
931iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
932{
933 struct iwx_self_init_dram *dram = &sc->init_dram;
934 int i;
935
936 if (!dram->fw)
937 return;
938
939 for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
940 iwx_dma_contig_free(&dram->fw[i]);
941
942 free(dram->fw, M_DEVBUF,
943 (dram->lmac_cnt + dram->umac_cnt) * sizeof(*dram->fw));
944 dram->lmac_cnt = 0;
945 dram->umac_cnt = 0;
946 dram->fw = NULL;
947}
948
949int
950iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
951 uint8_t *data, size_t dlen)
952{
953 struct iwx_fw_sects *fws;
954 struct iwx_fw_onesect *fwone;
955
956 if (type >= IWX_UCODE_TYPE_MAX)
957 return EINVAL;
958 if (dlen < sizeof(uint32_t))
959 return EINVAL;
960
961 fws = &sc->sc_fw.fw_sects[type];
962 DPRINTF(("%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count));
963 if (fws->fw_count >= IWX_UCODE_SECT_MAX)
964 return EINVAL;
965
966 fwone = &fws->fw_sect[fws->fw_count];
967
968 /* first 32bit are device load offset */
969 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
970
971 /* rest is data */
972 fwone->fws_data = data + sizeof(uint32_t);
973 fwone->fws_len = dlen - sizeof(uint32_t);
974
975 fws->fw_count++;
976 fws->fw_totlen += fwone->fws_len;
977
978 return 0;
979}
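Each stored section blob begins with a 32-bit device load offset, followed by the image payload; the code above peels the offset off before recording the data pointer and length. A sketch of the blob layout as the code implies it:

    /*
     *  offset 0               4                          dlen
     *  +----------------------+----------------------------+
     *  | fws_devoff (uint32_t)| fws_data, fws_len bytes    |
     *  +----------------------+----------------------------+
     *  fws_len = dlen - sizeof(uint32_t)
     */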
980
981#define IWX_DEFAULT_SCAN_CHANNELS 40
982/* Newer firmware might support more channels. Raise this value if needed. */
983#define IWX_MAX_SCAN_CHANNELS 67 /* as of iwx-cc-a0-62 firmware */
984
985struct iwx_tlv_calib_data {
986 uint32_t ucode_type;
987 struct iwx_tlv_calib_ctrl calib;
988} __packed;
989
990int
991iwx_set_default_calib(struct iwx_softc *sc, const void *data)
992{
993 const struct iwx_tlv_calib_data *def_calib = data;
994 uint32_t ucode_type = le32toh(def_calib->ucode_type);
995
996 if (ucode_type >= IWX_UCODE_TYPE_MAX)
997 return EINVAL;
998
999 sc->sc_default_calib[ucode_type].flow_trigger =
1000 def_calib->calib.flow_trigger;
1001 sc->sc_default_calib[ucode_type].event_trigger =
1002 def_calib->calib.event_trigger;
1003
1004 return 0;
1005}
1006
1007void
1008iwx_fw_info_free(struct iwx_fw_info *fw)
1009{
1010 free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
1011 fw->fw_rawdata = NULL;
1012 fw->fw_rawsize = 0;
1013 /* don't touch fw->fw_status */
1014 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
1015}
1016
1017#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
1018
1019int
1020iwx_read_firmware(struct iwx_softc *sc)
1021{
1022 struct iwx_fw_info *fw = &sc->sc_fw;
1023 struct iwx_tlv_ucode_header *uhdr;
1024 struct iwx_ucode_tlv tlv;
1025 uint32_t tlv_type;
1026 uint8_t *data;
1027 int err;
1028 size_t len;
1029
1030 if (fw->fw_status == IWX_FW_STATUS_DONE)
1031 return 0;
1032
1033 while (fw->fw_status == IWX_FW_STATUS_INPROGRESS)
1034 tsleep_nsec(&sc->sc_fw, 0, "iwxfwp", INFSLP);
1035 fw->fw_status = IWX_FW_STATUS_INPROGRESS;
1036
1037 if (fw->fw_rawdata != NULL)
1038 iwx_fw_info_free(fw);
1039
1040 err = loadfirmware(sc->sc_fwname,
1041 (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
1042 if (err) {
1043 printf("%s: could not read firmware %s (error %d)\n",
1044 DEVNAME(sc), sc->sc_fwname, err);
1045 goto out;
1046 }
1047
1048 sc->sc_capaflags = 0;
1049 sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
1050 memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
1051 memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
1052 sc->n_cmd_versions = 0;
1053
1054 uhdr = (void *)fw->fw_rawdata;
1055 if (*(uint32_t *)fw->fw_rawdata != 0
1056 || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
1057 printf("%s: invalid firmware %s\n",
1058 DEVNAME(sc), sc->sc_fwname);
1059 err = EINVAL;
1060 goto out;
1061 }
1062
1063 iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1064 IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
1065 IWX_UCODE_MINOR(le32toh(uhdr->ver)),
1066 IWX_UCODE_API(le32toh(uhdr->ver)));
1067
1068 data = uhdr->data;
1069 len = fw->fw_rawsize - sizeof(*uhdr);
1070
1071 while (len >= sizeof(tlv)) {
1072 size_t tlv_len;
1073 void *tlv_data;
1074
1075 memcpy(&tlv, data, sizeof(tlv));
1076 tlv_len = le32toh(tlv.length);
1077 tlv_type = le32toh(tlv.type);
1078
1079 len -= sizeof(tlv);
1080 data += sizeof(tlv);
1081 tlv_data = data;
1082
1083 if (len < tlv_len) {
1084 printf("%s: firmware too short: %zu bytes\n",
1085 DEVNAME(sc), len);
1086 err = EINVAL;
1087 goto parse_out;
1088 }
1089
1090 switch (tlv_type) {
1091 case IWX_UCODE_TLV_PROBE_MAX_LEN:
1092 if (tlv_len < sizeof(uint32_t)) {
1093 err = EINVAL;
1094 goto parse_out;
1095 }
1096 sc->sc_capa_max_probe_len
1097 = le32toh(*(uint32_t *)tlv_data);
1098 if (sc->sc_capa_max_probe_len >
1099 IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
1100 err = EINVAL;
1101 goto parse_out;
1102 }
1103 break;
1104 case IWX_UCODE_TLV_PAN:
1105 if (tlv_len) {
1106 err = EINVAL;
1107 goto parse_out;
1108 }
1109 sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
1110 break;
1111 case IWX_UCODE_TLV_FLAGS:
1112 if (tlv_len < sizeof(uint32_t)) {
1113 err = EINVAL;
1114 goto parse_out;
1115 }
1116 /*
1117 * Apparently there can be many flags, but Linux driver
1118 * parses only the first one, and so do we.
1119 *
1120 * XXX: why does this override IWX_UCODE_TLV_PAN?
1121 * Intentional or a bug? Observations from
1122 * current firmware file:
1123 * 1) TLV_PAN is parsed first
1124 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
1125 * ==> this resets TLV_PAN to itself... hnnnk
1126 */
1127 sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
1128 break;
1129 case IWX_UCODE_TLV_CSCHEME:
1130 err = iwx_store_cscheme(sc, tlv_data, tlv_len);
1131 if (err)
1132 goto parse_out;
1133 break;
1134 case IWX_UCODE_TLV_NUM_OF_CPU: {
1135 uint32_t num_cpu;
1136 if (tlv_len != sizeof(uint32_t)) {
1137 err = EINVAL;
1138 goto parse_out;
1139 }
1140 num_cpu = le32toh(*(uint32_t *)tlv_data);
1141 if (num_cpu < 1 || num_cpu > 2) {
1142 err = EINVAL;
1143 goto parse_out;
1144 }
1145 break;
1146 }
1147 case IWX_UCODE_TLV_SEC_RT:
1148 err = iwx_firmware_store_section(sc,
1149 IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
1150 if (err)
1151 goto parse_out;
1152 break;
1153 case IWX_UCODE_TLV_SEC_INIT:
1154 err = iwx_firmware_store_section(sc,
1155 IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
1156 if (err)
1157 goto parse_out;
1158 break;
1159 case IWX_UCODE_TLV_SEC_WOWLAN:
1160 err = iwx_firmware_store_section(sc,
1161 IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
1162 if (err)
1163 goto parse_out;
1164 break;
1165 case IWX_UCODE_TLV_DEF_CALIB:
1166 if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
1167 err = EINVAL;
1168 goto parse_out;
1169 }
1170 err = iwx_set_default_calib(sc, tlv_data);
1171 if (err)
1172 goto parse_out;
1173 break;
1174 case IWX_UCODE_TLV_PHY_SKU:
1175 if (tlv_len != sizeof(uint32_t)) {
1176 err = EINVAL;
1177 goto parse_out;
1178 }
1179 sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
1180 break;
1181
1182 case IWX_UCODE_TLV_API_CHANGES_SET: {
1183 struct iwx_ucode_api *api;
1184 int idx, i;
1185 if (tlv_len != sizeof(*api)) {
1186 err = EINVAL;
1187 goto parse_out;
1188 }
1189 api = (struct iwx_ucode_api *)tlv_data;
1190 idx = le32toh(api->api_index);
1191 if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
1192 err = EINVAL;
1193 goto parse_out;
1194 }
1195 for (i = 0; i < 32; i++) {
1196 if ((le32toh(api->api_flags) & (1 << i)) == 0)
1197 continue;
1198 setbit(sc->sc_ucode_api, i + (32 * idx));
1199 }
1200 break;
1201 }
1202
1203 case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
1204 struct iwx_ucode_capa *capa;
1205 int idx, i;
1206 if (tlv_len != sizeof(*capa)) {
1207 err = EINVAL;
1208 goto parse_out;
1209 }
1210 capa = (struct iwx_ucode_capa *)tlv_data;
1211 idx = le32toh(capa->api_index);
1212 if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
1213 goto parse_out;
1214 }
1215 for (i = 0; i < 32; i++) {
1216 if ((le32toh(capa->api_capa) & (1 << i)) == 0)
1217 continue;
1218 setbit(sc->sc_enabled_capa, i + (32 * idx));
1219 }
1220 break;
1221 }
1222
1223 case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
1224 case IWX_UCODE_TLV_FW_GSCAN_CAPA:
1225 /* ignore, not used by current driver */
1226 break;
1227
1228 case IWX_UCODE_TLV_SEC_RT_USNIFFER:
1229 err = iwx_firmware_store_section(sc,
1230 IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
1231 tlv_len);
1232 if (err)
1233 goto parse_out;
1234 break;
1235
1236 case IWX_UCODE_TLV_PAGING:
1237 if (tlv_len != sizeof(uint32_t)) {
1238 err = EINVAL;
1239 goto parse_out;
1240 }
1241 break;
1242
1243 case IWX_UCODE_TLV_N_SCAN_CHANNELS:
1244 if (tlv_len != sizeof(uint32_t)) {
1245 err = EINVAL;
1246 goto parse_out;
1247 }
1248 sc->sc_capa_n_scan_channels =
1249 le32toh(*(uint32_t *)tlv_data);
1250 if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
1251 err = ERANGE;
1252 goto parse_out;
1253 }
1254 break;
1255
1256 case IWX_UCODE_TLV_FW_VERSION:
1257 if (tlv_len != sizeof(uint32_t) * 3) {
1258 err = EINVAL;
1259 goto parse_out;
1260 }
1261
1262 iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1263 le32toh(((uint32_t *)tlv_data)[0]),
1264 le32toh(((uint32_t *)tlv_data)[1]),
1265 le32toh(((uint32_t *)tlv_data)[2]));
1266 break;
1267
1268 case IWX_UCODE_TLV_FW_DBG_DEST: {
1269 struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
1270
1271 fw->dbg_dest_ver = (uint8_t *)tlv_data;
1272 if (*fw->dbg_dest_ver != 0) {
1273 err = EINVAL;
1274 goto parse_out;
1275 }
1276
1277 if (fw->dbg_dest_tlv_init)
1278 break;
1279 fw->dbg_dest_tlv_init = true;
1280
1281 dest_v1 = (void *)tlv_data;
1282 fw->dbg_dest_tlv_v1 = dest_v1;
1283 fw->n_dest_reg = tlv_len -
1284 offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
1285 fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
1286 DPRINTF(("%s: found debug dest; n_dest_reg=%d\n", __func__, fw->n_dest_reg));
1287 break;
1288 }
1289
1290 case IWX_UCODE_TLV_FW_DBG_CONF: {
1291 struct iwx_fw_dbg_conf_tlv *conf = (void *)tlv_data;
1292
1293 if (!fw->dbg_dest_tlv_init ||
1294 conf->id >= nitems(fw->dbg_conf_tlv) ||
1295 fw->dbg_conf_tlv[conf->id] != NULL)
1296 break;
1297
1298 DPRINTF(("Found debug configuration: %d\n", conf->id));
1299 fw->dbg_conf_tlv[conf->id] = conf;
1300 fw->dbg_conf_tlv_len[conf->id] = tlv_len;
1301 break;
1302 }
1303
1304 case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
1305 struct iwx_umac_debug_addrs *dbg_ptrs =
1306 (void *)tlv_data;
1307
1308 if (tlv_len != sizeof(*dbg_ptrs)) {
1309 err = EINVAL;
1310 goto parse_out;
1311 }
1312 if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1313 break;
1314 sc->sc_uc.uc_umac_error_event_table =
1315 le32toh(dbg_ptrs->error_info_addr) &
1316 ~IWX_FW_ADDR_CACHE_CONTROL;
1317 sc->sc_uc.error_event_table_tlv_status |=
1318 IWX_ERROR_EVENT_TABLE_UMAC;
1319 break;
1320 }
1321
1322 case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
1323 struct iwx_lmac_debug_addrs *dbg_ptrs =
1324 (void *)tlv_data;
1325
1326 if (tlv_len != sizeof(*dbg_ptrs)) {
1327 err = EINVAL;
1328 goto parse_out;
1329 }
1330 if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1331 break;
1332 sc->sc_uc.uc_lmac_error_event_table[0] =
1333 le32toh(dbg_ptrs->error_event_table_ptr) &
1334 ~IWX_FW_ADDR_CACHE_CONTROL;
1335 sc->sc_uc.error_event_table_tlv_status |=
1336 IWX_ERROR_EVENT_TABLE_LMAC1;
1337 break;
1338 }
1339
1340 case IWX_UCODE_TLV_FW_MEM_SEG:
1341 break;
1342
1343 case IWX_UCODE_TLV_CMD_VERSIONS:
1344 if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
1345 tlv_len /= sizeof(struct iwx_fw_cmd_version);
1346 tlv_len *= sizeof(struct iwx_fw_cmd_version);
1347 }
1348 if (sc->n_cmd_versions != 0) {
1349 err = EINVAL;
1350 goto parse_out;
1351 }
1352 if (tlv_len > sizeof(sc->cmd_versions)) {
1353 err = EINVAL;
1354 goto parse_out;
1355 }
1356 memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
1357 sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
1358 break;
1359
1360 case IWX_UCODE_TLV_FW_RECOVERY_INFO:
1361 break;
1362
1363 case IWX_UCODE_TLV_FW_FSEQ_VERSION:
1364 case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
1365 case IWX_UCODE_TLV_FW_NUM_STATIONS:
1366 break;
1367
1368 /* undocumented TLVs found in iwx-cc-a0-46 image */
1369 case 58:
1370 case 0x1000003:
1371 case 0x1000004:
1372 break;
1373
1374 /* undocumented TLVs found in iwx-cc-a0-48 image */
1375 case 0x1000000:
1376 case 0x1000002:
1377 break;
1378
1379 case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
1380 case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
1381 case IWX_UCODE_TLV_TYPE_HCMD:
1382 case IWX_UCODE_TLV_TYPE_REGIONS:
1383 case IWX_UCODE_TLV_TYPE_TRIGGERS:
1384 case IWX_UCODE_TLV_TYPE_CONF_SET:
1385 break;
1386
1387 /* undocumented TLV found in iwx-cc-a0-67 image */
1388 case 0x100000b:
1389 break;
1390
1391 default:
1392 err = EINVAL;
1393 goto parse_out;
1394 }
1395
1396 len -= roundup(tlv_len, 4);
1397 data += roundup(tlv_len, 4);
1398 }
1399
1400 KASSERT(err == 0);
1401
1402 parse_out:
1403 if (err) {
1404 printf("%s: firmware parse error %d, "
1405 "section type %d\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), err, tlv_type);
1406 }
1407
1408 out:
1409 if (err) {
1410 fw->fw_status = IWX_FW_STATUS_NONE;
1411 if (fw->fw_rawdata != NULL)
1412 iwx_fw_info_free(fw);
1413 } else
1414 fw->fw_status = IWX_FW_STATUS_DONE;
1415 wakeup(&sc->sc_fw);
1416
1417 return err;
1418}
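The parse loop above is a conventional TLV walk: read a fixed header, bounds-check the payload, dispatch on type, then advance by the payload length rounded up to a 4-byte boundary. A standalone sketch of the same walk, with simplified types (the real iwx_ucode_tlv carries type and length as little-endian 32-bit fields; byte swapping is omitted here):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define ROUNDUP4(x) (((x) + 3) & ~(size_t)3)

    struct tlv_hdr {
            uint32_t type;     /* dispatch key */
            uint32_t length;   /* payload bytes, not counting this header */
    };

    /* Returns 0 when the buffer parses cleanly, -1 on truncation. */
    int
    tlv_walk(const uint8_t *data, size_t len)
    {
            struct tlv_hdr tlv;

            while (len >= sizeof(tlv)) {
                    memcpy(&tlv, data, sizeof(tlv));
                    len -= sizeof(tlv);
                    data += sizeof(tlv);
                    if (len < tlv.length)
                            return -1;      /* "firmware too short" */
                    /* ... dispatch on tlv.type, consuming tlv.length bytes ... */
                    if (len < ROUNDUP4(tlv.length))
                            return 0;       /* padding ran off the end; stop */
                    len -= ROUNDUP4(tlv.length);
                    data += ROUNDUP4(tlv.length);
            }
            return 0;
    }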
1419
1420uint32_t
1421iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1422{
1423	IWX_WRITE(sc,
1424	    IWX_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
1425	IWX_BARRIER_READ_WRITE(sc);
1426	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
1427 }
1428
1429 uint32_t
1430 iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
1431 {
1432	iwx_nic_assert_locked(sc);
1433	return iwx_read_prph_unlocked(sc, addr);
1434 }
1435
1436 void
1437 iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1438 {
1439	IWX_WRITE(sc,
1440	    IWX_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
1441	IWX_BARRIER_WRITE(sc);
1442	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
1443 }
1444
1445void
1446iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1447{
1448 iwx_nic_assert_locked(sc);
1449 iwx_write_prph_unlocked(sc, addr, val);
1450}
1451
1452void
1453iwx_write_prph64(struct iwx_softc *sc, uint64_t addr, uint64_t val)
1454{
1455 iwx_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
1456 iwx_write_prph(sc, (uint32_t)addr + 4, val >> 32);
1457}
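/*
 * Worked example (illustrative, not driver code): iwx_write_prph64()
 * issues two 32-bit PRPH writes in little-endian register order.  For
 * val = 0x1122334455667788, the low word 0x55667788 lands at addr and
 * the high word 0x11223344 at addr + 4.
 */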
1458
1459int
1460iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1461{
1462 int offs, err = 0;
1463 uint32_t *vals = buf;
1464
1465 if (iwx_nic_lock(sc)) {
1466		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1467		for (offs = 0; offs < dwords; offs++)
1468			vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1469		iwx_nic_unlock(sc);
1470	} else {
1471		err = EBUSY;
1472	}
1473	return err;
1474 }
1475
1476 int
1477 iwx_write_mem(struct iwx_softc *sc, uint32_t addr, const void *buf, int dwords)
1478 {
1479	int offs;
1480	const uint32_t *vals = buf;
1481
1482	if (iwx_nic_lock(sc)) {
1483		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WADDR, addr);
1484		/* WADDR auto-increments */
1485		for (offs = 0; offs < dwords; offs++) {
1486			uint32_t val = vals ? vals[offs] : 0;
1487			IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WDAT, val);
1488		}
1489		iwx_nic_unlock(sc);
1490	} else {
1491		return EBUSY;
1492	}
1493 return 0;
1494}
1495
1496int
1497iwx_write_mem32(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1498{
1499 return iwx_write_mem(sc, addr, &val, 1);
1500}
1501
1502int
1503iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
1504 int timo)
1505{
1506 for (;;) {
1507		if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
1508			return 1;
1509		}
1510		if (timo < 10) {
1511			return 0;
1512		}
1513		timo -= 10;
1514		DELAY(10);
1515 }
1516}
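/*
 * Usage note: iwx_poll_bit() spins in 10 usec steps until
 * (reg & mask) == (bits & mask) or the timeout 'timo' (in usec) runs
 * out, returning nonzero on success and 0 on timeout.  A typical call,
 * as seen in iwx_apm_init() below:
 *
 *	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
 *	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
 *	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000))
 *		err = ETIMEDOUT;	(gave up after ~25 ms)
 */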
1517
1518int
1519iwx_nic_lock(struct iwx_softc *sc)
1520{
1521 if (sc->sc_nic_locks > 0) {
1522 iwx_nic_assert_locked(sc);
1523 sc->sc_nic_locks++;
1524 return 1; /* already locked */
1525 }
1526
1527	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1528	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1529
1530	DELAY(2);
1531
1532	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
1533	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1534	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1535	    | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1536		sc->sc_nic_locks++;
1537		return 1;
1538	}
1539
1540	printf("%s: acquiring device failed\n", DEVNAME(sc));
1541	return 0;
1542}
1543
1544void
1545iwx_nic_assert_locked(struct iwx_softc *sc)
1546{
1547 if (sc->sc_nic_locks <= 0)
1548		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1549 }
1550
1551 void
1552 iwx_nic_unlock(struct iwx_softc *sc)
1553 {
1554	if (sc->sc_nic_locks > 0) {
1555		if (--sc->sc_nic_locks == 0)
1556			IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1557			    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1558	} else
1559		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1560}
1561
1562int
1563iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1564 uint32_t mask)
1565{
1566 uint32_t val;
1567
1568 if (iwx_nic_lock(sc)) {
1569 val = iwx_read_prph(sc, reg) & mask;
1570 val |= bits;
1571 iwx_write_prph(sc, reg, val);
1572 iwx_nic_unlock(sc);
1573 return 0;
1574 }
1575	return EBUSY;
1576}
1577
1578int
1579iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1580{
1581 return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
1582}
1583
1584int
1585iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1586{
1587 return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
1588}
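/*
 * Note: both wrappers above reduce to the masked read-modify-write in
 * iwx_set_bits_mask_prph(): setting keeps every existing bit (mask ~0)
 * and ORs in 'bits', while clearing keeps everything except 'bits'
 * (mask ~bits) and ORs in nothing.
 */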
1589
1590int
1591iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
1592 bus_size_t size, bus_size_t alignment)
1593{
1594 int nsegs, err;
1595 caddr_t va;
1596
1597 dma->tag = tag;
1598 dma->size = size;
1599
1600	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1601	    &dma->map);
1602	if (err)
1603		goto fail;
1604
1605	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1606	    BUS_DMA_NOWAIT);
1607	if (err)
1608		goto fail;
1609
1610	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1611	    BUS_DMA_NOWAIT);
1612	if (err)
1613		goto fail;
1614	dma->vaddr = va;
1615
1616	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1617	    BUS_DMA_NOWAIT);
1618	if (err)
1619		goto fail;
1620
1621	memset(dma->vaddr, 0, size);
1622	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1623 dma->paddr = dma->map->dm_segs[0].ds_addr;
1624
1625 return 0;
1626
1627 fail:	iwx_dma_contig_free(dma);
1628 return err;
1629}
1630
1631void
1632iwx_dma_contig_free(struct iwx_dma_info *dma)
1633{
1634	if (dma->map != NULL) {
1635		if (dma->vaddr != NULL) {
1636			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1637			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1638			bus_dmamap_unload(dma->tag, dma->map);
1639			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1640			bus_dmamem_free(dma->tag, &dma->seg, 1);
1641			dma->vaddr = NULL;
1642		}
1643		bus_dmamap_destroy(dma->tag, dma->map);
1644		dma->map = NULL;
1645 }
1646}
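/*
 * Note on the helper pair above: iwx_dma_contig_alloc() walks the usual
 * bus_dma(9) chain (bus_dmamap_create -> bus_dmamem_alloc ->
 * bus_dmamem_map -> bus_dmamap_load) and leaves dma->vaddr/dma->paddr
 * pointing at a single zeroed, physically contiguous segment;
 * iwx_dma_contig_free() unwinds the same steps in reverse.  The ring
 * allocators below rely on this pair throughout.
 */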
1647
1648int
1649iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1650{
1651 bus_size_t size;
1652 int i, err;
1653
1654 ring->cur = 0;
1655
1656 /* Allocate RX descriptors (256-byte aligned). */
1657	size = IWX_RX_MQ_RING_COUNT * sizeof(uint64_t);
1658	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
1659	if (err) {
1660		printf("%s: could not allocate RX ring DMA memory\n",
1661		    DEVNAME(sc));
1662		goto fail;
1663	}
1664	ring->desc = ring->free_desc_dma.vaddr;
1665
1666	/* Allocate RX status area (16-byte aligned). */
1667	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1668	    sizeof(*ring->stat), 16);
1669	if (err) {
1670		printf("%s: could not allocate RX status DMA memory\n",
1671		    DEVNAME(sc));
1672		goto fail;
1673	}
1674	ring->stat = ring->stat_dma.vaddr;
1675
1676	size = IWX_RX_MQ_RING_COUNT * sizeof(uint32_t);
1677	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1678	    size, 256);
1679	if (err) {
1680		printf("%s: could not allocate RX ring DMA memory\n",
1681		    DEVNAME(sc));
1682		goto fail;
1683	}
1684
1685	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1686		struct iwx_rx_data *data = &ring->data[i];
1687
1688		memset(data, 0, sizeof(*data));
1689		err = bus_dmamap_create(sc->sc_dmat, IWX_RBUF_SIZE, 1,
1690		    IWX_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1691		    &data->map);
1692		if (err) {
1693			printf("%s: could not create RX buf DMA map\n",
1694			    DEVNAME(sc));
1695			goto fail;
1696		}
1697
1698		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
1699 if (err)
1700 goto fail;
1701 }
1702 return 0;
1703
1704 fail:	iwx_free_rx_ring(sc, ring);
1705 return err;
1706}
1707
1708void
1709iwx_disable_rx_dma(struct iwx_softc *sc)
1710{
1711 int ntries;
1712
1713 if (iwx_nic_lock(sc)) {
1714		iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
1715		for (ntries = 0; ntries < 1000; ntries++) {
1716			if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
1717			    IWX_RXF_DMA_IDLE)
1718				break;
1719			DELAY(10);
1720 }
1721 iwx_nic_unlock(sc);
1722 }
1723}
1724
1725void
1726iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1727{
1728 ring->cur = 0;
1729	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1730	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1731	memset(ring->stat, 0, sizeof(*ring->stat));
1732	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1733	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1734
1735}
1736
1737void
1738iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1739{
1740 int i;
1741
1742 iwx_dma_contig_free(&ring->free_desc_dma);
1743 iwx_dma_contig_free(&ring->stat_dma);
1744 iwx_dma_contig_free(&ring->used_desc_dma);
1745
1746	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1747		struct iwx_rx_data *data = &ring->data[i];
1748
1749		if (data->m != NULL) {
1750			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1751			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1752			bus_dmamap_unload(sc->sc_dmat, data->map);
1753			m_freem(data->m);
1754			data->m = NULL;
1755		}
1756		if (data->map != NULL)
1757			bus_dmamap_destroy(sc->sc_dmat, data->map);
1758 }
1759}
1760
1761int
1762iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
1763{
1764 bus_addr_t paddr;
1765 bus_size_t size;
1766 int i, err;
1767
1768 ring->qid = qid;
1769 ring->queued = 0;
1770 ring->cur = 0;
1771 ring->tail = 0;
1772
1773 /* Allocate TX descriptors (256-byte aligned). */
1774	size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
1775	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1776	if (err) {
1777		printf("%s: could not allocate TX ring DMA memory\n",
1778		    DEVNAME(sc));
1779		goto fail;
1780	}
1781	ring->desc = ring->desc_dma.vaddr;
1782
1783	/*
1784	 * The hardware supports up to 512 Tx rings which is more
1785	 * than we currently need.
1786	 *
1787	 * In DQA mode we use 1 command queue + 1 default queue for
1788	 * management, control, and non-QoS data frames.
1789	 * The command queue is sc->txq[0], our default queue is sc->txq[1].
1790	 *
1791	 * Tx aggregation requires additional queues, one queue per TID for
1792	 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
1793	 * Firmware may assign its own internal IDs for these queues
1794	 * depending on which TID gets aggregation enabled first.
1795	 * The driver maintains a table mapping driver-side queue IDs
1796	 * to firmware-side queue IDs.
1797	 */
1798
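	/*
	 * Illustrative sketch of the layout described above (the constant
	 * 2 is spelled out here only for the example; the driver uses its
	 * own defines for the first aggregation queue):
	 *
	 *	int qid = 2 + tid;		(sc->txq[2..9] holds TID 0..7)
	 *	int fwqid = sc->aggqid[tid];	(firmware-side ID, once known)
	 */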
1799	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl,
1800	    sizeof(struct iwx_agn_scd_bc_tbl), 0);
1801	if (err) {
1802		printf("%s: could not allocate byte count table DMA memory\n",
1803		    DEVNAME(sc));
1804		goto fail;
1805	}
1806
1807	size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
1808	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
1809	    IWX_FIRST_TB_SIZE_ALIGN);
1810	if (err) {
1811		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1812		goto fail;
1813	}
1814	ring->cmd = ring->cmd_dma.vaddr;
1815
1816	paddr = ring->cmd_dma.paddr;
1817	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1818		struct iwx_tx_data *data = &ring->data[i];
1819		size_t mapsize;
1820
1821		data->cmd_paddr = paddr;
1822		paddr += sizeof(struct iwx_device_cmd);
1823
1824		/* FW commands may require more mapped space than packets. */
1825		if (qid == IWX_DQA_CMD_QUEUE)
1826			mapsize = (sizeof(struct iwx_cmd_header) +
1827			    IWX_MAX_CMD_PAYLOAD_SIZE);
1828		else
1829			mapsize = MCLBYTES;
1830		err = bus_dmamap_create(sc->sc_dmat, mapsize,
1831		    IWX_TFH_NUM_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1832		    &data->map);
1833		if (err) {
1834			printf("%s: could not create TX buf DMA map\n",
1835			    DEVNAME(sc));
1836			goto fail;
1837		}
1838	}
1839	KASSERT(paddr == ring->cmd_dma.paddr + size);
1840 return 0;
1841
1842 fail:	iwx_free_tx_ring(sc, ring);
1843 return err;
1844}
1845
1846void
1847iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1848{
1849 int i;
1850
1851	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1852		struct iwx_tx_data *data = &ring->data[i];
1853
1854		if (data->m != NULL) {
1855			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1856			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1857			bus_dmamap_unload(sc->sc_dmat, data->map);
1858			m_freem(data->m);
1859			data->m = NULL;
1860		}
1861	}
1862
1863	/* Clear byte count table. */
1864	memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
1865
1866	/* Clear TX descriptors. */
1867	memset(ring->desc, 0, ring->desc_dma.size);
1868	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1869	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1870	sc->qfullmsk &= ~(1 << ring->qid);
1871	sc->qenablemsk &= ~(1 << ring->qid);
1872	for (i = 0; i < nitems(sc->aggqid); i++) {
1873 if (sc->aggqid[i] == ring->qid) {
1874 sc->aggqid[i] = 0;
1875 break;
1876 }
1877 }
1878 ring->queued = 0;
1879 ring->cur = 0;
1880 ring->tail = 0;
1881 ring->tid = 0;
1882}
1883
1884void
1885iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1886{
1887 int i;
1888
1889 iwx_dma_contig_free(&ring->desc_dma);
1890 iwx_dma_contig_free(&ring->cmd_dma);
1891 iwx_dma_contig_free(&ring->bc_tbl);
1892
1893	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1894		struct iwx_tx_data *data = &ring->data[i];
1895
1896		if (data->m != NULL) {
1897			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1898			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1899			bus_dmamap_unload(sc->sc_dmat, data->map);
1900			m_freem(data->m);
1901			data->m = NULL;
1902		}
1903		if (data->map != NULL)
1904			bus_dmamap_destroy(sc->sc_dmat, data->map);
1905 }
1906}
1907
1908void
1909iwx_enable_rfkill_int(struct iwx_softc *sc)
1910{
1911 if (!sc->sc_msix) {
1912		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
1913		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1914	} else {
1915		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1916		    sc->sc_fh_init_mask);
1917		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1918		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1919		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1920	}
1921
1922	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1923	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1924}
1925
1926int
1927iwx_check_rfkill(struct iwx_softc *sc)
1928{
1929 uint32_t v;
1930 int rv;
1931
1932 /*
1933 * "documentation" is not really helpful here:
1934 * 27: HW_RF_KILL_SW
1935 * Indicates state of (platform's) hardware RF-Kill switch
1936 *
1937 * But apparently when it's off, it's on ...
1938 */
1939	v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
1940	rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1941	if (rv) {
1942		sc->sc_flags |= IWX_FLAG_RFKILL;
1943	} else {
1944		sc->sc_flags &= ~IWX_FLAG_RFKILL;
1945 }
1946
1947 return rv;
1948}
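/*
 * Put differently: a *clear* HW_RF_KILL_SW bit makes rv nonzero and sets
 * IWX_FLAG_RFKILL (kill switch engaged, radio blocked); a set bit clears
 * the flag and allows the radio to operate.
 */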
1949
1950void
1951iwx_enable_interrupts(struct iwx_softc *sc)
1952{
1953 if (!sc->sc_msix) {
1954		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
1955		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1956	} else {
1957		/*
1958		 * fh/hw_mask keeps all the unmasked causes.
1959		 * Unlike MSI, in MSI-X a cause is enabled while its mask bit is unset.
1960		 */
1961		sc->sc_hw_mask = sc->sc_hw_init_mask;
1962		sc->sc_fh_mask = sc->sc_fh_init_mask;
1963		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1964		    ~sc->sc_fh_mask);
1965		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1966		    ~sc->sc_hw_mask);
1967 }
1968}
1969
1970void
1971iwx_enable_fwload_interrupt(struct iwx_softc *sc)
1972{
1973 if (!sc->sc_msix) {
1974		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
1975		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1976	} else {
1977		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1978		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
1979		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
1980		/*
1981		 * Leave all the FH causes enabled to get the ALIVE
1982		 * notification.
1983		 */
1984		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1985		    ~sc->sc_fh_init_mask);
1986 sc->sc_fh_mask = sc->sc_fh_init_mask;
1987 }
1988}
1989
1990void
1991iwx_restore_interrupts(struct iwx_softc *sc)
1992{
1993	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1994}
1995
1996void
1997iwx_disable_interrupts(struct iwx_softc *sc)
1998{
1999 if (!sc->sc_msix) {
2000		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
2001
2002		/* acknowledge all interrupts */
2003		IWX_WRITE(sc, IWX_CSR_INT, ~0);
2004		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
2005	} else {
2006		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2007		    sc->sc_fh_init_mask);
2008		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2009		    sc->sc_hw_init_mask);
2010 }
2011}
2012
2013void
2014iwx_ict_reset(struct iwx_softc *sc)
2015{
2016 iwx_disable_interrupts(sc);
2017
2018	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
2019	sc->ict_cur = 0;
2020
2021	/* Set physical address of ICT (4KB aligned). */
2022	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
2023	    IWX_CSR_DRAM_INT_TBL_ENABLE
2024	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
2025	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
2026	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);
2027
2028	/* Switch to ICT interrupt mode in driver. */
2029	sc->sc_flags |= IWX_FLAG_USE_ICT;
2030
2031	IWX_WRITE(sc, IWX_CSR_INT, ~0);
2032 iwx_enable_interrupts(sc);
2033}
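/*
 * Note on the ICT base address written above: the table is 4KB aligned,
 * so its low 12 bits are zero and the physical address is stored shifted
 * right by IWX_ICT_PADDR_SHIFT (12), leaving the register's top bits free
 * for the enable, wrap-check and write-pointer flags.
 */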
2034
2035#define IWX_HW_READY_TIMEOUT 50
2036int
2037iwx_set_hw_ready(struct iwx_softc *sc)
2038{
2039 int ready;
2040
2041	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2042	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
2043
2044	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
2045	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2046	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2047	    IWX_HW_READY_TIMEOUT);
2048	if (ready)
2049		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
2050		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);
2051
2052 return ready;
2053}
2054#undef IWX_HW_READY_TIMEOUT
2055
2056int
2057iwx_prepare_card_hw(struct iwx_softc *sc)
2058{
2059 int t = 0;
2060 int ntries;
2061
2062 if (iwx_set_hw_ready(sc))
2063 return 0;
2064
2065	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2066	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2067	DELAY(1000);
2068
2069	for (ntries = 0; ntries < 10; ntries++) {
2070		/* If HW is not ready, prepare the conditions to check again */
2071		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2072		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);
2073
2074		do {
2075			if (iwx_set_hw_ready(sc))
2076				return 0;
2077			DELAY(200);
2078			t += 200;
2079		} while (t < 150000);
2080		DELAY(25000);
2081	}
2082
2083	return ETIMEDOUT;
2084}
2085
2086int
2087iwx_force_power_gating(struct iwx_softc *sc)
2088{
2089 int err;
2090
2091	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2092	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2093	if (err)
2094		return err;
2095	DELAY(20);
2096	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2097	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
2098	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
2099	if (err)
2100		return err;
2101	DELAY(20);
2102	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2103	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2104 return err;
2105}
2106
2107void
2108iwx_apm_config(struct iwx_softc *sc)
2109{
2110 pcireg_t lctl, cap;
2111
2112 /*
2113 * L0S states have been found to be unstable with our devices
2114 * and in newer hardware they are not officially supported at
2115 * all, so we must always set the L0S_DISABLED bit.
2116 */
2117	IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
2118
2119	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2120	    sc->sc_cap_off + PCI_PCIE_LCSR);
2121	sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
2122	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2123	    sc->sc_cap_off + PCI_PCIE_DCSR2);
2124	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
2125	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
2126	    DEVNAME(sc),
2127	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
2128	    sc->sc_ltr_enabled ? "En" : "Dis"));
2129}
2130
2131/*
2132 * Start up NIC's basic functionality after it has been reset
2133 * e.g. after platform boot or shutdown.
2134 * NOTE: This does not load uCode nor start the embedded processor
2135 */
2136int
2137iwx_apm_init(struct iwx_softc *sc)
2138{
2139 int err = 0;
2140
2141 /*
2142 * Disable L0s without affecting L1;
2143 * don't wait for ICH L0s (ICH bug W/A)
2144 */
2145	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
2146	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
2147
2148	/* Set FH wait threshold to maximum (HW error during stress W/A) */
2149	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
2150
2151	/*
2152	 * Enable HAP INTA (interrupt from management bus) to
2153	 * wake device's PCI Express link L1a -> L0s
2154	 */
2155	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2156	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
2157
2158	iwx_apm_config(sc);
2159
2160	/*
2161	 * Set "initialization complete" bit to move adapter from
2162	 * D0U* --> D0A* (powered-up active) state.
2163	 */
2164	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2165
2166	/*
2167	 * Wait for clock stabilization; once stabilized, access to
2168	 * device-internal resources is supported, e.g. iwx_write_prph()
2169	 * and accesses to uCode SRAM.
2170	 */
2171	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2172	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2173	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2174		printf("%s: timeout waiting for clock stabilization\n",
2175		    DEVNAME(sc));
2176		err = ETIMEDOUT;
2177		goto out;
2178	}
2179 out:
2180	if (err)
2181		printf("%s: apm init error %d\n", DEVNAME(sc), err);
2182 return err;
2183}
2184
2185void
2186iwx_apm_stop(struct iwx_softc *sc)
2187{
2188	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2189	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2190	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2191	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
2192	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
2193	DELAY(1000);
2194	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2195	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2196	DELAY(5000);
2197
2198	/* stop device's busmaster DMA activity */
2199	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);
2200
2201	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
2202	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
2203	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
2204		printf("%s: timeout waiting for master\n", DEVNAME(sc));
2205
2206	/*
2207	 * Clear "initialization complete" bit to move adapter from
2208	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
2209	 */
2210	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2211	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2212}
2213
2214void
2215iwx_init_msix_hw(struct iwx_softc *sc)
2216{
2217 iwx_conf_msix_hw(sc, 0);
2218
2219 if (!sc->sc_msix)
2220 return;
2221
2222	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
2223	sc->sc_fh_mask = sc->sc_fh_init_mask;
2224	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
2225 sc->sc_hw_mask = sc->sc_hw_init_mask;
2226}
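/*
 * Note: the MSI-X mask registers have inverted polarity (a set bit masks
 * a cause), so the complemented reads above record exactly the causes the
 * hardware left enabled after reset.  Writing an init mask back verbatim,
 * as iwx_disable_interrupts() does, masks all of them again.
 */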
2227
2228void
2229iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
2230{
2231 int vector = 0;
2232
2233 if (!sc->sc_msix) {
2234 /* Newer chips default to MSIX. */
2235 if (!stopped && iwx_nic_lock(sc)) {
2236			iwx_write_prph(sc, IWX_UREG_CHICK,
2237			    IWX_UREG_CHICK_MSI_ENABLE);
2238			iwx_nic_unlock(sc);
2239		}
2240		return;
2241	}
2242
2243	if (!stopped && iwx_nic_lock(sc)) {
2244		iwx_write_prph(sc, IWX_UREG_CHICK, IWX_UREG_CHICK_MSIX_ENABLE);
2245		iwx_nic_unlock(sc);
2246	}
2247
2248	/* Disable all interrupts */
2249	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
2250	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);
2251
2252	/* Map fallback-queue (command/mgmt) to a single vector */
2253	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
2254	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2255	/* Map RSS queue (data) to the same vector */
2256	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
2257	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2258
2259	/* Enable the RX queues' cause interrupts */
2260	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2261	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);
2262
2263	/* Map non-RX causes to the same vector */
2264	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
2265	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2266	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
2267	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2268	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
2269	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2270	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
2271	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2272	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
2273	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2274	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
2275	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2276	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_IML),
2277	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2278	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
2279	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2280	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
2281	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2282	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
2283	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2284	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
2285	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2286	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
2287	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2288	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
2289	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2290	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
2291	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2292	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
2293	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2294
2295	/* Enable the non-RX cause interrupts */
2296	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2297	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2298	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2299	    IWX_MSIX_FH_INT_CAUSES_S2D |
2300	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
2301	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2302	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
2303	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2304	    IWX_MSIX_HW_INT_CAUSES_REG_IML |
2305	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2306	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2307	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2308	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2309	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
2310	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
2311	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2312	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
2313}
2314
2315int
2316iwx_clear_persistence_bit(struct iwx_softc *sc)
2317{
2318 uint32_t hpm, wprot;
2319
2320	hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
2321	if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
2322		wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
2323		if (wprot & IWX_PREG_WFPM_ACCESS) {
2324			printf("%s: cannot clear persistence bit\n",
2325			    DEVNAME(sc));
2326			return EPERM;
2327		}
2328		iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
2329		    hpm & ~IWX_PERSISTENCE_BIT);
2330 }
2331
2332 return 0;
2333}
2334
2335int
2336iwx_start_hw(struct iwx_softc *sc)
2337{
2338 int err;
2339
2340 err = iwx_prepare_card_hw(sc);
2341 if (err)
2342 return err;
2343
2344 err = iwx_clear_persistence_bit(sc);
2345 if (err)
2346 return err;
2347
2348 /* Reset the entire device */
2349	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2350	DELAY(5000);
2351
2352	if (sc->sc_integrated) {
2353		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2354		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2355		DELAY(20);
2356		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2357		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2358		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2359			printf("%s: timeout waiting for clock stabilization\n",
2360			    DEVNAME(sc));
2361			return ETIMEDOUT;
2362		}
2363
2364		err = iwx_force_power_gating(sc);
2365		if (err)
2366			return err;
2367
2368		/* Reset the entire device */
2369		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2370		DELAY(5000);
2371 }
2372
2373 err = iwx_apm_init(sc);
2374 if (err)
2375 return err;
2376
2377 iwx_init_msix_hw(sc);
2378
2379 iwx_enable_rfkill_int(sc);
2380 iwx_check_rfkill(sc);
2381
2382 return 0;
2383}
2384
2385void
2386iwx_stop_device(struct iwx_softc *sc)
2387{
2388 struct ieee80211com *ic = &sc->sc_ic;
2389 struct ieee80211_node *ni = ic->ic_bss;
2390 int i;
2391
2392 iwx_disable_interrupts(sc);
2393	sc->sc_flags &= ~IWX_FLAG_USE_ICT;
2394
2395	iwx_disable_rx_dma(sc);
2396	iwx_reset_rx_ring(sc, &sc->rxq);
2397	for (i = 0; i < nitems(sc->txq); i++)
2398		iwx_reset_tx_ring(sc, &sc->txq[i]);
2399	for (i = 0; i < IEEE80211_NUM_TID; i++) {
2400		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
2401		if (ba->ba_state != IEEE80211_BA_AGREED)
2402			continue;
2403		ieee80211_delba_request(ic, ni, 0, 1, i);
2404	}
2405
2406	/* Make sure (redundant) we've released our request to stay awake */
2407	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2408	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2409	if (sc->sc_nic_locks > 0)
2410		printf("%s: %d active NIC locks forcefully cleared\n",
2411		    DEVNAME(sc), sc->sc_nic_locks);
2412	sc->sc_nic_locks = 0;
2413
2414	/* Stop the device, and put it in low power state */
2415	iwx_apm_stop(sc);
2416
2417	/* Reset the on-board processor. */
2418	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2419	DELAY(5000);
2420
2421 /*
2422 * Upon stop, the IVAR table gets erased, so msi-x won't
2423 * work. This causes a bug in RF-KILL flows, since the interrupt
2424 * that enables radio won't fire on the correct irq, and the
2425 * driver won't be able to handle the interrupt.
2426 * Configure the IVAR table again after reset.
2427 */
2428 iwx_conf_msix_hw(sc, 1);
2429
2430 /*
2431 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2432 * Clear the interrupt again.
2433 */
2434 iwx_disable_interrupts(sc);
2435
2436 /* Even though we stop the HW we still want the RF kill interrupt. */
2437 iwx_enable_rfkill_int(sc);
2438 iwx_check_rfkill(sc);
2439
2440 iwx_prepare_card_hw(sc);
2441
2442 iwx_ctxt_info_free_paging(sc);
2443}
2444
2445void
2446iwx_nic_config(struct iwx_softc *sc)
2447{
2448 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2449 uint32_t mask, val, reg_val = 0;
2450
2451 radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE(0x3 << 0)) >>
2452 IWX_FW_PHY_CFG_RADIO_TYPE_POS0;
2453 radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP(0x3 << 2)) >>
2454 IWX_FW_PHY_CFG_RADIO_STEP_POS2;
2455 radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH(0x3 << 4)) >>
2456 IWX_FW_PHY_CFG_RADIO_DASH_POS4;
2457
2458 reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev)(((sc->sc_hw_rev) & 0x000000C) >> 2) <<
2459 IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP(2);
2460 reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev)(((sc->sc_hw_rev) & 0x0000003) >> 0) <<
2461 IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH(0);
2462
2463 /* radio configuration */
2464 reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE(10);
2465 reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP(14);
2466 reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH(12);
2467
2468 mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2469 IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2470 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2471 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2472 IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2473 IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2474 IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2475
2476 val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2477 val &= ~mask;
2478 val |= reg_val;
2479 IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2480}
2481
2482int
2483iwx_nic_rx_init(struct iwx_softc *sc)
2484{
2485 IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);
2486
2487 /*
2488 * We don't configure the RFH; the firmware will do that.
2489 * Rx descriptors are set when firmware sends an ALIVE interrupt.
2490 */
2491 return 0;
2492}
2493
2494int
2495iwx_nic_init(struct iwx_softc *sc)
2496{
2497 int err;
2498
2499 iwx_apm_init(sc);
2500 iwx_nic_config(sc);
2501
2502 err = iwx_nic_rx_init(sc);
2503 if (err)
2504 return err;
2505
2506 IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2507
2508 return 0;
2509}
2510
2511/* Map a TID to an ieee80211_edca_ac category. */
2512const uint8_t iwx_tid_to_ac[IWX_MAX_TID_COUNT] = {
2513 EDCA_AC_BE,
2514 EDCA_AC_BK,
2515 EDCA_AC_BK,
2516 EDCA_AC_BE,
2517 EDCA_AC_VI,
2518 EDCA_AC_VI,
2519 EDCA_AC_VO,
2520 EDCA_AC_VO,
2521};
2522
2523/* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2524const uint8_t iwx_ac_to_tx_fifo[] = {
2525 IWX_GEN2_EDCA_TX_FIFO_BE,
2526 IWX_GEN2_EDCA_TX_FIFO_BK,
2527 IWX_GEN2_EDCA_TX_FIFO_VI,
2528 IWX_GEN2_EDCA_TX_FIFO_VO,
2529};
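
Taken together, the two tables above give the driver's TID-to-firmware-FIFO mapping: a TID first selects an EDCA access category, and the category then selects a Tx FIFO. A minimal sketch of the composition, assuming the caller has already bounds-checked tid against IWX_MAX_TID_COUNT (the helper name is hypothetical, not part of if_iwx.c):

/* Illustrative only: e.g. TID 5 -> EDCA_AC_VI -> IWX_GEN2_EDCA_TX_FIFO_VI. */
static uint8_t
iwx_tid_to_tx_fifo(uint8_t tid)
{
	return iwx_ac_to_tx_fifo[iwx_tid_to_ac[tid]];
}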
2530
2531int
2532iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
2533 int num_slots)
2534{
2535 struct iwx_tx_queue_cfg_cmd cmd;
2536 struct iwx_rx_packet *pkt;
2537 struct iwx_tx_queue_cfg_rsp *resp;
2538 struct iwx_host_cmd hcmd = {
2539 .id = IWX_SCD_QUEUE_CFG,
2540 .flags = IWX_CMD_WANT_RESP,
2541 .resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2542 };
2543 struct iwx_tx_ring *ring = &sc->txq[qid];
2544 int err, fwqid;
2545 uint32_t wr_idx;
2546 size_t resp_len;
2547
2548 iwx_reset_tx_ring(sc, ring);
2549
2550 memset(&cmd, 0, sizeof(cmd));
2551 cmd.sta_id = sta_id;
2552 cmd.tid = tid;
2553 cmd.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
2554 cmd.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2555 cmd.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
2556 cmd.tfdq_addr = htole64(ring->desc_dma.paddr);
2557
2558 hcmd.data[0] = &cmd;
2559 hcmd.len[0] = sizeof(cmd);
2560
2561 err = iwx_send_cmd(sc, &hcmd);
2562 if (err)
2563 return err;
2564
2565 pkt = hcmd.resp_pkt;
2566 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2567 DPRINTF(("SCD_QUEUE_CFG command failed\n"));
2568 err = EIO;
2569 goto out;
2570 }
2571
2572 resp_len = iwx_rx_packet_payload_len(pkt);
2573 if (resp_len != sizeof(*resp)) {
2574 DPRINTF(("SCD_QUEUE_CFG returned %zu bytes, expected %zu bytes\n", resp_len, sizeof(*resp)));
2575 err = EIO;
2576 goto out;
2577 }
2578
2579 resp = (void *)pkt->data;
2580 fwqid = le16toh(resp->queue_number);
2581 wr_idx = le16toh(resp->write_pointer);
2582
2583 /* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
2584 if (fwqid != qid) {
2585 DPRINTF(("requested qid %d but %d was assigned\n", qid, fwqid));
2586 err = EIO;
2587 goto out;
2588 }
2589
2590 if (wr_idx != ring->cur) {
2591 DPRINTF(("fw write index is %d but ring is %d\n", wr_idx, ring->cur));
2592 err = EIO;
2593 goto out;
2594 }
2595
2596 sc->qenablemsk |= (1 << qid);
2597 ring->tid = tid;
2598out:
2599 iwx_free_resp(sc, &hcmd);
2600 return err;
2601}
2602
2603int
2604iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
2605{
2606 struct iwx_tx_queue_cfg_cmd cmd;
2607 struct iwx_rx_packet *pkt;
2608 struct iwx_tx_queue_cfg_rsp *resp;
2609 struct iwx_host_cmd hcmd = {
2610 .id = IWX_SCD_QUEUE_CFG,
2611 .flags = IWX_CMD_WANT_RESP,
2612 .resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2613 };
2614 struct iwx_tx_ring *ring = &sc->txq[qid];
2615 int err;
2616
2617 memset(&cmd, 0, sizeof(cmd));
2618 cmd.sta_id = sta_id;
2619 cmd.tid = tid;
2620 cmd.flags = htole16(0); /* clear "queue enabled" flag */
2621 cmd.cb_size = htole32(0);
2622 cmd.byte_cnt_addr = htole64(0);
2623 cmd.tfdq_addr = htole64(0);
2624
2625 hcmd.data[0] = &cmd;
2626 hcmd.len[0] = sizeof(cmd);
2627
2628 err = iwx_send_cmd(sc, &hcmd);
2629 if (err)
2630 return err;
2631
2632 pkt = hcmd.resp_pkt;
2633 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2634 DPRINTF(("SCD_QUEUE_CFG command failed\n"));
2635 err = EIO;
2636 goto out;
2637 }
2638
2639 sc->qenablemsk &= ~(1 << qid);
2640 iwx_reset_tx_ring(sc, ring);
2641out:
2642 iwx_free_resp(sc, &hcmd);
2643 return err;
2644}
2645
2646void
2647iwx_post_alive(struct iwx_softc *sc)
2648{
2649 iwx_ict_reset(sc);
2650}
2651
2652/*
2653 * For the high priority TE use a time event type that has similar priority to
2654 * the FW's action scan priority.
2655 */
2656#define IWX_ROC_TE_TYPE_NORMAL IWX_TE_P2P_DEVICE_DISCOVERABLE
2657#define IWX_ROC_TE_TYPE_MGMT_TX IWX_TE_P2P_CLIENT_ASSOC
2658
2659int
2660iwx_send_time_event_cmd(struct iwx_softc *sc,
2661 const struct iwx_time_event_cmd *cmd)
2662{
2663 struct iwx_rx_packet *pkt;
2664 struct iwx_time_event_resp *resp;
2665 struct iwx_host_cmd hcmd = {
2666 .id = IWX_TIME_EVENT_CMD,
2667 .flags = IWX_CMD_WANT_RESP,
2668 .resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2669 };
2670 uint32_t resp_len;
2671 int err;
2672
2673 hcmd.data[0] = cmd;
2674 hcmd.len[0] = sizeof(*cmd);
2675 err = iwx_send_cmd(sc, &hcmd);
2676 if (err)
2677 return err;
2678
2679 pkt = hcmd.resp_pkt;
2680 if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2681 err = EIO;
2682 goto out;
2683 }
2684
2685 resp_len = iwx_rx_packet_payload_len(pkt);
2686 if (resp_len != sizeof(*resp)) {
2687 err = EIO;
2688 goto out;
2689 }
2690
2691 resp = (void *)pkt->data;
2692 if (le32toh(resp->status) == 0)
2693 sc->sc_time_event_uid = le32toh(resp->unique_id);
2694 else
2695 err = EIO;
2696out:
2697 iwx_free_resp(sc, &hcmd);
2698 return err;
2699}
2700
2701int
2702iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
2703 uint32_t duration)
2704{
2705 struct iwx_session_prot_cmd cmd = {
2706 .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
2707 in->in_color)),
2708 .action = htole32(IWX_FW_CTXT_ACTION_ADD),
2709 .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
2710 .duration_tu = htole32(duration * IEEE80211_DUR_TU),
2711 };
2712 uint32_t cmd_id;
2713
2714 cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
2715 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
2716}
2717
2718/*
2719 * NVM read access and content parsing. We do not support
2720 * external NVM or writing NVM.
2721 */
2722
2723uint8_t
2724iwx_fw_valid_tx_ant(struct iwx_softc *sc)
2725{
2726 uint8_t tx_ant;
2727
2728 tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
2729 >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
2730
2731 if (sc->sc_nvm.valid_tx_ant)
2732 tx_ant &= sc->sc_nvm.valid_tx_ant;
2733
2734 return tx_ant;
2735}
2736
2737uint8_t
2738iwx_fw_valid_rx_ant(struct iwx_softc *sc)
2739{
2740 uint8_t rx_ant;
2741
2742 rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
2743 >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
2744
2745 if (sc->sc_nvm.valid_rx_ant)
2746 rx_ant &= sc->sc_nvm.valid_rx_ant;
2747
2748 return rx_ant;
2749}
2750
2751void
2752iwx_init_channel_map(struct iwx_softc *sc, uint16_t *channel_profile_v3,
2753 uint32_t *channel_profile_v4, int nchan_profile)
2754{
2755 struct ieee80211com *ic = &sc->sc_ic;
2756 struct iwx_nvm_data *data = &sc->sc_nvm;
2757 int ch_idx;
2758 struct ieee80211_channel *channel;
2759 uint32_t ch_flags;
2760 int is_5ghz;
2761 int flags, hw_value;
2762 int nchan;
2763 const uint8_t *nvm_channels;
2764
2765 if (sc->sc_uhb_supported) {
2766 nchan = nitems(iwx_nvm_channels_uhb);
2767 nvm_channels = iwx_nvm_channels_uhb;
2768 } else {
2769 nchan = nitems(iwx_nvm_channels_8000);
2770 nvm_channels = iwx_nvm_channels_8000;
2771 }
2772
2773 for (ch_idx = 0; ch_idx < nchan && ch_idx < nchan_profile; ch_idx++) {
2774 if (channel_profile_v4)
2775 ch_flags = le32_to_cpup(channel_profile_v4 + ch_idx);
2776 else
2777 ch_flags = le16_to_cpup(channel_profile_v3 + ch_idx);
2778
2779 is_5ghz = ch_idx >= IWX_NUM_2GHZ_CHANNELS;
2780 if (is_5ghz && !data->sku_cap_band_52GHz_enable)
2781 ch_flags &= ~IWX_NVM_CHANNEL_VALID;
2782
2783 hw_value = nvm_channels[ch_idx];
2784 channel = &ic->ic_channels[hw_value];
2785
2786 if (!(ch_flags & IWX_NVM_CHANNEL_VALID)) {
2787 channel->ic_freq = 0;
2788 channel->ic_flags = 0;
2789 continue;
2790 }
2791
2792 if (!is_5ghz) {
2793 flags = IEEE80211_CHAN_2GHZ;
2794 channel->ic_flags
2795 = IEEE80211_CHAN_CCK
2796 | IEEE80211_CHAN_OFDM
2797 | IEEE80211_CHAN_DYN
2798 | IEEE80211_CHAN_2GHZ;
2799 } else {
2800 flags = IEEE80211_CHAN_5GHZ;
2801 channel->ic_flags =
2802 IEEE80211_CHAN_A;
2803 }
2804 channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2805
2806 if (!(ch_flags & IWX_NVM_CHANNEL_ACTIVE))
2807 channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2808
2809 if (data->sku_cap_11n_enable) {
2810 channel->ic_flags |= IEEE80211_CHAN_HT;
2811 if (ch_flags & IWX_NVM_CHANNEL_40MHZ)
2812 channel->ic_flags |= IEEE80211_CHAN_40MHZ;
2813 }
2814 }
2815}
2816
2817int
2818iwx_mimo_enabled(struct iwx_softc *sc)
2819{
2820 struct ieee80211com *ic = &sc->sc_ic;
2821
2822 return !sc->sc_nvm.sku_cap_mimo_disable &&
2823 (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
2824}
2825
2826void
2827iwx_setup_ht_rates(struct iwx_softc *sc)
2828{
2829 struct ieee80211com *ic = &sc->sc_ic;
2830 uint8_t rx_ant;
2831
2832 /* TX is supported with the same MCS as RX. */
2833 ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2834
2835 memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
2836 ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */
2837
2838 if (!iwx_mimo_enabled(sc))
2839 return;
2840
2841 rx_ant = iwx_fw_valid_rx_ant(sc);
2842 if ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
2843 (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)
2844 ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */
2845}
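
As a concrete illustration of the antenna-pair test above (the ANT_A/B/C bit values are taken from the expansions shown in this listing; the sample values are assumptions): two-stream MCS 8-15 is only advertised when an adjacent pair of receive chains is fully valid.

/* Sketch with assumed values, not driver code. IWX_ANT_A/B/C = bits 0/1/2. */
uint8_t two_streams = (1 << 0) | (1 << 1); /* A+B: matches IWX_ANT_AB, MCS 8-15 enabled */
uint8_t one_stream  = (1 << 0) | (1 << 2); /* A+C: neither AB nor BC complete, MCS 0-7 only */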
2846
2847void
2848iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
2849 uint16_t ssn, uint16_t buf_size)
2850{
2851 reorder_buf->head_sn = ssn;
2852 reorder_buf->num_stored = 0;
2853 reorder_buf->buf_size = buf_size;
2854 reorder_buf->last_amsdu = 0;
2855 reorder_buf->last_sub_index = 0;
2856 reorder_buf->removed = 0;
2857 reorder_buf->valid = 0;
2858 reorder_buf->consec_oldsn_drops = 0;
2859 reorder_buf->consec_oldsn_ampdu_gp2 = 0;
2860 reorder_buf->consec_oldsn_prev_drop = 0;
2861}
2862
2863void
2864iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
2865{
2866 int i;
2867 struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
2868 struct iwx_reorder_buf_entry *entry;
2869
2870 for (i = 0; i < reorder_buf->buf_size; i++) {
2871 entry = &rxba->entries[i];
2872 ml_purge(&entry->frames);
2873 timerclear(&entry->reorder_time);
2874 }
2875
2876 reorder_buf->removed = 1;
2877 timeout_del(&reorder_buf->reorder_timer);
2878 timerclear(&rxba->last_rx);
2879 timeout_del(&rxba->session_timer);
2880 rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
2881}
2882
2883#define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
2884
2885void
2886iwx_rx_ba_session_expired(void *arg)
2887{
2888 struct iwx_rxba_data *rxba = arg;
2889 struct iwx_softc *sc = rxba->sc;
2890 struct ieee80211com *ic = &sc->sc_ic;
2891 struct ieee80211_node *ni = ic->ic_bss;
2892 struct timeval now, timeout, expiry;
2893 int s;
2894
2895 s = splnet();
2896 if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0 &&
2897 ic->ic_state == IEEE80211_S_RUN &&
2898 rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
2899 getmicrouptime(&now);
2900 USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
2901 timeradd(&rxba->last_rx, &timeout, &expiry);
2902 if (timercmp(&now, &expiry, <)) {
2903 timeout_add_usec(&rxba->session_timer, rxba->timeout);
2904 } else {
2905 ic->ic_stats.is_ht_rx_ba_timeout++;
2906 ieee80211_delba_request(ic, ni,
2907 IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
2908 }
2909 }
2910 splx(s);
2911}
2912
2913void
2914iwx_rx_bar_frame_release(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
2915 struct mbuf_list *ml)
2916{
2917 struct ieee80211com *ic = &sc->sc_ic;
2918 struct ieee80211_node *ni = ic->ic_bss;
2919 struct iwx_bar_frame_release *release = (void *)pkt->data;
2920 struct iwx_reorder_buffer *buf;
2921 struct iwx_rxba_data *rxba;
2922 unsigned int baid, nssn, sta_id, tid;
2923
2924 if (iwx_rx_packet_payload_len(pkt) < sizeof(*release))
2925 return;
2926
2927 baid = (le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_BAID_MASK) >>
2928 IWX_BAR_FRAME_RELEASE_BAID_SHIFT;
2929 if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
2930 baid >= nitems(sc->sc_rxba_data))
2931 return;
2932
2933 rxba = &sc->sc_rxba_data[baid];
2934 if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
2935 return;
2936
2937 tid = le32toh(release->sta_tid) & IWX_BAR_FRAME_RELEASE_TID_MASK;
2938 sta_id = (le32toh(release->sta_tid) &
2939 IWX_BAR_FRAME_RELEASE_STA_MASK) >> IWX_BAR_FRAME_RELEASE_STA_SHIFT;
2940 if (tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
2941 return;
2942
2943 nssn = le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_NSSN_MASK;
2944 buf = &rxba->reorder_buf;
2945 iwx_release_frames(sc, ni, rxba, buf, nssn, ml);
2946}
2947
2948void
2949iwx_reorder_timer_expired(void *arg)
2950{
2951 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2952 struct iwx_reorder_buffer *buf = arg;
2953 struct iwx_rxba_data *rxba = iwx_rxba_data_from_reorder_buf(buf);
2954 struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
2955 struct iwx_softc *sc = rxba->sc;
2956 struct ieee80211com *ic = &sc->sc_ic;
2957 struct ieee80211_node *ni = ic->ic_bss;
2958 int i, s;
2959 uint16_t sn = 0, index = 0;
2960 int expired = 0;
2961 int cont = 0;
2962 struct timeval now, timeout, expiry;
2963
2964 if (!buf->num_stored || buf->removed)
2965 return;
2966
2967 s = splnet();
2968 getmicrouptime(&now);
2969 USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
2970
2971 for (i = 0; i < buf->buf_size ; i++) {
2972 index = (buf->head_sn + i) % buf->buf_size;
2973
2974 if (ml_empty(&entries[index].frames)) {
2975 /*
2976 * If there is a hole and the next frame didn't expire
2977 * we want to break and not advance SN.
2978 */
2979 cont = 0;
2980 continue;
2981 }
2982 timeradd(&entries[index].reorder_time, &timeout, &expiry);
2983 if (!cont && timercmp(&now, &expiry, <))
2984 break;
2985
2986 expired = 1;
2987 /* continue until next hole after this expired frame */
2988 cont = 1;
2989 sn = (buf->head_sn + (i + 1)) & 0xfff;
2990 }
2991
2992 if (expired) {
2993 /* SN is set to the last expired frame + 1 */
2994 iwx_release_frames(sc, ni, rxba, buf, sn, &ml);
2995 if_input(&sc->sc_ic.ic_if, &ml);
2996 ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
2997 } else {
2998 /*
2999 * If no frame expired and there are stored frames, index is now
3000 * pointing to the first unexpired frame - modify reorder timeout
3001 * accordingly.
3002 */
3003 timeout_add_usec(&buf->reorder_timer,
3004 RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3005 }
3006
3007 splx(s);
3008}
3009
3010#define IWX_MAX_RX_BA_SESSIONS 16
3011
3012void
3013iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3014 uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3015{
3016 struct ieee80211com *ic = &sc->sc_ic;
3017 struct iwx_add_sta_cmd cmd;
3018 struct iwx_node *in = (void *)ni;
3019 int err, s;
3020 uint32_t status;
3021 struct iwx_rxba_data *rxba = NULL;
3022 uint8_t baid = 0;
3023
3024 s = splnet();
3025
3026 if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
3027 ieee80211_addba_req_refuse(ic, ni, tid);
3028 splx(s);
3029 return;
3030 }
3031
3032 memset(&cmd, 0, sizeof(cmd));
3033
3034 cmd.sta_id = IWX_STATION_ID;
3035 cmd.mac_id_n_color
3036 = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3037 cmd.add_modify = IWX_STA_MODE_MODIFY;
3038
3039 if (start) {
3040 cmd.add_immediate_ba_tid = (uint8_t)tid;
3041 cmd.add_immediate_ba_ssn = htole16(ssn);
3042 cmd.rx_ba_window = htole16(winsize);
3043 } else {
3044 cmd.remove_immediate_ba_tid = (uint8_t)tid;
3045 }
3046 cmd.modify_mask = start ? IWX_STA_MODIFY_ADD_BA_TID :
3047 IWX_STA_MODIFY_REMOVE_BA_TID;
3048
3049 status = IWX_ADD_STA_SUCCESS;
3050 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
3051 &status);
3052
3053 if (err || (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS) {
3054 if (start)
3055 ieee80211_addba_req_refuse(ic, ni, tid);
3056 splx(s);
3057 return;
3058 }
3059
3060 /* Deaggregation is done in hardware. */
3061 if (start) {
3062 if (!(status & IWX_ADD_STA_BAID_VALID_MASK)) {
3063 ieee80211_addba_req_refuse(ic, ni, tid);
3064 splx(s);
3065 return;
3066 }
3067 baid = (status & IWX_ADD_STA_BAID_MASK) >>
3068 IWX_ADD_STA_BAID_SHIFT;
3069 if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3070 baid >= nitems(sc->sc_rxba_data)) {
3071 ieee80211_addba_req_refuse(ic, ni, tid);
3072 splx(s);
3073 return;
3074 }
3075 rxba = &sc->sc_rxba_data[baid];
3076 if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
3077 ieee80211_addba_req_refuse(ic, ni, tid);
3078 splx(s);
3079 return;
3080 }
3081 rxba->sta_id = IWX_STATION_ID;
3082 rxba->tid = tid;
3083 rxba->baid = baid;
3084 rxba->timeout = timeout_val;
3085 getmicrouptime(&rxba->last_rx);
3086 iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
3087 winsize);
3088 if (timeout_val != 0) {
3089 struct ieee80211_rx_ba *ba;
3090 timeout_add_usec(&rxba->session_timer,
3091 timeout_val);
3092 /* XXX disable net80211's BA timeout handler */
3093 ba = &ni->ni_rx_ba[tid];
3094 ba->ba_timeout_val = 0;
3095 }
3096 } else {
3097 int i;
3098 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3099 rxba = &sc->sc_rxba_data[i];
3100 if (rxba->baid ==
3101 IWX_RX_REORDER_DATA_INVALID_BAID)
3102 continue;
3103 if (rxba->tid != tid)
3104 continue;
3105 iwx_clear_reorder_buffer(sc, rxba);
3106 break;
3107 }
3108 }
3109
3110 if (start) {
3111 sc->sc_rx_ba_sessions++;
3112 ieee80211_addba_req_accept(ic, ni, tid);
3113 } else if (sc->sc_rx_ba_sessions > 0)
3114 sc->sc_rx_ba_sessions--;
3115
3116 splx(s);
3117}
3118
3119void
3120iwx_mac_ctxt_task(void *arg)
3121{
3122 struct iwx_softc *sc = arg;
3123 struct ieee80211com *ic = &sc->sc_ic;
3124 struct iwx_node *in = (void *)ic->ic_bss;
3125 int err, s = splnet();
3126
3127 if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3128 ic->ic_state != IEEE80211_S_RUN) {
3129 refcnt_rele_wake(&sc->task_refs);
3130 splx(s);
3131 return;
3132 }
3133
3134 err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
3135 if (err)
3136 printf("%s: failed to update MAC\n", DEVNAME(sc));
3137
3138 refcnt_rele_wake(&sc->task_refs);
3139 splx(s);
3140}
3141
3142void
3143iwx_phy_ctxt_task(void *arg)
3144{
3145 struct iwx_softc *sc = arg;
3146 struct ieee80211com *ic = &sc->sc_ic;
3147 struct iwx_node *in = (void *)ic->ic_bss;
3148 struct ieee80211_node *ni = &in->in_ni;
3149 uint8_t chains, sco;
3150 int err, s = splnet();
3151
3152 if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3153 ic->ic_state != IEEE80211_S_RUN ||
3154 in->in_phyctxt == NULL) {
3155 refcnt_rele_wake(&sc->task_refs);
3156 splx(s);
3157 return;
3158 }
3159
3160 chains = iwx_mimo_enabled(sc) ? 2 : 1;
3161 if (ieee80211_node_supports_ht_chan40(ni))
3162 sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3163 else
3164 sco = IEEE80211_HTOP0_SCO_SCN;
3165 if (in->in_phyctxt->sco != sco) {
3166 err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
3167 in->in_phyctxt->channel, chains, chains, 0, sco);
3168 if (err)
3169 printf("%s: failed to update PHY\n", DEVNAME(sc));
3170 }
3171
3172 refcnt_rele_wake(&sc->task_refs);
3173 splx(s);
3174}
3175
3176void
3177iwx_updatechan(struct ieee80211com *ic)
3178{
3179 struct iwx_softc *sc = ic->ic_softc;
3180
3181 if (ic->ic_state == IEEE80211_S_RUN &&
3182 !task_pending(&sc->newstate_task))
3183 iwx_add_task(sc, systq, &sc->phy_ctxt_task);
3184}
3185
3186void
3187iwx_updateprot(struct ieee80211com *ic)
3188{
3189 struct iwx_softc *sc = ic->ic_softc;
3190
3191 if (ic->ic_state == IEEE80211_S_RUN &&
3192 !task_pending(&sc->newstate_task))
3193 iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3194}
3195
3196void
3197iwx_updateslot(struct ieee80211com *ic)
3198{
3199 struct iwx_softc *sc = ic->ic_softc;
3200
3201 if (ic->ic_state == IEEE80211_S_RUN &&
3202 !task_pending(&sc->newstate_task))
3203 iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3204}
3205
3206void
3207iwx_updateedca(struct ieee80211com *ic)
3208{
3209 struct iwx_softc *sc = ic->ic_softc;
3210
3211 if (ic->ic_state == IEEE80211_S_RUN &&
3212 !task_pending(&sc->newstate_task))
3213 iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3214}
3215
3216void
3217iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
3218 uint8_t tid)
3219{
3220 struct ieee80211com *ic = &sc->sc_ic;
3221 struct ieee80211_tx_ba *ba;
3222 int err, qid;
3223 struct iwx_tx_ring *ring;
3224
3225 /* Ensure we can map this TID to an aggregation queue. */
3226 if (tid >= IWX_MAX_TID_COUNT)
3227 return;
3228
3229 ba = &ni->ni_tx_ba[tid];
3230 if (ba->ba_state != IEEE80211_BA_REQUESTED)
3231 return;
3232
3233 qid = sc->aggqid[tid];
3234 if (qid == 0) {
3235 /* Firmware should pick the next unused Tx queue. */
3236 qid = fls(sc->qenablemsk);
3237 }
3238
3239 /*
3240 * Simply enable the queue.
3241 * Firmware handles Tx Ba session setup and teardown.
3242 */
3243 if ((sc->qenablemsk & (1 << qid)) == 0) {
3244 if (!iwx_nic_lock(sc)) {
3245 ieee80211_addba_resp_refuse(ic, ni, tid,
3246 IEEE80211_STATUS_UNSPECIFIED);
3247 return;
3248 }
3249 err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
3250 IWX_TX_RING_COUNT);
3251 iwx_nic_unlock(sc);
3252 if (err) {
3253 printf("%s: could not enable Tx queue %d "
3254 "(error %d)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), qid, err);
3255 ieee80211_addba_resp_refuse(ic, ni, tid,
3256 IEEE80211_STATUS_UNSPECIFIED);
3257 return;
3258 }
3259
3260 ba->ba_winstart = 0;
3261 } else
3262 ba->ba_winstart = ni->ni_qos_txseqs[tid];
3263
3264 ba->ba_winend = (ba->ba_winstart + ba->ba_winsize - 1) & 0xfff;
3265
3266 ring = &sc->txq[qid];
3267 ba->ba_timeout_val = 0;
3268 ieee80211_addba_resp_accept(ic, ni, tid);
3269 sc->aggqid[tid] = qid;
3270}
3271
3272void
3273iwx_ba_task(void *arg)
3274{
3275 struct iwx_softc *sc = arg;
3276 struct ieee80211com *ic = &sc->sc_ic;
3277 struct ieee80211_node *ni = ic->ic_bss;
3278 int s = splnet();
3279 int tid;
3280
3281 for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3282 if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3283 break;
3284 if (sc->ba_rx.start_tidmask & (1 << tid)) {
3285 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3286 iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3287 ba->ba_winsize, ba->ba_timeout_val, 1);
3288 sc->ba_rx.start_tidmask &= ~(1 << tid);
3289 } else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3290 iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3291 sc->ba_rx.stop_tidmask &= ~(1 << tid);
3292 }
3293 }
3294
3295 for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3296 if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3297 break;
3298 if (sc->ba_tx.start_tidmask & (1 << tid)) {
3299 iwx_sta_tx_agg_start(sc, ni, tid);
3300 sc->ba_tx.start_tidmask &= ~(1 << tid);
3301 }
3302 }
3303
3304 refcnt_rele_wake(&sc->task_refs);
3305 splx(s);
3306}
3307
3308/*
3309 * This function is called by upper layer when an ADDBA request is received
3310 * from another STA and before the ADDBA response is sent.
3311 */
3312int
3313iwx_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3314 uint8_t tid)
3315{
3316 struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3317
3318 if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
3319 tid >= IWX_MAX_TID_COUNT)
3320 return ENOSPC;
3321
3322 if (sc->ba_rx.start_tidmask & (1 << tid))
3323 return EBUSY;
3324
3325 sc->ba_rx.start_tidmask |= (1 << tid);
3326 iwx_add_task(sc, systq, &sc->ba_task);
3327
3328 return EBUSY;
3329}
3330
3331/*
3332 * This function is called by upper layer on teardown of an HT-immediate
3333 * Block Ack agreement (eg. upon receipt of a DELBA frame).
3334 */
3335void
3336iwx_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3337 uint8_t tid)
3338{
3339 struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3340
3341 if (tid >= IWX_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3342 return;
3343
3344 sc->ba_rx.stop_tidmask = (1 << tid);
3345 iwx_add_task(sc, systq, &sc->ba_task);
3346}
3347
3348int
3349iwx_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3350 uint8_t tid)
3351{
3352 struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3353 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3354
3355 /*
3356 * Require a firmware version which uses an internal AUX queue.
3357 * The value of IWX_FIRST_AGG_TX_QUEUE would be incorrect otherwise.
3358 */
3359 if (sc->first_data_qid != IWX_DQA_CMD_QUEUE + 1)
3360 return ENOTSUP;
3361
3362 /* Ensure we can map this TID to an aggregation queue. */
3363 if (tid >= IWX_MAX_TID_COUNT)
3364 return EINVAL;
3365
3366 /* We only support a fixed Tx aggregation window size, for now. */
3367 if (ba->ba_winsize != IWX_FRAME_LIMIT)
3368 return ENOTSUP;
3369
3370 /* Is firmware already using an agg queue with this TID? */
3371 if (sc->aggqid[tid] != 0)
3372 return ENOSPC;
3373
3374 /* Are we already processing an ADDBA request? */
3375 if (sc->ba_tx.start_tidmask & (1 << tid))
3376 return EBUSY;
3377
3378 sc->ba_tx.start_tidmask |= (1 << tid);
3379 iwx_add_task(sc, systq, &sc->ba_task);
3380
3381 return EBUSY;
3382}
3383
3384/* Read the mac address from WFMP registers. */
3385int
3386iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
3387{
3388 const uint8_t *hw_addr;
3389 uint32_t mac_addr0, mac_addr1;
3390
3391 if (!iwx_nic_lock(sc))
3392 return EBUSY;
3393
3394 mac_addr0 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_0));
3395 mac_addr1 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_1));
3396
3397 hw_addr = (const uint8_t *)&mac_addr0;
3398 data->hw_addr[0] = hw_addr[3];
3399 data->hw_addr[1] = hw_addr[2];
3400 data->hw_addr[2] = hw_addr[1];
3401 data->hw_addr[3] = hw_addr[0];
3402
3403 hw_addr = (const uint8_t *)&mac_addr1;
3404 data->hw_addr[4] = hw_addr[1];
3405 data->hw_addr[5] = hw_addr[0];
3406
3407 iwx_nic_unlock(sc);
3408 return 0;
3409}
3410
3411int
3412iwx_is_valid_mac_addr(const uint8_t *addr)
3413{
3414 static const uint8_t reserved_mac[] = {
3415 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3416 };
3417
3418 return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3419 memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3420 memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3421 !ETHER_IS_MULTICAST(addr));
3422}
3423
3424int
3425iwx_nvm_get(struct iwx_softc *sc)
3426{
3427 struct iwx_nvm_get_info cmd = {};
3428 struct iwx_nvm_data *nvm = &sc->sc_nvm;
3429 struct iwx_host_cmd hcmd = {
3430 .flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
3431 .data = { &cmd, },
3432 .len = { sizeof(cmd) },
3433 .id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3434 IWX_NVM_GET_INFO)
3435 };
3436 int err;
3437 uint32_t mac_flags;
3438 /*
3439 * All the values in iwx_nvm_get_info_rsp v4 are the same as
3440 * in v3, except for the channel profile part of the
3441 * regulatory. So we can just access the new struct, with the
3442 * exception of the latter.
3443 */
3444 struct iwx_nvm_get_info_rsp *rsp;
3445 struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
3446 int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
3447 size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
3448
3449 hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
3450 err = iwx_send_cmd(sc, &hcmd);
3451 if (err)
3452 return err;
3453
3454 if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
3455 err = EIO;
3456 goto out;
3457 }
3458
3459 memset(nvm, 0, sizeof(*nvm));
3460
3461 iwx_set_mac_addr_from_csr(sc, nvm);
3462 if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
3463 printf("%s: no valid mac address was found\n", DEVNAME(sc));
3464 err = EINVAL;
3465 goto out;
3466 }
3467
3468 rsp = (void *)hcmd.resp_pkt->data;
3469
3470 /* Initialize general data */
3471 nvm->nvm_version = le16toh(rsp->general.nvm_version);
3472 nvm->n_hw_addrs = rsp->general.n_hw_addrs;
3473
3474 /* Initialize MAC sku data */
3475 mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
3476 nvm->sku_cap_11ac_enable =
3477 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
3478 nvm->sku_cap_11n_enable =
3479 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
3480 nvm->sku_cap_11ax_enable =
3481 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
3482 nvm->sku_cap_band_24GHz_enable =
3483 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
3484 nvm->sku_cap_band_52GHz_enable =
3485 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
3486 nvm->sku_cap_mimo_disable =
3487 !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
3488
3489 /* Initialize PHY sku data */
3490 nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
3491 nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);
3492
3493 if (le32toh(rsp->regulatory.lar_enabled) &&
3494 isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
3495 nvm->lar_enabled = 1;
3496 }
3497
3498 if (v4) {
3499 iwx_init_channel_map(sc, NULL,
3500 rsp->regulatory.channel_profile, IWX_NUM_CHANNELS);
3501 } else {
3502 rsp_v3 = (void *)rsp;
3503 iwx_init_channel_map(sc, rsp_v3->regulatory.channel_profile,
3504 NULL, IWX_NUM_CHANNELS_V1);
3505 }
3506out:
3507 iwx_free_resp(sc, &hcmd);
3508 return err;
3509}
3510
3511int
3512iwx_load_firmware(struct iwx_softc *sc)
3513{
3514 struct iwx_fw_sects *fws;
3515 int err;
3516
3517 splassert(IPL_NET);
3518
3519 sc->sc_uc.uc_intr = 0;
3520 sc->sc_uc.uc_ok = 0;
3521
3522 fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
3523 err = iwx_ctxt_info_init(sc, fws);
3524 if (err) {
3525 printf("%s: could not init context info\n", DEVNAME(sc));
3526 return err;
3527 }
3528
3529 /* wait for the firmware to load */
3530 err = tsleep_nsec(&sc->sc_uc, 0, "iwxuc", SEC_TO_NSEC(1));
3531 if (err || !sc->sc_uc.uc_ok) {
3532 printf("%s: could not load firmware, %d\n", DEVNAME(sc), err);
3533 iwx_ctxt_info_free_paging(sc);
3534 }
3535
3536 iwx_ctxt_info_free_fw_img(sc);
3537
3538 if (!sc->sc_uc.uc_ok)
3539 return EINVAL;
3540
3541 return err;
3542}
3543
3544int
3545iwx_start_fw(struct iwx_softc *sc)
3546{
3547 int err;
3548
3549 IWX_WRITE(sc, IWX_CSR_INT, ~0);
3550
3551 iwx_disable_interrupts(sc);
3552
3553 /* make sure rfkill handshake bits are cleared */
3554 IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
3555 IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
3556 IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3557
3558 /* clear (again), then enable firmware load interrupt */
3559 IWX_WRITE(sc, IWX_CSR_INT, ~0);
3560
3561 err = iwx_nic_init(sc);
3562 if (err) {
3563 printf("%s: unable to init nic\n", DEVNAME(sc));
3564 return err;
3565 }
3566
3567 iwx_enable_fwload_interrupt(sc);
3568
3569 return iwx_load_firmware(sc);
3570}
3571
3572int
3573iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
3574{
3575 struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
3576 .valid = htole32(valid_tx_ant),
3577 };
3578
3579 return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
3580 0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3581}
3582
3583int
3584iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
3585{
3586 struct iwx_phy_cfg_cmd phy_cfg_cmd;
3587
3588 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3589 phy_cfg_cmd.calib_control.event_trigger =
3590 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
3591 phy_cfg_cmd.calib_control.flow_trigger =
3592 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
3593
3594 return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
3595 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3596}
3597
3598int
3599iwx_send_dqa_cmd(struct iwx_softc *sc)
3600{
3601 struct iwx_dqa_enable_cmd dqa_cmd = {
3602 .cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
3603 };
3604 uint32_t cmd_id;
3605
3606 cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
3607 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
3608}
3609
3610int
3611iwx_load_ucode_wait_alive(struct iwx_softc *sc)
3612{
3613 int err;
3614
3615 err = iwx_read_firmware(sc);
3616 if (err)
3617 return err;
3618
3619 err = iwx_start_fw(sc);
3620 if (err)
3621 return err;
3622
3623 iwx_post_alive(sc);
3624
3625 return 0;
3626}
3627
3628int
3629iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
3630{
3631 const int wait_flags = IWX_INIT_COMPLETE;
3632 struct iwx_nvm_access_complete_cmd nvm_complete = {};
3633 struct iwx_init_extended_cfg_cmd init_cfg = {
3634 .init_flags = htole32(IWX_INIT_NVM),
3635 };
3636 int err, s;
3637
3638 if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
3639 printf("%s: radio is disabled by hardware switch\n",
3640 DEVNAME(sc));
3641 return EPERM;
3642 }
3643
3644 s = splnet();
3645 sc->sc_init_complete = 0;
3646 err = iwx_load_ucode_wait_alive(sc);
3647 if (err) {
3648 printf("%s: failed to load init firmware\n", DEVNAME(sc));
3649 splx(s);
3650 return err;
3651 }
3652
3653 /*
3654 * Send init config command to mark that we are sending NVM
3655 * access commands
3656 */
3657 err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
3658 IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
3659 if (err) {
3660 splx(s);
3661 return err;
3662 }
3663
3664 err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3665 IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
3666 if (err) {
3667 splx(s);
3668 return err;
3669 }
3670
3671 /* Wait for the init complete notification from the firmware. */
3672 while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3673 err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
3674 SEC_TO_NSEC(2));
3675 if (err) {
3676 splx(s);
3677 return err;
3678 }
3679 }
3680 splx(s);
3681 if (readnvm) {
3682 err = iwx_nvm_get(sc);
3683 if (err) {
3684 printf("%s: failed to read nvm\n", DEVNAME(sc));
3685 return err;
3686 }
3687 if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
3688 IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
3689 sc->sc_nvm.hw_addr);
3690
3691 }
3692 return 0;
3693}
3694
3695int
3696iwx_config_ltr(struct iwx_softc *sc)
3697{
3698 struct iwx_ltr_config_cmd cmd = {
3699 .flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
3700 };
3701
3702 if (!sc->sc_ltr_enabled)
3703 return 0;
3704
3705 return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3706}
3707
3708void
3709iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx)
3710{
3711 struct iwx_rx_data *data = &ring->data[idx];
3712
3713 ((uint64_t *)ring->desc)[idx] =
3714 htole64(data->map->dm_segs[0].ds_addr | (idx & 0x0fff));
3715 bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
3716 idx * sizeof(uint64_t), sizeof(uint64_t),
3717 BUS_DMASYNC_PREWRITE);
3718}
3719
3720int
3721iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
3722{
3723 struct iwx_rx_ring *ring = &sc->rxq;
3724 struct iwx_rx_data *data = &ring->data[idx];
3725 struct mbuf *m;
3726 int err;
3727 int fatal = 0;
3728
3729 m = m_gethdr(M_DONTWAIT, MT_DATA);
3730 if (m == NULL)
3731 return ENOBUFS;
3732
3733 if (size <= MCLBYTES) {
3734 MCLGET(m, M_DONTWAIT);
3735 } else {
3736 MCLGETL(m, M_DONTWAIT, IWX_RBUF_SIZE);
3737 }
3738 if ((m->m_flags & M_EXT) == 0) {
3739 m_freem(m);
3740 return ENOBUFS;
3741 }
3742
3743 if (data->m != NULL) {
3744 bus_dmamap_unload(sc->sc_dmat, data->map);
3745 fatal = 1;
3746 }
3747
3748 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3749 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3750 BUS_DMA_READ|BUS_DMA_NOWAIT);
3751 if (err) {
3752 /* XXX */
3753 if (fatal)
3754 panic("%s: could not load RX mbuf", DEVNAME(sc));
3755 m_freem(m);
3756 return err;
3757 }
3758 data->m = m;
3759 bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3760
3761 /* Update RX descriptor. */
3762 iwx_update_rx_desc(sc, ring, idx);
3763
3764 return 0;
3765}
3766
3767int
3768iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
3769 struct iwx_rx_mpdu_desc *desc)
3770{
3771 int energy_a, energy_b;
3772
3773 energy_a = desc->v1.energy_a;
3774 energy_b = desc->v1.energy_b;
3775 energy_a = energy_a ? -energy_a : -256;
3776 energy_b = energy_b ? -energy_b : -256;
3777 return MAX(energy_a, energy_b);
3778}
3779
3780void
3781iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3782 struct iwx_rx_data *data)
3783{
3784 struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
3785
3786 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3787 sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3788
3789 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3790}
3791
3792/*
3793 * Retrieve the average noise (in dBm) among receivers.
3794 */
3795int
3796iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
3797{
3798 int i, total, nbant, noise;
3799
3800 total = nbant = noise = 0;
3801 for (i = 0; i < 3; i++) {
3802 noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
3803 if (noise) {
3804 total += noise;
3805 nbant++;
3806 }
3807 }
3808
3809 /* There should be at least one antenna but check anyway. */
3810 return (nbant == 0) ? -127 : (total / nbant) - 107;
3811}
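
A worked example of the averaging above, with assumed readings:

/*
 * beacon_silence_rssi = { 65, 60, 0 }  ->  total = 125, nbant = 2
 * returned noise = 125 / 2 - 107 = -45 (dBm)
 * no reporting antennas               ->  -127 (dBm) fallback
 */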
3812
3813int
3814iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
3815 struct ieee80211_rxinfo *rxi)
3816{
3817 struct ieee80211com *ic = &sc->sc_ic;
3818 struct ieee80211_key *k;
3819 struct ieee80211_frame *wh;
3820 uint64_t pn, *prsc;
3821 uint8_t *ivp;
3822 uint8_t tid;
3823 int hdrlen, hasqos;
3824
3825 wh = mtod(m, struct ieee80211_frame *);
3826 hdrlen = ieee80211_get_hdrlen(wh);
3827 ivp = (uint8_t *)wh + hdrlen;
3828
3829 /* find key for decryption */
3830 k = ieee80211_get_rxkey(ic, m, ni);
3831 if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
3832 return 1;
3833
3834 /* Check that the ExtIV bit is set. */
3835 if (!(ivp[3] & IEEE80211_WEP_EXTIV))
3836 return 1;
3837
3838 hasqos = ieee80211_has_qos(wh);
3839 tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
3840 prsc = &k->k_rsc[tid];
3841
3842 /* Extract the 48-bit PN from the CCMP header. */
3843 pn = (uint64_t)ivp[0] |
3844 (uint64_t)ivp[1] << 8 |
3845 (uint64_t)ivp[4] << 16 |
3846 (uint64_t)ivp[5] << 24 |
3847 (uint64_t)ivp[6] << 32 |
3848 (uint64_t)ivp[7] << 40;
3849 if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
3850 if (pn < *prsc) {
3851 ic->ic_stats.is_ccmp_replays++;
3852 return 1;
3853 }
3854 } else if (pn <= *prsc) {
3855 ic->ic_stats.is_ccmp_replays++;
3856 return 1;
3857 }
3858 /* Last seen packet number is updated in ieee80211_inputm(). */
3859
3860 /*
3861 * Some firmware versions strip the MIC, and some don't. It is not
3862 * clear which of the capability flags could tell us what to expect.
3863 * For now, keep things simple and just leave the MIC in place if
3864 * it is present.
3865 *
3866 * The IV will be stripped by ieee80211_inputm().
3867 */
3868 return 0;
3869}
3870
3871int
3872iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
3873 struct ieee80211_rxinfo *rxi)
3874{
3875 struct ieee80211com *ic = &sc->sc_ic;
3876 struct ifnet *ifp = IC2IFP(ic);
3877 struct ieee80211_frame *wh;
3878 struct ieee80211_node *ni;
3879 int ret = 0;
3880 uint8_t type, subtype;
3881
3882 wh = mtod(m, struct ieee80211_frame *);
3883
3884 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3885 if (type == IEEE80211_FC0_TYPE_CTL)
3886 return 0;
3887
3888 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3889 if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
3890 return 0;
3891
3892 ni = ieee80211_find_rxnode(ic, wh);
3893 /* Handle hardware decryption. */
3894 if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
3895 && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
3896 (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
3897 ((!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3898 ni->ni_rsncipher == IEEE80211_CIPHER_CCMP) ||
3899 (IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3900 ni->ni_rsngroupcipher == IEEE80211_CIPHER_CCMP))) {
3901 if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
3902 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
3903 ic->ic_stats.is_ccmp_dec_errs++;
3904 ret = 1;
3905 goto out;
3906 }
3907 /* Check whether decryption was successful or not. */
3908 if ((rx_pkt_status &
3909 (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
3910 IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
3911 (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
3912 IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
3913 ic->ic_stats.is_ccmp_dec_errs++;
3914 ret = 1;
3915 goto out;
3916 }
3917 rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
3918 }
3919out:
3920 if (ret)
3921 ifp->if_ierrors++;
3922 ieee80211_release_node(ic, ni);
3923 return ret;
3924}
3925
3926void
3927iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
3928 uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
3929 uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
3930 struct mbuf_list *ml)
3931{
3932 struct ieee80211com *ic = &sc->sc_ic;
3933 struct ifnet *ifp = IC2IFP(ic);
3934 struct ieee80211_frame *wh;
3935 struct ieee80211_node *ni;
3936 struct ieee80211_channel *bss_chan;
[19] 'bss_chan' declared without an initial value
3937 uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
3938
3939 if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
[19.1] 'chanidx' is >= 0
[20] Taking false branch
3940 chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
3941
3942 wh = mtod(m, struct ieee80211_frame *);
3943 ni = ieee80211_find_rxnode(ic, wh);
3944 if (ni == ic->ic_bss) {
[21] Assuming 'ni' is not equal to field 'ic_bss'
[22] Taking false branch
3945 /*
3946 * We may switch ic_bss's channel during scans.
3947 * Record the current channel so we can restore it later.
3948 */
3949 bss_chan = ni->ni_chan;
3950 IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
3951 }
3952 ni->ni_chan = &ic->ic_channels[chanidx];
3953
3954 if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
3955 iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
3956 ifp->if_ierrors++;
3957 m_freem(m);
3958 ieee80211_release_node(ic, ni);
3959 return;
3960 }
3961
3962#if NBPFILTER > 0
3963 if (sc->sc_drvbpf != NULL) {
[23] Assuming field 'sc_drvbpf' is equal to NULL
[24] Taking false branch
3964 struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
3965 uint16_t chan_flags;
3966
3967 tap->wr_flags = 0;
3968 if (is_shortpre)
3969 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3970 tap->wr_chan_freq =
3971 htole16(ic->ic_channels[chanidx].ic_freq);
3972 chan_flags = ic->ic_channels[chanidx].ic_flags;
3973 if (ic->ic_curmode != IEEE80211_MODE_11N)
3974 chan_flags &= ~IEEE80211_CHAN_HT;
3975 tap->wr_chan_flags = htole16(chan_flags);
3976 tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
3977 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3978 tap->wr_tsft = device_timestamp;
3979 if (rate_n_flags & IWX_RATE_MCS_HT_MSK) {
3980 uint8_t mcs = (rate_n_flags &
3981 (IWX_RATE_HT_MCS_RATE_CODE_MSK |
3982 IWX_RATE_HT_MCS_NSS_MSK));
3983 tap->wr_rate = (0x80 | mcs);
3984 } else {
3985 uint8_t rate = (rate_n_flags &
3986 IWX_RATE_LEGACY_RATE_MSK);
3987 switch (rate) {
3988 /* CCK rates. */
3989 case 10: tap->wr_rate = 2; break;
3990 case 20: tap->wr_rate = 4; break;
3991 case 55: tap->wr_rate = 11; break;
3992 case 110: tap->wr_rate = 22; break;
3993 /* OFDM rates. */
3994 case 0xd: tap->wr_rate = 12; break;
3995 case 0xf: tap->wr_rate = 18; break;
3996 case 0x5: tap->wr_rate = 24; break;
3997 case 0x7: tap->wr_rate = 36; break;
3998 case 0x9: tap->wr_rate = 48; break;
3999 case 0xb: tap->wr_rate = 72; break;
4000 case 0x1: tap->wr_rate = 96; break;
4001 case 0x3: tap->wr_rate = 108; break;
4002 /* Unknown rate: should not happen. */
4003 default: tap->wr_rate = 0;
4004 }
4005 }
4006
4007 bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4008 m, BPF_DIRECTION_IN);
4009 }
4010#endif
4011 ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4012 /*
4013 * ieee80211_inputm() might have changed our BSS.
4014 * Restore ic_bss's channel if we are still in the same BSS.
4015 */
4016 if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
[25] Assuming 'ni' is equal to field 'ic_bss'
[26] Assuming the condition is true
[27] Taking true branch
4017 ni->ni_chan = bss_chan;
[28] Assigned value is garbage or undefined
4018 ieee80211_release_node(ic, ni);
4019}
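
The path above is the reported defect: step [21] assumes ni != ic->ic_bss on entry, so bss_chan is never written, while ieee80211_inputm() may change ic->ic_bss, letting step [25] assume ni == ic->ic_bss afterwards; the restore at line 4017 then reads the uninitialized bss_chan. A minimal sketch of one way to avoid the garbage read, assuming no other constraints (an illustration, not necessarily the fix committed upstream):

/* Initialize bss_chan and only restore a channel that was saved. */
struct ieee80211_channel *bss_chan = NULL;
uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };

if (ni == ic->ic_bss) {
	bss_chan = ni->ni_chan;	/* set only on this path */
	IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
}
/* ... ieee80211_inputm() may change ic->ic_bss ... */
if (bss_chan != NULL && ni == ic->ic_bss &&
    IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
	ni->ni_chan = bss_chan;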
4020
4021/*
4022 * Drop duplicate 802.11 retransmissions
4023 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4024 * and handle pseudo-duplicate frames which result from deaggregation
4025 * of A-MSDU frames in hardware.
4026 */
4027int
4028iwx_detect_duplicate(struct iwx_softc *sc, struct mbuf *m,
4029 struct iwx_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4030{
4031 struct ieee80211com *ic = &sc->sc_ic;
4032 struct iwx_node *in = (void *)ic->ic_bss;
4033 struct iwx_rxq_dup_data *dup_data = &in->dup_data;
4034 uint8_t tid = IWX_MAX_TID_COUNT, subframe_idx;
4035 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4036 uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4037 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4038 int hasqos = ieee80211_has_qos(wh);
4039 uint16_t seq;
4040
4041 if (type == IEEE80211_FC0_TYPE_CTL ||
4042 (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4043 IEEE80211_IS_MULTICAST(wh->i_addr1))
4044 return 0;
4045
4046 if (hasqos) {
4047 tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4048 if (tid > IWX_MAX_TID_COUNT)
4049 tid = IWX_MAX_TID_COUNT;
4050 }
4051
4052 /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
4053 subframe_idx = desc->amsdu_info &
4054 IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4055
4056 seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4057 if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4058 dup_data->last_seq[tid] == seq &&
4059 dup_data->last_sub_frame[tid] >= subframe_idx)
4060 return 1;
4061
4062 /*
4063 * Allow the same frame sequence number for all A-MSDU subframes
4064 * following the first subframe.
4065 * Otherwise these subframes would be discarded as replays.
4066 */
4067 if (dup_data->last_seq[tid] == seq &&
4068 subframe_idx > dup_data->last_sub_frame[tid] &&
4069 (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU)) {
4070 rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4071 }
4072
4073 dup_data->last_seq[tid] = seq;
4074 dup_data->last_sub_frame[tid] = subframe_idx;
4075
4076 return 0;
4077}
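
A worked example of the rules above, with assumed per-TID state dup_data->last_seq[0] == 100 and dup_data->last_sub_frame[0] == 2:

/*
 * retry bit set, seq 100, subframe 1 -> return 1 (dropped as duplicate)
 * seq 100, subframe 3, A-MSDU flag   -> accepted; IEEE80211_RXI_SAME_SEQ
 *                                       set so it is not discarded as a replay
 * seq 101, subframe 0                -> accepted; state updated to seq 101
 */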
4078
4079/*
4080 * Returns true if sn2 - buffer_size < sn1 < sn2.
4081 * To be used only in order to compare reorder buffer head with NSSN.
4082 * We fully trust NSSN unless it is behind us due to reorder timeout.
4083 * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
4084 */
4085int
4086iwx_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
4087{
4088 return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
4089}
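
A worked example of the window predicate, with assumed values and buffer_size = 64 (sequence numbers are 12-bit, so the SEQ_LT() arithmetic wraps at 4096):

/*
 * iwx_is_sn_less(100, 130, 64)  -> 1  (130 - 64 < 100 < 130)
 * iwx_is_sn_less(60, 130, 64)   -> 0  (more than a full window behind,
 *                                      only possible after reorder timeout)
 * iwx_is_sn_less(4090, 10, 64)  -> 1  (wrap-around near 4095 is handled)
 */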
4090
4091void
4092iwx_release_frames(struct iwx_softc *sc, struct ieee80211_node *ni,
4093 struct iwx_rxba_data *rxba, struct iwx_reorder_buffer *reorder_buf,
4094 uint16_t nssn, struct mbuf_list *ml)
4095{
4096 struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
4097 uint16_t ssn = reorder_buf->head_sn;
4098
4099 /* ignore nssn smaller than head sn - this can happen due to timeout */
4100 if (iwx_is_sn_less(nssn, ssn, reorder_buf->buf_size))
4101 goto set_timer;
4102
4103 while (iwx_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
4104 int index = ssn % reorder_buf->buf_size;
4105 struct mbuf *m;
4106 int chanidx, is_shortpre;
4107 uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
4108 struct ieee80211_rxinfo *rxi;
4109
4110 /* This data is the same for all A-MSDU subframes. */
4111 chanidx = entries[index].chanidx;
4112 rx_pkt_status = entries[index].rx_pkt_status;
4113 is_shortpre = entries[index].is_shortpre;
4114 rate_n_flags = entries[index].rate_n_flags;
4115 device_timestamp = entries[index].device_timestamp;
4116 rxi = &entries[index].rxi;
4117
4118 /*
4119 * Empty the list. Will have more than one frame for A-MSDU.
4120 * Empty list is valid as well since nssn indicates frames were
4121 * received.
4122 */
4123 while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
4124 iwx_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
4125 rate_n_flags, device_timestamp, rxi, ml);
4126 reorder_buf->num_stored--;
4127
4128 /*
4129 * Allow the same frame sequence number and CCMP PN for
4130 * all A-MSDU subframes following the first subframe.
4131 * Otherwise they would be discarded as replays.
4132 */
4133 rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4134 rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4135 }
4136
4137 ssn = (ssn + 1) & 0xfff;
4138 }
4139 reorder_buf->head_sn = nssn;
4140
4141set_timer:
4142 if (reorder_buf->num_stored && !reorder_buf->removed) {
4143 timeout_add_usec(&reorder_buf->reorder_timer,
4144 RX_REORDER_BUF_TIMEOUT_MQ_USEC);
4145 } else
4146 timeout_del(&reorder_buf->reorder_timer);
4147}
4148
4149int
4150iwx_oldsn_workaround(struct iwx_softc *sc, struct ieee80211_node *ni, int tid,
4151 struct iwx_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
4152{
4153 struct ieee80211com *ic = &sc->sc_ic;
4154
4155 if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
4156 /* we have a new (A-)MPDU ... */
4157
4158 /*
4159 * reset counter to 0 if we didn't have any oldsn in
4160 * the last A-MPDU (as detected by GP2 being identical)
4161 */
4162 if (!buffer->consec_oldsn_prev_drop)
4163 buffer->consec_oldsn_drops = 0;
4164
4165 /* either way, update our tracking state */
4166 buffer->consec_oldsn_ampdu_gp2 = gp2;
4167 } else if (buffer->consec_oldsn_prev_drop) {
4168 /*
4169 * tracking state didn't change, and we had an old SN
4170 * indication before - do nothing in this case, we
4171 * already noted this one down and are waiting for the
4172 * next A-MPDU (by GP2)
4173 */
4174 return 0;
4175 }
4176
4177 /* return unless this MPDU has old SN */
4178 if (!(reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN0x80000000))
4179 return 0;
4180
4181 /* update state */
4182 buffer->consec_oldsn_prev_drop = 1;
4183 buffer->consec_oldsn_drops++;
4184
4185 /* if limit is reached, send del BA and reset state */
4186 	if (buffer->consec_oldsn_drops == IWX_AMPDU_CONSEC_DROPS_DELBA) {
4187 ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
4188 0, tid);
4189 buffer->consec_oldsn_prev_drop = 0;
4190 buffer->consec_oldsn_drops = 0;
4191 return 1;
4192 }
4193
4194 return 0;
4195}
4196
4197/*
4198 * Handle re-ordering of frames which were de-aggregated in hardware.
4199 * Returns 1 if the MPDU was consumed (buffered or dropped).
4200 * Returns 0 if the MPDU should be passed to upper layer.
4201 */
4202int
4203iwx_rx_reorder(struct iwx_softc *sc, struct mbuf *m, int chanidx,
4204 struct iwx_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
4205 uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4206 struct mbuf_list *ml)
4207{
4208 struct ieee80211com *ic = &sc->sc_ic;
4209 struct ieee80211_frame *wh;
4210 struct ieee80211_node *ni;
4211 struct iwx_rxba_data *rxba;
4212 struct iwx_reorder_buffer *buffer;
4213 uint32_t reorder_data = le32toh(desc->reorder_data)((__uint32_t)(desc->reorder_data));
4214 int is_amsdu = (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU0x40);
4215 int last_subframe =
4216 (desc->amsdu_info & IWX_RX_MPDU_AMSDU_LAST_SUBFRAME0x80);
4217 uint8_t tid;
4218 uint8_t subframe_idx = (desc->amsdu_info &
4219 IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK0x7f);
4220 struct iwx_reorder_buf_entry *entries;
4221 int index;
4222 uint16_t nssn, sn;
4223 uint8_t baid, type, subtype;
4224 int hasqos;
4225
4226 wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
4227 hasqos = ieee80211_has_qos(wh);
4228 tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID0x000f : 0;
4229
4230 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK0x0c;
4231 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK0xf0;
4232
4233 /*
4234 * We are only interested in Block Ack requests and unicast QoS data.
4235 */
4236 if (IEEE80211_IS_MULTICAST(wh->i_addr1)(*(wh->i_addr1) & 0x01))
4237 return 0;
4238 if (hasqos) {
4239 if (subtype & IEEE80211_FC0_SUBTYPE_NODATA0x40)
4240 return 0;
4241 } else {
4242 if (type != IEEE80211_FC0_TYPE_CTL0x04 ||
4243 subtype != IEEE80211_FC0_SUBTYPE_BAR0x80)
4244 return 0;
4245 }
4246
4247 baid = (reorder_data & IWX_RX_MPDU_REORDER_BAID_MASK0x7f000000) >>
4248 IWX_RX_MPDU_REORDER_BAID_SHIFT24;
4249 if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
4250 baid >= nitems(sc->sc_rxba_data))
4251 return 0;
4252
4253 rxba = &sc->sc_rxba_data[baid];
4254 if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
4255 tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
4256 return 0;
4257
4258 if (rxba->timeout != 0)
4259 getmicrouptime(&rxba->last_rx);
4260
4261 /* Bypass A-MPDU re-ordering in net80211. */
4262 rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE0x00000002;
4263
4264 nssn = reorder_data & IWX_RX_MPDU_REORDER_NSSN_MASK0x00000fff;
4265 sn = (reorder_data & IWX_RX_MPDU_REORDER_SN_MASK0x00fff000) >>
4266 IWX_RX_MPDU_REORDER_SN_SHIFT12;
4267
4268 buffer = &rxba->reorder_buf;
4269 entries = &rxba->entries[0];
4270
4271 if (!buffer->valid) {
4272 if (reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN0x80000000)
4273 return 0;
4274 buffer->valid = 1;
4275 }
4276
4277 ni = ieee80211_find_rxnode(ic, wh);
4278 if (type == IEEE80211_FC0_TYPE_CTL0x04 &&
4279 subtype == IEEE80211_FC0_SUBTYPE_BAR0x80) {
4280 iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
4281 goto drop;
4282 }
4283
4284 /*
4285 * If there was a significant jump in the nssn - adjust.
4286 * If the SN is smaller than the NSSN it might need to first go into
4287 * the reorder buffer, in which case we just release up to it and the
4288 * rest of the function will take care of storing it and releasing up to
4289 * the nssn.
4290 */
4291 if (!iwx_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
4292 buffer->buf_size) ||
4293 !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
4294 uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
4295 ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
4296 iwx_release_frames(sc, ni, rxba, buffer, min_sn, ml);
4297 }
4298
4299 if (iwx_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
4300 device_timestamp)) {
4301 /* BA session will be torn down. */
4302 ic->ic_stats.is_ht_rx_ba_window_jump++;
4303 goto drop;
4304
4305 }
4306
4307 /* drop any outdated packets */
4308 	if (SEQ_LT(sn, buffer->head_sn)) {
4309 ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
4310 goto drop;
4311 }
4312
4313 /* release immediately if allowed by nssn and no stored frames */
4314 	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
4315 if (iwx_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
4316 (!is_amsdu || last_subframe))
4317 buffer->head_sn = nssn;
4318 ieee80211_release_node(ic, ni);
4319 return 0;
4320 }
4321
4322 /*
4323 * release immediately if there are no stored frames, and the sn is
4324 * equal to the head.
4325 * This can happen due to reorder timer, where NSSN is behind head_sn.
4326 * When we released everything, and we got the next frame in the
4327 * sequence, according to the NSSN we can't release immediately,
4328 * while technically there is no hole and we can move forward.
4329 */
4330 if (!buffer->num_stored && sn == buffer->head_sn) {
4331 if (!is_amsdu || last_subframe)
4332 buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
4333 ieee80211_release_node(ic, ni);
4334 return 0;
4335 }
4336
4337 index = sn % buffer->buf_size;
4338
4339 /*
4340 * Check if we already stored this frame
4341 * As AMSDU is either received or not as whole, logic is simple:
4342 * If we have frames in that position in the buffer and the last frame
4343 * originated from AMSDU had a different SN then it is a retransmission.
4344 * If it is the same SN then if the subframe index is incrementing it
4345 * is the same AMSDU - otherwise it is a retransmission.
4346 */
4347 if (!ml_empty(&entries[index].frames)((&entries[index].frames)->ml_len == 0)) {
4348 if (!is_amsdu) {
4349 ic->ic_stats.is_ht_rx_ba_no_buf++;
4350 goto drop;
4351 } else if (sn != buffer->last_amsdu ||
4352 buffer->last_sub_index >= subframe_idx) {
4353 ic->ic_stats.is_ht_rx_ba_no_buf++;
4354 goto drop;
4355 }
4356 } else {
4357 /* This data is the same for all A-MSDU subframes. */
4358 entries[index].chanidx = chanidx;
4359 entries[index].is_shortpre = is_shortpre;
4360 entries[index].rate_n_flags = rate_n_flags;
4361 entries[index].device_timestamp = device_timestamp;
4362 memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
4363 }
4364
4365 /* put in reorder buffer */
4366 ml_enqueue(&entries[index].frames, m);
4367 buffer->num_stored++;
4368 getmicrouptime(&entries[index].reorder_time);
4369
4370 if (is_amsdu) {
4371 buffer->last_amsdu = sn;
4372 buffer->last_sub_index = subframe_idx;
4373 }
4374
4375 /*
4376 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
4377 * The reason is that NSSN advances on the first sub-frame, and may
4378 * cause the reorder buffer to advance before all the sub-frames arrive.
4379 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
4380 * SN 1. NSSN for first sub frame will be 3 with the result of driver
4381 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
4382 * already ahead and it will be dropped.
4383 * If the last sub-frame is not on this queue - we will get frame
4384 * release notification with up to date NSSN.
4385 */
4386 if (!is_amsdu || last_subframe)
4387 iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
4388
4389 ieee80211_release_node(ic, ni);
4390 return 1;
4391
4392drop:
4393 m_freem(m);
4394 ieee80211_release_node(ic, ni);
4395 return 1;
4396}
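
For orientation, the reorder metadata that iwx_rx_reorder() consumes is packed by firmware into the single 32-bit reorder_data word; the masks and shifts below mirror the expansions shown above. A sketch decoding a made-up value:

    #include <stdint.h>
    #include <stdio.h>

    #define REORDER_NSSN_MASK   0x00000fff  /* bits 0-11 */
    #define REORDER_SN_MASK     0x00fff000  /* bits 12-23 */
    #define REORDER_SN_SHIFT    12
    #define REORDER_BAID_MASK   0x7f000000  /* bits 24-30 */
    #define REORDER_BAID_SHIFT  24

    int
    main(void)
    {
        uint32_t reorder_data = 0x01234567;  /* example value */
        uint16_t nssn = reorder_data & REORDER_NSSN_MASK;
        uint16_t sn = (reorder_data & REORDER_SN_MASK) >> REORDER_SN_SHIFT;
        uint8_t baid = (reorder_data & REORDER_BAID_MASK) >> REORDER_BAID_SHIFT;
        printf("baid=%u sn=%u nssn=%u\n", baid, sn, nssn);  /* 1 564 1383 */
        return 0;
    }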
4397
4398void
4399iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
4400 size_t maxlen, struct mbuf_list *ml)
4401{
4402 struct ieee80211com *ic = &sc->sc_ic;
4403 struct ieee80211_rxinfo rxi;
4404 struct iwx_rx_mpdu_desc *desc;
4405 uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4406 int rssi;
4407 uint8_t chanidx;
4408 uint16_t phy_info;
4409
4410 desc = (struct iwx_rx_mpdu_desc *)pktdata;
4411
4412 if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)((__uint16_t)((1 << 0)))) ||
[1] Assuming the condition is false
[3] Taking false branch
4413 !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK)((__uint16_t)((1 << 1))))) {
[2] Assuming the condition is false
4414 m_freem(m);
4415 return; /* drop */
4416 }
4417
4418 len = le16toh(desc->mpdu_len)((__uint16_t)(desc->mpdu_len));
4419 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
[4] Assuming field 'ic_opmode' is not equal to IEEE80211_M_MONITOR
[5] Taking false branch
4420 /* Allow control frames in monitor mode. */
4421 if (len < sizeof(struct ieee80211_frame_cts)) {
4422 ic->ic_stats.is_rx_tooshort++;
4423 			IC2IFP(ic)->if_ierrors++;
4424 m_freem(m);
4425 return;
4426 }
4427 } else if (len < sizeof(struct ieee80211_frame)) {
[6] Assuming the condition is false
[7] Taking false branch
4428 ic->ic_stats.is_rx_tooshort++;
4429 		IC2IFP(ic)->if_ierrors++;
4430 m_freem(m);
4431 return;
4432 }
4433 if (len > maxlen - sizeof(*desc)) {
[8] Assuming the condition is false
[9] Taking false branch
4434 		IC2IFP(ic)->if_ierrors++;
4435 m_freem(m);
4436 return;
4437 }
4438
4439 	m->m_data = pktdata + sizeof(*desc);
4440 	m->m_pkthdr.len = m->m_len = len;
4441
4442 /* Account for padding following the frame header. */
4443 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD0x20) {
[10] Assuming the condition is false
[11] Taking false branch
4444 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
4445 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK0x0c;
4446 if (type == IEEE80211_FC0_TYPE_CTL0x04) {
4447 switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK0xf0) {
4448 case IEEE80211_FC0_SUBTYPE_CTS0xc0:
4449 hdrlen = sizeof(struct ieee80211_frame_cts);
4450 break;
4451 case IEEE80211_FC0_SUBTYPE_ACK0xd0:
4452 hdrlen = sizeof(struct ieee80211_frame_ack);
4453 break;
4454 default:
4455 hdrlen = sizeof(struct ieee80211_frame_min);
4456 break;
4457 }
4458 } else
4459 hdrlen = ieee80211_get_hdrlen(wh);
4460
4461 if ((le16toh(desc->status)((__uint16_t)(desc->status)) &
4462 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK(7 << 8)) ==
4463 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC(2 << 8)) {
4464 /* Padding is inserted after the IV. */
4465 hdrlen += IEEE80211_CCMP_HDRLEN8;
4466 }
4467
4468 memmove(m->m_data + 2, m->m_data, hdrlen);
4469 m_adj(m, 2);
4470 }
4471
4472 memset(&rxi, 0, sizeof(rxi))__builtin_memset((&rxi), (0), (sizeof(rxi)));
4473
4474 /*
4475 * Hardware de-aggregates A-MSDUs and copies the same MAC header
4476 * in place for each subframe. But it leaves the 'A-MSDU present'
4477 * bit set in the frame header. We need to clear this bit ourselves.
4478 * (XXX This workaround is not required on AX200/AX201 devices that
4479 * have been tested by me, but it's unclear when this problem was
4480 * fixed in the hardware. It definitely affects the 9k generation.
4481 * Leaving this in place for now since some 9k/AX200 hybrids seem
4482 * to exist that we may eventually add support for.)
4483 *
4484 * And we must allow the same CCMP PN for subframes following the
4485 * first subframe. Otherwise they would be discarded as replays.
4486 */
4487 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU0x40) {
[12] Assuming the condition is false
[13] Taking false branch
4488 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
4489 uint8_t subframe_idx = (desc->amsdu_info &
4490 IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK0x7f);
4491 if (subframe_idx > 0)
4492 rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN0x00000004;
4493 if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
4494 		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
4495 struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
4496 struct ieee80211_qosframe_addr4 *);
4497 qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU)((__uint16_t)(~0x0080));
4498 } else if (ieee80211_has_qos(wh) &&
4499 m->m_len >= sizeof(struct ieee80211_qosframe)) {
4500 struct ieee80211_qosframe *qwh = mtod(m,
4501 struct ieee80211_qosframe *);
4502 qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU)((__uint16_t)(~0x0080));
4503 }
4504 }
4505
4506 /*
4507 * Verify decryption before duplicate detection. The latter uses
4508 * the TID supplied in QoS frame headers and this TID is implicitly
4509 * verified as part of the CCMP nonce.
4510 */
4511 if (iwx_rx_hwdecrypt(sc, m, le16toh(desc->status)((__uint16_t)(desc->status)), &rxi)) {
[14] Taking false branch
4512 m_freem(m);
4513 return;
4514 }
4515
4516 if (iwx_detect_duplicate(sc, m, desc, &rxi)) {
[15] Taking false branch
4517 m_freem(m);
4518 return;
4519 }
4520
4521 phy_info = le16toh(desc->phy_info)((__uint16_t)(desc->phy_info));
4522 rate_n_flags = le32toh(desc->v1.rate_n_flags)((__uint32_t)(desc->v1.rate_n_flags));
4523 chanidx = desc->v1.channel;
4524 device_timestamp = desc->v1.gp2_on_air_rise;
4525
4526 rssi = iwx_rxmq_get_signal_strength(sc, desc);
4527 rssi = (0 - IWX_MIN_DBM) + rssi; /* normalize */
4528 rssi = MIN(rssi, ic->ic_max_rssi); /* clip to max. 100% */
[16] '?' condition is true
4529
4530 rxi.rxi_rssi = rssi;
4531 rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise)((__uint64_t)(desc->v1.tsf_on_air_rise));
4532
4533 if (iwx_rx_reorder(sc, m, chanidx, desc,
[17] Taking false branch
4534 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE(1 << 7)),
4535 rate_n_flags, device_timestamp, &rxi, ml))
4536 return;
4537
4538 iwx_rx_frame(sc, m, chanidx, le16toh(desc->status)((__uint16_t)(desc->status)),
[18] Calling 'iwx_rx_frame'
4539 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE(1 << 7)),
4540 rate_n_flags, device_timestamp, &rxi, ml);
4541}
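
The RSSI computation near the end of iwx_rx_mpdu_mq() rebases the firmware's dBm reading onto a non-negative scale before clipping. A worked example, assuming IWX_MIN_DBM is -100 (the value the analyzer's macro expansion showed) and a hypothetical maximum of 100:

    #include <stdio.h>

    #define IWX_MIN_DBM (-100)  /* assumed, per the report's expansion */

    int
    main(void)
    {
        int max_rssi = 100;  /* hypothetical ic_max_rssi */
        int rssi = -60;      /* hypothetical dBm reading from firmware */
        rssi = (0 - IWX_MIN_DBM) + rssi;  /* normalize: -60 dBm -> 40 */
        if (rssi > max_rssi)
            rssi = max_rssi;  /* clip to max. 100% */
        printf("rssi=%d\n", rssi);
        return 0;
    }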
4542
4543void
4544iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
4545{
4546 struct iwx_tfh_tfd *desc = &ring->desc[idx];
4547 uint8_t num_tbs = le16toh(desc->num_tbs)((__uint16_t)(desc->num_tbs)) & 0x1f;
4548 int i;
4549
4550 /* First TB is never cleared - it is bidirectional DMA data. */
4551 for (i = 1; i < num_tbs; i++) {
4552 struct iwx_tfh_tb *tb = &desc->tbs[i];
4553 memset(tb, 0, sizeof(*tb))__builtin_memset((tb), (0), (sizeof(*tb)));
4554 }
4555 desc->num_tbs = 0;
4556
4557 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4558 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
4559 sizeof(*desc), BUS_DMASYNC_PREWRITE);
4560}
4561
4562void
4563iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_data *txd)
4564{
4565 struct ieee80211com *ic = &sc->sc_ic;
4566
4567 bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
4568 BUS_DMASYNC_POSTWRITE);
4569 bus_dmamap_unload(sc->sc_dmat, txd->map);
4570 m_freem(txd->m);
4571 txd->m = NULL;
4572
4573 KASSERT(txd->in);
4574 ieee80211_release_node(ic, &txd->in->in_ni);
4575 txd->in = NULL;
4576}
4577
4578void
4579iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
4580{
4581 struct iwx_tx_data *txd;
4582
4583 while (ring->tail != idx) {
4584 txd = &ring->data[ring->tail];
4585 if (txd->m != NULL((void *)0)) {
4586 iwx_clear_tx_desc(sc, ring, ring->tail);
4587 iwx_tx_update_byte_tbl(ring, ring->tail, 0, 0);
4588 iwx_txd_done(sc, txd);
4589 ring->queued--;
4590 }
4591 ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT(256);
4592 }
4593}
4594
4595void
4596iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4597 struct iwx_rx_data *data)
4598{
4599 struct ieee80211com *ic = &sc->sc_ic;
4600 struct ifnet *ifp = IC2IFP(ic)(&(ic)->ic_ac.ac_if);
4601 struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
4602 int qid = cmd_hdr->qid, status, txfail;
4603 struct iwx_tx_ring *ring = &sc->txq[qid];
4604 struct iwx_tx_resp *tx_resp = (void *)pkt->data;
4605 uint32_t ssn;
4606 uint32_t len = iwx_rx_packet_len(pkt);
4607
4608 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
4609 BUS_DMASYNC_POSTREAD);
4610
4611 /* Sanity checks. */
4612 if (sizeof(*tx_resp) > len)
4613 return;
4614 if (qid < IWX_FIRST_AGG_TX_QUEUE(1 + 1) && tx_resp->frame_count > 1)
4615 return;
4616 if (qid >= IWX_FIRST_AGG_TX_QUEUE(1 + 1) && sizeof(*tx_resp) + sizeof(ssn) +
4617 tx_resp->frame_count * sizeof(tx_resp->status) > len)
4618 return;
4619
4620 sc->sc_tx_timer[qid] = 0;
4621
4622 if (tx_resp->frame_count > 1) /* A-MPDU */
4623 return;
4624
4625 status = le16toh(tx_resp->status.status)((__uint16_t)(tx_resp->status.status)) & IWX_TX_STATUS_MSK0x000000ff;
4626 txfail = (status != IWX_TX_STATUS_SUCCESS0x01 &&
4627 status != IWX_TX_STATUS_DIRECT_DONE0x02);
4628
4629 if (txfail)
4630 		ifp->if_oerrors++;
4631
4632 /*
4633 * On hardware supported by iwx(4) the SSN counter is only
4634 * 8 bit and corresponds to a Tx ring index rather than a
4635 * sequence number. Frames up to this index (non-inclusive)
4636 * can now be freed.
4637 */
4638 memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
4639 ssn = le32toh(ssn) & 0xff;
4640 iwx_txq_advance(sc, ring, ssn);
4641 iwx_clear_oactive(sc, ring);
4642}
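
As the comment in iwx_rx_tx_cmd() explains, only the low 8 bits of the response's SSN word are meaningful on this hardware, and they index the Tx ring rather than carry an 802.11 sequence number. A sketch with a made-up response word:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint32_t ssn = 0x00000142;  /* hypothetical little-endian value */
        ssn &= 0xff;                /* ring index: 0x42 == 66 */
        printf("frames below ring index %u may be freed\n", ssn);
        return 0;
    }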
4643
4644void
4645iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
4646{
4647 struct ieee80211com *ic = &sc->sc_ic;
4648 struct ifnet *ifp = IC2IFP(ic)(&(ic)->ic_ac.ac_if);
4649
4650 	if (ring->queued < IWX_TX_RING_LOMARK) {
4651 sc->qfullmsk &= ~(1 << ring->qid);
4652 if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
4653 ifq_clr_oactive(&ifp->if_snd);
4654 /*
4655 * Well, we're in interrupt context, but then again
4656 * I guess net80211 does all sorts of stunts in
4657 * interrupt context, so maybe this is no biggie.
4658 */
4659 (*ifp->if_start)(ifp);
4660 }
4661 }
4662}
4663
4664void
4665iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
4666{
4667 struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
4668 struct ieee80211com *ic = &sc->sc_ic;
4669 struct ieee80211_node *ni;
4670 struct ieee80211_tx_ba *ba;
4671 struct iwx_node *in;
4672 struct iwx_tx_ring *ring;
4673 uint16_t i, tfd_cnt, ra_tid_cnt, idx;
4674 int qid;
4675
4676 if (ic->ic_state != IEEE80211_S_RUN)
4677 return;
4678
4679 if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
4680 return;
4681
4682 	if (ba_res->sta_id != IWX_STATION_ID)
4683 return;
4684
4685 ni = ic->ic_bss;
4686 in = (void *)ni;
4687
4688 tfd_cnt = le16toh(ba_res->tfd_cnt)((__uint16_t)(ba_res->tfd_cnt));
4689 ra_tid_cnt = le16toh(ba_res->ra_tid_cnt)((__uint16_t)(ba_res->ra_tid_cnt));
4690 if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
4691 sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
4692 sizeof(ba_res->tfd[0]) * tfd_cnt))
4693 return;
4694
4695 for (i = 0; i < tfd_cnt; i++) {
4696 struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
4697 uint8_t tid;
4698
4699 tid = ba_tfd->tid;
4700 if (tid >= nitems(sc->aggqid)(sizeof((sc->aggqid)) / sizeof((sc->aggqid)[0])))
4701 continue;
4702
4703 qid = sc->aggqid[tid];
4704 if (qid != htole16(ba_tfd->q_num)((__uint16_t)(ba_tfd->q_num)))
4705 continue;
4706
4707 ring = &sc->txq[qid];
4708
4709 ba = &ni->ni_tx_ba[tid];
4710 		if (ba->ba_state != IEEE80211_BA_AGREED)
4711 continue;
4712
4713 idx = le16toh(ba_tfd->tfd_index)((__uint16_t)(ba_tfd->tfd_index));
4714 if (idx >= IWX_TX_RING_COUNT(256))
4715 continue;
4716 sc->sc_tx_timer[qid] = 0;
4717 iwx_txq_advance(sc, ring, idx);
4718 iwx_clear_oactive(sc, ring);
4719 }
4720}
4721
4722void
4723iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4724 struct iwx_rx_data *data)
4725{
4726 struct ieee80211com *ic = &sc->sc_ic;
4727 struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
4728 uint32_t missed;
4729
4730 if ((ic->ic_opmode != IEEE80211_M_STA) ||
4731 (ic->ic_state != IEEE80211_S_RUN))
4732 return;
4733
4734 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4735 sizeof(*mbn), BUS_DMASYNC_POSTREAD);
4736
4737 missed = le32toh(mbn->consec_missed_beacons_since_last_rx)((__uint32_t)(mbn->consec_missed_beacons_since_last_rx));
4738 if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
4739 		if (ic->ic_if.if_flags & IFF_DEBUG)
4740 printf("%s: receiving no beacons from %s; checking if "
4741 "this AP is still responding to probe requests\n",
4742 DEVNAME(sc)((sc)->sc_dev.dv_xname), ether_sprintf(ic->ic_bss->ni_macaddr));
4743 /*
4744 * Rather than go directly to scan state, try to send a
4745 * directed probe request first. If that fails then the
4746 * state machine will drop us into scanning after timing
4747 * out waiting for a probe response.
4748 */
4749 IEEE80211_SEND_MGMT(ic, ic->ic_bss,
4750 IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
4751 }
4752
4753}
4754
4755int
4756iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
4757{
4758 struct iwx_binding_cmd cmd;
4759 struct iwx_phy_ctxt *phyctxt = in->in_phyctxt;
4760 	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
4761 int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE0x10);
4762 uint32_t status;
4763
4764 if (action == IWX_FW_CTXT_ACTION_ADD1 && active)
4765 panic("binding already added");
4766 if (action == IWX_FW_CTXT_ACTION_REMOVE3 && !active)
4767 panic("binding already removed");
4768
4769 if (phyctxt == NULL((void *)0)) /* XXX race with iwx_stop() */
4770 return EINVAL22;
4771
4772 memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
4773
4774 cmd.id_and_color
4775 = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4776 cmd.action = htole32(action);
4777 cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4778
4779 cmd.macs[0] = htole32(mac_id)((__uint32_t)(mac_id));
4780 for (i = 1; i < IWX_MAX_MACS_IN_BINDING(3); i++)
4781 cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID)((__uint32_t)((0xffffffff)));
4782
4783 if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
4784 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4785 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4786 else
4787 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4788
4789 status = 0;
4790 err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD0x2b, sizeof(cmd),
4791 &cmd, &status);
4792 if (err == 0 && status != 0)
4793 err = EIO5;
4794
4795 return err;
4796}
4797
4798int
4799iwx_phy_ctxt_cmd_uhb_v3(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4800 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco)
4801{
4802 struct ieee80211com *ic = &sc->sc_ic;
4803 struct iwx_phy_context_cmd_uhb cmd;
4804 uint8_t active_cnt, idle_cnt;
4805 struct ieee80211_channel *chan = ctxt->channel;
4806
4807 memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
4808 cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
4809 ctxt->color));
4810 cmd.action = htole32(action)((__uint32_t)(action));
4811
4812 if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
4813 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4814 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4815 else
4816 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4817
4818 cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan)(((chan)->ic_flags & 0x0080) != 0) ?
4819 IWX_PHY_BAND_24(1) : IWX_PHY_BAND_5(0);
4820 cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan))((__uint32_t)(ieee80211_chan2ieee(ic, chan)));
4821 if (chan->ic_flags & IEEE80211_CHAN_40MHZ0x8000) {
4822 if (sco == IEEE80211_HTOP0_SCO_SCA1) {
4823 /* secondary chan above -> control chan below */
4824 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW(0x0);
4825 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40(0x1);
4826 } else if (sco == IEEE80211_HTOP0_SCO_SCB3) {
4827 /* secondary chan below -> control chan above */
4828 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE(0x4);
4829 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40(0x1);
4830 } else {
4831 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20(0x0);
4832 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW(0x0);
4833 }
4834 } else {
4835 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20(0x0);
4836 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW(0x0);
4837 }
4838
4839 idle_cnt = chains_static;
4840 active_cnt = chains_dynamic;
4841 cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
4842 IWX_PHY_RX_CHAIN_VALID_POS);
4843 cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
4844 cmd.rxchain_info |= htole32(active_cnt <<
4845 IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
4846
4847 return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD0x8, 0, sizeof(cmd), &cmd);
4848}
4849
4850int
4851iwx_phy_ctxt_cmd_v3(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4852 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco)
4853{
4854 struct ieee80211com *ic = &sc->sc_ic;
4855 struct iwx_phy_context_cmd cmd;
4856 uint8_t active_cnt, idle_cnt;
4857 struct ieee80211_channel *chan = ctxt->channel;
4858
4859 memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
4860 cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
4861 ctxt->color));
4862 cmd.action = htole32(action)((__uint32_t)(action));
4863
4864 if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
4865 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4866 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4867 else
4868 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4869
4870 cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan)(((chan)->ic_flags & 0x0080) != 0) ?
4871 IWX_PHY_BAND_24(1) : IWX_PHY_BAND_5(0);
4872 cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
4873 if (chan->ic_flags & IEEE80211_CHAN_40MHZ0x8000) {
4874 if (sco == IEEE80211_HTOP0_SCO_SCA1) {
4875 /* secondary chan above -> control chan below */
4876 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW(0x0);
4877 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40(0x1);
4878 } else if (sco == IEEE80211_HTOP0_SCO_SCB3) {
4879 /* secondary chan below -> control chan above */
4880 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE(0x4);
4881 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40(0x1);
4882 } else {
4883 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20(0x0);
4884 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW(0x0);
4885 }
4886 } else {
4887 cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20(0x0);
4888 cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW(0x0);
4889 }
4890
4891 idle_cnt = chains_static;
4892 active_cnt = chains_dynamic;
4893 cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
4894 IWX_PHY_RX_CHAIN_VALID_POS);
4895 cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
4896 cmd.rxchain_info |= htole32(active_cnt <<
4897 IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
4898
4899 return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD0x8, 0, sizeof(cmd), &cmd);
4900}
4901
4902int
4903iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
4904 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
4905 uint32_t apply_time, uint8_t sco)
4906{
4907 int cmdver;
4908
4909 cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP0x1, IWX_PHY_CONTEXT_CMD0x8);
4910 if (cmdver != 3) {
4911 printf("%s: firmware does not support phy-context-cmd v3\n",
4912 DEVNAME(sc)((sc)->sc_dev.dv_xname));
4913 return ENOTSUP91;
4914 }
4915
4916 /*
4917 * Intel increased the size of the fw_channel_info struct and neglected
4918 * to bump the phy_context_cmd struct, which contains an fw_channel_info
4919 * member in the middle.
4920 * To keep things simple we use a separate function to handle the larger
4921 * variant of the phy context command.
4922 */
4923 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
4924 return iwx_phy_ctxt_cmd_uhb_v3(sc, ctxt, chains_static,
4925 chains_dynamic, action, sco);
4926 }
4927
4928 return iwx_phy_ctxt_cmd_v3(sc, ctxt, chains_static, chains_dynamic,
4929 action, sco);
4930}
4931
4932int
4933iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
4934{
4935 struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE0];
4936 struct iwx_tfh_tfd *desc;
4937 struct iwx_tx_data *txdata;
4938 struct iwx_device_cmd *cmd;
4939 struct mbuf *m;
4940 bus_addr_t paddr;
4941 uint64_t addr;
4942 int err = 0, i, paylen, off, s;
4943 int idx, code, async, group_id;
4944 size_t hdrlen, datasz;
4945 uint8_t *data;
4946 int generation = sc->sc_generation;
4947
4948 code = hcmd->id;
4949 async = hcmd->flags & IWX_CMD_ASYNC;
4950 idx = ring->cur;
4951
4952 for (i = 0, paylen = 0; i < nitems(hcmd->len)(sizeof((hcmd->len)) / sizeof((hcmd->len)[0])); i++) {
4953 paylen += hcmd->len[i];
4954 }
4955
4956 /* If this command waits for a response, allocate response buffer. */
4957 hcmd->resp_pkt = NULL((void *)0);
4958 if (hcmd->flags & IWX_CMD_WANT_RESP) {
4959 uint8_t *resp_buf;
4960 KASSERT(!async);
4961 KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet));
4962 KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX);
4963 if (sc->sc_cmd_resp_pkt[idx] != NULL((void *)0))
4964 return ENOSPC28;
4965 resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF2,
4966 M_NOWAIT0x0002 | M_ZERO0x0008);
4967 if (resp_buf == NULL((void *)0))
4968 return ENOMEM12;
4969 sc->sc_cmd_resp_pkt[idx] = resp_buf;
4970 sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
4971 } else {
4972 sc->sc_cmd_resp_pkt[idx] = NULL((void *)0);
4973 }
4974
4975 	s = splnet();
4976
4977 desc = &ring->desc[idx];
4978 txdata = &ring->data[idx];
4979
4980 /*
4981 * XXX Intel inside (tm)
4982 * Firmware API versions >= 50 reject old-style commands in
4983 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
4984 * that such commands were in the LONG_GROUP instead in order
4985 * for firmware to accept them.
4986 */
4987 if (iwx_cmd_groupid(code) == 0) {
4988 code = IWX_WIDE_ID(IWX_LONG_GROUP, code)((0x1 << 8) | code);
4989 txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW0x01;
4990 } else
4991 txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW0x01;
4992
4993 group_id = iwx_cmd_groupid(code);
4994
4995 hdrlen = sizeof(cmd->hdr_wide);
4996 datasz = sizeof(cmd->data_wide);
4997
4998 if (paylen > datasz) {
4999 /* Command is too large to fit in pre-allocated space. */
5000 size_t totlen = hdrlen + paylen;
5001 if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE(4096 - sizeof(struct iwx_cmd_header_wide))) {
5002 printf("%s: firmware command too long (%zd bytes)\n",
5003 DEVNAME(sc)((sc)->sc_dev.dv_xname), totlen);
5004 err = EINVAL22;
5005 goto out;
5006 }
5007 		m = MCLGETL(NULL, M_DONTWAIT, totlen);
5008 if (m == NULL((void *)0)) {
5009 printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
5010 DEVNAME(sc)((sc)->sc_dev.dv_xname), totlen);
5011 err = ENOMEM12;
5012 goto out;
5013 }
5014 cmd = mtod(m, struct iwx_device_cmd *)((struct iwx_device_cmd *)((m)->m_hdr.mh_data));
5015 err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
5016 totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5017 if (err) {
5018 printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
5019 DEVNAME(sc)((sc)->sc_dev.dv_xname), totlen);
5020 m_freem(m);
5021 goto out;
5022 }
5023 txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
5024 paddr = txdata->map->dm_segs[0].ds_addr;
5025 } else {
5026 cmd = &ring->cmd[idx];
5027 paddr = txdata->cmd_paddr;
5028 }
5029
5030 memset(cmd, 0, sizeof(*cmd))__builtin_memset((cmd), (0), (sizeof(*cmd)));
5031 cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
5032 cmd->hdr_wide.group_id = group_id;
5033 cmd->hdr_wide.qid = ring->qid;
5034 cmd->hdr_wide.idx = idx;
5035 cmd->hdr_wide.length = htole16(paylen)((__uint16_t)(paylen));
5036 cmd->hdr_wide.version = iwx_cmd_version(code);
5037 data = cmd->data_wide;
5038
5039 for (i = 0, off = 0; i < nitems(hcmd->data)(sizeof((hcmd->data)) / sizeof((hcmd->data)[0])); i++) {
5040 if (hcmd->len[i] == 0)
5041 continue;
5042 memcpy(data + off, hcmd->data[i], hcmd->len[i]);
5043 off += hcmd->len[i];
5044 }
5045 KASSERT(off == paylen);
5046
5047 desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
5048 addr = htole64(paddr);
5049 memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
5050 if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
5051 desc->tbs[1].tb_len = htole16(hdrlen + paylen -
5052 IWX_FIRST_TB_SIZE);
5053 addr = htole64(paddr + IWX_FIRST_TB_SIZE);
5054 memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
5055 desc->num_tbs = htole16(2);
5056 } else
5057 desc->num_tbs = htole16(1);
5058
5059 if (paylen > datasz) {
5060 bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
5061 hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5062 } else {
5063 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5064 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5065 hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5066 }
5067 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5068 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5069 sizeof (*desc), BUS_DMASYNC_PREWRITE);
5070 /* Kick command ring. */
5071 DPRINTF(("%s: sending command 0x%x\n", __func__, code));
5072 ring->queued++;
5073 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5074 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur);
5075
5076 if (!async) {
5077 err = tsleep_nsec(desc, PCATCH0x100, "iwxcmd", SEC_TO_NSEC(1));
5078 if (err == 0) {
5079 /* if hardware is no longer up, return error */
5080 if (generation != sc->sc_generation) {
5081 err = ENXIO6;
5082 goto out;
5083 }
5084
5085 /* Response buffer will be freed in iwx_free_resp(). */
5086 hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5087 sc->sc_cmd_resp_pkt[idx] = NULL((void *)0);
5088 } else if (generation == sc->sc_generation) {
5089 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF2,
5090 sc->sc_cmd_resp_len[idx]);
5091 sc->sc_cmd_resp_pkt[idx] = NULL((void *)0);
5092 }
5093 }
5094 out:
5095 	splx(s);
5096
5097 return err;
5098}
5099
5100int
5101iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5102 uint16_t len, const void *data)
5103{
5104 struct iwx_host_cmd cmd = {
5105 .id = id,
5106 .len = { len, },
5107 .data = { data, },
5108 .flags = flags,
5109 };
5110
5111 return iwx_send_cmd(sc, &cmd);
5112}
5113
5114int
5115iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
5116 uint32_t *status)
5117{
5118 struct iwx_rx_packet *pkt;
5119 struct iwx_cmd_response *resp;
5120 int err, resp_len;
5121
5122 KASSERT((cmd->flags & IWX_CMD_WANT_RESP) == 0);
5123 cmd->flags |= IWX_CMD_WANT_RESP;
5124 cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5125
5126 err = iwx_send_cmd(sc, cmd);
5127 if (err)
5128 return err;
5129
5130 pkt = cmd->resp_pkt;
5131 if (pkt == NULL((void *)0) || (pkt->hdr.flags & IWX_CMD_FAILED_MSK0x40))
5132 return EIO5;
5133
5134 resp_len = iwx_rx_packet_payload_len(pkt);
5135 if (resp_len != sizeof(*resp)) {
5136 iwx_free_resp(sc, cmd);
5137 return EIO5;
5138 }
5139
5140 resp = (void *)pkt->data;
5141 *status = le32toh(resp->status)((__uint32_t)(resp->status));
5142 iwx_free_resp(sc, cmd);
5143 return err;
5144}
5145
5146int
5147iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
5148 const void *data, uint32_t *status)
5149{
5150 struct iwx_host_cmd cmd = {
5151 .id = id,
5152 .len = { len, },
5153 .data = { data, },
5154 };
5155
5156 return iwx_send_cmd_status(sc, &cmd, status);
5157}
5158
5159void
5160iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5161{
5162 KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP);
5163 free(hcmd->resp_pkt, M_DEVBUF2, hcmd->resp_pkt_len);
5164 hcmd->resp_pkt = NULL((void *)0);
5165}
5166
5167void
5168iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
5169{
5170 struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE0];
5171 struct iwx_tx_data *data;
5172
5173 if (qid != IWX_DQA_CMD_QUEUE0) {
5174 return; /* Not a command ack. */
5175 }
5176
5177 data = &ring->data[idx];
5178
5179 if (data->m != NULL((void *)0)) {
5180 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
5181 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5182 bus_dmamap_unload(sc->sc_dmat, data->map);
5183 m_freem(data->m);
5184 data->m = NULL((void *)0);
5185 }
5186 wakeup(&ring->desc[idx]);
5187
5188 	DPRINTF(("%s: command 0x%x done\n", __func__, code));
5189 if (ring->queued == 0) {
5190 DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
5191 DEVNAME(sc), code));
5192 } else if (ring->queued > 0)
5193 ring->queued--;
5194}
5195
5196/*
5197 * Fill in various bits for management frames, and leave them
5198 * unfilled for data frames (firmware takes care of that).
5199 * Return the selected TX rate.
5200 */
5201const struct iwx_rate *
5202iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
5203 struct ieee80211_frame *wh, struct iwx_tx_cmd_gen2 *tx)
5204{
5205 struct ieee80211com *ic = &sc->sc_ic;
5206 struct ieee80211_node *ni = &in->in_ni;
5207 struct ieee80211_rateset *rs = &ni->ni_rates;
5208 const struct iwx_rate *rinfo;
5209 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK0x0c;
5210 int min_ridx = iwx_rval2ridx(ieee80211_min_basic_rate(ic));
5211 int ridx, rate_flags;
5212 uint32_t flags = 0;
5213
5214 if (IEEE80211_IS_MULTICAST(wh->i_addr1)(*(wh->i_addr1) & 0x01) ||
5215 type != IEEE80211_FC0_TYPE_DATA0x08) {
5216 /* for non-data, use the lowest supported rate */
5217 ridx = min_ridx;
5218 flags |= IWX_TX_FLAGS_CMD_RATE(1 << 0);
5219 } else if (ic->ic_fixed_mcs != -1) {
5220 ridx = sc->sc_fixed_ridx;
5221 flags |= IWX_TX_FLAGS_CMD_RATE(1 << 0);
5222 } else if (ic->ic_fixed_rate != -1) {
5223 ridx = sc->sc_fixed_ridx;
5224 flags |= IWX_TX_FLAGS_CMD_RATE(1 << 0);
5225 } else if (ni->ni_flags & IEEE80211_NODE_HT0x0400) {
5226 ridx = iwx_mcs2ridx[ni->ni_txmcs];
5227 } else {
5228 uint8_t rval;
5229 rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL0x7f);
5230 ridx = iwx_rval2ridx(rval);
5231 if (ridx < min_ridx)
5232 ridx = min_ridx;
5233 }
5234
5235 if ((ic->ic_flags & IEEE80211_F_RSNON0x00200000) &&
5236 ni->ni_rsn_supp_state == RSNA_SUPP_PTKNEGOTIATING)
5237 flags |= IWX_TX_FLAGS_HIGH_PRI(1 << 2);
5238 tx->flags = htole32(flags)((__uint32_t)(flags));
5239
5240 rinfo = &iwx_rates[ridx];
5241 if (iwx_is_mimo_ht_plcp(rinfo->ht_plcp))
5242 rate_flags = IWX_RATE_MCS_ANT_AB_MSK((1 << 14) | (2 << 14));
5243 else
5244 rate_flags = IWX_RATE_MCS_ANT_A_MSK(1 << 14);
5245 if (IWX_RIDX_IS_CCK(ridx)((ridx) < 4))
5246 rate_flags |= IWX_RATE_MCS_CCK_MSK(1 << 9);
5247 if ((ni->ni_flags & IEEE80211_NODE_HT0x0400) &&
5248 type == IEEE80211_FC0_TYPE_DATA0x08 &&
5249 rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP0x20) {
5250 uint8_t sco;
5251 if (ieee80211_node_supports_ht_chan40(ni))
5252 sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK0x03);
5253 else
5254 sco = IEEE80211_HTOP0_SCO_SCN0;
5255 rate_flags |= IWX_RATE_MCS_HT_MSK(1 << 8);
5256 if ((sco == IEEE80211_HTOP0_SCO_SCA1 ||
5257 sco == IEEE80211_HTOP0_SCO_SCB3) &&
5258 in->in_phyctxt != NULL((void *)0) && in->in_phyctxt->sco == sco) {
5259 rate_flags |= IWX_RATE_MCS_CHAN_WIDTH_40(1 << 11);
5260 if (ieee80211_node_supports_ht_sgi40(ni))
5261 rate_flags |= IWX_RATE_MCS_SGI_MSK(1 << 13);
5262 } else if (ieee80211_node_supports_ht_sgi20(ni))
5263 rate_flags |= IWX_RATE_MCS_SGI_MSK(1 << 13);
5264 tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp)((__uint32_t)(rate_flags | rinfo->ht_plcp));
5265 } else
5266 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp)((__uint32_t)(rate_flags | rinfo->plcp));
5267
5268 return rinfo;
5269}
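
A sketch of how iwx_tx_fill_cmd() composes rate_n_flags for an HT SISO data frame, using the bit positions visible in the expansions above (antenna A at bit 14, HT at bit 8, short GI at bit 13); the PLCP value here is hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    #define RATE_MCS_ANT_A_MSK  (1 << 14)
    #define RATE_MCS_HT_MSK     (1 << 8)
    #define RATE_MCS_SGI_MSK    (1 << 13)

    int
    main(void)
    {
        uint32_t ht_plcp = 0x07;  /* hypothetical HT PLCP value for MCS 7 */
        uint32_t rate_n_flags = RATE_MCS_ANT_A_MSK | RATE_MCS_HT_MSK |
            RATE_MCS_SGI_MSK | ht_plcp;
        printf("rate_n_flags=0x%08x\n", rate_n_flags);  /* 0x00006107 */
        return 0;
    }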
5270
5271void
5272iwx_tx_update_byte_tbl(struct iwx_tx_ring *txq, int idx, uint16_t byte_cnt,
5273 uint16_t num_tbs)
5274{
5275 uint8_t filled_tfd_size, num_fetch_chunks;
5276 uint16_t len = byte_cnt;
5277 uint16_t bc_ent;
5278 struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
5279
5280 filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs)__builtin_offsetof(struct iwx_tfh_tfd, tbs) +
5281 num_tbs * sizeof(struct iwx_tfh_tb);
5282 /*
5283 * filled_tfd_size contains the number of filled bytes in the TFD.
5284 * Dividing it by 64 will give the number of chunks to fetch
5285 * to SRAM- 0 for one chunk, 1 for 2 and so on.
5286 * If, for example, TFD contains only 3 TBs then 32 bytes
5287 * of the TFD are used, and only one chunk of 64 bytes should
5288 * be fetched
5289 */
5290 num_fetch_chunks = howmany(filled_tfd_size, 64)(((filled_tfd_size) + ((64) - 1)) / (64)) - 1;
5291
5292 /* Before AX210, the HW expects DW */
5293 len = howmany(len, 4)(((len) + ((4) - 1)) / (4));
5294 bc_ent = htole16(len | (num_fetch_chunks << 12))((__uint16_t)(len | (num_fetch_chunks << 12)));
5295 scd_bc_tbl->tfd_offset[idx] = bc_ent;
5296}
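
A worked instance of the arithmetic above, reusing the comment's own example of a TFD with 3 TBs occupying 32 bytes; the 200-byte frame length is hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint16_t byte_cnt = 200;        /* hypothetical frame length */
        uint16_t filled_tfd_size = 32;  /* 3 TBs, per the comment above */
        uint16_t num_fetch_chunks = (filled_tfd_size + 63) / 64 - 1; /* 0 */
        uint16_t len = (byte_cnt + 3) / 4;  /* pre-AX210 wants DWs: 50 */
        uint16_t bc_ent = len | (num_fetch_chunks << 12);
        printf("bc_ent=0x%04x\n", bc_ent);  /* 0x0032 */
        return 0;
    }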
5297
5298int
5299iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
5300{
5301 struct ieee80211com *ic = &sc->sc_ic;
5302 struct iwx_node *in = (void *)ni;
5303 struct iwx_tx_ring *ring;
5304 struct iwx_tx_data *data;
5305 struct iwx_tfh_tfd *desc;
5306 struct iwx_device_cmd *cmd;
5307 struct iwx_tx_cmd_gen2 *tx;
5308 struct ieee80211_frame *wh;
5309 struct ieee80211_key *k = NULL((void *)0);
5310 const struct iwx_rate *rinfo;
5311 uint64_t paddr;
5312 u_int hdrlen;
5313 bus_dma_segment_t *seg;
5314 uint16_t num_tbs;
5315 uint8_t type, subtype;
5316 int i, totlen, err, pad, qid;
5317
5318 wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
5319 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK0x0c;
5320 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK0xf0;
5321 if (type == IEEE80211_FC0_TYPE_CTL0x04)
5322 hdrlen = sizeof(struct ieee80211_frame_min);
5323 else
5324 hdrlen = ieee80211_get_hdrlen(wh);
5325
5326 qid = sc->first_data_qid;
5327
5328 /* Put QoS frames on the data queue which maps to their TID. */
5329 if (ieee80211_has_qos(wh)) {
5330 struct ieee80211_tx_ba *ba;
5331 uint16_t qos = ieee80211_get_qos(wh);
5332 uint8_t tid = qos & IEEE80211_QOS_TID0x000f;
5333
5334 ba = &ni->ni_tx_ba[tid];
5335 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)(*(wh->i_addr1) & 0x01) &&
5336 type == IEEE80211_FC0_TYPE_DATA0x08 &&
5337 subtype != IEEE80211_FC0_SUBTYPE_NODATA0x40 &&
5338 sc->aggqid[tid] != 0 &&
5339 		    ba->ba_state == IEEE80211_BA_AGREED) {
5340 qid = sc->aggqid[tid];
5341 }
5342 }
5343
5344 ring = &sc->txq[qid];
5345 desc = &ring->desc[ring->cur];
5346 memset(desc, 0, sizeof(*desc))__builtin_memset((desc), (0), (sizeof(*desc)));
5347 data = &ring->data[ring->cur];
5348
5349 cmd = &ring->cmd[ring->cur];
5350 cmd->hdr.code = IWX_TX_CMD0x1c;
5351 cmd->hdr.flags = 0;
5352 cmd->hdr.qid = ring->qid;
5353 cmd->hdr.idx = ring->cur;
5354
5355 tx = (void *)cmd->data;
5356 memset(tx, 0, sizeof(*tx))__builtin_memset((tx), (0), (sizeof(*tx)));
5357
5358 rinfo = iwx_tx_fill_cmd(sc, in, wh, tx);
5359
5360#if NBPFILTER1 > 0
5361 if (sc->sc_drvbpf != NULL((void *)0)) {
5362 		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
5363 uint16_t chan_flags;
5364
5365 tap->wt_flags = 0;
5366 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq)((__uint16_t)(ni->ni_chan->ic_freq));
5367 chan_flags = ni->ni_chan->ic_flags;
5368 if (ic->ic_curmode != IEEE80211_MODE_11N)
5369 chan_flags &= ~IEEE80211_CHAN_HT0x2000;
5370 tap->wt_chan_flags = htole16(chan_flags)((__uint16_t)(chan_flags));
5371 if ((ni->ni_flags & IEEE80211_NODE_HT0x0400) &&
5372 !IEEE80211_IS_MULTICAST(wh->i_addr1)(*(wh->i_addr1) & 0x01) &&
5373 type == IEEE80211_FC0_TYPE_DATA0x08 &&
5374 rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP0x20) {
5375 tap->wt_rate = (0x80 | rinfo->ht_plcp);
5376 } else
5377 tap->wt_rate = rinfo->rate;
5378 if ((ic->ic_flags & IEEE80211_F_WEPON0x00000100) &&
5379 (wh->i_fc[1] & IEEE80211_FC1_PROTECTED0x40))
5380 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP0x04;
5381
5382 bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
5383 m, BPF_DIRECTION_OUT(1 << 1));
5384 }
5385#endif
5386
5387 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED0x40) {
5388 k = ieee80211_get_txkey(ic, wh, ni);
5389 if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
5390 if ((m = ieee80211_encrypt(ic, m, k)) == NULL((void *)0))
5391 return ENOBUFS55;
5392 /* 802.11 header may have moved. */
5393 wh = mtod(m, struct ieee80211_frame *)((struct ieee80211_frame *)((m)->m_hdr.mh_data));
5394 tx->flags |= htole32(IWX_TX_FLAGS_ENCRYPT_DIS)((__uint32_t)((1 << 1)));
5395 } else {
5396 k->k_tsc++;
5397 /* Hardware increments PN internally and adds IV. */
5398 }
5399 } else
5400 tx->flags |= htole32(IWX_TX_FLAGS_ENCRYPT_DIS)((__uint32_t)((1 << 1)));
5401
5402 	totlen = m->m_pkthdr.len;
5403
5404 if (hdrlen & 3) {
5405 /* First segment length must be a multiple of 4. */
5406 pad = 4 - (hdrlen & 3);
5407 tx->offload_assist |= htole16(IWX_TX_CMD_OFFLD_PAD)((__uint16_t)((1 << 13)));
5408 } else
5409 pad = 0;
5410
5411 tx->len = htole16(totlen)((__uint16_t)(totlen));
5412
5413 /* Copy 802.11 header in TX command. */
5414 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
5415
5416 /* Trim 802.11 header. */
5417 m_adj(m, hdrlen);
5418
5419 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
5420 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5421 if (err && err != EFBIG27) {
5422 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
5423 m_freem(m);
5424 return err;
5425 }
5426 if (err) {
5427 /* Too many DMA segments, linearize mbuf. */
5428 if (m_defrag(m, M_DONTWAIT0x0002)) {
5429 m_freem(m);
5430 return ENOBUFS55;
5431 }
5432 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
5433 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5434 if (err) {
5435 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
5436 err);
5437 m_freem(m);
5438 return err;
5439 }
5440 }
5441 data->m = m;
5442 data->in = in;
5443
5444 /* Fill TX descriptor. */
5445 num_tbs = 2 + data->map->dm_nsegs;
5446 desc->num_tbs = htole16(num_tbs)((__uint16_t)(num_tbs));
5447
5448 desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE)((__uint16_t)(20));
5449 paddr = htole64(data->cmd_paddr)((__uint64_t)(data->cmd_paddr));
5450 memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
5451 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
5452 DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
5453 desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
5454 sizeof(*tx) + hdrlen + pad - IWX_FIRST_TB_SIZE);
5455 paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
5456 memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
5457
5458 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
5459 DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
5460
5461 /* Other DMA segments are for data payload. */
5462 seg = data->map->dm_segs;
5463 for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
5464 desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
5465 paddr = htole64(seg->ds_addr);
5466 memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
5467 if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32)
5468 DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
5469 }
5470
5471 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
5472 BUS_DMASYNC_PREWRITE);
5473 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5474 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5475 sizeof (*cmd), BUS_DMASYNC_PREWRITE);
5476 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5477 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5478 sizeof (*desc), BUS_DMASYNC_PREWRITE);
5479
5480 iwx_tx_update_byte_tbl(ring, ring->cur, totlen, num_tbs);
5481
5482 /* Kick TX ring. */
5483 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT(256);
5484 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x400)+0x060
))), ((ring->qid << 16 | ring->cur))))
;
5485
5486 /* Mark TX ring as full if we reach a certain threshold. */
5487 if (++ring->queued > IWX_TX_RING_HIMARK224) {
5488 sc->qfullmsk |= 1 << ring->qid;
5489 }
5490
5491 if (ic->ic_ific_ac.ac_if.if_flags & IFF_UP0x1)
5492 sc->sc_tx_timer[ring->qid] = 15;
5493
5494 return 0;
5495}
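
Note (editor): the descriptor setup above always spends two TBs on the command buffer before the payload: TB0 maps the first IWX_FIRST_TB_SIZE (20) bytes, TB1 maps the rest of the command header, Tx command and 802.11 header plus padding, and TBs 2..n map the mbuf's DMA segments. A minimal standalone sketch of the TB length arithmetic; the struct sizes and header length below are assumed example values, not the driver's real ones:

    /* Sketch only: all sizes below are assumptions for illustration. */
    #include <stdint.h>
    #include <stdio.h>

    #define IWX_FIRST_TB_SIZE 20

    int
    main(void)
    {
            size_t cmd_hdr = 16;    /* assumed sizeof(struct iwx_cmd_header) */
            size_t tx_cmd = 40;     /* assumed sizeof(*tx) */
            size_t hdrlen = 26, pad = 2;    /* assumed QoS header + padding */

            /* TB0 always covers the first 20 bytes of the command buffer. */
            size_t tb0 = IWX_FIRST_TB_SIZE;
            /* TB1 covers the remainder of the command header + Tx command
             * + 802.11 header, matching the expression at line 5453 above. */
            size_t tb1 = cmd_hdr + tx_cmd + hdrlen + pad - IWX_FIRST_TB_SIZE;

            printf("tb0=%zu tb1=%zu\n", tb0, tb1);  /* tb0=20 tb1=64 */
            return 0;
    }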
5496
5497 int
5498 iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
5499 {
5500 	struct iwx_rx_packet *pkt;
5501 	struct iwx_tx_path_flush_cmd_rsp *resp;
5502 	struct iwx_tx_path_flush_cmd flush_cmd = {
5503 		.sta_id = htole32(sta_id),
5504 		.tid_mask = htole16(tids),
5505 	};
5506 	struct iwx_host_cmd hcmd = {
5507 		.id = IWX_TXPATH_FLUSH,
5508 		.len = { sizeof(flush_cmd), },
5509 		.data = { &flush_cmd, },
5510 		.flags = IWX_CMD_WANT_RESP,
5511 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
5512 	};
5513 	int err, resp_len, i, num_flushed_queues;
5514
5515 	err = iwx_send_cmd(sc, &hcmd);
5516 	if (err)
5517 		return err;
5518
5519 	pkt = hcmd.resp_pkt;
5520 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
5521 		err = EIO;
5522 		goto out;
5523 	}
5524
5525 	resp_len = iwx_rx_packet_payload_len(pkt);
5526 	/* Some firmware versions don't provide a response. */
5527 	if (resp_len == 0)
5528 		goto out;
5529 	else if (resp_len != sizeof(*resp)) {
5530 		err = EIO;
5531 		goto out;
5532 	}
5533
5534 	resp = (void *)pkt->data;
5535
5536 	if (le16toh(resp->sta_id) != sta_id) {
5537 		err = EIO;
5538 		goto out;
5539 	}
5540
5541 	num_flushed_queues = le16toh(resp->num_flushed_queues);
5542 	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
5543 		err = EIO;
5544 		goto out;
5545 	}
5546
5547 	for (i = 0; i < num_flushed_queues; i++) {
5548 		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
5549 		uint16_t tid = le16toh(queue_info->tid);
5550 		uint16_t read_after = le16toh(queue_info->read_after_flush);
5551 		uint16_t qid = le16toh(queue_info->queue_num);
5552 		struct iwx_tx_ring *txq;
5553
5554 		if (qid >= nitems(sc->txq))
5555 			continue;
5556
5557 		txq = &sc->txq[qid];
5558 		if (tid != txq->tid)
5559 			continue;
5560
5561 		iwx_txq_advance(sc, txq, read_after);
5562 	}
5563 out:
5564 	iwx_free_resp(sc, &hcmd);
5565 	return err;
5566 }
5567
5568 #define IWX_FLUSH_WAIT_MS	2000
5569
5570 int
5571 iwx_wait_tx_queues_empty(struct iwx_softc *sc)
5572 {
5573 	int i, err;
5574
5575 	for (i = 0; i < nitems(sc->txq); i++) {
5576 		struct iwx_tx_ring *ring = &sc->txq[i];
5577
5578 		if (i == IWX_DQA_CMD_QUEUE)
5579 			continue;
5580
5581 		while (ring->queued > 0) {
5582 			err = tsleep_nsec(ring, 0, "iwxflush",
5583 			    MSEC_TO_NSEC(IWX_FLUSH_WAIT_MS));
5584 			if (err)
5585 				return err;
5586 		}
5587 	}
5588
5589 	return 0;
5590 }
5591
5592 int
5593 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
5594 {
5595 	struct iwx_add_sta_cmd cmd;
5596 	int err;
5597 	uint32_t status;
5598
5599 	memset(&cmd, 0, sizeof(cmd));
5600 	cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5601 	    in->in_color));
5602 	cmd.sta_id = IWX_STATION_ID;
5603 	cmd.add_modify = IWX_STA_MODE_MODIFY;
5604 	cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
5605 	cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
5606
5607 	status = IWX_ADD_STA_SUCCESS;
5608 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
5609 	    sizeof(cmd), &cmd, &status);
5610 	if (err) {
5611 		printf("%s: could not update sta (error %d)\n",
5612 		    DEVNAME(sc), err);
5613 		return err;
5614 	}
5615
5616 	switch (status & IWX_ADD_STA_STATUS_MASK) {
5617 	case IWX_ADD_STA_SUCCESS:
5618 		break;
5619 	default:
5620 		err = EIO;
5621 		printf("%s: Couldn't %s draining for station\n",
5622 		    DEVNAME(sc), drain ? "enable" : "disable");
5623 		break;
5624 	}
5625
5626 	return err;
5627 }
5628
5629 int
5630 iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
5631 {
5632 	int err;
5633
5634 	splassert(IPL_NET);
5635
5636 	sc->sc_flags |= IWX_FLAG_TXFLUSH;
5637
5638 	err = iwx_drain_sta(sc, in, 1);
5639 	if (err)
5640 		goto done;
5641
5642 	err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
5643 	if (err) {
5644 		printf("%s: could not flush Tx path (error %d)\n",
5645 		    DEVNAME(sc), err);
5646 		goto done;
5647 	}
5648
5649 	err = iwx_wait_tx_queues_empty(sc);
5650 	if (err) {
5651 		printf("%s: Could not empty Tx queues (error %d)\n",
5652 		    DEVNAME(sc), err);
5653 		goto done;
5654 	}
5655
5656 	err = iwx_drain_sta(sc, in, 0);
5657 done:
5658 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
5659 	return err;
5660 }
5661
5662 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC	25
5663
5664 int
5665 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
5666     struct iwx_beacon_filter_cmd *cmd)
5667 {
5668 	return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
5669 	    0, sizeof(struct iwx_beacon_filter_cmd), cmd);
5670 }
5671
5672 int
5673 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
5674 {
5675 	struct iwx_beacon_filter_cmd cmd = {
5676 		IWX_BF_CMD_CONFIG_DEFAULTS,
5677 		.bf_enable_beacon_filter = htole32(1),
5678 		.ba_enable_beacon_abort = htole32(enable),
5679 	};
5680
5681 	if (!sc->sc_bf.bf_enabled)
5682 		return 0;
5683
5684 	sc->sc_bf.ba_enabled = enable;
5685 	return iwx_beacon_filter_send_cmd(sc, &cmd);
5686 }
5687
5688 void
5689 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
5690     struct iwx_mac_power_cmd *cmd)
5691 {
5692 	struct ieee80211com *ic = &sc->sc_ic;
5693 	struct ieee80211_node *ni = &in->in_ni;
5694 	int dtim_period, dtim_msec, keep_alive;
5695
5696 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5697 	    in->in_color));
5698 	if (ni->ni_dtimperiod)
5699 		dtim_period = ni->ni_dtimperiod;
5700 	else
5701 		dtim_period = 1;
5702
5703 	/*
5704 	 * Regardless of power management state the driver must set
5705 	 * keep alive period. FW will use it for sending keep alive NDPs
5706 	 * immediately after association. Check that keep alive period
5707 	 * is at least 3 * DTIM.
5708 	 */
5709 	dtim_msec = dtim_period * ni->ni_intval;
5710 	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
5711 	keep_alive = roundup(keep_alive, 1000) / 1000;
5712 	cmd->keep_alive_seconds = htole16(keep_alive);
5713
5714 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
5715 		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5716 }
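
Note (editor): the keep-alive computation above works in milliseconds and then rounds up to whole seconds, so the result is at least three DTIM periods and at least IWX_POWER_KEEP_ALIVE_PERIOD_SEC (25) seconds. A worked example with an assumed beacon interval of 100 and DTIM period 3:

    /* Worked example of the keep-alive arithmetic; the input values are
     * assumptions for illustration. */
    #include <stdio.h>

    #define MAX(a, b)       ((a) > (b) ? (a) : (b))
    #define roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))
    #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC 25

    int
    main(void)
    {
            int dtim_period = 3, ni_intval = 100;           /* assumed */
            int dtim_msec = dtim_period * ni_intval;        /* 300 */
            int keep_alive = MAX(3 * dtim_msec,
                1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);    /* max(900, 25000) */
            keep_alive = roundup(keep_alive, 1000) / 1000;  /* 25 seconds */
            printf("%d\n", keep_alive);
            return 0;
    }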
5717
5718 int
5719 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
5720 {
5721 	int err;
5722 	int ba_enable;
5723 	struct iwx_mac_power_cmd cmd;
5724
5725 	memset(&cmd, 0, sizeof(cmd));
5726
5727 	iwx_power_build_cmd(sc, in, &cmd);
5728
5729 	err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
5730 	    sizeof(cmd), &cmd);
5731 	if (err != 0)
5732 		return err;
5733
5734 	ba_enable = !!(cmd.flags &
5735 	    htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
5736 	return iwx_update_beacon_abort(sc, in, ba_enable);
5737 }
5738
5739 int
5740 iwx_power_update_device(struct iwx_softc *sc)
5741 {
5742 	struct iwx_device_power_cmd cmd = { };
5743 	struct ieee80211com *ic = &sc->sc_ic;
5744
5745 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
5746 		cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5747
5748 	return iwx_send_cmd_pdu(sc,
5749 	    IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
5750 }
5751
5752 int
5753 iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
5754 {
5755 	struct iwx_beacon_filter_cmd cmd = {
5756 		IWX_BF_CMD_CONFIG_DEFAULTS,
5757 		.bf_enable_beacon_filter = htole32(1),
5758 		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
5759 	};
5760 	int err;
5761
5762 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
5763 	if (err == 0)
5764 		sc->sc_bf.bf_enabled = 1;
5765
5766 	return err;
5767 }
5768
5769 int
5770 iwx_disable_beacon_filter(struct iwx_softc *sc)
5771 {
5772 	struct iwx_beacon_filter_cmd cmd;
5773 	int err;
5774
5775 	memset(&cmd, 0, sizeof(cmd));
5776
5777 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
5778 	if (err == 0)
5779 		sc->sc_bf.bf_enabled = 0;
5780
5781 	return err;
5782 }
5783
5784 int
5785 iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
5786 {
5787 	struct iwx_add_sta_cmd add_sta_cmd;
5788 	int err;
5789 	uint32_t status;
5790 	struct ieee80211com *ic = &sc->sc_ic;
5791
5792 	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
5793 		panic("STA already added");
5794
5795 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
5796
5797 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5798 		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
5799 		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
5800 	} else {
5801 		add_sta_cmd.sta_id = IWX_STATION_ID;
5802 		add_sta_cmd.station_type = IWX_STA_LINK;
5803 	}
5804 	add_sta_cmd.mac_id_n_color
5805 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
5806 	if (!update) {
5807 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
5808 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
5809 			    etheranyaddr);
5810 		else
5811 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
5812 			    in->in_macaddr);
5813 	}
5814 	add_sta_cmd.add_modify = update ? 1 : 0;
5815 	add_sta_cmd.station_flags_msk
5816 	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);
5817
5818 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
5819 		add_sta_cmd.station_flags_msk
5820 		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
5821 		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);
5822
5823 		if (iwx_mimo_enabled(sc)) {
5824 			if (in->in_ni.ni_rxmcs[1] != 0) {
5825 				add_sta_cmd.station_flags |=
5826 				    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
5827 			}
5828 			if (in->in_ni.ni_rxmcs[2] != 0) {
5829 				add_sta_cmd.station_flags |=
5830 				    htole32(IWX_STA_FLG_MIMO_EN_MIMO3);
5831 			}
5832 		}
5833
5834 		if (ieee80211_node_supports_ht_chan40(&in->in_ni)) {
5835 			add_sta_cmd.station_flags |= htole32(
5836 			    IWX_STA_FLG_FAT_EN_40MHZ);
5837 		}
5838
5839 		add_sta_cmd.station_flags
5840 		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_64K);
5841 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
5842 		case IEEE80211_AMPDU_PARAM_SS_2:
5843 			add_sta_cmd.station_flags
5844 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
5845 			break;
5846 		case IEEE80211_AMPDU_PARAM_SS_4:
5847 			add_sta_cmd.station_flags
5848 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
5849 			break;
5850 		case IEEE80211_AMPDU_PARAM_SS_8:
5851 			add_sta_cmd.station_flags
5852 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
5853 			break;
5854 		case IEEE80211_AMPDU_PARAM_SS_16:
5855 			add_sta_cmd.station_flags
5856 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
5857 			break;
5858 		default:
5859 			break;
5860 		}
5861 	}
5862
5863 	status = IWX_ADD_STA_SUCCESS;
5864 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
5865 	    &add_sta_cmd, &status);
5866 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
5867 		err = EIO;
5868
5869 	return err;
5870 }
5871
5872 int
5873 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
5874 {
5875 	struct ieee80211com *ic = &sc->sc_ic;
5876 	struct iwx_rm_sta_cmd rm_sta_cmd;
5877 	int err;
5878
5879 	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
5880 		panic("sta already removed");
5881
5882 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
5883 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
5884 		rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
5885 	else
5886 		rm_sta_cmd.sta_id = IWX_STATION_ID;
5887
5888 	err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
5889 	    &rm_sta_cmd);
5890
5891 	return err;
5892 }
5893
5894 int
5895 iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
5896 {
5897 	struct ieee80211com *ic = &sc->sc_ic;
5898 	struct ieee80211_node *ni = &in->in_ni;
5899 	int err, i;
5900
5901 	err = iwx_flush_sta(sc, in);
5902 	if (err) {
5903 		printf("%s: could not flush Tx path (error %d)\n",
5904 		    DEVNAME(sc), err);
5905 		return err;
5906 	}
5907 	err = iwx_rm_sta_cmd(sc, in);
5908 	if (err) {
5909 		printf("%s: could not remove STA (error %d)\n",
5910 		    DEVNAME(sc), err);
5911 		return err;
5912 	}
5913
5914 	in->in_flags = 0;
5915
5916 	sc->sc_rx_ba_sessions = 0;
5917 	sc->ba_rx.start_tidmask = 0;
5918 	sc->ba_rx.stop_tidmask = 0;
5919 	memset(sc->aggqid, 0, sizeof(sc->aggqid));
5920 	sc->ba_tx.start_tidmask = 0;
5921 	sc->ba_tx.stop_tidmask = 0;
5922 	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
5923 		sc->qenablemsk &= ~(1 << i);
5924 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
5925 		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
5926 		if (ba->ba_state != IEEE80211_BA_AGREED)
5927 			continue;
5928 		ieee80211_delba_request(ic, ni, 0, 1, i);
5929 	}
5930
5931 	return 0;
5932 }
5933
5934 uint8_t
5935 iwx_umac_scan_fill_channels(struct iwx_softc *sc,
5936     struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
5937     int n_ssids, int bgscan)
5938 {
5939 	struct ieee80211com *ic = &sc->sc_ic;
5940 	struct ieee80211_channel *c;
5941 	uint8_t nchan;
5942
5943 	for (nchan = 0, c = &ic->ic_channels[1];
5944 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5945 	    nchan < chan_nitems &&
5946 	    nchan < sc->sc_capa_n_scan_channels;
5947 	    c++) {
5948 		uint8_t channel_num;
5949
5950 		if (c->ic_flags == 0)
5951 			continue;
5952
5953 		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
5954 		if (isset(sc->sc_ucode_api,
5955 		    IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
5956 			chan->v2.channel_num = channel_num;
5957 			if (IEEE80211_IS_CHAN_2GHZ(c))
5958 				chan->v2.band = IWX_PHY_BAND_24;
5959 			else
5960 				chan->v2.band = IWX_PHY_BAND_5;
5961 			chan->v2.iter_count = 1;
5962 			chan->v2.iter_interval = 0;
5963 		} else {
5964 			chan->v1.channel_num = channel_num;
5965 			chan->v1.iter_count = 1;
5966 			chan->v1.iter_interval = htole16(0);
5967 		}
5968 		/*
5969 		 * Firmware may become unresponsive when asked to send
5970 		 * a directed probe request on a passive channel.
5971 		 */
5972 #if 0 /* Some people see "device timeout" after active scans. */
5973 		if (n_ssids != 0 && !bgscan &&
5974 		    (c->ic_flags & IEEE80211_CHAN_PASSIVE) == 0)
5975 			chan->flags = htole32(1 << 0); /* select SSID 0 */
5976 #endif
5977 		chan++;
5978 		nchan++;
5979 	}
5980
5981 	return nchan;
5982 }
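
Note (editor): iwx_umac_scan_fill_channels() emits one channel entry per configured channel and picks the entry layout by firmware capability: the v2 layout carries an explicit band field, the older v1 layout does not. A reduced sketch of that version switch; the struct layouts here are simplified placeholders, not the firmware's real ones:

    /* Reduced sketch of the v1/v2 channel-config choice; the structs are
     * simplified placeholders for illustration. */
    #include <stdint.h>

    struct chan_v1 { uint8_t channel_num, iter_count; uint16_t iter_interval; };
    struct chan_v2 { uint8_t channel_num, band, iter_count; uint16_t iter_interval; };

    void
    fill_chan(void *cfg, int have_ext_chan_ver, uint8_t num, int is_2ghz)
    {
            if (have_ext_chan_ver) {
                    struct chan_v2 *v2 = cfg;
                    v2->channel_num = num;
                    v2->band = is_2ghz ? 1 : 0; /* 2.4 GHz vs 5 GHz band */
                    v2->iter_count = 1;
                    v2->iter_interval = 0;
            } else {
                    struct chan_v1 *v1 = cfg;   /* v1 has no band field */
                    v1->channel_num = num;
                    v1->iter_count = 1;
                    v1->iter_interval = 0;
            }
    }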
5983
5984 int
5985 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
5986 {
5987 	struct ieee80211com *ic = &sc->sc_ic;
5988 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
5989 	struct ieee80211_rateset *rs;
5990 	size_t remain = sizeof(preq->buf);
5991 	uint8_t *frm, *pos;
5992
5993 	memset(preq, 0, sizeof(*preq));
5994
5995 	if (remain < sizeof(*wh) + 2)
5996 		return ENOBUFS;
5997
5998 	/*
5999 	 * Build a probe request frame. Most of the following code is a
6000 	 * copy & paste of what is done in net80211.
6001 	 */
6002 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6003 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6004 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6005 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
6006 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
6007 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
6008 	*(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
6009 	*(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
6010
6011 	frm = (uint8_t *)(wh + 1);
6012 	*frm++ = IEEE80211_ELEMID_SSID;
6013 	*frm++ = 0;
6014 	/* hardware inserts SSID */
6015
6016 	/* Tell the firmware where the MAC header is. */
6017 	preq->mac_header.offset = 0;
6018 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
6019 	remain -= frm - (uint8_t *)wh;
6020
6021 	/* Fill in 2GHz IEs and tell firmware where they are. */
6022 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6023 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6024 		if (remain < 4 + rs->rs_nrates)
6025 			return ENOBUFS;
6026 	} else if (remain < 2 + rs->rs_nrates)
6027 		return ENOBUFS;
6028 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
6029 	pos = frm;
6030 	frm = ieee80211_add_rates(frm, rs);
6031 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6032 		frm = ieee80211_add_xrates(frm, rs);
6033 	remain -= frm - pos;
6034
6035 	if (isset(sc->sc_enabled_capa,
6036 	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
6037 		if (remain < 3)
6038 			return ENOBUFS;
6039 		*frm++ = IEEE80211_ELEMID_DSPARMS;
6040 		*frm++ = 1;
6041 		*frm++ = 0;
6042 		remain -= 3;
6043 	}
6044 	preq->band_data[0].len = htole16(frm - pos);
6045
6046 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
6047 		/* Fill in 5GHz IEs. */
6048 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6049 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6050 			if (remain < 4 + rs->rs_nrates)
6051 				return ENOBUFS;
6052 		} else if (remain < 2 + rs->rs_nrates)
6053 			return ENOBUFS;
6054 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
6055 		pos = frm;
6056 		frm = ieee80211_add_rates(frm, rs);
6057 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6058 			frm = ieee80211_add_xrates(frm, rs);
6059 		preq->band_data[1].len = htole16(frm - pos);
6060 		remain -= frm - pos;
6061 	}
6062
6063 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
6064 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
6065 	pos = frm;
6066 	if (ic->ic_flags & IEEE80211_F_HTON) {
6067 		if (remain < 28)
6068 			return ENOBUFS;
6069 		frm = ieee80211_add_htcaps(frm, ic);
6070 		/* XXX add WME info? */
6071 	}
6072 	preq->common_data.len = htole16(frm - pos);
6073
6074 	return 0;
6075 }
6076
6077 int
6078 iwx_config_umac_scan_reduced(struct iwx_softc *sc)
6079 {
6080 	struct iwx_scan_config scan_cfg;
6081 	struct iwx_host_cmd hcmd = {
6082 		.id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
6083 		.len[0] = sizeof(scan_cfg),
6084 		.data[0] = &scan_cfg,
6085 		.flags = 0,
6086 	};
6087 	int cmdver;
6088
6089 	if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
6090 		printf("%s: firmware does not support reduced scan config\n",
6091 		    DEVNAME(sc));
6092 		return ENOTSUP;
6093 	}
6094
6095 	memset(&scan_cfg, 0, sizeof(scan_cfg));
6096
6097 	/*
6098 	 * SCAN_CFG version >= 5 implies that the broadcast
6099 	 * STA ID field is deprecated.
6100 	 */
6101 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
6102 	if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
6103 		scan_cfg.bcast_sta_id = 0xff;
6104
6105 	scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
6106 	scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
6107
6108 	return iwx_send_cmd(sc, &hcmd);
6109 }
6110
6111 uint16_t
6112 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
6113 {
6114 	struct ieee80211com *ic = &sc->sc_ic;
6115 	uint16_t flags = 0;
6116
6117 	if (ic->ic_des_esslen == 0)
6118 		flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
6119
6120 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
6121 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
6122 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
6123
6124 	return flags;
6125 }
6126
6127 #define IWX_SCAN_DWELL_ACTIVE		10
6128 #define IWX_SCAN_DWELL_PASSIVE		110
6129
6130 /* adaptive dwell max budget time [TU] for full scan */
6131 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN	300
6132 /* adaptive dwell max budget time [TU] for directed scan */
6133 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN	100
6134 /* adaptive dwell default high band APs number */
6135 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS	8
6136 /* adaptive dwell default low band APs number */
6137 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS	2
6138 /* adaptive dwell default APs number in social channels (1, 6, 11) */
6139 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL	10
6140 /* adaptive dwell number of APs override for p2p friendly GO channels */
6141 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY	10
6142 /* adaptive dwell number of APs override for social channels */
6143 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS	2
6144
6145 void
6146 iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
6147     struct iwx_scan_general_params_v10 *general_params, int bgscan)
6148 {
6149 	uint32_t suspend_time, max_out_time;
6150 	uint8_t active_dwell, passive_dwell;
6151
6152 	active_dwell = IWX_SCAN_DWELL_ACTIVE;
6153 	passive_dwell = IWX_SCAN_DWELL_PASSIVE;
6154
6155 	general_params->adwell_default_social_chn =
6156 	    IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
6157 	general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
6158 	general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
6159
6160 	if (bgscan)
6161 		general_params->adwell_max_budget =
6162 		    htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
6163 	else
6164 		general_params->adwell_max_budget =
6165 		    htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
6166
6167 	general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6168 	if (bgscan) {
6169 		max_out_time = htole32(120);
6170 		suspend_time = htole32(120);
6171 	} else {
6172 		max_out_time = htole32(0);
6173 		suspend_time = htole32(0);
6174 	}
6175 	general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
6176 	    htole32(max_out_time);
6177 	general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
6178 	    htole32(suspend_time);
6179 	general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
6180 	    htole32(max_out_time);
6181 	general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
6182 	    htole32(suspend_time);
6183
6184 	general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
6185 	general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
6186 	general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
6187 	general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
6188 }
6189
6190 void
6191 iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
6192     struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
6193 {
6194 	iwx_scan_umac_dwell_v10(sc, gp, bgscan);
6195
6196 	gp->flags = htole16(gen_flags);
6197
6198 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
6199 		gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
6200 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
6201 		gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;
6202
6203 	gp->scan_start_mac_id = 0;
6204 }
6205
6206 void
6207 iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
6208     struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
6209     int n_ssid, int bgscan)
6210 {
6211 	cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
6212
6213 	cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
6214 	    nitems(cp->channel_config), n_ssid, bgscan);
6215
6216 	cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
6217 	cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
6218 }
6219
6220 int
6221 iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
6222 {
6223 #if 0 /* Some people see "device timeout" after active scans. */
6224 	struct ieee80211com *ic = &sc->sc_ic;
6225 #endif
6226 	struct iwx_host_cmd hcmd = {
6227 		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
6228 		.len = { 0, },
6229 		.data = { NULL, },
6230 		.flags = 0,
6231 	};
6232 	struct iwx_scan_req_umac_v14 *cmd;
6233 	struct iwx_scan_req_params_v14 *scan_p;
6234 	int err, async = bgscan, n_ssid = 0;
6235 	uint16_t gen_flags;
6236 	uint32_t bitmap_ssid = 0;
6237
6238 	cmd = malloc(sizeof(*cmd), M_DEVBUF,
6239 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
6240 	if (cmd == NULL)
6241 		return ENOMEM;
6242
6243 	scan_p = &cmd->scan_params;
6244
6245 	cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6246 	cmd->uid = htole32(0);
6247
6248 	gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
6249 	iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
6250 	    gen_flags, bgscan);
6251
6252 	scan_p->periodic_params.schedule[0].interval = htole16(0);
6253 	scan_p->periodic_params.schedule[0].iter_count = 1;
6254
6255 	err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
6256 	if (err) {
6257 		free(cmd, M_DEVBUF, sizeof(*cmd));
6258 		return err;
6259 	}
6260
6261 #if 0 /* Some people see "device timeout" after active scans. */
6262 	if (ic->ic_des_esslen != 0) {
6263 		scan_p->probe_params.direct_scan[0].id = IEEE80211_ELEMID_SSID;
6264 		scan_p->probe_params.direct_scan[0].len = ic->ic_des_esslen;
6265 		memcpy(scan_p->probe_params.direct_scan[0].ssid,
6266 		    ic->ic_des_essid, ic->ic_des_esslen);
6267 		bitmap_ssid |= (1 << 0);
6268 		n_ssid = 1;
6269 	}
6270 #endif
6271
6272 	iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
6273 	    n_ssid, bgscan);
6274
6275 	hcmd.len[0] = sizeof(*cmd);
6276 	hcmd.data[0] = (void *)cmd;
6277 	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;
6278
6279 	err = iwx_send_cmd(sc, &hcmd);
6280 	free(cmd, M_DEVBUF, sizeof(*cmd));
6281 	return err;
6282 }
6283
6284 void
6285 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
6286 {
6287 	struct ieee80211com *ic = &sc->sc_ic;
6288 	struct ifnet *ifp = IC2IFP(ic);
6289 	char alpha2[3];
6290
6291 	snprintf(alpha2, sizeof(alpha2), "%c%c",
6292 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
6293
6294 	if (ifp->if_flags & IFF_DEBUG) {
6295 		printf("%s: firmware has detected regulatory domain '%s' "
6296 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
6297 	}
6298
6299 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
6300 }
6301
6302 uint8_t
6303 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
6304 {
6305 	int i;
6306 	uint8_t rval;
6307
6308 	for (i = 0; i < rs->rs_nrates; i++) {
6309 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
6310 		if (rval == iwx_rates[ridx].rate)
6311 			return rs->rs_rates[i];
6312 	}
6313
6314 	return 0;
6315 }
6316
6317 int
6318 iwx_rval2ridx(int rval)
6319 {
6320 	int ridx;
6321
6322 	for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
6323 		if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
6324 			continue;
6325 		if (rval == iwx_rates[ridx].rate)
6326 			break;
6327 	}
6328
6329 	return ridx;
6330 }
6331
6332 void
6333 iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
6334     int *ofdm_rates)
6335 {
6336 	struct ieee80211_node *ni = &in->in_ni;
6337 	struct ieee80211_rateset *rs = &ni->ni_rates;
6338 	int lowest_present_ofdm = -1;
6339 	int lowest_present_cck = -1;
6340 	uint8_t cck = 0;
6341 	uint8_t ofdm = 0;
6342 	int i;
6343
6344 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
6345 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
6346 		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
6347 			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
6348 				continue;
6349 			cck |= (1 << i);
6350 			if (lowest_present_cck == -1 || lowest_present_cck > i)
6351 				lowest_present_cck = i;
6352 		}
6353 	}
6354 	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
6355 		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
6356 			continue;
6357 		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
6358 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
6359 			lowest_present_ofdm = i;
6360 	}
6361
6362 	/*
6363 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
6364 	 * variables. This isn't sufficient though, as there might not
6365 	 * be all the right rates in the bitmap. E.g. if the only basic
6366 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
6367 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
6368 	 *
6369 	 * [...] a STA responding to a received frame shall transmit
6370 	 * its Control Response frame [...] at the highest rate in the
6371 	 * BSSBasicRateSet parameter that is less than or equal to the
6372 	 * rate of the immediately previous frame in the frame exchange
6373 	 * sequence ([...]) and that is of the same modulation class
6374 	 * ([...]) as the received frame. If no rate contained in the
6375 	 * BSSBasicRateSet parameter meets these conditions, then the
6376 	 * control frame sent in response to a received frame shall be
6377 	 * transmitted at the highest mandatory rate of the PHY that is
6378 	 * less than or equal to the rate of the received frame, and
6379 	 * that is of the same modulation class as the received frame.
6380 	 *
6381 	 * As a consequence, we need to add all mandatory rates that are
6382 	 * lower than all of the basic rates to these bitmaps.
6383 	 */
6384
6385 	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
6386 		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
6387 	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
6388 		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
6389 	/* 6M already there or needed so always add */
6390 	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;
6391
6392 	/*
6393 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
6394 	 * Note, however:
6395 	 *  - if no CCK rates are basic, it must be ERP since there must
6396 	 *    be some basic rates at all, so they're OFDM => ERP PHY
6397 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
6398 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
6399 	 *  - if 5.5M is basic, 1M and 2M are mandatory
6400 	 *  - if 2M is basic, 1M is mandatory
6401 	 *  - if 1M is basic, that's the only valid ACK rate.
6402 	 * As a consequence, it's not as complicated as it sounds, just add
6403 	 * any lower rates to the ACK rate bitmap.
6404 	 */
6405 	if (IWX_RATE_11M_INDEX < lowest_present_cck)
6406 		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
6407 	if (IWX_RATE_5M_INDEX < lowest_present_cck)
6408 		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
6409 	if (IWX_RATE_2M_INDEX < lowest_present_cck)
6410 		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
6411 	/* 1M already there or needed so always add */
6412 	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;
6413
6414 	*cck_rates = cck;
6415 	*ofdm_rates = ofdm;
6416 }
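
Note (editor): to make the mandatory-rate fixup above concrete, suppose the BSS advertises only 5.5 and 11 Mbps as basic rates. The first loop then yields cck = 0b1100 with lowest_present_cck = 2, and the fixup adds the mandatory 2 Mbps and 1 Mbps rates below it, giving 0b1111. A standalone sketch, assuming CCK indices 0..3 stand for 1/2/5.5/11 Mbps as in the iwx_rates table:

    /* Worked example of the CCK ACK-rate fixup; the index layout
     * (0=1M, 1=2M, 2=5.5M, 3=11M) is an assumption for illustration. */
    #include <stdio.h>

    int
    main(void)
    {
            int lowest_present_cck = 2;             /* 5.5M is lowest basic */
            unsigned cck = (1 << 2) | (1 << 3);     /* basic: 5.5M and 11M */

            if (3 < lowest_present_cck)             /* 11M below lowest? no */
                    cck |= 1 << 3;
            if (2 < lowest_present_cck)             /* 5.5M below lowest? no */
                    cck |= 1 << 2;
            if (1 < lowest_present_cck)             /* 2M below lowest? yes */
                    cck |= 1 << 1;
            cck |= 1 << 0;                          /* 1M is always added */

            printf("0x%x\n", cck);                  /* prints 0xf */
            return 0;
    }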
6417
6418 void
6419 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
6420     struct iwx_mac_ctx_cmd *cmd, uint32_t action)
6421 {
6422 #define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6423 	struct ieee80211com *ic = &sc->sc_ic;
6424 	struct ieee80211_node *ni = ic->ic_bss;
6425 	int cck_ack_rates, ofdm_ack_rates;
6426 	int i;
6427
6428 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6429 	    in->in_color));
6430 	cmd->action = htole32(action);
6431
6432 	if (action == IWX_FW_CTXT_ACTION_REMOVE)
6433 		return;
6434
6435 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6436 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
6437 	else if (ic->ic_opmode == IEEE80211_M_STA)
6438 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
6439 	else
6440 		panic("unsupported operating mode %d", ic->ic_opmode);
6441 	cmd->tsf_id = htole32(IWX_TSF_ID_A);
6442
6443 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
6444 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6445 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
6446 		return;
6447 	}
6448
6449 	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
6450 	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
6451 	cmd->cck_rates = htole32(cck_ack_rates);
6452 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
6453
6454 	cmd->cck_short_preamble
6455 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6456 	      ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
6457 	cmd->short_slot
6458 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
6459 	      ? IWX_MAC_FLG_SHORT_SLOT : 0);
6460
6461 	for (i = 0; i < EDCA_NUM_AC; i++) {
6462 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
6463 		int txf = iwx_ac_to_tx_fifo[i];
6464
6465 		cmd->ac[txf].cw_min = htole16(IWX_EXP2(ac->ac_ecwmin));
6466 		cmd->ac[txf].cw_max = htole16(IWX_EXP2(ac->ac_ecwmax));
6467 		cmd->ac[txf].aifsn = ac->ac_aifsn;
6468 		cmd->ac[txf].fifos_mask = (1 << txf);
6469 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
6470 	}
6471 	if (ni->ni_flags & IEEE80211_NODE_QOS)
6472 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
6473
6474 	if (ni->ni_flags & IEEE80211_NODE_HT) {
6475 		enum ieee80211_htprot htprot =
6476 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
6477 		switch (htprot) {
6478 		case IEEE80211_HTPROT_NONE:
6479 			break;
6480 		case IEEE80211_HTPROT_NONMEMBER:
6481 		case IEEE80211_HTPROT_NONHT_MIXED:
6482 			cmd->protection_flags |=
6483 			    htole32(IWX_MAC_PROT_FLG_HT_PROT |
6484 			    IWX_MAC_PROT_FLG_FAT_PROT);
6485 			break;
6486 		case IEEE80211_HTPROT_20MHZ:
6487 			if (in->in_phyctxt &&
6488 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
6489 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
6490 				cmd->protection_flags |=
6491 				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
6492 				    IWX_MAC_PROT_FLG_FAT_PROT);
6493 			}
6494 			break;
6495 		default:
6496 			break;
6497 		}
6498
6499 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
6500 	}
6501 	if (ic->ic_flags & IEEE80211_F_USEPROT)
6502 		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
6503
6504 	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
6505 #undef IWX_EXP2
6506 }
6507
6508 void
6509 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
6510     struct iwx_mac_data_sta *sta, int assoc)
6511 {
6512 	struct ieee80211_node *ni = &in->in_ni;
6513 	uint32_t dtim_off;
6514 	uint64_t tsf;
6515
6516 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
6517 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
6518 	tsf = letoh64(tsf);
6519
6520 	sta->is_assoc = htole32(assoc);
6521 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
6522 	sta->dtim_tsf = htole64(tsf + dtim_off);
6523 	sta->bi = htole32(ni->ni_intval);
6524 	sta->bi_reciprocal = htole32(iwx_reciprocal(ni->ni_intval));
6525 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
6526 	sta->dtim_reciprocal = htole32(iwx_reciprocal(sta->dtim_interval));
6527 	sta->listen_interval = htole32(10);
6528 	sta->assoc_id = htole32(ni->ni_associd);
6529 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
6530 }
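
Note (editor): the DTIM offset above is in microseconds: ni_dtimcount remaining beacon intervals of ni_intval TU each, at IEEE80211_DUR_TU (1024) microseconds per TU; the same offset is added to both the device timestamp (dtim_time) and the TSF (dtim_tsf). A worked example with assumed values:

    /* Worked example of the DTIM offset arithmetic; values are assumed. */
    #include <stdint.h>
    #include <stdio.h>

    #define IEEE80211_DUR_TU 1024   /* microseconds per time unit */

    int
    main(void)
    {
            uint32_t ni_dtimcount = 2, ni_intval = 100;     /* assumed */
            uint32_t dtim_off = ni_dtimcount * ni_intval * IEEE80211_DUR_TU;
            printf("%u\n", dtim_off);       /* 204800 microseconds */
            return 0;
    }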
6531
6532 int
6533 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
6534     int assoc)
6535 {
6536 	struct ieee80211com *ic = &sc->sc_ic;
6537 	struct ieee80211_node *ni = &in->in_ni;
6538 	struct iwx_mac_ctx_cmd cmd;
6539 	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
6540
6541 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
6542 		panic("MAC already added");
6543 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
6544 		panic("MAC already removed");
6545
6546 	memset(&cmd, 0, sizeof(cmd));
6547
6548 	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
6549
6550 	if (action == IWX_FW_CTXT_ACTION_REMOVE) {
6551 		return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
6552 		    sizeof(cmd), &cmd);
6553 	}
6554
6555 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6556 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
6557 		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
6558 		    IWX_MAC_FILTER_ACCEPT_GRP |
6559 		    IWX_MAC_FILTER_IN_BEACON |
6560 		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
6561 		    IWX_MAC_FILTER_IN_CRC32);
6562 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
6563 		/*
6564 		 * Allow beacons to pass through as long as we are not
6565 		 * associated or we do not have dtim period information.
6566 		 */
6567 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
6568 	else
6569 		iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
6570
6571 	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6572 }
6573
6574 int
6575 iwx_clear_statistics(struct iwx_softc *sc)
6576 {
6577 	struct iwx_statistics_cmd scmd = {
6578 		.flags = htole32(IWX_STATISTICS_FLG_CLEAR)
6579 	};
6580 	struct iwx_host_cmd cmd = {
6581 		.id = IWX_STATISTICS_CMD,
6582 		.len[0] = sizeof(scmd),
6583 		.data[0] = &scmd,
6584 		.flags = IWX_CMD_WANT_RESP,
6585 		.resp_pkt_len = sizeof(struct iwx_notif_statistics),
6586 	};
6587 	int err;
6588
6589 	err = iwx_send_cmd(sc, &cmd);
6590 	if (err)
6591 		return err;
6592
6593 	iwx_free_resp(sc, &cmd);
6594 	return 0;
6595 }
6596
6597 void
6598 iwx_add_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
6599 {
6600 	int s = splnet();
6601
6602 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
6603 		splx(s);
6604 		return;
6605 	}
6606
6607 	refcnt_take(&sc->task_refs);
6608 	if (!task_add(taskq, task))
6609 		refcnt_rele_wake(&sc->task_refs);
6610 	splx(s);
6611 }
6612
6613 void
6614 iwx_del_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
6615 {
6616 	if (task_del(taskq, task))
6617 		refcnt_rele(&sc->task_refs);
6618 }
6619
6620 int
6621 iwx_scan(struct iwx_softc *sc)
6622 {
6623 	struct ieee80211com *ic = &sc->sc_ic;
6624 	struct ifnet *ifp = IC2IFP(ic);
6625 	int err;
6626
6627 	if (sc->sc_flags & IWX_FLAG_BGSCAN) {
6628 		err = iwx_scan_abort(sc);
6629 		if (err) {
6630 			printf("%s: could not abort background scan\n",
6631 			    DEVNAME(sc));
6632 			return err;
6633 		}
6634 	}
6635
6636 	err = iwx_umac_scan_v14(sc, 0);
6637 	if (err) {
6638 		printf("%s: could not initiate scan\n", DEVNAME(sc));
6639 		return err;
6640 	}
6641
6642 	/*
6643 	 * The current mode might have been fixed during association.
6644 	 * Ensure all channels get scanned.
6645 	 */
6646 	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
6647 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
6648
6649 	sc->sc_flags |= IWX_FLAG_SCANNING;
6650 	if (ifp->if_flags & IFF_DEBUG)
6651 		printf("%s: %s -> %s\n", ifp->if_xname,
6652 		    ieee80211_state_name[ic->ic_state],
6653 		    ieee80211_state_name[IEEE80211_S_SCAN]);
6654 	if ((sc->sc_flags & IWX_FLAG_BGSCAN) == 0) {
6655 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
6656 		ieee80211_node_cleanup(ic, ic->ic_bss);
6657 	}
6658 	ic->ic_state = IEEE80211_S_SCAN;
6659 	wakeup(&ic->ic_state); /* wake iwx_init() */
6660
6661 	return 0;
6662 }
6663
6664 int
6665 iwx_bgscan(struct ieee80211com *ic)
6666 {
6667 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
6668 	int err;
6669
6670 	if (sc->sc_flags & IWX_FLAG_SCANNING)
6671 		return 0;
6672
6673 	err = iwx_umac_scan_v14(sc, 1);
6674 	if (err) {
6675 		printf("%s: could not initiate scan\n", DEVNAME(sc));
6676 		return err;
6677 	}
6678
6679 	sc->sc_flags |= IWX_FLAG_BGSCAN;
6680 	return 0;
6681 }
6682
6683 void
6684 iwx_bgscan_done(struct ieee80211com *ic,
6685     struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
6686 {
6687 	struct iwx_softc *sc = ic->ic_softc;
6688
6689 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
6690 	sc->bgscan_unref_arg = arg;
6691 	sc->bgscan_unref_arg_size = arg_size;
6692 	iwx_add_task(sc, sc->sc_nswq, &sc->bgscan_done_task);
6693 }
6694
6695 void
6696 iwx_bgscan_done_task(void *arg)
6697 {
6698 	struct iwx_softc *sc = arg;
6699 	struct ieee80211com *ic = &sc->sc_ic;
6700 	struct iwx_node *in = (void *)ic->ic_bss;
6701 	struct ieee80211_node *ni = &in->in_ni;
6702 	int tid, err = 0, s = splnet();
6703
6704 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
6705 	    (ic->ic_flags & IEEE80211_F_BGSCAN) == 0 ||
6706 	    ic->ic_state != IEEE80211_S_RUN) {
6707 		err = ENXIO;
6708 		goto done;
6709 	}
6710
6711 	err = iwx_flush_sta(sc, in);
6712 	if (err)
6713 		goto done;
6714
6715 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
6716 		int qid = IWX_FIRST_AGG_TX_QUEUE + tid;
6717
6718 		if (sc->aggqid[tid] == 0)
6719 			continue;
6720
6721 		err = iwx_disable_txq(sc, IWX_STATION_ID, qid, tid);
6722 		if (err)
6723 			goto done;
6724 #if 0 /* disabled for now; we are going to DEAUTH soon anyway */
6725 		IEEE80211_SEND_ACTION(ic, ni, IEEE80211_CATEG_BA,
6726 		    IEEE80211_ACTION_DELBA,
6727 		    IEEE80211_REASON_AUTH_LEAVE << 16 |
6728 		    IEEE80211_FC1_DIR_TODS << 8 | tid);
6729 #endif
6730 		ieee80211_node_tx_ba_clear(ni, tid);
6731 		sc->aggqid[tid] = 0;
6732 	}
6733
6734 	/*
6735 	 * Tx queues have been flushed and Tx agg has been stopped.
6736 	 * Allow roaming to proceed.
6737 	 */
6738 	ni->ni_unref_arg = sc->bgscan_unref_arg;
6739 	ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
6740 	sc->bgscan_unref_arg = NULL;
6741 	sc->bgscan_unref_arg_size = 0;
6742 	ieee80211_node_tx_stopped(ic, &in->in_ni);
6743 done:
6744 	if (err) {
6745 		free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
6746 		sc->bgscan_unref_arg = NULL;
6747 		sc->bgscan_unref_arg_size = 0;
6748 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
6749 			task_add(systq, &sc->init_task);
6750 	}
6751 	refcnt_rele_wake(&sc->task_refs);
6752 	splx(s);
6753 }
6754
6755 int
6756 iwx_umac_scan_abort(struct iwx_softc *sc)
6757 {
6758 	struct iwx_umac_scan_abort cmd = { 0 };
6759
6760 	return iwx_send_cmd_pdu(sc,
6761 	    IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC),
6762 	    0, sizeof(cmd), &cmd);
6763 }
6764
6765 int
6766 iwx_scan_abort(struct iwx_softc *sc)
6767 {
6768 	int err;
6769
6770 	err = iwx_umac_scan_abort(sc);
6771 	if (err == 0)
6772 		sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
6773 	return err;
6774 }
6775
6776 int
6777 iwx_enable_mgmt_queue(struct iwx_softc *sc)
6778 {
6779 	int err;
6780
6781 	sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
6782
6783 	/*
6784 	 * Non-QoS frames use the "MGMT" TID and queue.
6785 	 * Other TIDs and data queues are reserved for QoS data frames.
6786 	 */
6787 	err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
6788 	    IWX_MGMT_TID, IWX_TX_RING_COUNT);
6789 	if (err) {
6790 		printf("%s: could not enable Tx queue %d (error %d)\n",
6791 		    DEVNAME(sc), sc->first_data_qid, err);
6792 		return err;
6793 	}
6794
6795 	return 0;
6796 }
6797
6798 int
6799 iwx_rs_rval2idx(uint8_t rval)
6800 {
6801 	/* Firmware expects indices which match our 11g rate set. */
6802 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
6803 	int i;
6804
6805 	for (i = 0; i < rs->rs_nrates; i++) {
6806 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
6807 			return i;
6808 	}
6809
6810 	return -1;
6811 }
6812
6813 uint16_t
6814 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
6815 {
6816 	struct ieee80211com *ic = &sc->sc_ic;
6817 	const struct ieee80211_ht_rateset *rs;
6818 	uint16_t htrates = 0;
6819 	int mcs;
6820
6821 	rs = &ieee80211_std_ratesets_11n[rsidx];
6822 	for (mcs = rs->min_mcs; mcs <= rs->max_mcs; mcs++) {
6823 		if (!isset(ni->ni_rxmcs, mcs) ||
6824 		    !isset(ic->ic_sup_mcs, mcs))
6825 			continue;
6826 		htrates |= (1 << (mcs - rs->min_mcs));
6827 	}
6828
6829 	return htrates;
6830 }
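
Note (editor): iwx_rs_ht_rates() returns an MCS bitmap relative to the rateset's lowest MCS, so the firmware sees bit 0 as min_mcs. For the MIMO2 rateset (MCS 8..15), a peer supporting MCS 8..12 yields 0x1f. A standalone sketch under those assumed example values:

    /* Sketch of the relative MCS bitmap; min/max and the supported set
     * are assumed example values for the MIMO2 rateset (MCS 8..15). */
    #include <stdio.h>

    int
    main(void)
    {
            int min_mcs = 8, max_mcs = 15;
            unsigned supported = 0x1f00;    /* peer supports MCS 8..12 */
            unsigned htrates = 0;
            int mcs;

            for (mcs = min_mcs; mcs <= max_mcs; mcs++) {
                    if ((supported & (1u << mcs)) == 0)
                            continue;
                    htrates |= 1u << (mcs - min_mcs);
            }
            printf("0x%x\n", htrates);      /* prints 0x1f */
            return 0;
    }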
6831
6832int
6833iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
6834{
6835 struct ieee80211_node *ni = &in->in_ni;
6836 struct ieee80211_rateset *rs = &ni->ni_rates;
6837 struct iwx_tlc_config_cmd cfg_cmd;
6838 uint32_t cmd_id;
6839 int i;
6840 size_t cmd_size = sizeof(cfg_cmd);
6841
6842 memset(&cfg_cmd, 0, sizeof(cfg_cmd))__builtin_memset((&cfg_cmd), (0), (sizeof(cfg_cmd)));
6843
6844 for (i = 0; i < rs->rs_nrates; i++) {
6845 uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL0x7f;
6846 int idx = iwx_rs_rval2idx(rval);
6847 if (idx == -1)
6848 return EINVAL22;
6849 cfg_cmd.non_ht_rates |= (1 << idx);
6850 }
6851
6852 if (ni->ni_flags & IEEE80211_NODE_HT0x0400) {
6853 cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
6854 cfg_cmd.ht_rates[IWX_TLC_NSS_10][IWX_TLC_HT_BW_NONE_1600] =
6855 iwx_rs_ht_rates(sc, ni, IEEE80211_HT_RATESET_SISO0);
6856 cfg_cmd.ht_rates[IWX_TLC_NSS_21][IWX_TLC_HT_BW_NONE_1600] =
6857 iwx_rs_ht_rates(sc, ni, IEEE80211_HT_RATESET_MIMO22);
6858 } else
6859 cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
6860
6861 cfg_cmd.sta_id = IWX_STATION_ID0;
6862 if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA1 ||
6863 in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB3)
6864 cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
6865 else
6866 cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
6867 cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK(1 << 0) | IWX_TLC_MNG_CHAIN_B_MSK(1 << 1);
6868 cfg_cmd.max_mpdu_len = 3839;
6869 if (ieee80211_node_supports_ht_sgi20(ni))
6870 cfg_cmd.sgi_ch_width_supp = (1 << IWX_TLC_MNG_CH_WIDTH_20MHZ);
6871 if (ieee80211_node_supports_ht_sgi40(ni))
6872 cfg_cmd.sgi_ch_width_supp = (1 << IWX_TLC_MNG_CH_WIDTH_40MHZ);
6873
6874 cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD0x0f, IWX_DATA_PATH_GROUP0x5, 0);
6875 return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
6876}
6877
6878void
6879iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
6880{
6881 struct ieee80211com *ic = &sc->sc_ic;
6882 struct ieee80211_node *ni = ic->ic_bss;
6883 struct ieee80211_rateset *rs = &ni->ni_rates;
6884 uint32_t rate_n_flags;
6885 int i;
6886
6887 if (notif->sta_id != IWX_STATION_ID0 ||
6888 (le32toh(notif->flags)((__uint32_t)(notif->flags)) & IWX_TLC_NOTIF_FLAG_RATE(1 << 0)) == 0)
6889 return;
6890
6891 rate_n_flags = le32toh(notif->rate)((__uint32_t)(notif->rate));
6892 if (rate_n_flags & IWX_RATE_MCS_HT_MSK(1 << 8)) {
6893 ni->ni_txmcs = (rate_n_flags &
6894 (IWX_RATE_HT_MCS_RATE_CODE_MSK0x7 |
6895 IWX_RATE_HT_MCS_NSS_MSK(3 << 3)));
6896 } else {
6897 uint8_t plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK0xff);
6898 uint8_t rval = 0;
6899 for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates)(sizeof((iwx_rates)) / sizeof((iwx_rates)[0])); i++) {
6900 if (iwx_rates[i].plcp == plcp) {
6901 rval = iwx_rates[i].rate;
6902 break;
6903 }
6904 }
6905 if (rval) {
6906 uint8_t rv;
6907 for (i = 0; i < rs->rs_nrates; i++) {
6908 rv = rs->rs_rates[i] & IEEE80211_RATE_VAL0x7f;
6909 if (rv == rval) {
6910 ni->ni_txrate = i;
6911 break;
6912 }
6913 }
6914 }
6915 }
6916}
6917
6918int
6919iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
6920 struct ieee80211_channel *chan, uint8_t chains_static,
6921 uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco)
6922{
6923 uint16_t band_flags = (IEEE80211_CHAN_2GHZ0x0080 | IEEE80211_CHAN_5GHZ0x0100);
6924 int err;
6925
6926 if (isset(sc->sc_enabled_capa,((sc->sc_enabled_capa)[(39)>>3] & (1<<((39
)&(8 -1))))
6927 IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)((sc->sc_enabled_capa)[(39)>>3] & (1<<((39
)&(8 -1))))
&&
6928 (phyctxt->channel->ic_flags & band_flags) !=
6929 (chan->ic_flags & band_flags)) {
6930 err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
6931 chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE3, apply_time, sco);
6932 if (err) {
6933 printf("%s: could not remove PHY context "
6934 "(error %d)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
6935 return err;
6936 }
6937 phyctxt->channel = chan;
6938 err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
6939 chains_dynamic, IWX_FW_CTXT_ACTION_ADD1, apply_time, sco);
6940 if (err) {
6941 printf("%s: could not add PHY context "
6942 "(error %d)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
6943 return err;
6944 }
6945 } else {
6946 phyctxt->channel = chan;
6947 err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
6948 chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY2, apply_time, sco);
6949 if (err) {
6950 printf("%s: could not update PHY context (error %d)\n",
6951 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
6952 return err;
6953 }
6954 }
6955
6956 phyctxt->sco = sco;
6957 return 0;
6958}
6959
6960int
6961iwx_auth(struct iwx_softc *sc)
6962{
6963 struct ieee80211com *ic = &sc->sc_ic;
6964 struct iwx_node *in = (void *)ic->ic_bss;
6965 uint32_t duration;
6966 int generation = sc->sc_generation, err;
6967
6968 splassert(IPL_NET)do { if (splassert_ctl > 0) { splassert_check(0x7, __func__
); } } while (0)
;
6969
6970 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6971 err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
6972 ic->ic_ibss_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN0);
6973 if (err)
6974 return err;
6975 } else {
6976 err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
6977 in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN0);
6978 if (err)
6979 return err;
6980 }
6981 in->in_phyctxt = &sc->sc_phyctxt[0];
6982 IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr)__builtin_memcpy((in->in_macaddr), (in->in_ni.ni_macaddr
), (6))
;
6983
6984 err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD1, 0);
6985 if (err) {
6986 printf("%s: could not add MAC context (error %d)\n",
6987 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
6988 return err;
6989 }
6990 sc->sc_flags |= IWX_FLAG_MAC_ACTIVE0x08;
6991
6992 err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD1);
6993 if (err) {
6994 printf("%s: could not add binding (error %d)\n",
6995 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
6996 goto rm_mac_ctxt;
6997 }
6998 sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE0x10;
6999
7000 err = iwx_add_sta_cmd(sc, in, 0);
7001 if (err) {
7002 printf("%s: could not add sta (error %d)\n",
7003 DEVNAME(sc)((sc)->sc_dev.dv_xname), err);
7004 goto rm_binding;
7005 }
7006 sc->sc_flags |= IWX_FLAG_STA_ACTIVE0x20;
7007
7008 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7009 err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID2,
7010 IWX_DQA_INJECT_MONITOR_QUEUE2, IWX_MGMT_TID15,
7011 IWX_TX_RING_COUNT(256));
7012 if (err)
7013 goto rm_sta;
7014 return 0;
7015 }
7016
7017 err = iwx_enable_mgmt_queue(sc);
7018 if (err)
7019 goto rm_sta;
7020
7021 err = iwx_clear_statistics(sc);
7022 if (err)
7023 goto rm_sta;
7024
7025 /*
7026 * Prevent the FW from wandering off channel during association
7027 * by "protecting" the session with a time event.
7028 */
7029 if (in->in_ni.ni_intval)
7030 duration = in->in_ni.ni_intval * 2;
7031 else
7032 duration = IEEE80211_DUR_TU1024;
7033 return iwx_schedule_session_protection(sc, in, duration);
7034rm_sta:
7035 if (generation == sc->sc_generation) {
7036 iwx_rm_sta_cmd(sc, in);
7037 sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE0x20;
7038 }
7039rm_binding:
7040 if (generation == sc->sc_generation) {
7041 iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE3);
7042 sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE0x10;
7043 }
7044rm_mac_ctxt:
7045 if (generation == sc->sc_generation) {
7046 iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE3, 0);
7047 sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE0x08;
7048 }
7049 return err;
7050}
7051
7052 int
7053 iwx_deauth(struct iwx_softc *sc)
7054 {
7055     struct ieee80211com *ic = &sc->sc_ic;
7056     struct iwx_node *in = (void *)ic->ic_bss;
7057     int err;
7058
7059     splassert(IPL_NET);
7060
7061     if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
7062         err = iwx_rm_sta(sc, in);
7063         if (err)
7064             return err;
7065         sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
7066     }
7067
7068     if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
7069         err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
7070         if (err) {
7071             printf("%s: could not remove binding (error %d)\n",
7072                 DEVNAME(sc), err);
7073             return err;
7074         }
7075         sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
7076     }
7077
7078     if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
7079         err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
7080         if (err) {
7081             printf("%s: could not remove MAC context (error %d)\n",
7082                 DEVNAME(sc), err);
7083             return err;
7084         }
7085         sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
7086     }
7087
7088     /* Move unused PHY context to a default channel. */
7089     err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
7090         &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN);
7091     if (err)
7092         return err;
7093
7094     return 0;
7095 }
7096
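/*
 * iwx_run() finishes the transition into RUN state: reconfigure the
 * PHY context for MIMO and/or 40 MHz if the AP supports it, tell the
 * firmware about our association, then set up smart-FIFO, multicast
 * filtering, power saving and rate scaling.
 */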
7097 int
7098 iwx_run(struct iwx_softc *sc)
7099 {
7100     struct ieee80211com *ic = &sc->sc_ic;
7101     struct iwx_node *in = (void *)ic->ic_bss;
7102     struct ieee80211_node *ni = &in->in_ni;
7103     int err;
7104
7105     splassert(IPL_NET);
7106
7107     if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7108         /* Add a MAC context and a sniffing STA. */
7109         err = iwx_auth(sc);
7110         if (err)
7111             return err;
7112     }
7113
7114     /* Configure Rx chains for MIMO and configure 40 MHz channel. */
7115     if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7116         uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
7117         err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
7118             in->in_phyctxt->channel, chains, chains,
7119             0, IEEE80211_HTOP0_SCO_SCN);
7120         if (err) {
7121             printf("%s: failed to update PHY\n", DEVNAME(sc));
7122             return err;
7123         }
7124     } else if (ni->ni_flags & IEEE80211_NODE_HT) {
7125         uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
7126         uint8_t sco;
7127         if (ieee80211_node_supports_ht_chan40(ni))
7128             sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
7129         else
7130             sco = IEEE80211_HTOP0_SCO_SCN;
7131         err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
7132             in->in_phyctxt->channel, chains, chains,
7133             0, sco);
7134         if (err) {
7135             printf("%s: failed to update PHY\n", DEVNAME(sc));
7136             return err;
7137         }
7138     }
7139
7140     /* We have now been assigned an associd by the AP. */
7141     err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
7142     if (err) {
7143         printf("%s: failed to update MAC\n", DEVNAME(sc));
7144         return err;
7145     }
7146
7147     err = iwx_sf_config(sc, IWX_SF_FULL_ON);
7148     if (err) {
7149         printf("%s: could not set sf full on (error %d)\n",
7150             DEVNAME(sc), err);
7151         return err;
7152     }
7153
7154     err = iwx_allow_mcast(sc);
7155     if (err) {
7156         printf("%s: could not allow mcast (error %d)\n",
7157             DEVNAME(sc), err);
7158         return err;
7159     }
7160
7161     err = iwx_power_update_device(sc);
7162     if (err) {
7163         printf("%s: could not send power command (error %d)\n",
7164             DEVNAME(sc), err);
7165         return err;
7166     }
7167 #ifdef notyet
7168     /*
7169      * Disabled for now. Default beacon filter settings
7170      * prevent net80211 from getting ERP and HT protection
7171      * updates from beacons.
7172      */
7173     err = iwx_enable_beacon_filter(sc, in);
7174     if (err) {
7175         printf("%s: could not enable beacon filter\n",
7176             DEVNAME(sc));
7177         return err;
7178     }
7179 #endif
7180     err = iwx_power_mac_update_mode(sc, in);
7181     if (err) {
7182         printf("%s: could not update MAC power (error %d)\n",
7183             DEVNAME(sc), err);
7184         return err;
7185     }
7186
7187     if (ic->ic_opmode == IEEE80211_M_MONITOR)
7188         return 0;
7189
7190     /* Start at lowest available bit-rate. Firmware will raise. */
7191     in->in_ni.ni_txrate = 0;
7192     in->in_ni.ni_txmcs = 0;
7193
7194     err = iwx_rs_init(sc, in);
7195     if (err) {
7196         printf("%s: could not init rate scaling (error %d)\n",
7197             DEVNAME(sc), err);
7198         return err;
7199     }
7200
7201     return 0;
7202 }
7203
7204 int
7205 iwx_run_stop(struct iwx_softc *sc)
7206 {
7207     struct ieee80211com *ic = &sc->sc_ic;
7208     struct iwx_node *in = (void *)ic->ic_bss;
7209     struct ieee80211_node *ni = &in->in_ni;
7210     int err, i;
7211
7212     splassert(IPL_NET);
7213
7214     err = iwx_flush_sta(sc, in);
7215     if (err) {
7216         printf("%s: could not flush Tx path (error %d)\n",
7217             DEVNAME(sc), err);
7218         return err;
7219     }
7220
7221     /*
7222      * Stop Rx BA sessions now. We cannot rely on the BA task
7223      * for this when moving out of RUN state since it runs in a
7224      * separate thread.
7225      * Note that in->in_ni (struct ieee80211_node) already represents
7226      * our new access point in case we are roaming between APs.
7227      * This means we cannot rely on struct ieee80211_node to tell
7228      * us which BA sessions exist.
7229      */
7230     for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
7231         struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
7232         if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
7233             continue;
7234         iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
7235     }
7236
7237     err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
7238     if (err)
7239         return err;
7240
7241     err = iwx_disable_beacon_filter(sc);
7242     if (err) {
7243         printf("%s: could not disable beacon filter (error %d)\n",
7244             DEVNAME(sc), err);
7245         return err;
7246     }
7247
7248     /* Mark station as disassociated. */
7249     err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
7250     if (err) {
7251         printf("%s: failed to update MAC\n", DEVNAME(sc));
7252         return err;
7253     }
7254
7255     /* Reset Tx chains in case MIMO or 40 MHz channels were enabled. */
7256     if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
7257         err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
7258             in->in_phyctxt->channel, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN);
7259         if (err) {
7260             printf("%s: failed to update PHY\n", DEVNAME(sc));
7261             return err;
7262         }
7263     }
7264
7265     return 0;
7266 }
7267
7268 struct ieee80211_node *
7269 iwx_node_alloc(struct ieee80211com *ic)
7270 {
7271     return malloc(sizeof (struct iwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
7272 }
7273
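/*
 * Installing a CCMP key requires sleeping for a firmware response,
 * which is not possible in every context from which net80211 may call
 * iwx_set_key(). The arguments are therefore queued in the
 * sc->setkey_arg ring (setkey_cur is the producer index, setkey_tail
 * the consumer index drained by iwx_setkey_task()) and EBUSY tells
 * net80211 that the key will be installed asynchronously.
 */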
7274 int
7275 iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
7276     struct ieee80211_key *k)
7277 {
7278     struct iwx_softc *sc = ic->ic_softc;
7279     struct iwx_node *in = (void *)ni;
7280     struct iwx_setkey_task_arg *a;
7281     int err;
7282
7283     if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
7284         /* Fallback to software crypto for other ciphers. */
7285         err = ieee80211_set_key(ic, ni, k);
7286         if (!err && (k->k_flags & IEEE80211_KEY_GROUP))
7287             in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
7288         return err;
7289     }
7290
7291     if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
7292         return ENOSPC;
7293
7294     a = &sc->setkey_arg[sc->setkey_cur];
7295     a->sta_id = IWX_STATION_ID;
7296     a->ni = ni;
7297     a->k = k;
7298     sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
7299     sc->setkey_nkeys++;
7300     iwx_add_task(sc, systq, &sc->setkey_task);
7301     return EBUSY;
7302 }
7303
7304 int
7305 iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
7306     struct ieee80211_key *k)
7307 {
7308     struct ieee80211com *ic = &sc->sc_ic;
7309     struct iwx_node *in = (void *)ni;
7310     struct iwx_add_sta_key_cmd cmd;
7311     uint32_t status;
7312     const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
7313         IWX_NODE_FLAG_HAVE_GROUP_KEY);
7314     int err;
7315
7316     /*
7317      * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
7318      * Currently we only implement station mode where 'ni' is always
7319      * ic->ic_bss so there is no need to validate arguments beyond this:
7320      */
7321     KASSERT(ni == ic->ic_bss);
7322
7323     memset(&cmd, 0, sizeof(cmd));
7324
7325     cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
7326         IWX_STA_KEY_FLG_WEP_KEY_MAP |
7327         ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
7328         IWX_STA_KEY_FLG_KEYID_MSK));
7329     if (k->k_flags & IEEE80211_KEY_GROUP) {
7330         cmd.common.key_offset = 1;
7331         cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
7332     } else
7333         cmd.common.key_offset = 0;
7334
7335     memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
7336     cmd.common.sta_id = sta_id;
7337
7338     cmd.transmit_seq_cnt = htole64(k->k_tsc);
7339
7340     status = IWX_ADD_STA_SUCCESS;
7341     err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
7342         &status);
7343     if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
7344         return ECANCELED;
7345     if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
7346         err = EIO;
7347     if (err) {
7348         IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
7349             IEEE80211_REASON_AUTH_LEAVE);
7350         ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
7351         return err;
7352     }
7353
7354     if (k->k_flags & IEEE80211_KEY_GROUP)
7355         in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
7356     else
7357         in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;
7358
7359     if ((in->in_flags & want_keymask) == want_keymask) {
7360         DPRINTF(("marking port %s valid\n",
7361             ether_sprintf(ni->ni_macaddr)));
7362         ni->ni_port_valid = 1;
7363         ieee80211_set_link_state(ic, LINK_STATE_UP);
7364     }
7365
7366     return 0;
7367 }
7368
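/* Drain queued keys in FIFO order; stop early on error or shutdown. */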
7369 void
7370 iwx_setkey_task(void *arg)
7371 {
7372     struct iwx_softc *sc = arg;
7373     struct iwx_setkey_task_arg *a;
7374     int err = 0, s = splnet();
7375
7376     while (sc->setkey_nkeys > 0) {
7377         if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
7378             break;
7379         a = &sc->setkey_arg[sc->setkey_tail];
7380         err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
7381         a->sta_id = 0;
7382         a->ni = NULL;
7383         a->k = NULL;
7384         sc->setkey_tail = (sc->setkey_tail + 1) %
7385             nitems(sc->setkey_arg);
7386         sc->setkey_nkeys--;
7387     }
7388
7389     refcnt_rele_wake(&sc->task_refs);
7390     splx(s);
7391 }
7392
7393 void
7394 iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
7395     struct ieee80211_key *k)
7396 {
7397     struct iwx_softc *sc = ic->ic_softc;
7398     struct iwx_add_sta_key_cmd cmd;
7399
7400     if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
7401         /* Fallback to software crypto for other ciphers. */
7402         ieee80211_delete_key(ic, ni, k);
7403         return;
7404     }
7405
7406     memset(&cmd, 0, sizeof(cmd));
7407
7408     cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
7409         IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
7410         ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
7411         IWX_STA_KEY_FLG_KEYID_MSK));
7412     memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
7413     if (k->k_flags & IEEE80211_KEY_GROUP)
7414         cmd.common.key_offset = 1;
7415     else
7416         cmd.common.key_offset = 0;
7417     cmd.common.sta_id = IWX_STATION_ID;
7418
7419     iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
7420 }
7421
7422 int
7423 iwx_media_change(struct ifnet *ifp)
7424 {
7425     struct iwx_softc *sc = ifp->if_softc;
7426     struct ieee80211com *ic = &sc->sc_ic;
7427     uint8_t rate, ridx;
7428     int err;
7429
7430     err = ieee80211_media_change(ifp);
7431     if (err != ENETRESET)
7432         return err;
7433
7434     if (ic->ic_fixed_mcs != -1)
7435         sc->sc_fixed_ridx = iwx_mcs2ridx[ic->ic_fixed_mcs];
7436     else if (ic->ic_fixed_rate != -1) {
7437         rate = ic->ic_sup_rates[ic->ic_curmode].
7438             rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
7439         /* Map 802.11 rate to HW rate index. */
7440         for (ridx = 0; ridx <= IWX_RIDX_MAX; ridx++)
7441             if (iwx_rates[ridx].rate == rate)
7442                 break;
7443         sc->sc_fixed_ridx = ridx;
7444     }
7445
7446     if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
7447         (IFF_UP | IFF_RUNNING)) {
7448         iwx_stop(ifp);
7449         err = iwx_init(ifp);
7450     }
7451     return err;
7452 }
7453
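/*
 * Perform a state transition requested via iwx_newstate(). When moving
 * to a lower state, the intermediate states are stepped through in
 * descending order so that driver, firmware and net80211 state stay in
 * sync; SHUTDOWN is re-checked after every step that may have slept.
 */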
7454 void
7455 iwx_newstate_task(void *psc)
7456 {
7457     struct iwx_softc *sc = (struct iwx_softc *)psc;
7458     struct ieee80211com *ic = &sc->sc_ic;
7459     enum ieee80211_state nstate = sc->ns_nstate;
7460     enum ieee80211_state ostate = ic->ic_state;
7461     int arg = sc->ns_arg;
7462     int err = 0, s = splnet();
7463
7464     if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
7465         /* iwx_stop() is waiting for us. */
7466         refcnt_rele_wake(&sc->task_refs);
7467         splx(s);
7468         return;
7469     }
7470
7471     if (ostate == IEEE80211_S_SCAN) {
7472         if (nstate == ostate) {
7473             if (sc->sc_flags & IWX_FLAG_SCANNING) {
7474                 refcnt_rele_wake(&sc->task_refs);
7475                 splx(s);
7476                 return;
7477             }
7478             /* Firmware is no longer scanning. Do another scan. */
7479             goto next_scan;
7480         }
7481     }
7482
7483     if (nstate <= ostate) {
7484         switch (ostate) {
7485         case IEEE80211_S_RUN:
7486             err = iwx_run_stop(sc);
7487             if (err)
7488                 goto out;
7489             /* FALLTHROUGH */
7490         case IEEE80211_S_ASSOC:
7491         case IEEE80211_S_AUTH:
7492             if (nstate <= IEEE80211_S_AUTH) {
7493                 err = iwx_deauth(sc);
7494                 if (err)
7495                     goto out;
7496             }
7497             /* FALLTHROUGH */
7498         case IEEE80211_S_SCAN:
7499         case IEEE80211_S_INIT:
7500             break;
7501         }
7502
7503         /* Die now if iwx_stop() was called while we were sleeping. */
7504         if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
7505             refcnt_rele_wake(&sc->task_refs);
7506             splx(s);
7507             return;
7508         }
7509     }
7510
7511     switch (nstate) {
7512     case IEEE80211_S_INIT:
7513         break;
7514
7515     case IEEE80211_S_SCAN:
7516 next_scan:
7517         err = iwx_scan(sc);
7518         if (err)
7519             break;
7520         refcnt_rele_wake(&sc->task_refs);
7521         splx(s);
7522         return;
7523
7524     case IEEE80211_S_AUTH:
7525         err = iwx_auth(sc);
7526         break;
7527
7528     case IEEE80211_S_ASSOC:
7529         break;
7530
7531     case IEEE80211_S_RUN:
7532         err = iwx_run(sc);
7533         break;
7534     }
7535
7536 out:
7537     if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
7538         if (err)
7539             task_add(systq, &sc->init_task);
7540         else
7541             sc->sc_newstate(ic, nstate, arg);
7542     }
7543     refcnt_rele_wake(&sc->task_refs);
7544     splx(s);
7545 }
7546
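/*
 * ic_newstate() hook. The real work happens in iwx_newstate_task()
 * on sc_nswq because state transitions sleep waiting for firmware
 * responses; here we only record the request and cancel tasks which
 * could interfere with it.
 */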
7547 int
7548 iwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
7549 {
7550     struct ifnet *ifp = IC2IFP(ic);
7551     struct iwx_softc *sc = ifp->if_softc;
7552
7553     /*
7554      * Prevent attempts to transition towards the same state, unless
7555      * we are scanning in which case a SCAN -> SCAN transition
7556      * triggers another scan iteration. And AUTH -> AUTH is needed
7557      * to support band-steering.
7558      */
7559     if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
7560         nstate != IEEE80211_S_AUTH)
7561         return 0;
7562
7563     if (ic->ic_state == IEEE80211_S_RUN) {
7564         iwx_del_task(sc, systq, &sc->ba_task);
7565         iwx_del_task(sc, systq, &sc->setkey_task);
7566         memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
7567         sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
7568         iwx_del_task(sc, systq, &sc->mac_ctxt_task);
7569         iwx_del_task(sc, systq, &sc->phy_ctxt_task);
7570         iwx_del_task(sc, systq, &sc->bgscan_done_task);
7571     }
7572
7573     sc->ns_nstate = nstate;
7574     sc->ns_arg = arg;
7575
7576     iwx_add_task(sc, sc->sc_nswq, &sc->newstate_task);
7577
7578     return 0;
7579 }
7580
7581 void
7582 iwx_endscan(struct iwx_softc *sc)
7583 {
7584     struct ieee80211com *ic = &sc->sc_ic;
7585
7586     if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
7587         return;
7588
7589     sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
7590     ieee80211_end_scan(&ic->ic_if);
7591 }
7592
7593/*
7594 * Aging and idle timeouts for the different possible scenarios
7595 * in default configuration
7596 */
7597 static const uint32_t
7598 iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
7599     {
7600         htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
7601         htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
7602     },
7603     {
7604         htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
7605         htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
7606     },
7607     {
7608         htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
7609         htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
7610     },
7611     {
7612         htole32(IWX_SF_BA_AGING_TIMER_DEF),
7613         htole32(IWX_SF_BA_IDLE_TIMER_DEF)
7614     },
7615     {
7616         htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
7617         htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
7618     },
7619 };
7620
7621/*
7622 * Aging and idle timeouts for the different possible scenarios
7623 * in single BSS MAC configuration.
7624 */
7625 static const uint32_t
7626 iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
7627     {
7628         htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
7629         htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
7630     },
7631     {
7632         htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
7633         htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
7634     },
7635     {
7636         htole32(IWX_SF_MCAST_AGING_TIMER),
7637         htole32(IWX_SF_MCAST_IDLE_TIMER)
7638     },
7639     {
7640         htole32(IWX_SF_BA_AGING_TIMER),
7641         htole32(IWX_SF_BA_IDLE_TIMER)
7642     },
7643     {
7644         htole32(IWX_SF_TX_RE_AGING_TIMER),
7645         htole32(IWX_SF_TX_RE_IDLE_TIMER)
7646     },
7647 };
7648
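/*
 * Fill an IWX_REPLY_SF_CFG_CMD payload. The watermark appears to set
 * the traffic level above which the firmware's smart FIFO leaves its
 * "long delay" (power save) state; it is picked from the AP's antenna
 * capabilities (MIMO vs. SISO vs. legacy) when a node is given.
 */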
7649 void
7650 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
7651     struct ieee80211_node *ni)
7652 {
7653     int i, j, watermark;
7654
7655     sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
7656
7657     /*
7658      * If we are in association flow - check antenna configuration
7659      * capabilities of the AP station, and choose the watermark accordingly.
7660      */
7661     if (ni) {
7662         if (ni->ni_flags & IEEE80211_NODE_HT) {
7663             if (ni->ni_rxmcs[1] != 0)
7664                 watermark = IWX_SF_W_MARK_MIMO2;
7665             else
7666                 watermark = IWX_SF_W_MARK_SISO;
7667         } else {
7668             watermark = IWX_SF_W_MARK_LEGACY;
7669         }
7670     /* default watermark value for unassociated mode. */
7671     } else {
7672         watermark = IWX_SF_W_MARK_MIMO2;
7673     }
7674     sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
7675
7676     for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
7677         for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
7678             sf_cmd->long_delay_timeouts[i][j] =
7679                 htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
7680         }
7681     }
7682
7683     if (ni) {
7684         memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
7685             sizeof(iwx_sf_full_timeout));
7686     } else {
7687         memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
7688             sizeof(iwx_sf_full_timeout_def));
7689     }
7690
7691 }
7692
7693 int
7694 iwx_sf_config(struct iwx_softc *sc, int new_state)
7695 {
7696     struct ieee80211com *ic = &sc->sc_ic;
7697     struct iwx_sf_cfg_cmd sf_cmd = {
7698         .state = htole32(new_state),
7699     };
7700     int err = 0;
7701
7702     switch (new_state) {
7703     case IWX_SF_UNINIT:
7704     case IWX_SF_INIT_OFF:
7705         iwx_fill_sf_command(sc, &sf_cmd, NULL);
7706         break;
7707     case IWX_SF_FULL_ON:
7708         iwx_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
7709         break;
7710     default:
7711         return EINVAL;
7712     }
7713
7714     err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
7715         sizeof(sf_cmd), &sf_cmd);
7716     return err;
7717 }
7718
7719 int
7720 iwx_send_bt_init_conf(struct iwx_softc *sc)
7721 {
7722     struct iwx_bt_coex_cmd bt_cmd;
7723
7724     bt_cmd.mode = htole32(IWX_BT_COEX_WIFI);
7725     bt_cmd.enabled_modules = 0;
7726
7727     return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
7728         &bt_cmd);
7729 }
7730
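/*
 * Describe SoC integration properties (discrete vs. integrated, LTR
 * delay, crystal latency) to the firmware; see the VER_1/VER_2 comment
 * below for why one flags word can serve both command versions.
 */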
7731 int
7732 iwx_send_soc_conf(struct iwx_softc *sc)
7733 {
7734     struct iwx_soc_configuration_cmd cmd;
7735     int err;
7736     uint32_t cmd_id, flags = 0;
7737
7738     memset(&cmd, 0, sizeof(cmd));
7739
7740     /*
7741      * In VER_1 of this command, the discrete value is considered
7742      * an integer; in VER_2, it's a bitmask. Since we have only 2
7743      * values in VER_1, this is backwards-compatible with VER_2,
7744      * as long as we don't set any other flag bits.
7745      */
7746     if (!sc->sc_integrated) { /* VER_1 */
7747         flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
7748     } else { /* VER_2 */
7749         uint8_t scan_cmd_ver;
7750         if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
7751             flags |= (sc->sc_ltr_delay &
7752                 IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
7753         scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
7754             IWX_SCAN_REQ_UMAC);
7755         if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
7756             scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
7757             flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
7758     }
7759     cmd.flags = htole32(flags);
7760
7761     cmd.latency = htole32(sc->sc_xtal_latency);
7762
7763     cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
7764     err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
7765     if (err)
7766         printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
7767     return err;
7768 }
7769
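/*
 * Request a regulatory channel map from the firmware. The "ZZ"
 * country code passed by iwx_init_hw() acts as a wildcard; the reply
 * carries the MCC the firmware selected plus a per-channel flags
 * array which iwx_init_channel_map() feeds back into net80211.
 */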
7770 int
7771 iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
7772 {
7773     struct iwx_mcc_update_cmd mcc_cmd;
7774     struct iwx_host_cmd hcmd = {
7775         .id = IWX_MCC_UPDATE_CMD,
7776         .flags = IWX_CMD_WANT_RESP,
7777         .data = { &mcc_cmd },
7778     };
7779     struct iwx_rx_packet *pkt;
7780     struct iwx_mcc_update_resp *resp;
7781     size_t resp_len;
7782     int err;
7783
7784     memset(&mcc_cmd, 0, sizeof(mcc_cmd));
7785     mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
7786     if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
7787         isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
7788         mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
7789     else
7790         mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
7791
7792     hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
7793     hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
7794
7795     err = iwx_send_cmd(sc, &hcmd);
7796     if (err)
7797         return err;
7798
7799     pkt = hcmd.resp_pkt;
7800     if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
7801         err = EIO;
7802         goto out;
7803     }
7804
7805     resp_len = iwx_rx_packet_payload_len(pkt);
7806     if (resp_len < sizeof(*resp)) {
7807         err = EIO;
7808         goto out;
7809     }
7810
7811     resp = (void *)pkt->data;
7812     if (resp_len != sizeof(*resp) +
7813         resp->n_channels * sizeof(resp->channels[0])) {
7814         err = EIO;
7815         goto out;
7816     }
7817
7818     DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%d n_channels=%u\n",
7819         resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
7820
7821     /* Update channel map for net80211 and our scan configuration. */
7822     iwx_init_channel_map(sc, NULL, resp->channels, resp->n_channels);
7823
7824 out:
7825     iwx_free_resp(sc, &hcmd);
7826
7827     return err;
7828 }
7829
7830 int
7831 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
7832 {
7833     struct iwx_temp_report_ths_cmd cmd;
7834     int err;
7835
7836     /*
7837      * In order to give responsibility for critical-temperature-kill
7838      * and TX backoff to FW we need to send an empty temperature
7839      * reporting command at init time.
7840      */
7841     memset(&cmd, 0, sizeof(cmd));
7842
7843     err = iwx_send_cmd_pdu(sc,
7844         IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
7845         0, sizeof(cmd), &cmd);
7846     if (err)
7847         printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
7848             DEVNAME(sc), err);
7849
7850     return err;
7851 }
7852
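/*
 * Device initialization after firmware load: antenna configuration,
 * BT coexistence, SoC parameters, PHY contexts, LTR, thermal
 * reporting, power and scan setup, mostly in the order the firmware
 * expects these commands.
 */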
7853 int
7854 iwx_init_hw(struct iwx_softc *sc)
7855 {
7856     struct ieee80211com *ic = &sc->sc_ic;
7857     int err, i;
7858
7859     err = iwx_run_init_mvm_ucode(sc, 0);
7860     if (err)
7861         return err;
7862
7863     if (!iwx_nic_lock(sc))
7864         return EBUSY;
7865
7866     err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
7867     if (err) {
7868         printf("%s: could not init tx ant config (error %d)\n",
7869             DEVNAME(sc), err);
7870         goto err;
7871     }
7872
7873     if (sc->sc_tx_with_siso_diversity) {
7874         err = iwx_send_phy_cfg_cmd(sc);
7875         if (err) {
7876             printf("%s: could not send phy config (error %d)\n",
7877                 DEVNAME(sc), err);
7878             goto err;
7879         }
7880     }
7881
7882     err = iwx_send_bt_init_conf(sc);
7883     if (err) {
7884         printf("%s: could not init bt coex (error %d)\n",
7885             DEVNAME(sc), err);
7886         return err;
7887     }
7888
7889     err = iwx_send_soc_conf(sc);
7890     if (err)
7891         return err;
7892
7893     if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
7894         err = iwx_send_dqa_cmd(sc);
7895         if (err)
7896             return err;
7897     }
7898
7899     for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
7900         /*
7901          * The channel used here isn't relevant as it's
7902          * going to be overwritten in the other flows.
7903          * For now use the first channel we have.
7904          */
7905         sc->sc_phyctxt[i].id = i;
7906         sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
7907         err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
7908             IWX_FW_CTXT_ACTION_ADD, 0, IEEE80211_HTOP0_SCO_SCN);
7909         if (err) {
7910             printf("%s: could not add phy context %d (error %d)\n",
7911                 DEVNAME(sc), i, err);
7912             goto err;
7913         }
7914     }
7915
7916     err = iwx_config_ltr(sc);
7917     if (err) {
7918         printf("%s: PCIe LTR configuration failed (error %d)\n",
7919             DEVNAME(sc), err);
7920     }
7921
7922     if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
7923         err = iwx_send_temp_report_ths_cmd(sc);
7924         if (err)
7925             goto err;
7926     }
7927
7928     err = iwx_power_update_device(sc);
7929     if (err) {
7930         printf("%s: could not send power command (error %d)\n",
7931             DEVNAME(sc), err);
7932         goto err;
7933     }
7934
7935     if (sc->sc_nvm.lar_enabled) {
7936         err = iwx_send_update_mcc_cmd(sc, "ZZ");
7937         if (err) {
7938             printf("%s: could not init LAR (error %d)\n",
7939                 DEVNAME(sc), err);
7940             goto err;
7941         }
7942     }
7943
7944     err = iwx_config_umac_scan_reduced(sc);
7945     if (err) {
7946         printf("%s: could not configure scan (error %d)\n",
7947             DEVNAME(sc), err);
7948         goto err;
7949     }
7950
7951     err = iwx_disable_beacon_filter(sc);
7952     if (err) {
7953         printf("%s: could not disable beacon filter (error %d)\n",
7954             DEVNAME(sc), err);
7955         goto err;
7956     }
7957
7958 err:
7959     iwx_nic_unlock(sc);
7960     return err;
7961 }
7962
7963/* Allow multicast from our BSSID. */
7964 int
7965 iwx_allow_mcast(struct iwx_softc *sc)
7966 {
7967     struct ieee80211com *ic = &sc->sc_ic;
7968     struct iwx_node *in = (void *)ic->ic_bss;
7969     struct iwx_mcast_filter_cmd *cmd;
7970     size_t size;
7971     int err;
7972
7973     size = roundup(sizeof(*cmd), 4);
7974     cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
7975     if (cmd == NULL)
7976         return ENOMEM;
7977     cmd->filter_own = 1;
7978     cmd->port_id = 0;
7979     cmd->count = 0;
7980     cmd->pass_all = 1;
7981     IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
7982
7983     err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
7984         0, size, cmd);
7985     free(cmd, M_DEVBUF, size);
7986     return err;
7987 }
7988
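/*
 * Bring the interface up and kick off an initial scan, then sleep
 * until the INIT -> SCAN transition has completed. A changed
 * sc_generation means iwx_stop() ran in the meantime and the caller
 * gets ENXIO.
 */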
7989 int
7990 iwx_init(struct ifnet *ifp)
7991 {
7992     struct iwx_softc *sc = ifp->if_softc;
7993     struct ieee80211com *ic = &sc->sc_ic;
7994     int err, generation;
7995
7996     rw_assert_wrlock(&sc->ioctl_rwl);
7997
7998     generation = ++sc->sc_generation;
7999
8000     err = iwx_preinit(sc);
8001     if (err)
8002         return err;
8003
8004     err = iwx_start_hw(sc);
8005     if (err) {
8006         printf("%s: could not initialize hardware\n", DEVNAME(sc));
8007         return err;
8008     }
8009
8010     err = iwx_init_hw(sc);
8011     if (err) {
8012         if (generation == sc->sc_generation)
8013             iwx_stop_device(sc);
8014         return err;
8015     }
8016
8017     if (sc->sc_nvm.sku_cap_11n_enable)
8018         iwx_setup_ht_rates(sc);
8019
8020     KASSERT(sc->task_refs.refs == 0);
8021     refcnt_init(&sc->task_refs);
8022     ifq_clr_oactive(&ifp->if_snd);
8023     ifp->if_flags |= IFF_RUNNING;
8024
8025     if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8026         ic->ic_bss->ni_chan = ic->ic_ibss_chan;
8027         ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
8028         return 0;
8029     }
8030
8031     ieee80211_begin_scan(ifp);
8032
8033     /*
8034      * ieee80211_begin_scan() ends up scheduling iwx_newstate_task().
8035      * Wait until the transition to SCAN state has completed.
8036      */
8037     do {
8038         err = tsleep_nsec(&ic->ic_state, PCATCH, "iwxinit",
8039             SEC_TO_NSEC(1));
8040         if (generation != sc->sc_generation)
8041             return ENXIO;
8042         if (err) {
8043             iwx_stop(ifp);
8044             return err;
8045         }
8046     } while (ic->ic_state != IEEE80211_S_SCAN);
8047
8048     return 0;
8049 }
8050
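/*
 * ifnet start routine. Management frames are dequeued first since
 * they must go out even outside RUN state; data frames follow only
 * in RUN state. The loop stops when any Tx ring is full (qfullmsk)
 * or while Tx queues are being flushed.
 */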
8051 void
8052 iwx_start(struct ifnet *ifp)
8053 {
8054     struct iwx_softc *sc = ifp->if_softc;
8055     struct ieee80211com *ic = &sc->sc_ic;
8056     struct ieee80211_node *ni;
8057     struct ether_header *eh;
8058     struct mbuf *m;
8059
8060     if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
8061         return;
8062
8063     for (;;) {
8064         /* why isn't this done per-queue? */
8065         if (sc->qfullmsk != 0) {
8066             ifq_set_oactive(&ifp->if_snd);
8067             break;
8068         }
8069
8070         /* Don't queue additional frames while flushing Tx queues. */
8071         if (sc->sc_flags & IWX_FLAG_TXFLUSH)
8072             break;
8073
8074         /* need to send management frames even if we're not RUNning */
8075         m = mq_dequeue(&ic->ic_mgtq);
8076         if (m) {
8077             ni = m->m_pkthdr.ph_cookie;
8078             goto sendit;
8079         }
8080
8081         if (ic->ic_state != IEEE80211_S_RUN ||
8082             (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
8083             break;
8084
8085         m = ifq_dequeue(&ifp->if_snd);
8086         if (!m)
8087             break;
8088         if (m->m_len < sizeof (*eh) &&
8089             (m = m_pullup(m, sizeof (*eh))) == NULL) {
8090             ifp->if_oerrors++;
8091             continue;
8092         }
8093 #if NBPFILTER > 0
8094         if (ifp->if_bpf != NULL)
8095             bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
8096 #endif
8097         if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
8098             ifp->if_oerrors++;
8099             continue;
8100         }
8101
8102  sendit:
8103 #if NBPFILTER > 0
8104         if (ic->ic_rawbpf != NULL)
8105             bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
8106 #endif
8107         if (iwx_tx(sc, m, ni) != 0) {
8108             ieee80211_release_node(ic, ni);
8109             ifp->if_oerrors++;
8110             continue;
8111         }
8112
8113         if (ifp->if_flags & IFF_UP)
8114             ifp->if_timer = 1;
8115     }
8116
8117     return;
8118 }
8119
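/*
 * Stop the interface: IWX_FLAG_SHUTDOWN keeps new tasks from being
 * queued, refcnt_finalize() sleeps until all running tasks have
 * dropped their reference, and only then is the device stopped and
 * driver soft state reset.
 */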
8120 void
8121 iwx_stop(struct ifnet *ifp)
8122 {
8123     struct iwx_softc *sc = ifp->if_softc;
8124     struct ieee80211com *ic = &sc->sc_ic;
8125     struct iwx_node *in = (void *)ic->ic_bss;
8126     int i, s = splnet();
8127
8128     rw_assert_wrlock(&sc->ioctl_rwl);
8129
8130     sc->sc_flags |= IWX_FLAG_SHUTDOWN; /* Disallow new tasks. */
8131
8132     /* Cancel scheduled tasks and let any stale tasks finish up. */
8133     task_del(systq, &sc->init_task);
8134     iwx_del_task(sc, sc->sc_nswq, &sc->newstate_task);
8135     iwx_del_task(sc, systq, &sc->ba_task);
8136     iwx_del_task(sc, systq, &sc->setkey_task);
8137     memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
8138     sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
8139     iwx_del_task(sc, systq, &sc->mac_ctxt_task);
8140     iwx_del_task(sc, systq, &sc->phy_ctxt_task);
8141     iwx_del_task(sc, systq, &sc->bgscan_done_task);
8142     KASSERT(sc->task_refs.refs >= 1);
8143     refcnt_finalize(&sc->task_refs, "iwxstop");
8144
8145     iwx_stop_device(sc);
8146
8147     free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8148     sc->bgscan_unref_arg = NULL;
8149     sc->bgscan_unref_arg_size = 0;
8150
8151     /* Reset soft state. */
8152
8153     sc->sc_generation++;
8154     for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
8155         free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
8156         sc->sc_cmd_resp_pkt[i] = NULL;
8157         sc->sc_cmd_resp_len[i] = 0;
8158     }
8159     ifp->if_flags &= ~IFF_RUNNING;
8160     ifq_clr_oactive(&ifp->if_snd);
8161
8162     in->in_phyctxt = NULL;
8163     in->in_flags = 0;
8164     IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
8165
8166     sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8167     sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8168     sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8169     sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8170     sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
8171     sc->sc_flags &= ~IWX_FLAG_HW_ERR;
8172     sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
8173     sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
8174
8175     sc->sc_rx_ba_sessions = 0;
8176     sc->ba_rx.start_tidmask = 0;
8177     sc->ba_rx.stop_tidmask = 0;
8178     memset(sc->aggqid, 0, sizeof(sc->aggqid));
8179     sc->ba_tx.start_tidmask = 0;
8180     sc->ba_tx.stop_tidmask = 0;
8181
8182     sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
8183     sc->ns_nstate = IEEE80211_S_INIT;
8184
8185     for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
8186         struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
8187         iwx_clear_reorder_buffer(sc, rxba);
8188     }
8189     memset(sc->sc_tx_timer, 0, sizeof(sc->sc_tx_timer));
8190     ifp->if_timer = 0;
8191
8192     splx(s);
8193 }
8194
8195 void
8196 iwx_watchdog(struct ifnet *ifp)
8197 {
8198     struct iwx_softc *sc = ifp->if_softc;
8199     int i;
8200
8201     ifp->if_timer = 0;
8202
8203     /*
8204      * We maintain a separate timer for each Tx queue because
8205      * Tx aggregation queues can get "stuck" while other queues
8206      * keep working. The Linux driver uses a similar workaround.
8207      */
8208     for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
8209         if (sc->sc_tx_timer[i] > 0) {
8210             if (--sc->sc_tx_timer[i] == 0) {
8211                 printf("%s: device timeout\n", DEVNAME(sc));
8212                 if (ifp->if_flags & IFF_DEBUG) {
8213                     iwx_nic_error(sc);
8214                     iwx_dump_driver_status(sc);
8215                 }
8216                 if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
8217                     task_add(systq, &sc->init_task);
8218                 ifp->if_oerrors++;
8219                 return;
8220             }
8221             ifp->if_timer = 1;
8222         }
8223     }
8224
8225     ieee80211_watchdog(ifp);
8226 }
8227
8228 int
8229 iwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
8230 {
8231     struct iwx_softc *sc = ifp->if_softc;
8232     int s, err = 0, generation = sc->sc_generation;
8233
8234     /*
8235      * Prevent processes from entering this function while another
8236      * process is tsleep'ing in it.
8237      */
8238     err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
8239     if (err == 0 && generation != sc->sc_generation) {
8240         rw_exit(&sc->ioctl_rwl);
8241         return ENXIO;
8242     }
8243     if (err)
8244         return err;
8245     s = splnet();
8246
8247     switch (cmd) {
8248     case SIOCSIFADDR:
8249         ifp->if_flags |= IFF_UP;
8250         /* FALLTHROUGH */
8251     case SIOCSIFFLAGS:
8252         if (ifp->if_flags & IFF_UP) {
8253             if (!(ifp->if_flags & IFF_RUNNING)) {
8254                 /* Force reload of firmware image from disk. */
8255                 sc->sc_fw.fw_status = IWX_FW_STATUS_NONE;
8256                 err = iwx_init(ifp);
8257             }
8258         } else {
8259             if (ifp->if_flags & IFF_RUNNING)
8260                 iwx_stop(ifp);
8261         }
8262         break;
8263
8264     default:
8265         err = ieee80211_ioctl(ifp, cmd, data);
8266     }
8267
8268     if (err == ENETRESET) {
8269         err = 0;
8270         if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
8271             (IFF_UP | IFF_RUNNING)) {
8272             iwx_stop(ifp);
8273             err = iwx_init(ifp);
8274         }
8275     }
8276
8277     splx(s);
8278     rw_exit(&sc->ioctl_rwl);
8279
8280     return err;
8281 }
8282
8283/*
8284 * Note: This structure is read from the device with IO accesses,
8285 * and the reading already does the endian conversion. As it is
8286 * read with uint32_t-sized accesses, any members with a different size
8287 * need to be ordered correctly though!
8288 */
8289struct iwx_error_event_table {
8290 uint32_t valid; /* (nonzero) valid, (0) log is empty */
8291 uint32_t error_id; /* type of error */
8292 uint32_t trm_hw_status0; /* TRM HW status */
8293 uint32_t trm_hw_status1; /* TRM HW status */
8294 uint32_t blink2; /* branch link */
8295 uint32_t ilink1; /* interrupt link */
8296 uint32_t ilink2; /* interrupt link */
8297 uint32_t data1; /* error-specific data */
8298 uint32_t data2; /* error-specific data */
8299 uint32_t data3; /* error-specific data */
8300 uint32_t bcon_time; /* beacon timer */
8301 uint32_t tsf_low; /* network timestamp function timer */
8302 uint32_t tsf_hi; /* network timestamp function timer */
8303 uint32_t gp1; /* GP1 timer register */
8304 uint32_t gp2; /* GP2 timer register */
8305 uint32_t fw_rev_type; /* firmware revision type */
8306 uint32_t major; /* uCode version major */
8307 uint32_t minor; /* uCode version minor */
8308 uint32_t hw_ver; /* HW Silicon version */
8309 uint32_t brd_ver; /* HW board version */
8310 uint32_t log_pc; /* log program counter */
8311 uint32_t frame_ptr; /* frame pointer */
8312 uint32_t stack_ptr; /* stack pointer */
8313 uint32_t hcmd; /* last host command header */
8314     uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
8315                              * rxtx_flag */
8316     uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
8317                              * host_flag */
8318     uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
8319                              * enc_flag */
8320     uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
8321                              * time_flag */
8322     uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
8323                              * wico interrupt */
8324     uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
8325     uint32_t wait_event;    /* wait event() caller address */
8326     uint32_t l2p_control;   /* L2pControlField */
8327     uint32_t l2p_duration;  /* L2pDurationField */
8328     uint32_t l2p_mhvalid;   /* L2pMhValidBits */
8329     uint32_t l2p_addr_match; /* L2pAddrMatchStat */
8330     uint32_t lmpm_pmg_sel;  /* indicate which clocks are turned on
8331                              * (LMPM_PMG_SEL) */
8332     uint32_t u_timestamp;   /* indicate when the date and time of the
8333                              * compilation */
8334     uint32_t flow_handler;  /* FH read/write pointers, RX credit */
8335 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
8336
8337/*
8338 * UMAC error struct - relevant starting from family 8000 chip.
8339 * Note: This structure is read from the device with IO accesses,
8340 * and the reading already does the endian conversion. As it is
8341 * read with u32-sized accesses, any members with a different size
8342 * need to be ordered correctly though!
8343 */
8344struct iwx_umac_error_event_table {
8345 uint32_t valid; /* (nonzero) valid, (0) log is empty */
8346 uint32_t error_id; /* type of error */
8347 uint32_t blink1; /* branch link */
8348 uint32_t blink2; /* branch link */
8349 uint32_t ilink1; /* interrupt link */
8350 uint32_t ilink2; /* interrupt link */
8351 uint32_t data1; /* error-specific data */
8352 uint32_t data2; /* error-specific data */
8353 uint32_t data3; /* error-specific data */
8354 uint32_t umac_major;
8355 uint32_t umac_minor;
8356 uint32_t frame_pointer; /* core register 27*/
8357 uint32_t stack_pointer; /* core register 28 */
8358 uint32_t cmd_header; /* latest host cmd sent to UMAC */
8359 uint32_t nic_isr_pref; /* ISR status register */
8360 } __packed;
8361
8362 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
8363 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
8364
8365 void
8366 iwx_nic_umac_error(struct iwx_softc *sc)
8367 {
8368     struct iwx_umac_error_event_table table;
8369     uint32_t base;
8370
8371     base = sc->sc_uc.uc_umac_error_event_table;
8372
8373     if (base < 0x800000) {
8374         printf("%s: Invalid error log pointer 0x%08x\n",
8375             DEVNAME(sc), base);
8376         return;
8377     }
8378
8379     if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8380         printf("%s: reading errlog failed\n", DEVNAME(sc));
8381         return;
8382     }
8383
8384     if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8385         printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
8386         printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8387             sc->sc_flags, table.valid);
8388     }
8389
8390     printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
8391         iwx_desc_lookup(table.error_id));
8392     printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
8393     printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
8394     printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
8395     printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
8396     printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
8397     printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
8398     printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
8399     printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
8400     printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
8401     printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
8402         table.frame_pointer);
8403     printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
8404         table.stack_pointer);
8405     printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
8406     printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
8407         table.nic_isr_pref);
8408 }
8409
8410 #define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
8411static struct {
8412 const char *name;
8413 uint8_t num;
8414} advanced_lookup[] = {
8415 { "NMI_INTERRUPT_WDG", 0x34 },
8416 { "SYSASSERT", 0x35 },
8417 { "UCODE_VERSION_MISMATCH", 0x37 },
8418 { "BAD_COMMAND", 0x38 },
8419 { "BAD_COMMAND", 0x39 },
8420 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
8421 { "FATAL_ERROR", 0x3D },
8422 { "NMI_TRM_HW_ERR", 0x46 },
8423 { "NMI_INTERRUPT_TRM", 0x4C },
8424 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
8425 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
8426 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
8427 { "NMI_INTERRUPT_HOST", 0x66 },
8428 { "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
8429 { "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
8430 { "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
8431 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
8432 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
8433 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
8434 { "ADVANCED_SYSASSERT", 0 },
8435};
8436
8437 const char *
8438 iwx_desc_lookup(uint32_t num)
8439 {
8440     int i;
8441
8442     for (i = 0; i < nitems(advanced_lookup) - 1; i++)
8443         if (advanced_lookup[i].num ==
8444             (num & ~IWX_FW_SYSASSERT_CPU_MASK))
8445             return advanced_lookup[i].name;
8446
8447     /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
8448     return advanced_lookup[i].name;
8449 }
8450
8451/*
8452 * Support for dumping the error log seemed like a good idea ...
8453 * but it's mostly hex junk and the only sensible thing is the
8454 * hw/ucode revision (which we know anyway). Since it's here,
8455 * I'll just leave it in, just in case e.g. the Intel guys want to
8456 * help us decipher some "ADVANCED_SYSASSERT" later.
8457 */
8458 void
8459 iwx_nic_error(struct iwx_softc *sc)
8460 {
8461     struct iwx_error_event_table table;
8462     uint32_t base;
8463
8464     printf("%s: dumping device error log\n", DEVNAME(sc));
8465     base = sc->sc_uc.uc_lmac_error_event_table[0];
8466     if (base < 0x800000) {
8467         printf("%s: Invalid error log pointer 0x%08x\n",
8468             DEVNAME(sc), base);
8469         return;
8470     }
8471
8472     if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8473         printf("%s: reading errlog failed\n", DEVNAME(sc));
8474         return;
8475     }
8476
8477     if (!table.valid) {
8478         printf("%s: errlog not found, skipping\n", DEVNAME(sc));
8479         return;
8480     }
8481
8482     if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8483         printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
8484         printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8485             sc->sc_flags, table.valid);
8486     }
8487
8488     printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
8489         iwx_desc_lookup(table.error_id));
8490     printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
8491         table.trm_hw_status0);
8492     printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
8493         table.trm_hw_status1);
8494     printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
8495     printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
8496     printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
8497     printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
8498     printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
8499     printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
8500     printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
8501     printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
8502     printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
8503     printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
8504     printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
8505     printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
8506         table.fw_rev_type);
8507     printf("%s: %08X | uCode version major\n", DEVNAME(sc),
8508         table.major);
8509     printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
8510         table.minor);
8511     printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
8512     printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
8513     printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
8514     printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
8515     printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
8516     printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
8517     printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
8518     printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
8519     printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
8520     printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
8521     printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
8522     printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
8523     printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
8524     printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
8525     printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
8526     printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
8527     printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
8528
8529     if (sc->sc_uc.uc_umac_error_event_table)
8530         iwx_nic_umac_error(sc);
8531 }
8532
8533 void
8534 iwx_dump_driver_status(struct iwx_softc *sc)
8535 {
8536     int i;
8537
8538     printf("driver status:\n");
8539     for (i = 0; i < nitems(sc->txq); i++) {
8540         struct iwx_tx_ring *ring = &sc->txq[i];
8541         printf("  tx ring %2d: qid=%-2d cur=%-3d "
8542             "queued=%-3d\n",
8543             i, ring->qid, ring->cur, ring->queued);
8544     }
8545     printf("  rx ring: cur=%d\n", sc->rxq.cur);
8546     printf("  802.11 state %s\n",
8547         ieee80211_state_name[sc->sc_ic.ic_state]);
8548 }
8549
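/*
 * Rx buffers are DMA-mapped, so a command response payload must be
 * post-read synced before the CPU may look at it. These helpers sync
 * the payload area following the packet header and point _var_/_ptr_
 * just past the header.
 */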
8550 #define SYNC_RESP_STRUCT(_var_, _pkt_)                                  \
8551 do {                                                                    \
8552     bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),           \
8553         sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);                        \
8554     _var_ = (void *)((_pkt_)+1);                                        \
8555 } while (/*CONSTCOND*/0)
8556
8557 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)                              \
8558 do {                                                                    \
8559     bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),           \
8560         sizeof(len), BUS_DMASYNC_POSTREAD);                             \
8561     _ptr_ = (void *)((_pkt_)+1);                                        \
8562 } while (/*CONSTCOND*/0)
8563
8564 int
8565 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
8566 {
8567     int qid, idx, code;
8568
8569     qid = pkt->hdr.qid & ~0x80;
8570     idx = pkt->hdr.idx;
8571     code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8572
8573     return (!(qid == 0 && idx == 0 && code == 0) &&
8574         pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
8575 }
8576
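/*
 * Walk all firmware packets packed into one Rx buffer; frames are
 * aligned to IWX_FH_RSCSR_FRAME_ALIGN boundaries. MPDU payloads are
 * passed up in mbufs which either share the buffer via m_copym() or,
 * for the last frame, take over m0 itself, in which case a fresh
 * buffer is first put back on the RX ring.
 */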
8577void
8578iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf_list *ml)
8579{
8580 struct ifnet *ifp = IC2IFP(&sc->sc_ic)(&(&sc->sc_ic)->ic_ac.ac_if);
8581 struct iwx_rx_packet *pkt, *nextpkt;
8582 uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
8583 struct mbuf *m0, *m;
8584 const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
8585 int qid, idx, code, handled = 1;
8586
8587 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data->
map), (0), (4096), (0x02))
8588 BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data->
map), (0), (4096), (0x02))
;
8589
8590 m0 = data->m;
8591 while (m0 && offset + minsz < IWX_RBUF_SIZE4096) {
8592 pkt = (struct iwx_rx_packet *)(m0->m_datam_hdr.mh_data + offset);
8593 qid = pkt->hdr.qid;
8594 idx = pkt->hdr.idx;
8595
8596 code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code)((pkt->hdr.flags << 8) | pkt->hdr.code);
8597
8598 if (!iwx_rx_pkt_valid(pkt))
8599 break;
8600
8601 /*
8602 * XXX Intel inside (tm)
8603 * Any commands in the LONG_GROUP could actually be in the
8604 * LEGACY group. Firmware API versions >= 50 reject commands
8605 * in group 0, forcing us to use this hack.
8606 */
8607 if (iwx_cmd_groupid(code) == IWX_LONG_GROUP0x1) {
8608 struct iwx_tx_ring *ring = &sc->txq[qid];
8609 struct iwx_tx_data *txdata = &ring->data[idx];
8610 if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW0x01)
8611 code = iwx_cmd_opcode(code);
8612 }
8613
8614 len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
8615 if (len < minsz || len > (IWX_RBUF_SIZE4096 - offset))
8616 break;
8617
8618 if (code == IWX_REPLY_RX_MPDU_CMD0xc1 && ++nmpdu == 1) {
8619 /* Take mbuf m0 off the RX ring. */
8620 if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE4096, sc->rxq.cur)) {
8621 ifp->if_ierrorsif_data.ifi_ierrors++;
8622 break;
8623 }
8624 KASSERT(data->m != m0)((data->m != m0) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/if_iwx.c"
, 8624, "data->m != m0"))
;
8625 }
8626
8627 switch (code) {
8628 case IWX_REPLY_RX_PHY_CMD0xc0:
8629 iwx_rx_rx_phy_cmd(sc, pkt, data);
8630 break;
8631
8632 case IWX_REPLY_RX_MPDU_CMD0xc1: {
8633 size_t maxlen = IWX_RBUF_SIZE4096 - offset - minsz;
8634 nextoff = offset +
8635 roundup(len, IWX_FH_RSCSR_FRAME_ALIGN)((((len)+((0x40)-1))/(0x40))*(0x40));
8636 nextpkt = (struct iwx_rx_packet *)
8637 (m0->m_datam_hdr.mh_data + nextoff);
8638 if (nextoff + minsz >= IWX_RBUF_SIZE4096 ||
8639 !iwx_rx_pkt_valid(nextpkt)) {
8640 /* No need to copy last frame in buffer. */
8641 if (offset > 0)
8642 m_adj(m0, offset);
8643 iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen, ml);
8644 m0 = NULL((void *)0); /* stack owns m0 now; abort loop */
8645 } else {
8646 /*
8647 * Create an mbuf which points to the current
8648 * packet. Always copy from offset zero to
8649 * preserve m_pkthdr.
8650 */
8651 m = m_copym(m0, 0, M_COPYALL1000000000, M_DONTWAIT0x0002);
8652 if (m == NULL((void *)0)) {
8653 ifp->if_ierrorsif_data.ifi_ierrors++;
8654 m_freem(m0);
8655 m0 = NULL((void *)0);
8656 break;
8657 }
8658 m_adj(m, offset);
8659 iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen, ml);
8660 }
8661 break;
8662 }
8663
8664 case IWX_BAR_FRAME_RELEASE0xc2:
8665 iwx_rx_bar_frame_release(sc, pkt, ml);
8666 break;
8667
8668 case IWX_TX_CMD0x1c:
8669 iwx_rx_tx_cmd(sc, pkt, data);
8670 break;
8671
8672 case IWX_BA_NOTIF0xc5:
8673 iwx_rx_compressed_ba(sc, pkt);
8674 break;
8675
8676 case IWX_MISSED_BEACONS_NOTIFICATION0xa2:
8677 iwx_rx_bmiss(sc, pkt, data);
8678 break;
8679
8680 case IWX_MFUART_LOAD_NOTIFICATION0xb1:
8681 break;
8682
8683 case IWX_ALIVE0x1: {
8684 struct iwx_alive_resp_v4 *resp4;
8685 struct iwx_alive_resp_v5 *resp5;
8686
8687 DPRINTF(("%s: firmware alive\n", __func__))do { ; } while (0);
8688 sc->sc_uc.uc_ok = 0;
8689
8690 /*
8691 * For v5 and above, we can check the version, for older
8692 * versions we need to check the size.
8693 */
8694 if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
8695 IWX_ALIVE) == 5) {
8696 SYNC_RESP_STRUCT(resp5, pkt);
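/*
 * Editor's note: SYNC_RESP_STRUCT, a macro defined earlier in this
 * file, bus_dmamap_sync()s the response payload (the bytes following
 * the packet header) and then points its first argument just past the
 * header, i.e. it performs resp5 = (void *)(pkt + 1). The same
 * pattern recurs in the cases below.
 */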
8697 if (iwx_rx_packet_payload_len(pkt) !=
8698 sizeof(*resp5)) {
8699 sc->sc_uc.uc_intr = 1;
8700 wakeup(&sc->sc_uc);
8701 break;
8702 }
8703 sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
8704 resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
8705 sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
8706 resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
8707 sc->sc_uc.uc_log_event_table = le32toh(
8708 resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
8709 sc->sc_uc.uc_umac_error_event_table = le32toh(
8710 resp5->umac_data.dbg_ptrs.error_info_addr);
8711 if (resp5->status == IWX_ALIVE_STATUS_OK)
8712 sc->sc_uc.uc_ok = 1;
8713 } else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
8714 SYNC_RESP_STRUCT(resp4, pkt);
8715 sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
8716 resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
8717 sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
8718 resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
8719 sc->sc_uc.uc_log_event_table = le32toh(
8720 resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
8721 sc->sc_uc.uc_umac_error_event_table = le32toh(
8722 resp4->umac_data.dbg_ptrs.error_info_addr);
8723 if (resp4->status == IWX_ALIVE_STATUS_OK)
8724 sc->sc_uc.uc_ok = 1;
8725 }
8726
8727 sc->sc_uc.uc_intr = 1;
8728 wakeup(&sc->sc_uc);
8729 break;
8730 }
8731
8732 case IWX_STATISTICS_NOTIFICATION: {
8733 struct iwx_notif_statistics *stats;
8734 SYNC_RESP_STRUCT(stats, pkt);
8735 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
8736 sc->sc_noise = iwx_get_noise(&stats->rx.general);
8737 break;
8738 }
8739
8740 case IWX_DTS_MEASUREMENT_NOTIFICATION:
8741 case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
8742 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
8743 case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
8744 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
8745 break;
8746
8747 case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
8748 IWX_CT_KILL_NOTIFICATION): {
8749 struct iwx_ct_kill_notif *notif;
8750 SYNC_RESP_STRUCT(notif, pkt);
8751 printf("%s: device at critical temperature (%u degC), "
8752 "stopping device\n",
8753 DEVNAME(sc), le16toh(notif->temperature));
8754 sc->sc_flags |= IWX_FLAG_HW_ERR;
8755 task_add(systq, &sc->init_task);
8756 break;
8757 }
8758
8759 case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
8760 IWX_SESSION_PROTECTION_CMD):
8761 case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
8762 IWX_NVM_GET_INFO):
8763 case IWX_ADD_STA_KEY:
8764 case IWX_PHY_CONFIGURATION_CMD:
8765 case IWX_TX_ANT_CONFIGURATION_CMD:
8766 case IWX_ADD_STA:
8767 case IWX_MAC_CONTEXT_CMD:
8768 case IWX_REPLY_SF_CFG_CMD:
8769 case IWX_POWER_TABLE_CMD:
8770 case IWX_LTR_CONFIG:
8771 case IWX_PHY_CONTEXT_CMD:
8772 case IWX_BINDING_CONTEXT_CMD:
8773 case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
8774 case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
8775 case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
8776 case IWX_REPLY_BEACON_FILTERING_CMD:
8777 case IWX_MAC_PM_POWER_TABLE:
8778 case IWX_TIME_QUOTA_CMD:
8779 case IWX_REMOVE_STA:
8780 case IWX_TXPATH_FLUSH:
8781 case IWX_BT_CONFIG:
8782 case IWX_MCC_UPDATE_CMD:
8783 case IWX_TIME_EVENT_CMD:
8784 case IWX_STATISTICS_CMD:
8785 case IWX_SCD_QUEUE_CFG: {
8786 size_t pkt_len;
8787
8788 if (sc->sc_cmd_resp_pkt[idx] == NULL)
8789 break;
8790
8791 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
8792 sizeof(*pkt), BUS_DMASYNC_POSTREAD);
8793
8794 pkt_len = sizeof(pkt->len_n_flags) +
8795 iwx_rx_packet_len(pkt);
8796
8797 if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
8798 pkt_len < sizeof(*pkt) ||
8799 pkt_len > sc->sc_cmd_resp_len[idx]) {
8800 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
8801 sc->sc_cmd_resp_len[idx]);
8802 sc->sc_cmd_resp_pkt[idx] = NULL;
8803 break;
8804 }
8805
8806 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
8807 pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
8808 memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
8809 break;
8810 }
8811
8812 case IWX_INIT_COMPLETE_NOTIF:
8813 sc->sc_init_complete |= IWX_INIT_COMPLETE;
8814 wakeup(&sc->sc_init_complete);
8815 break;
8816
8817 case IWX_SCAN_COMPLETE_UMAC: {
8818 struct iwx_umac_scan_complete *notif;
8819 SYNC_RESP_STRUCT(notif, pkt);
8820 iwx_endscan(sc);
8821 break;
8822 }
8823
8824 case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
8825 struct iwx_umac_scan_iter_complete_notif *notif;
8826 SYNC_RESP_STRUCT(notif, pkt);
8827 iwx_endscan(sc);
8828 break;
8829 }
8830
8831 case IWX_MCC_CHUB_UPDATE_CMD: {
8832 struct iwx_mcc_chub_notif *notif;
8833 SYNC_RESP_STRUCT(notif, pkt);
8834 iwx_mcc_update(sc, notif);
8835 break;
8836 }
8837
8838 case IWX_REPLY_ERROR: {
8839 struct iwx_error_resp *resp;
8840 SYNC_RESP_STRUCT(resp, pkt);
8841 printf("%s: firmware error 0x%x, cmd 0x%x\n",
8842 DEVNAME(sc), le32toh(resp->error_type),
8843 resp->cmd_id);
8844 break;
8845 }
8846
8847 case IWX_TIME_EVENT_NOTIFICATION: {
8848 struct iwx_time_event_notif *notif;
8849 uint32_t action;
8850 SYNC_RESP_STRUCT(notif, pkt);
8851
8852 if (sc->sc_time_event_uid != le32toh(notif->unique_id))
8853 break;
8854 action = le32toh(notif->action);
8855 if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
8856 sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
8857 break;
8858 }
8859
8860 case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
8861 IWX_SESSION_PROTECTION_NOTIF):
8862 break;
8863
8864 case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
8865 IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
8866 break;
8867
8868 /*
8869 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
8870 * messages. Just ignore them for now.
8871 */
8872 case IWX_DEBUG_LOG_MSG:
8873 break;
8874
8875 case IWX_MCAST_FILTER_CMD:
8876 break;
8877
8878 case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
8879 break;
8880
8881 case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
8882 break;
8883
8884 case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
8885 break;
8886
8887 case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
8888 IWX_NVM_ACCESS_COMPLETE):
8889 break;
8890
8891 case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
8892 break; /* happens in monitor mode; ignore for now */
8893
8894 case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
8895 break;
8896
8897 case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
8898 IWX_TLC_MNG_UPDATE_NOTIF): {
8899 struct iwx_tlc_update_notif *notif;
8900 SYNC_RESP_STRUCT(notif, pkt);
8901 if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
8902 iwx_rs_update(sc, notif);
8903 break;
8904 }
8905
8906 default:
8907 handled = 0;
8908 printf("%s: unhandled firmware response 0x%x/0x%x "
8909 "rx ring %d[%d]\n",
8910 DEVNAME(sc), code, pkt->len_n_flags,
8911 (qid & ~0x80), idx);
8912 break;
8913 }
8914
8915 /*
8916 * uCode sets bit 0x80 when it originates the notification,
8917 * i.e. when the notification is not a direct response to a
8918 * command sent by the driver.
8919 * For example, uCode issues IWX_REPLY_RX when it sends a
8920 * received frame to the driver.
8921 */
8922 if (handled && !(qid & (1 << 7))) {
8923 iwx_cmd_done(sc, qid, idx, code);
8924 }
8925
8926 offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
8927 }
8928
8929 if (m0 && m0 != data->m)
8930 m_freem(m0);
8931}
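
iwx_rx_pkt() above walks several firmware packets that the device packs
into a single 4096-byte RX buffer. The following minimal sketch isolates
the traversal arithmetic; walk_rx_buffer() and handle_pkt() are
hypothetical names used here for illustration only, not part of the
driver:

    static void
    walk_rx_buffer(struct iwx_softc *sc, uint8_t *buf, size_t minsz)
    {
            struct iwx_rx_packet *pkt;
            size_t offset = 0, len;

            while (offset + minsz < IWX_RBUF_SIZE) {
                    pkt = (struct iwx_rx_packet *)(buf + offset);
                    len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
                    if (len < minsz || len > IWX_RBUF_SIZE - offset)
                            break;  /* length field truncated or corrupt */
                    handle_pkt(sc, pkt);    /* hypothetical consumer */
                    /* Frames are padded to IWX_FH_RSCSR_FRAME_ALIGN (64). */
                    offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
            }
    }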
8932
8933void
8934iwx_notif_intr(struct iwx_softc *sc)
8935{
8936 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
8937 uint16_t hw;
8938
8939 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
8940 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
8941
8942 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
8943 hw &= (IWX_RX_MQ_RING_COUNT - 1);
8944 while (sc->rxq.cur != hw) {
8945 struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];
8946 iwx_rx_pkt(sc, data, &ml);
8947 sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
8948 }
8949 if_input(&sc->sc_ic.ic_if, &ml);
8950
8951 /*
8952 * Tell the firmware what we have processed.
8953 * Seems like the hardware gets upset unless we align the write by 8??
8954 */
8955 hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
8956 IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
8957}
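
A worked example of the final write-index computation above: with
IWX_RX_MQ_RING_COUNT == 512, hw == 0 wraps to 511, and the & ~7 mask
then rounds that down to 504, so the index handed to the hardware is
always a multiple of 8, matching the alignment note in the source
comment.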
8958
8959int
8960iwx_intr(void *arg)
8961{
8962 struct iwx_softc *sc = arg;
8963 struct ieee80211com *ic = &sc->sc_ic;
8964 struct ifnet *ifp = IC2IFP(ic);
8965 int handled = 0;
8966 int r1, r2, rv = 0;
8967
8968 IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
8969
8970 if (sc->sc_flags & IWX_FLAG_USE_ICT) {
8971 uint32_t *ict = sc->ict_dma.vaddr;
8972 int tmp;
8973
8974 tmp = htole32(ict[sc->ict_cur]);
8975 if (!tmp)
8976 goto out_ena;
8977
8978 /*
8979 * ok, there was something. keep plowing until we have all.
8980 */
8981 r1 = r2 = 0;
8982 while (tmp) {
8983 r1 |= tmp;
8984 ict[sc->ict_cur] = 0;
8985 sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
8986 tmp = htole32(ict[sc->ict_cur]);
8987 }
8988
8989 /* this is where the fun begins. don't ask */
8990 if (r1 == 0xffffffff)
8991 r1 = 0;
8992
8993 /* i am not expected to understand this */
8994 if (r1 & 0xc0000)
8995 r1 |= 0x8000;
8996 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
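/*
 * Editor's note (an assumption based on the surrounding code): the ICT
 * table stores interrupt causes in a compact byte-oriented form, and
 * this shuffle spreads the two accumulated low bytes of r1 back out to
 * the bit positions a direct IWX_CSR_INT read would have produced, so
 * the code below can treat both paths uniformly.
 */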
8997 } else {
8998 r1 = IWX_READ(sc, IWX_CSR_INT);
8999 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
9000 goto out;
9001 r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
9002 }
9003 if (r1 == 0 && r2 == 0) {
9004 goto out_ena;
9005 }
9006
9007 IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);
9008
9009 if (r1 & IWX_CSR_INT_BIT_ALIVE) {
9010 int i;
9011
9012 /* Firmware has now configured the RFH. */
9013 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
9014 iwx_update_rx_desc(sc, &sc->rxq, i);
9015 IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
9016 }
9017
9018 handled |= (r1 & (IWX_CSR_INT_BIT_ALIVE /*| IWX_CSR_INT_BIT_SCD*/));
9019
9020 if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
9021 handled |= IWX_CSR_INT_BIT_RF_KILL;
9022 iwx_check_rfkill(sc);
9023 task_add(systq, &sc->init_task);
9024 rv = 1;
9025 goto out_ena;
9026 }
9027
9028 if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
9029 if (ifp->if_flags & IFF_DEBUG) {
9030 iwx_nic_error(sc);
9031 iwx_dump_driver_status(sc);
9032 }
9033 printf("%s: fatal firmware error\n", DEVNAME(sc));
9034 if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
9035 task_add(systq, &sc->init_task);
9036 rv = 1;
9037 goto out;
9038
9039 }
9040
9041 if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
9042 handled |= IWX_CSR_INT_BIT_HW_ERR;
9043 printf("%s: hardware error, stopping device\n", DEVNAME(sc));
9044 if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
9045 sc->sc_flags |= IWX_FLAG_HW_ERR;
9046 task_add(systq, &sc->init_task);
9047 }
9048 rv = 1;
9049 goto out;
9050 }
9051
9052 /* firmware chunk loaded */
9053 if (r1 & IWX_CSR_INT_BIT_FH_TX) {
9054 IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);
9055 handled |= IWX_CSR_INT_BIT_FH_TX;
9056
9057 sc->sc_fw_chunk_done = 1;
9058 wakeup(&sc->sc_fw);
9059 }
9060
9061 if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
9062 IWX_CSR_INT_BIT_RX_PERIODIC)) {
9063 if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
9064 handled |= (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX);
9065 IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
9066 }
9067 if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
9068 handled |= IWX_CSR_INT_BIT_RX_PERIODIC;
9069 IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
9070 }
9071
9072 /* Disable periodic interrupt; we use it as just a one-shot. */
9073 IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);
9074
9075 /*
9076 * Enable periodic interrupt in 8 msec only if we received
9077 * real RX interrupt (instead of just periodic int), to catch
9078 * any dangling Rx interrupt. If it was just the periodic
9079 * interrupt, there was no dangling Rx activity, and no need
9080 * to extend the periodic interrupt; one-shot is enough.
9081 */
9082 if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
9083 IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
9084 IWX_CSR_INT_PERIODIC_ENA);
9085
9086 iwx_notif_intr(sc);
9087 }
9088
9089 rv = 1;
9090
9091 out_ena:
9092 iwx_restore_interrupts(sc);
9093 out:
9094 return rv;
9095}
9096
9097int
9098iwx_intr_msix(void *arg)
9099{
9100 struct iwx_softc *sc = arg;
9101 struct ieee80211com *ic = &sc->sc_ic;
9102 struct ifnet *ifp = IC2IFP(ic);
9103 uint32_t inta_fh, inta_hw;
9104 int vector = 0;
9105
9106 inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
9107 inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
9108 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
9109 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
9110 inta_fh &= sc->sc_fh_mask;
9111 inta_hw &= sc->sc_hw_mask;
9112
9113 if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
9114 inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
9115 iwx_notif_intr(sc);
9116 }
9117
9118 /* firmware chunk loaded */
9119 if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
9120 sc->sc_fw_chunk_done = 1;
9121 wakeup(&sc->sc_fw);
9122 }
9123
9124 if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
9125 (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
9126 (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
9127 if (ifp->if_flags & IFF_DEBUG) {
9128 iwx_nic_error(sc);
9129 iwx_dump_driver_status(sc);
9130 }
9131 printf("%s: fatal firmware error\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
9132 if ((sc->sc_flags & IWX_FLAG_SHUTDOWN0x100) == 0)
9133 task_add(systq, &sc->init_task);
9134 return 1;
9135 }
9136
9137 if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
9138 iwx_check_rfkill(sc);
9139 task_add(systq, &sc->init_task);
9140 }
9141
9142 if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
9143 printf("%s: hardware error, stopping device \n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
9144 if ((sc->sc_flags & IWX_FLAG_SHUTDOWN0x100) == 0) {
9145 sc->sc_flags |= IWX_FLAG_HW_ERR0x80;
9146 task_add(systq, &sc->init_task);
9147 }
9148 return 1;
9149 }
9150
9151 if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
9152 int i;
9153
9154 /* Firmware has now configured the RFH. */
9155 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
9156 iwx_update_rx_desc(sc, &sc->rxq, i);
9157 IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
9158 }
9159
9160 /*
9161 * Before sending the interrupt the HW disables it to prevent
9162 * a nested interrupt. This is done by writing 1 to the corresponding
9163 * bit in the mask register. After handling the interrupt, it should be
9164 * re-enabled by clearing this bit. This register is defined as
9165 * write 1 clear (W1C), meaning that it is cleared by writing 1
9166 * to the bit.
9167 */
9168 IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
9169 return 1;
9170}
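
Note that this MSI-X handler services a single vector (vector is fixed
at 0 above), which matches the attach path below, where only
pci_intr_map_msix(pa, 0, &ih) is tried; the final W1C automask write
therefore always re-arms vector 0.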
9171
9172typedef void *iwx_match_t;
9173
9174static const struct pci_matchid iwx_devices[] = {
9175 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_1 },
9176 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_2 },
9177 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_3 },
9178 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_4 },
9179 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_5 },
9180};
9181
9182static const struct pci_matchid iwx_subsystem_id_ax201[] = {
9183 { PCI_VENDOR_INTEL, 0x0070 },
9184 { PCI_VENDOR_INTEL, 0x0074 },
9185 { PCI_VENDOR_INTEL, 0x0078 },
9186 { PCI_VENDOR_INTEL, 0x007c },
9187 { PCI_VENDOR_INTEL, 0x0310 },
9188 { PCI_VENDOR_INTEL, 0x2074 },
9189 { PCI_VENDOR_INTEL, 0x4070 },
9190 /* TODO: There are more ax201 devices with "main" product ID 0x06f0 */
9191};
9192
9193int
9194iwx_match(struct device *parent, iwx_match_t match __unused, void *aux)
9195{
9196 struct pci_attach_args *pa = aux;
9197 pcireg_t subid;
9198 pci_vendor_id_t svid;
9199 pci_product_id_t spid;
9200 int i;
9201
9202 if (!pci_matchbyid(pa, iwx_devices, nitems(iwx_devices)))
9203 return 0;
9204
9205 /*
9206 * Some PCI product IDs are shared among devices which use distinct
9207 * chips or firmware. We need to match the subsystem ID as well to
9208 * ensure that we have in fact found a supported device.
9209 */
9210 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
9211 svid = PCI_VENDOR(subid);
9212 spid = PCI_PRODUCT(subid);
9213
9214 switch (PCI_PRODUCT(pa->pa_id)) {
9215 case PCI_PRODUCT_INTEL_WL_22500_1: /* AX200 */
9216 return 1; /* match any device */
9217 case PCI_PRODUCT_INTEL_WL_22500_2: /* AX201 */
9218 case PCI_PRODUCT_INTEL_WL_22500_3: /* AX201 */
9219 case PCI_PRODUCT_INTEL_WL_22500_4: /* AX201 */
9220 case PCI_PRODUCT_INTEL_WL_22500_5: /* AX201 */
9221 for (i = 0; i < nitems(iwx_subsystem_id_ax201); i++) {
9222 if (svid == iwx_subsystem_id_ax201[i].pm_vid &&
9223 spid == iwx_subsystem_id_ax201[i].pm_pid)
9224 return 1;
9225
9226 }
9227 break;
9228 default:
9229 break;
9230 }
9231
9232 return 0;
9233}
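
The subsystem ID used above packs vendor and product into one 32-bit
config word. A minimal sketch of the decode, with an illustrative value
(hypothetical, for exposition only):

    pcireg_t subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    pci_vendor_id_t svid = PCI_VENDOR(subid);   /* low 16 bits */
    pci_product_id_t spid = PCI_PRODUCT(subid); /* high 16 bits */
    /* e.g. subid 0x00708086 -> svid 0x8086 (Intel), spid 0x0070 */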
9234
9235int
9236iwx_preinit(struct iwx_softc *sc)
9237{
9238 struct ieee80211com *ic = &sc->sc_ic;
9239 struct ifnet *ifp = IC2IFP(ic);
9240 int err;
9241 static int attached;
9242
9243 err = iwx_prepare_card_hw(sc);
9244 if (err) {
9245 printf("%s: could not initialize hardware\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
9246 return err;
9247 }
9248
9249 if (attached) {
9250 /* Update MAC in case the upper layers changed it. */
9251 IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
9252 ((struct arpcom *)ifp)->ac_enaddr);
9253 return 0;
9254 }
9255
9256 err = iwx_start_hw(sc);
9257 if (err) {
9258 printf("%s: could not initialize hardware\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
9259 return err;
9260 }
9261
9262 err = iwx_run_init_mvm_ucode(sc, 1);
9263 iwx_stop_device(sc);
9264 if (err)
9265 return err;
9266
9267 /* Print version info and MAC address on first successful fw load. */
9268 attached = 1;
9269 printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
9270 DEVNAME(sc)((sc)->sc_dev.dv_xname), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK(0x000FFF0),
9271 sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
9272
9273 if (sc->sc_nvm.sku_cap_11n_enable)
9274 iwx_setup_ht_rates(sc);
9275
9276 /* not all hardware can do 5GHz band */
9277 if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
9278 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
9279 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
9280
9281 /* Configure channel information obtained from firmware. */
9282 ieee80211_channel_init(ifp);
9283
9284 /* Configure MAC address. */
9285 err = if_setlladdr(ifp, ic->ic_myaddr);
9286 if (err)
9287 printf("%s: could not set MAC address (error %d)\n",
9288 DEVNAME(sc), err);
9289
9290 ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
9291
9292 return 0;
9293}
9294
9295void
9296iwx_attach_hook(struct device *self)
9297{
9298 struct iwx_softc *sc = (void *)self;
9299
9300 KASSERT(!cold);
9301
9302 iwx_preinit(sc);
9303}
9304
9305void
9306iwx_attach(struct device *parent, struct device *self, void *aux)
9307{
9308 struct iwx_softc *sc = (void *)self;
9309 struct pci_attach_args *pa = aux;
9310 pci_intr_handle_t ih;
9311 pcireg_t reg, memtype;
9312 struct ieee80211com *ic = &sc->sc_ic;
9313 struct ifnet *ifp = &ic->ic_if;
9314 const char *intrstr;
9315 int err;
9316 int txq_i, i, j;
9317
9318 sc->sc_pct = pa->pa_pc;
9319 sc->sc_pcitag = pa->pa_tag;
9320 sc->sc_dmat = pa->pa_dmat;
9321
9322 rw_init(&sc->ioctl_rwl, "iwxioctl");
9323
9324 err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
9325 PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
9326 if (err == 0) {
9327 printf("%s: PCIe capability structure not found!\n",
9328 DEVNAME(sc));
9329 return;
9330 }
9331
9332 /*
9333 * We disable the RETRY_TIMEOUT register (0x41) to keep
9334 * PCI Tx retries from interfering with C3 CPU state.
9335 */
9336 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
9337 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
9338
9339 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
9340 err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
9341 &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
9342 if (err) {
9343 printf("%s: can't map mem space\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
9344 return;
9345 }
9346
9347 if (pci_intr_map_msix(pa, 0, &ih) == 0) {
9348 sc->sc_msix = 1;
9349 } else if (pci_intr_map_msi(pa, &ih)) {
9350 if (pci_intr_map(pa, &ih)) {
9351 printf("%s: can't map interrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
9352 return;
9353 }
9354 /* Hardware bug workaround. */
9355 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
9356 PCI_COMMAND_STATUS_REG);
9357 if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
9358 reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
9359 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
9360 PCI_COMMAND_STATUS_REG, reg);
9361 }
9362
9363 intrstr = pci_intr_string(sc->sc_pct, ih);
9364 if (sc->sc_msix)
9365 sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
9366 iwx_intr_msix, sc, DEVNAME(sc));
9367 else
9368 sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
9369 iwx_intr, sc, DEVNAME(sc));
9370
9371 if (sc->sc_ih == NULL) {
9372 printf("\n");
9373 printf("%s: can't establish interrupt", DEVNAME(sc));
9374 if (intrstr != NULL)
9375 printf(" at %s", intrstr);
9376 printf("\n");
9377 return;
9378 }
9379 printf(", %s\n", intrstr);
9380
9381 /* Clear pending interrupts. */
9382 IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
9383 IWX_WRITE(sc, IWX_CSR_INT, ~0);
9384 IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
9385
9386 sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
9387
9388 /*
9389 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
9390 * changed, and now the revision step also includes bit 0-1 (no more
9391 * "dash" value). To keep hw_rev backwards compatible - we'll store it
9392 * in the old format.
9393 */
9394 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
9395 (IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
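/*
 * Editor's note: per the macro expansion recorded in this report,
 * IWX_CSR_HW_REV_STEP(x) is ((x & 0xC) >> 2). Passing sc_hw_rev << 2
 * thus extracts the low two bits of sc_hw_rev, and the outer << 2
 * re-places them as the old-style "step" field in bits 2-3 of the
 * stored value.
 */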
9396
9397 switch (PCI_PRODUCT(pa->pa_id)) {
9398 case PCI_PRODUCT_INTEL_WL_22500_1:
9399 sc->sc_fwname = "iwx-cc-a0-67";
9400 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
9401 sc->sc_integrated = 0;
9402 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
9403 sc->sc_low_latency_xtal = 0;
9404 sc->sc_xtal_latency = 0;
9405 sc->sc_tx_with_siso_diversity = 0;
9406 sc->sc_uhb_supported = 0;
9407 break;
9408 case PCI_PRODUCT_INTEL_WL_22500_2:
9409 case PCI_PRODUCT_INTEL_WL_22500_3:
9410 case PCI_PRODUCT_INTEL_WL_22500_5:
9411 if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
9412 printf("%s: unsupported AX201 adapter\n", DEVNAME(sc));
9413 return;
9414 }
9415
9416 sc->sc_fwname = "iwx-QuZ-a0-hr-b0-67";
9417 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
9418 sc->sc_integrated = 1;
9419 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
9420 sc->sc_low_latency_xtal = 0;
9421 sc->sc_xtal_latency = 500;
9422 sc->sc_tx_with_siso_diversity = 0;
9423 sc->sc_uhb_supported = 0;
9424 break;
9425 case PCI_PRODUCT_INTEL_WL_22500_4:
9426 sc->sc_fwname = "iwx-Qu-c0-hr-b0-63";
9427 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
9428 sc->sc_integrated = 1;
9429 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
9430 sc->sc_low_latency_xtal = 0;
9431 sc->sc_xtal_latency = 1820;
9432 sc->sc_tx_with_siso_diversity = 0;
9433 sc->sc_uhb_supported = 0;
9434 break;
9435 default:
9436 printf("%s: unknown adapter type\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
9437 return;
9438 }
9439
9440 /* Allocate DMA memory for loading firmware. */
9441 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
9442 sizeof(struct iwx_context_info), 0);
9443 if (err) {
9444 printf("%s: could not allocate memory for loading firmware\n",
9445 DEVNAME(sc));
9446 return;
9447 }
9448
9449 /* Allocate interrupt cause table (ICT).*/
9450 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
9451 IWX_ICT_SIZE, 1 << IWX_ICT_PADDR_SHIFT);
9452 if (err) {
9453 printf("%s: could not allocate ICT table\n", DEVNAME(sc));
9454 goto fail1;
9455 }
9456
9457 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
9458 err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
9459 if (err) {
9460 printf("%s: could not allocate TX ring %d\n",
9461 DEVNAME(sc), txq_i);
9462 goto fail4;
9463 }
9464 }
9465
9466 err = iwx_alloc_rx_ring(sc, &sc->rxq);
9467 if (err) {
9468 printf("%s: could not allocate RX ring\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
9469 goto fail4;
9470 }
9471
9472 sc->sc_nswq = taskq_create("iwxns", 1, IPL_NET, 0);
9473 if (sc->sc_nswq == NULL)
9474 goto fail4;
9475
9476 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
9477 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
9478 ic->ic_state = IEEE80211_S_INIT;
9479
9480 /* Set device capabilities. */
9481 ic->ic_caps =
9482 IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
9483 IEEE80211_C_ADDBA_OFFLOAD | /* device sends ADDBA/DELBA frames */
9484 IEEE80211_C_WEP | /* WEP */
9485 IEEE80211_C_RSN | /* WPA/RSN */
9486 IEEE80211_C_SCANALL | /* device scans all channels at once */
9487 IEEE80211_C_SCANALLBAND | /* device scans all bands at once */
9488 IEEE80211_C_MONITOR | /* monitor mode supported */
9489 IEEE80211_C_SHSLOT | /* short slot time supported */
9490 IEEE80211_C_SHPREAMBLE; /* short preamble supported */
9491
9492 ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
9493 ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
9494 ic->ic_htcaps |=
9495 (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
9496 ic->ic_htxcaps = 0;
9497 ic->ic_txbfcaps = 0;
9498 ic->ic_aselcaps = 0;
9499 ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
9500
9501 ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
9502 ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
9503 ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
9504
9505 for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
9506 sc->sc_phyctxt[i].id = i;
9507 sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
9508 }
9509
9510 /* IBSS channel undefined for now. */
9511 ic->ic_ibss_chan = &ic->ic_channels[1];
9512
9513 ic->ic_max_rssi = IWX_MAX_DBM - IWX_MIN_DBM;
9514
9515 ifp->if_softc = sc;
9516 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
9517 ifp->if_ioctl = iwx_ioctl;
9518 ifp->if_start = iwx_start;
9519 ifp->if_watchdog = iwx_watchdog;
9520 memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
9521
9522 if_attach(ifp);
9523 ieee80211_ifattach(ifp);
9524 ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
9525
9526#if NBPFILTER > 0
9527 iwx_radiotap_attach(sc);
9528#endif
9529 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
9530 struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
9531 rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
9532 rxba->sc = sc;
9533 timeout_set(&rxba->session_timer, iwx_rx_ba_session_expired,
9534 rxba);
9535 timeout_set(&rxba->reorder_buf.reorder_timer,
9536 iwx_reorder_timer_expired, &rxba->reorder_buf);
9537 for (j = 0; j < nitems(rxba->entries); j++)
9538 ml_init(&rxba->entries[j].frames);
9539 }
9540 task_set(&sc->init_task, iwx_init_task, sc);
9541 task_set(&sc->newstate_task, iwx_newstate_task, sc);
9542 task_set(&sc->ba_task, iwx_ba_task, sc);
9543 task_set(&sc->setkey_task, iwx_setkey_task, sc);
9544 task_set(&sc->mac_ctxt_task, iwx_mac_ctxt_task, sc);
9545 task_set(&sc->phy_ctxt_task, iwx_phy_ctxt_task, sc);
9546 task_set(&sc->bgscan_done_task, iwx_bgscan_done_task, sc);
9547
9548 ic->ic_node_alloc = iwx_node_alloc;
9549 ic->ic_bgscan_start = iwx_bgscan;
9550 ic->ic_bgscan_done = iwx_bgscan_done;
9551 ic->ic_set_key = iwx_set_key;
9552 ic->ic_delete_key = iwx_delete_key;
9553
9554 /* Override 802.11 state transition machine. */
9555 sc->sc_newstate = ic->ic_newstate;
9556 ic->ic_newstate = iwx_newstate;
9557 ic->ic_updateprot = iwx_updateprot;
9558 ic->ic_updateslot = iwx_updateslot;
9559 ic->ic_updateedca = iwx_updateedca;
9560 ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
9561 ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
9562 ic->ic_ampdu_tx_start = iwx_ampdu_tx_start;
9563 ic->ic_ampdu_tx_stop = NULL;
9564 /*
9565 * We cannot read the MAC address without loading the
9566 * firmware from disk. Postpone until mountroot is done.
9567 */
9568 config_mountroot(self, iwx_attach_hook);
9569
9570 return;
9571
9572fail4: while (--txq_i >= 0)
9573 iwx_free_tx_ring(sc, &sc->txq[txq_i]);
9574 iwx_free_rx_ring(sc, &sc->rxq);
9575 if (sc->ict_dma.vaddr != NULL)
9576 iwx_dma_contig_free(&sc->ict_dma);
9577
9578fail1: iwx_dma_contig_free(&sc->ctxt_info_dma);
9579 return;
9580}
9581
9582#if NBPFILTER > 0
9583void
9584iwx_radiotap_attach(struct iwx_softc *sc)
9585{
9586 bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
9587 sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
9588
9589 sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
9590 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
9591 sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);
9592
9593 sc->sc_txtap_len = sizeof sc->sc_txtapu;
9594 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
9595 sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
9596}
9597#endif
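
As an aside, the sc_rxtap and sc_txtap used above are, per the macro
expansions recorded in this report, shorthand for the sc_rxtapu.th and
sc_txtapu.th union members of the softc.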
9598
9599void
9600iwx_init_task(void *arg1)
9601{
9602 struct iwx_softc *sc = arg1;
9603 struct ifnet *ifp = &sc->sc_ic.ic_if;
9604 int s = splnet();
9605 int generation = sc->sc_generation;
9606 int fatal = (sc->sc_flags & (IWX_FLAG_HW_ERR | IWX_FLAG_RFKILL));
9607
9608 rw_enter_write(&sc->ioctl_rwl);
9609 if (generation != sc->sc_generation) {
9610 rw_exit(&sc->ioctl_rwl);
9611 splx(s);
9612 return;
9613 }
9614
9615 if (ifp->if_flags & IFF_RUNNING)
9616 iwx_stop(ifp);
9617 else
9618 sc->sc_flags &= ~IWX_FLAG_HW_ERR;
9619
9620 if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
9621 iwx_init(ifp);
9622
9623 rw_exit(&sc->ioctl_rwl);
9624 splx(s);
9625}
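
The sc_generation comparison above appears to guard against a stale
task: if the device was stopped and reinitialized between task_add()
and the task actually running, the counters no longer match and the
handler backs out without touching the new state.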
9626
9627void
9628iwx_resume(struct iwx_softc *sc)
9629{
9630 pcireg_t reg;
9631
9632 /*
9633 * We disable the RETRY_TIMEOUT register (0x41) to keep
9634 * PCI Tx retries from interfering with C3 CPU state.
9635 */
9636 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
9637 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
9638
9639 if (!sc->sc_msix) {
9640 /* Hardware bug workaround. */
9641 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
9642 PCI_COMMAND_STATUS_REG);
9643 if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
9644 reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
9645 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
9646 PCI_COMMAND_STATUS_REG, reg);
9647 }
9648
9649 iwx_disable_interrupts(sc);
9650}
9651
9652int
9653iwx_wakeup(struct iwx_softc *sc)
9654{
9655 struct ieee80211com *ic = &sc->sc_ic;
9656 struct ifnet *ifp = &sc->sc_ic.ic_if;
9657 int err;
9658
9659 err = iwx_start_hw(sc);
9660 if (err)
9661 return err;
9662
9663 err = iwx_init_hw(sc);
9664 if (err)
9665 return err;
9666
9667 refcnt_init(&sc->task_refs);
9668 ifq_clr_oactive(&ifp->if_snd);
9669 ifp->if_flags |= IFF_RUNNING;
9670
9671 if (ic->ic_opmode == IEEE80211_M_MONITOR)
9672 ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
9673 else
9674 ieee80211_begin_scan(ifp);
9675
9676 return 0;
9677}
9678
9679int
9680iwx_activate(struct device *self, int act)
9681{
9682 struct iwx_softc *sc = (struct iwx_softc *)self;
9683 struct ifnet *ifp = &sc->sc_ic.ic_if;
9684 int err = 0;
9685
9686 switch (act) {
9687 case DVACT_QUIESCE:
9688 if (ifp->if_flags & IFF_RUNNING) {
9689 rw_enter_write(&sc->ioctl_rwl);
9690 iwx_stop(ifp);
9691 rw_exit(&sc->ioctl_rwl);
9692 }
9693 break;
9694 case DVACT_RESUME:
9695 iwx_resume(sc);
9696 break;
9697 case DVACT_WAKEUP:
9698 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
9699 err = iwx_wakeup(sc);
9700 if (err)
9701 printf("%s: could not initialize hardware\n",
9702 DEVNAME(sc));
9703 }
9704 break;
9705 }
9706
9707 return 0;
9708}
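
A note on the suspend/resume flow above: DVACT_QUIESCE stops a running
interface before the machine sleeps, DVACT_RESUME only restores PCI
configuration state and masks interrupts, and firmware is not reloaded
until DVACT_WAKEUP runs iwx_wakeup(), which restarts the hardware and
either begins a scan or, in monitor mode, transitions straight to RUN
state.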
9709
9710struct cfdriver iwx_cd = {
9711 NULL, "iwx", DV_IFNET
9712};
9713
9714struct cfattach iwx_ca = {
9715 sizeof(struct iwx_softc), iwx_match, iwx_attach,
9716 NULL, iwx_activate
9717};