Bug Summary

File: dev/pci/if_iwm.c
Warning: line 11469, column 4
Value stored to 'handled' is never read
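This report comes from the deadcode.DeadStores checker (enabled below via -analyzer-checker=deadcode), which flags a store whose value is never read on any subsequent path. Line 11469 lies beyond the excerpt reproduced below, so the following is only a minimal sketch of the flagged pattern; example_intr, SOME_BIT and do_work are hypothetical names, not the actual code at that line:

#define SOME_BIT 0x01

static void do_work(void) { }

static int
example_intr(unsigned int r1)
{
	int handled = 0;

	if (r1 & SOME_BIT) {
		handled |= SOME_BIT;	/* "Value stored to 'handled' is never read" */
		do_work();
	}
	return 1;	/* 'handled' is not read on any path after the store */
}

The usual remediation is to make the return value depend on 'handled' or to delete the store; such warnings often indicate leftover bookkeeping rather than a functional bug.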

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name if_iwm.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/if_iwm.c
1/* $OpenBSD: if_iwm.c,v 1.413 2023/12/20 07:32:05 stsp Exp $ */
2
3/*
4 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5 * Author: Stefan Sperling <stsp@openbsd.org>
6 * Copyright (c) 2014 Fixup Software Ltd.
7 * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*-
23 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24 * which were used as the reference documentation for this implementation.
25 *
26 ***********************************************************************
27 *
28 * This file is provided under a dual BSD/GPLv2 license. When using or
29 * redistributing this file, you may do so under either license.
30 *
31 * GPL LICENSE SUMMARY
32 *
33 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
34 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35 * Copyright(c) 2016 Intel Deutschland GmbH
36 *
37 * This program is free software; you can redistribute it and/or modify
38 * it under the terms of version 2 of the GNU General Public License as
39 * published by the Free Software Foundation.
40 *
41 * This program is distributed in the hope that it will be useful, but
42 * WITHOUT ANY WARRANTY; without even the implied warranty of
43 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
44 * General Public License for more details.
45 *
46 * You should have received a copy of the GNU General Public License
47 * along with this program; if not, write to the Free Software
48 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49 * USA
50 *
51 * The full GNU General Public License is included in this distribution
52 * in the file called COPYING.
53 *
54 * Contact Information:
55 * Intel Linux Wireless <ilw@linux.intel.com>
56 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57 *
58 *
59 * BSD LICENSE
60 *
61 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
63 * Copyright(c) 2016 Intel Deutschland GmbH
64 * All rights reserved.
65 *
66 * Redistribution and use in source and binary forms, with or without
67 * modification, are permitted provided that the following conditions
68 * are met:
69 *
70 * * Redistributions of source code must retain the above copyright
71 * notice, this list of conditions and the following disclaimer.
72 * * Redistributions in binary form must reproduce the above copyright
73 * notice, this list of conditions and the following disclaimer in
74 * the documentation and/or other materials provided with the
75 * distribution.
76 * * Neither the name Intel Corporation nor the names of its
77 * contributors may be used to endorse or promote products derived
78 * from this software without specific prior written permission.
79 *
80 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
81 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
82 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
83 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
84 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
86 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
87 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
88 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
89 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
90 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91 */
92
93/*-
94 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
95 *
96 * Permission to use, copy, modify, and distribute this software for any
97 * purpose with or without fee is hereby granted, provided that the above
98 * copyright notice and this permission notice appear in all copies.
99 *
100 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
101 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
102 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
103 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
104 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
105 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
106 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
107 */
108
109#include "bpfilter.h"
110
111#include <sys/param.h>
112#include <sys/conf.h>
113#include <sys/kernel.h>
114#include <sys/malloc.h>
115#include <sys/mbuf.h>
116#include <sys/mutex.h>
117#include <sys/proc.h>
118#include <sys/rwlock.h>
119#include <sys/socket.h>
120#include <sys/sockio.h>
121#include <sys/systm.h>
122#include <sys/endian.h>
123
124#include <sys/refcnt.h>
125#include <sys/task.h>
126#include <machine/bus.h>
127#include <machine/intr.h>
128
129#include <dev/pci/pcireg.h>
130#include <dev/pci/pcivar.h>
131#include <dev/pci/pcidevs.h>
132
133#if NBPFILTER > 0
134#include <net/bpf.h>
135#endif
136#include <net/if.h>
137#include <net/if_dl.h>
138#include <net/if_media.h>
139
140#include <netinet/in.h>
141#include <netinet/if_ether.h>
142
143#include <net80211/ieee80211_var.h>
144#include <net80211/ieee80211_amrr.h>
145#include <net80211/ieee80211_ra.h>
146#include <net80211/ieee80211_ra_vht.h>
147#include <net80211/ieee80211_radiotap.h>
148#include <net80211/ieee80211_priv.h> /* for SEQ_LT */
149#undef DPRINTF /* defined in ieee80211_priv.h */
150
151#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
152
153#define IC2IFP(_ic_) (&(_ic_)->ic_if)
154
155#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
156#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
157
158#ifdef IWM_DEBUG
159#define DPRINTF(x) do { if (iwm_debug > 0) printf x; } while (0)
160#define DPRINTFN(n, x) do { if (iwm_debug >= (n)) printf x; } while (0)
161int iwm_debug = 1;
162#else
163#define DPRINTF(x) do { ; } while (0)
164#define DPRINTFN(n, x) do { ; } while (0)
165#endif
166
167#include <dev/pci/if_iwmreg.h>
168#include <dev/pci/if_iwmvar.h>
169
170const uint8_t iwm_nvm_channels[] = {
171 /* 2.4 GHz */
172 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
173 /* 5 GHz */
174 36, 40, 44 , 48, 52, 56, 60, 64,
175 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
176 149, 153, 157, 161, 165
177};
178
179const uint8_t iwm_nvm_channels_8000[] = {
180 /* 2.4 GHz */
181 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
182 /* 5 GHz */
183 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
184 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
185 149, 153, 157, 161, 165, 169, 173, 177, 181
186};
187
188#define IWM_NUM_2GHZ_CHANNELS 14
189
190const struct iwm_rate {
191 uint16_t rate;
192 uint8_t plcp;
193 uint8_t ht_plcp;
194} iwm_rates[] = {
195 /* Legacy */ /* HT */
196 { 2, IWM_RATE_1M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
197 { 4, IWM_RATE_2M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
198 { 11, IWM_RATE_5M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
199 { 22, IWM_RATE_11M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
200 { 12, IWM_RATE_6M_PLCP, IWM_RATE_HT_SISO_MCS_0_PLCP },
201 { 18, IWM_RATE_9M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
202 { 24, IWM_RATE_12M_PLCP, IWM_RATE_HT_SISO_MCS_1_PLCP },
203 { 26, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_8_PLCP },
204 { 36, IWM_RATE_18M_PLCP, IWM_RATE_HT_SISO_MCS_2_PLCP },
205 { 48, IWM_RATE_24M_PLCP, IWM_RATE_HT_SISO_MCS_3_PLCP },
206 { 52, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_9_PLCP },
207 { 72, IWM_RATE_36M_PLCP, IWM_RATE_HT_SISO_MCS_4_PLCP },
208 { 78, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_10_PLCP },
209 { 96, IWM_RATE_48M_PLCP, IWM_RATE_HT_SISO_MCS_5_PLCP },
210 { 104, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_11_PLCP },
211 { 108, IWM_RATE_54M_PLCP, IWM_RATE_HT_SISO_MCS_6_PLCP },
212 { 128, IWM_RATE_INVM_PLCP, IWM_RATE_HT_SISO_MCS_7_PLCP },
213 { 156, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_12_PLCP },
214 { 208, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_13_PLCP },
215 { 234, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_14_PLCP },
216 { 260, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_15_PLCP },
217};
218#define IWM_RIDX_CCK 0
219#define IWM_RIDX_OFDM 4
220#define IWM_RIDX_MAX (nitems(iwm_rates)-1)
221#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
222#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
223#define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
224
225/* Convert an MCS index into an iwm_rates[] index. */
226const int iwm_ht_mcs2ridx[] = {
227 IWM_RATE_MCS_0_INDEX,
228 IWM_RATE_MCS_1_INDEX,
229 IWM_RATE_MCS_2_INDEX,
230 IWM_RATE_MCS_3_INDEX,
231 IWM_RATE_MCS_4_INDEX,
232 IWM_RATE_MCS_5_INDEX,
233 IWM_RATE_MCS_6_INDEX,
234 IWM_RATE_MCS_7_INDEX,
235 IWM_RATE_MCS_8_INDEX,
236 IWM_RATE_MCS_9_INDEX,
237 IWM_RATE_MCS_10_INDEX,
238 IWM_RATE_MCS_11_INDEX,
239 IWM_RATE_MCS_12_INDEX,
240 IWM_RATE_MCS_13_INDEX,
241 IWM_RATE_MCS_14_INDEX,
242 IWM_RATE_MCS_15_INDEX,
243};
244
245struct iwm_nvm_section {
246 uint16_t length;
247 uint8_t *data;
248};
249
250int iwm_is_mimo_ht_plcp(uint8_t);
251int iwm_is_mimo_ht_mcs(int);
252int iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
253int iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
254 uint8_t *, size_t);
255int iwm_set_default_calib(struct iwm_softc *, const void *);
256void iwm_fw_info_free(struct iwm_fw_info *);
257void iwm_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
258int iwm_read_firmware(struct iwm_softc *);
259uint32_t iwm_read_prph_unlocked(struct iwm_softc *, uint32_t);
260uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
261void iwm_write_prph_unlocked(struct iwm_softc *, uint32_t, uint32_t);
262void iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
263int iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
264int iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
265int iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
266int iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
267int iwm_nic_lock(struct iwm_softc *);
268void iwm_nic_assert_locked(struct iwm_softc *);
269void iwm_nic_unlock(struct iwm_softc *);
270int iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
271 uint32_t);
272int iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
273int iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
274int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t,
275 bus_size_t);
276void iwm_dma_contig_free(struct iwm_dma_info *);
277int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
278void iwm_disable_rx_dma(struct iwm_softc *);
279void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
280void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
281int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
282void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
283void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
284void iwm_enable_rfkill_int(struct iwm_softc *);
285int iwm_check_rfkill(struct iwm_softc *);
286void iwm_enable_interrupts(struct iwm_softc *);
287void iwm_enable_fwload_interrupt(struct iwm_softc *);
288void iwm_restore_interrupts(struct iwm_softc *);
289void iwm_disable_interrupts(struct iwm_softc *);
290void iwm_ict_reset(struct iwm_softc *);
291int iwm_set_hw_ready(struct iwm_softc *);
292int iwm_prepare_card_hw(struct iwm_softc *);
293void iwm_apm_config(struct iwm_softc *);
294int iwm_apm_init(struct iwm_softc *);
295void iwm_apm_stop(struct iwm_softc *);
296int iwm_allow_mcast(struct iwm_softc *);
297void iwm_init_msix_hw(struct iwm_softc *);
298void iwm_conf_msix_hw(struct iwm_softc *, int);
299int iwm_clear_persistence_bit(struct iwm_softc *);
300int iwm_start_hw(struct iwm_softc *);
301void iwm_stop_device(struct iwm_softc *);
302void iwm_nic_config(struct iwm_softc *);
303int iwm_nic_rx_init(struct iwm_softc *);
304int iwm_nic_rx_legacy_init(struct iwm_softc *);
305int iwm_nic_rx_mq_init(struct iwm_softc *);
306int iwm_nic_tx_init(struct iwm_softc *);
307int iwm_nic_init(struct iwm_softc *);
308int iwm_enable_ac_txq(struct iwm_softc *, int, int);
309int iwm_enable_txq(struct iwm_softc *, int, int, int, int, uint8_t,
310 uint16_t);
311int iwm_disable_txq(struct iwm_softc *, int, int, uint8_t);
312int iwm_post_alive(struct iwm_softc *);
313struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t,
314 uint16_t);
315int iwm_phy_db_set_section(struct iwm_softc *,
316 struct iwm_calib_res_notif_phy_db *);
317int iwm_is_valid_channel(uint16_t);
318uint8_t iwm_ch_id_to_ch_index(uint16_t);
319uint16_t iwm_channel_id_to_papd(uint16_t);
320uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
321int iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
322 uint16_t *, uint16_t);
323int iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
324int iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t,
325 uint8_t);
326int iwm_send_phy_db_data(struct iwm_softc *);
327void iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t,
328 uint32_t);
329void iwm_unprotect_session(struct iwm_softc *, struct iwm_node *);
330int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
331 uint8_t *, uint16_t *);
332int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
333 uint16_t *, size_t);
334uint8_t iwm_fw_valid_tx_ant(struct iwm_softc *);
335uint8_t iwm_fw_valid_rx_ant(struct iwm_softc *);
336int iwm_valid_siso_ant_rate_mask(struct iwm_softc *);
337void iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
338 const uint8_t *nvm_channels, int nchan);
339int iwm_mimo_enabled(struct iwm_softc *);
340void iwm_setup_ht_rates(struct iwm_softc *);
341void iwm_setup_vht_rates(struct iwm_softc *);
342void iwm_mac_ctxt_task(void *);
343void iwm_phy_ctxt_task(void *);
344void iwm_updateprot(struct ieee80211com *);
345void iwm_updateslot(struct ieee80211com *);
346void iwm_updateedca(struct ieee80211com *);
347void iwm_updatechan(struct ieee80211com *);
348void iwm_updatedtim(struct ieee80211com *);
349void iwm_init_reorder_buffer(struct iwm_reorder_buffer *, uint16_t,
350 uint16_t);
351void iwm_clear_reorder_buffer(struct iwm_softc *, struct iwm_rxba_data *);
352int iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
353 uint8_t);
354void iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
355 uint8_t);
356void iwm_rx_ba_session_expired(void *);
357void iwm_reorder_timer_expired(void *);
358int iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
359 uint16_t, uint16_t, int, int);
360int iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
361 uint8_t);
362void iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
363 uint8_t);
364void iwm_ba_task(void *);
365
366int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
367 const uint16_t *, const uint16_t *,
368 const uint16_t *, const uint16_t *,
369 const uint16_t *, int);
370void iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *,
371 const uint16_t *, const uint16_t *);
372int iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
373int iwm_nvm_init(struct iwm_softc *);
374int iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *,
375 uint32_t);
376int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
377 uint32_t);
378int iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
379int iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *,
380 int, int *);
381int iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
382int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
383int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
384int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
385int iwm_send_phy_cfg_cmd(struct iwm_softc *);
386int iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
387int iwm_send_dqa_cmd(struct iwm_softc *);
388int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
389int iwm_config_ltr(struct iwm_softc *);
390int iwm_rx_addbuf(struct iwm_softc *, int, int);
391int iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *);
392int iwm_rxmq_get_signal_strength(struct iwm_softc *, struct iwm_rx_mpdu_desc *);
393void iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
394 struct iwm_rx_data *);
395int iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
396int iwm_rx_hwdecrypt(struct iwm_softc *, struct mbuf *, uint32_t,
397 struct ieee80211_rxinfo *);
398int iwm_ccmp_decap(struct iwm_softc *, struct mbuf *,
399 struct ieee80211_node *, struct ieee80211_rxinfo *);
400void iwm_rx_frame(struct iwm_softc *, struct mbuf *, int, uint32_t, int, int,
401 uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
402void iwm_ht_single_rate_control(struct iwm_softc *, struct ieee80211_node *,
403 int, uint8_t, int);
404void iwm_vht_single_rate_control(struct iwm_softc *, struct ieee80211_node *,
405 int, int, uint8_t, int);
406void iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
407 struct iwm_node *, int, int);
408void iwm_txd_done(struct iwm_softc *, struct iwm_tx_data *);
409void iwm_txq_advance(struct iwm_softc *, struct iwm_tx_ring *, int);
410void iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
411 struct iwm_rx_data *);
412void iwm_clear_oactive(struct iwm_softc *, struct iwm_tx_ring *);
413void iwm_ampdu_rate_control(struct iwm_softc *, struct ieee80211_node *,
414 struct iwm_tx_ring *, int, uint16_t, uint16_t);
415void iwm_rx_compressed_ba(struct iwm_softc *, struct iwm_rx_packet *);
416void iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *,
417 struct iwm_rx_data *);
418int iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
419uint8_t iwm_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
420int iwm_phy_ctxt_cmd_uhb(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
421 uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
422void iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
423 struct iwm_phy_context_cmd *, uint32_t, uint32_t);
424void iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
425 struct ieee80211_channel *, uint8_t, uint8_t, uint8_t, uint8_t);
426int iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
427 uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
428int iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
429int iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t, uint16_t,
430 const void *);
431int iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
432 uint32_t *);
433int iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
434 const void *, uint32_t *);
435void iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
436void iwm_cmd_done(struct iwm_softc *, int, int, int);
437void iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
438void iwm_reset_sched(struct iwm_softc *, int, int, uint8_t);
439uint8_t iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
440 struct ieee80211_frame *, struct iwm_tx_cmd *);
441int iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
442int iwm_flush_tx_path(struct iwm_softc *, int);
443int iwm_wait_tx_queues_empty(struct iwm_softc *);
444void iwm_led_enable(struct iwm_softc *);
445void iwm_led_disable(struct iwm_softc *);
446int iwm_led_is_enabled(struct iwm_softc *);
447void iwm_led_blink_timeout(void *);
448void iwm_led_blink_start(struct iwm_softc *);
449void iwm_led_blink_stop(struct iwm_softc *);
450int iwm_beacon_filter_send_cmd(struct iwm_softc *,
451 struct iwm_beacon_filter_cmd *);
452void iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *,
453 struct iwm_beacon_filter_cmd *);
454int iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
455void iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
456 struct iwm_mac_power_cmd *);
457int iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
458int iwm_power_update_device(struct iwm_softc *);
459int iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
460int iwm_disable_beacon_filter(struct iwm_softc *);
461int iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
462int iwm_add_aux_sta(struct iwm_softc *);
463int iwm_drain_sta(struct iwm_softc *sc, struct iwm_node *, int);
464int iwm_flush_sta(struct iwm_softc *, struct iwm_node *);
465int iwm_rm_sta_cmd(struct iwm_softc *, struct iwm_node *);
466uint16_t iwm_scan_rx_chain(struct iwm_softc *);
467uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
468uint8_t iwm_lmac_scan_fill_channels(struct iwm_softc *,
469 struct iwm_scan_channel_cfg_lmac *, int, int);
470int iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *);
471int iwm_lmac_scan(struct iwm_softc *, int);
472int iwm_config_umac_scan(struct iwm_softc *);
473int iwm_umac_scan(struct iwm_softc *, int);
474void iwm_mcc_update(struct iwm_softc *, struct iwm_mcc_chub_notif *);
475uint8_t iwm_ridx2rate(struct ieee80211_rateset *, int);
476int iwm_rval2ridx(int);
477void iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
478void iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
479 struct iwm_mac_ctx_cmd *, uint32_t);
480void iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
481 struct iwm_mac_data_sta *, int);
482int iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t, int);
483int iwm_update_quotas(struct iwm_softc *, struct iwm_node *, int);
484void iwm_add_task(struct iwm_softc *, struct taskq *, struct task *);
485void iwm_del_task(struct iwm_softc *, struct taskq *, struct task *);
486int iwm_scan(struct iwm_softc *);
487int iwm_bgscan(struct ieee80211com *);
488void iwm_bgscan_done(struct ieee80211com *,
489 struct ieee80211_node_switch_bss_arg *, size_t);
490void iwm_bgscan_done_task(void *);
491int iwm_umac_scan_abort(struct iwm_softc *);
492int iwm_lmac_scan_abort(struct iwm_softc *);
493int iwm_scan_abort(struct iwm_softc *);
494int iwm_phy_ctxt_update(struct iwm_softc *, struct iwm_phy_ctxt *,
495 struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
496 uint8_t);
497int iwm_auth(struct iwm_softc *);
498int iwm_deauth(struct iwm_softc *);
499int iwm_run(struct iwm_softc *);
500int iwm_run_stop(struct iwm_softc *);
501struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
502int iwm_set_key_v1(struct ieee80211com *, struct ieee80211_node *,
503 struct ieee80211_key *);
504int iwm_set_key(struct ieee80211com *, struct ieee80211_node *,
505 struct ieee80211_key *);
506void iwm_delete_key_v1(struct ieee80211com *,
507 struct ieee80211_node *, struct ieee80211_key *);
508void iwm_delete_key(struct ieee80211com *,
509 struct ieee80211_node *, struct ieee80211_key *);
510void iwm_calib_timeout(void *);
511void iwm_set_rate_table_vht(struct iwm_node *, struct iwm_lq_cmd *);
512void iwm_set_rate_table(struct iwm_node *, struct iwm_lq_cmd *);
513void iwm_setrates(struct iwm_node *, int);
514int iwm_media_change(struct ifnet *);
515void iwm_newstate_task(void *);
516int iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
517void iwm_endscan(struct iwm_softc *);
518void iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
519 struct ieee80211_node *);
520int iwm_sf_config(struct iwm_softc *, int);
521int iwm_send_bt_init_conf(struct iwm_softc *);
522int iwm_send_soc_conf(struct iwm_softc *);
523int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
524int iwm_send_temp_report_ths_cmd(struct iwm_softc *);
525void iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
526void iwm_free_fw_paging(struct iwm_softc *);
527int iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
528int iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *);
529int iwm_init_hw(struct iwm_softc *);
530int iwm_init(struct ifnet *);
531void iwm_start(struct ifnet *);
532void iwm_stop(struct ifnet *);
533void iwm_watchdog(struct ifnet *);
534int iwm_ioctl(struct ifnet *, u_long, caddr_t);
535const char *iwm_desc_lookup(uint32_t);
536void iwm_nic_error(struct iwm_softc *);
537void iwm_dump_driver_status(struct iwm_softc *);
538void iwm_nic_umac_error(struct iwm_softc *);
539void iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, void *, size_t,
540 struct mbuf_list *);
541void iwm_flip_address(uint8_t *);
542int iwm_detect_duplicate(struct iwm_softc *, struct mbuf *,
543 struct iwm_rx_mpdu_desc *, struct ieee80211_rxinfo *);
544int iwm_is_sn_less(uint16_t, uint16_t, uint16_t);
545void iwm_release_frames(struct iwm_softc *, struct ieee80211_node *,
546 struct iwm_rxba_data *, struct iwm_reorder_buffer *, uint16_t,
547 struct mbuf_list *);
548int iwm_oldsn_workaround(struct iwm_softc *, struct ieee80211_node *,
549 int, struct iwm_reorder_buffer *, uint32_t, uint32_t);
550int iwm_rx_reorder(struct iwm_softc *, struct mbuf *, int,
551 struct iwm_rx_mpdu_desc *, int, int, uint32_t,
552 struct ieee80211_rxinfo *, struct mbuf_list *);
553void iwm_rx_mpdu_mq(struct iwm_softc *, struct mbuf *, void *, size_t,
554 struct mbuf_list *);
555int iwm_rx_pkt_valid(struct iwm_rx_packet *);
556void iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *,
557 struct mbuf_list *);
558void iwm_notif_intr(struct iwm_softc *);
559int iwm_intr(void *);
560int iwm_intr_msix(void *);
561int iwm_match(struct device *, void *, void *);
562int iwm_preinit(struct iwm_softc *);
563void iwm_attach_hook(struct device *);
564void iwm_attach(struct device *, struct device *, void *);
565void iwm_init_task(void *);
566int iwm_activate(struct device *, int);
567void iwm_resume(struct iwm_softc *);
568int iwm_wakeup(struct iwm_softc *);
569
570#if NBPFILTER > 0
571void iwm_radiotap_attach(struct iwm_softc *);
572#endif
573
574uint8_t
575iwm_lookup_cmd_ver(struct iwm_softc *sc, uint8_t grp, uint8_t cmd)
576{
577 const struct iwm_fw_cmd_version *entry;
578 int i;
579
580 for (i = 0; i < sc->n_cmd_versions; i++) {
581 entry = &sc->cmd_versions[i];
582 if (entry->group == grp && entry->cmd == cmd)
583 return entry->cmd_ver;
584 }
585
586 return IWM_FW_CMD_VER_UNKNOWN;
587}
588
589int
590iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
591{
592 return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP &&
593 (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK));
594}
595
596int
597iwm_is_mimo_ht_mcs(int mcs)
598{
599 int ridx = iwm_ht_mcs2ridx[mcs];
600 return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp);
601
602}
603
604int
605iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
606{
607 struct iwm_fw_cscheme_list *l = (void *)data;
608
609 if (dlen < sizeof(*l) ||
610 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
611 return EINVAL;
612
613 /* we don't actually store anything for now, always use s/w crypto */
614
615 return 0;
616}
617
618int
619iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
620 uint8_t *data, size_t dlen)
621{
622 struct iwm_fw_sects *fws;
623 struct iwm_fw_onesect *fwone;
624
625 if (type >= IWM_UCODE_TYPE_MAX)
626 return EINVAL;
627 if (dlen < sizeof(uint32_t))
628 return EINVAL;
629
630 fws = &sc->sc_fw.fw_sects[type];
631 if (fws->fw_count >= IWM_UCODE_SECT_MAX)
632 return EINVAL;
633
634 fwone = &fws->fw_sect[fws->fw_count];
635
636 /* first 32bit are device load offset */
637 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
638
639 /* rest is data */
640 fwone->fws_data = data + sizeof(uint32_t);
641 fwone->fws_len = dlen - sizeof(uint32_t);
642
643 fws->fw_count++;
644 fws->fw_totlen += fwone->fws_len;
645
646 return 0;
647}
648
649#define IWM_DEFAULT_SCAN_CHANNELS 40
650/* Newer firmware might support more channels. Raise this value if needed. */
651#define IWM_MAX_SCAN_CHANNELS 52 /* as of 8265-34 firmware image */
652
653struct iwm_tlv_calib_data {
654 uint32_t ucode_type;
655 struct iwm_tlv_calib_ctrl calib;
656} __packed;
657
658int
659iwm_set_default_calib(struct iwm_softc *sc, const void *data)
660{
661 const struct iwm_tlv_calib_data *def_calib = data;
662 uint32_t ucode_type = le32toh(def_calib->ucode_type);
663
664 if (ucode_type >= IWM_UCODE_TYPE_MAX)
665 return EINVAL;
666
667 sc->sc_default_calib[ucode_type].flow_trigger =
668 def_calib->calib.flow_trigger;
669 sc->sc_default_calib[ucode_type].event_trigger =
670 def_calib->calib.event_trigger;
671
672 return 0;
673}
674
675void
676iwm_fw_info_free(struct iwm_fw_info *fw)
677{
678 free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
679 fw->fw_rawdata = NULL;
680 fw->fw_rawsize = 0;
681 /* don't touch fw->fw_status */
682 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
683}
684
685void
686iwm_fw_version_str(char *buf, size_t bufsize,
687 uint32_t major, uint32_t minor, uint32_t api)
688{
689 /*
690 * Starting with major version 35 the Linux driver prints the minor
691 * version in hexadecimal.
692 */
693 if (major >= 35)
694 snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
695 else
696 snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
697}
698
699int
700iwm_read_firmware(struct iwm_softc *sc)
701{
702 struct iwm_fw_info *fw = &sc->sc_fw;
703 struct iwm_tlv_ucode_header *uhdr;
704 struct iwm_ucode_tlv tlv;
705 uint32_t tlv_type;
706 uint8_t *data;
707 uint32_t usniffer_img;
708 uint32_t paging_mem_size;
709 int err;
710 size_t len;
711
712 if (fw->fw_status == IWM_FW_STATUS_DONE)
713 return 0;
714
715 while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
716 tsleep_nsec(&sc->sc_fw, 0, "iwmfwp", INFSLP);
717 fw->fw_status = IWM_FW_STATUS_INPROGRESS;
718
719 if (fw->fw_rawdata != NULL)
720 iwm_fw_info_free(fw);
721
722 err = loadfirmware(sc->sc_fwname,
723 (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
724 if (err) {
725 printf("%s: could not read firmware %s (error %d)\n",
726 DEVNAME(sc), sc->sc_fwname, err);
727 goto out;
728 }
729
730 sc->sc_capaflags = 0;
731 sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
732 memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
733 memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
734 sc->n_cmd_versions = 0;
735
736 uhdr = (void *)fw->fw_rawdata;
737 if (*(uint32_t *)fw->fw_rawdata != 0
738 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
739 printf("%s: invalid firmware %s\n",
740 DEVNAME(sc), sc->sc_fwname);
741 err = EINVAL;
742 goto out;
743 }
744
745 iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
746 IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
747 IWM_UCODE_MINOR(le32toh(uhdr->ver)),
748 IWM_UCODE_API(le32toh(uhdr->ver)));
749
750 data = uhdr->data;
751 len = fw->fw_rawsize - sizeof(*uhdr);
752
753 while (len >= sizeof(tlv)) {
754 size_t tlv_len;
755 void *tlv_data;
756
757 memcpy(&tlv, data, sizeof(tlv));
758 tlv_len = le32toh(tlv.length);
759 tlv_type = le32toh(tlv.type);
760
761 len -= sizeof(tlv);
762 data += sizeof(tlv);
763 tlv_data = data;
764
765 if (len < tlv_len) {
766 printf("%s: firmware too short: %zu bytes\n",
767 DEVNAME(sc), len);
768 err = EINVAL;
769 goto parse_out;
770 }
771
772 switch (tlv_type) {
773 case IWM_UCODE_TLV_PROBE_MAX_LEN:
774 if (tlv_len < sizeof(uint32_t)) {
775 err = EINVAL;
776 goto parse_out;
777 }
778 sc->sc_capa_max_probe_len
779 = le32toh(*(uint32_t *)tlv_data);
780 if (sc->sc_capa_max_probe_len >
781 IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
782 err = EINVAL;
783 goto parse_out;
784 }
785 break;
786 case IWM_UCODE_TLV_PAN:
787 if (tlv_len) {
788 err = EINVAL;
789 goto parse_out;
790 }
791 sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
792 break;
793 case IWM_UCODE_TLV_FLAGS:
794 if (tlv_len < sizeof(uint32_t)) {
795 err = EINVAL;
796 goto parse_out;
797 }
798 /*
799 * Apparently there can be many flags, but Linux driver
800 * parses only the first one, and so do we.
801 *
802 * XXX: why does this override IWM_UCODE_TLV_PAN?
803 * Intentional or a bug? Observations from
804 * current firmware file:
805 * 1) TLV_PAN is parsed first
806 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
807 * ==> this resets TLV_PAN to itself... hnnnk
808 */
809 sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
810 break;
811 case IWM_UCODE_TLV_CSCHEME:
812 err = iwm_store_cscheme(sc, tlv_data, tlv_len);
813 if (err)
814 goto parse_out;
815 break;
816 case IWM_UCODE_TLV_NUM_OF_CPU: {
817 uint32_t num_cpu;
818 if (tlv_len != sizeof(uint32_t)) {
819 err = EINVAL;
820 goto parse_out;
821 }
822 num_cpu = le32toh(*(uint32_t *)tlv_data);
823 if (num_cpu < 1 || num_cpu > 2) {
824 err = EINVAL;
825 goto parse_out;
826 }
827 break;
828 }
829 case IWM_UCODE_TLV_SEC_RT:
830 err = iwm_firmware_store_section(sc,
831 IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
832 if (err)
833 goto parse_out;
834 break;
835 case IWM_UCODE_TLV_SEC_INIT:
836 err = iwm_firmware_store_section(sc,
837 IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
838 if (err)
839 goto parse_out;
840 break;
841 case IWM_UCODE_TLV_SEC_WOWLAN:
842 err = iwm_firmware_store_section(sc,
843 IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
844 if (err)
845 goto parse_out;
846 break;
847 case IWM_UCODE_TLV_DEF_CALIB:
848 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
849 err = EINVAL;
850 goto parse_out;
851 }
852 err = iwm_set_default_calib(sc, tlv_data);
853 if (err)
854 goto parse_out;
855 break;
856 case IWM_UCODE_TLV_PHY_SKU:
857 if (tlv_len != sizeof(uint32_t)) {
858 err = EINVAL;
859 goto parse_out;
860 }
861 sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
862 break;
863
864 case IWM_UCODE_TLV_API_CHANGES_SET: {
865 struct iwm_ucode_api *api;
866 int idx, i;
867 if (tlv_len != sizeof(*api)) {
868 err = EINVAL;
869 goto parse_out;
870 }
871 api = (struct iwm_ucode_api *)tlv_data;
872 idx = le32toh(api->api_index);
873 if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
874 err = EINVAL;
875 goto parse_out;
876 }
877 for (i = 0; i < 32; i++) {
878 if ((le32toh(api->api_flags) & (1 << i)) == 0)
879 continue;
880 setbit(sc->sc_ucode_api, i + (32 * idx));
881 }
882 break;
883 }
884
885 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
886 struct iwm_ucode_capa *capa;
887 int idx, i;
888 if (tlv_len != sizeof(*capa)) {
889 err = EINVAL;
890 goto parse_out;
891 }
892 capa = (struct iwm_ucode_capa *)tlv_data;
893 idx = le32toh(capa->api_index);
894 if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
895 goto parse_out;
896 }
897 for (i = 0; i < 32; i++) {
898 if ((le32toh(capa->api_capa) & (1 << i)) == 0)
899 continue;
900 setbit(sc->sc_enabled_capa, i + (32 * idx));
901 }
902 break;
903 }
904
905 case IWM_UCODE_TLV_CMD_VERSIONS:
906 if (tlv_len % sizeof(struct iwm_fw_cmd_version)) {
907 tlv_len /= sizeof(struct iwm_fw_cmd_version);
908 tlv_len *= sizeof(struct iwm_fw_cmd_version);
909 }
910 if (sc->n_cmd_versions != 0) {
911 err = EINVAL;
912 goto parse_out;
913 }
914 if (tlv_len > sizeof(sc->cmd_versions)) {
915 err = EINVAL;
916 goto parse_out;
917 }
918 memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
919 sc->n_cmd_versions = tlv_len / sizeof(struct iwm_fw_cmd_version);
920 break;
921
922 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
923 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
924 /* ignore, not used by current driver */
925 break;
926
927 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
928 err = iwm_firmware_store_section(sc,
929 IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
930 tlv_len);
931 if (err)
932 goto parse_out;
933 break;
934
935 case IWM_UCODE_TLV_PAGING:
936 if (tlv_len != sizeof(uint32_t)) {
937 err = EINVAL;
938 goto parse_out;
939 }
940 paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
941
942 DPRINTF(("%s: Paging: paging enabled (size = %u bytes)\n",
943 DEVNAME(sc), paging_mem_size));
944 if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
945 printf("%s: Driver only supports up to %u"
946 " bytes for paging image (%u requested)\n",
947 DEVNAME(sc), IWM_MAX_PAGING_IMAGE_SIZE,
948 paging_mem_size);
949 err = EINVAL;
950 goto out;
951 }
952 if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
953 printf("%s: Paging: image isn't multiple of %u\n",
954 DEVNAME(sc), IWM_FW_PAGING_SIZE);
955 err = EINVAL;
956 goto out;
957 }
958
959 fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
960 paging_mem_size;
961 usniffer_img = IWM_UCODE_TYPE_REGULAR_USNIFFER;
962 fw->fw_sects[usniffer_img].paging_mem_size =
963 paging_mem_size;
964 break;
965
966 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
967 if (tlv_len != sizeof(uint32_t)) {
968 err = EINVAL;
969 goto parse_out;
970 }
971 sc->sc_capa_n_scan_channels =
972 le32toh(*(uint32_t *)tlv_data);
973 if (sc->sc_capa_n_scan_channels > IWM_MAX_SCAN_CHANNELS) {
974 err = ERANGE;
975 goto parse_out;
976 }
977 break;
978
979 case IWM_UCODE_TLV_FW_VERSION:
980 if (tlv_len != sizeof(uint32_t) * 3) {
981 err = EINVAL;
982 goto parse_out;
983 }
984
985 iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
986 le32toh(((uint32_t *)tlv_data)[0]),
987 le32toh(((uint32_t *)tlv_data)[1]),
988 le32toh(((uint32_t *)tlv_data)[2]));
989 break;
990
991 case IWM_UCODE_TLV_FW_DBG_DEST:
992 case IWM_UCODE_TLV_FW_DBG_CONF:
993 case IWM_UCODE_TLV_UMAC_DEBUG_ADDRS:
994 case IWM_UCODE_TLV_LMAC_DEBUG_ADDRS:
995 case IWM_UCODE_TLV_TYPE_DEBUG_INFO:
996 case IWM_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
997 case IWM_UCODE_TLV_TYPE_HCMD:
998 case IWM_UCODE_TLV_TYPE_REGIONS:
999 case IWM_UCODE_TLV_TYPE_TRIGGERS:
1000 break;
1001
1002 case IWM_UCODE_TLV_HW_TYPE:
1003 break;
1004
1005 case IWM_UCODE_TLV_FW_MEM_SEG:
1006 break;
1007
1008 /* undocumented TLVs found in iwm-9000-43 image */
1009 case 0x1000003:
1010 case 0x1000004:
1011 break;
1012
1013 default:
1014 err = EINVAL;
1015 goto parse_out;
1016 }
1017
1018 /*
1019 * Check for size_t overflow and ignore missing padding at
1020 * end of firmware file.
1021 */
1022 if (roundup(tlv_len, 4) > len)
1023 break;
1024
1025 len -= roundup(tlv_len, 4);
1026 data += roundup(tlv_len, 4);
1027 }
1028
1029 KASSERT(err == 0);
1030
1031 parse_out:
1032 if (err) {
1033 printf("%s: firmware parse error %d, "
1034 "section type %d\n", DEVNAME(sc), err, tlv_type);
1035 }
1036
1037 out:
1038 if (err) {
1039 fw->fw_status = IWM_FW_STATUS_NONE;
1040 if (fw->fw_rawdata != NULL)
1041 iwm_fw_info_free(fw);
1042 } else
1043 fw->fw_status = IWM_FW_STATUS_DONE;
1044 wakeup(&sc->sc_fw);
1045
1046 return err;
1047}
1048
1049uint32_t
1050iwm_read_prph_unlocked(struct iwm_softc *sc, uint32_t addr)
1051{
1052 IWM_WRITE(sc,
1053 IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
1054 IWM_BARRIER_READ_WRITE(sc);
1055 return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
1056}
1057
1058uint32_t
1059iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
1060{
1061 iwm_nic_assert_locked(sc);
1062 return iwm_read_prph_unlocked(sc, addr);
1063}
1064
1065void
1066iwm_write_prph_unlocked(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1067{
1068 IWM_WRITE(sc,
1069 IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
1070 IWM_BARRIER_WRITE(sc);
1071 IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
1072}
1073
1074void
1075iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1076{
1077 iwm_nic_assert_locked(sc);
1078 iwm_write_prph_unlocked(sc, addr, val);
1079}
1080
1081void
1082iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
1083{
1084 iwm_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
1085 iwm_write_prph(sc, (uint32_t)addr + 4, val >> 32);
1086}
1087
1088int
1089iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
1090{
1091 int offs, err = 0;
1092 uint32_t *vals = buf;
1093
1094 if (iwm_nic_lock(sc)) {
1095 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
1096 for (offs = 0; offs < dwords; offs++)
1097 vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
1098 iwm_nic_unlock(sc);
1099 } else {
1100 err = EBUSY;
1101 }
1102 return err;
1103}
1104
1105int
1106iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
1107{
1108 int offs;
1109 const uint32_t *vals = buf;
1110
1111 if (iwm_nic_lock(sc)) {
1112 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
1113 /* WADDR auto-increments */
1114 for (offs = 0; offs < dwords; offs++) {
1115 uint32_t val = vals ? vals[offs] : 0;
1116 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
1117 }
1118 iwm_nic_unlock(sc);
1119 } else {
1120 return EBUSY;
1121 }
1122 return 0;
1123}
1124
1125int
1126iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1127{
1128 return iwm_write_mem(sc, addr, &val, 1);
1129}
1130
1131int
1132iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
1133 int timo)
1134{
1135 for (;;) {
1136 if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
1137 return 1;
1138 }
1139 if (timo < 10) {
1140 return 0;
1141 }
1142 timo -= 10;
1143 DELAY(10);
1144 }
1145}
1146
1147int
1148iwm_nic_lock(struct iwm_softc *sc)
1149{
1150 if (sc->sc_nic_locks > 0) {
1151 iwm_nic_assert_locked(sc);
1152 sc->sc_nic_locks++;
1153 return 1; /* already locked */
1154 }
1155
1156 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1157 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1158
1159 if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
1160 DELAY(2);
1161
1162 if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1163 IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1164 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1165 | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1166 sc->sc_nic_locks++;
1167 return 1;
1168 }
1169
1170 printf("%s: acquiring device failed\n", DEVNAME(sc));
1171 return 0;
1172}
1173
1174void
1175iwm_nic_assert_locked(struct iwm_softc *sc)
1176{
1177 if (sc->sc_nic_locks <= 0)
1178 panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1179}
1180
1181void
1182iwm_nic_unlock(struct iwm_softc *sc)
1183{
1184 if (sc->sc_nic_locks > 0) {
1185 if (--sc->sc_nic_locks == 0)
1186 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1187 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1188 } else
1189 printf("%s: NIC already unlocked\n", DEVNAME(sc));
1190}
1191
1192int
1193iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
1194 uint32_t mask)
1195{
1196 uint32_t val;
1197
1198 if (iwm_nic_lock(sc)) {
1199 val = iwm_read_prph(sc, reg) & mask;
1200 val |= bits;
1201 iwm_write_prph(sc, reg, val);
1202 iwm_nic_unlock(sc);
1203 return 0;
1204 }
1205 return EBUSY;
1206}
1207
1208int
1209iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1210{
1211 return iwm_set_bits_mask_prph(sc, reg, bits, ~0);
1212}
1213
1214int
1215iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1216{
1217 return iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
1218}
1219
1220int
1221iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
1222 bus_size_t size, bus_size_t alignment)
1223{
1224 int nsegs, err;
1225 caddr_t va;
1226
1227 dma->tag = tag;
1228 dma->size = size;
1229
1230 err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1231 &dma->map);
1232 if (err)
1233 goto fail;
1234
1235 err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1236 BUS_DMA_NOWAIT);
1237 if (err)
1238 goto fail;
1239
1240 err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1241 BUS_DMA_NOWAIT);
1242 if (err)
1243 goto fail;
1244 dma->vaddr = va;
1245
1246 err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1247 BUS_DMA_NOWAIT);
1248 if (err)
1249 goto fail;
1250
1251 memset(dma->vaddr, 0, size);
1252 bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1253 dma->paddr = dma->map->dm_segs[0].ds_addr;
1254
1255 return 0;
1256
1257fail: iwm_dma_contig_free(dma);
1258 return err;
1259}
1260
1261void
1262iwm_dma_contig_free(struct iwm_dma_info *dma)
1263{
1264 if (dma->map != NULL) {
1265 if (dma->vaddr != NULL) {
1266 bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1267 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1268 bus_dmamap_unload(dma->tag, dma->map);
1269 bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1270 bus_dmamem_free(dma->tag, &dma->seg, 1);
1271 dma->vaddr = NULL;
1272 }
1273 bus_dmamap_destroy(dma->tag, dma->map);
1274 dma->map = NULL;
1275 }
1276}
1277
1278int
1279iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1280{
1281 bus_size_t size;
1282 size_t descsz;
1283 int count, i, err;
1284
1285 ring->cur = 0;
1286
1287 if (sc->sc_mqrx_supported) {
1288 count = IWM_RX_MQ_RING_COUNT;
1289 descsz = sizeof(uint64_t);
1290 } else {
1291 count = IWM_RX_RING_COUNT;
1292 descsz = sizeof(uint32_t);
1293 }
1294
1295 /* Allocate RX descriptors (256-byte aligned). */
1296 size = count * descsz;
1297 err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
1298 if (err) {
1299 printf("%s: could not allocate RX ring DMA memory\n",
1300 DEVNAME(sc));
1301 goto fail;
1302 }
1303 ring->desc = ring->free_desc_dma.vaddr;
1304
1305 /* Allocate RX status area (16-byte aligned). */
1306 err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1307 sizeof(*ring->stat), 16);
1308 if (err) {
1309 printf("%s: could not allocate RX status DMA memory\n",
1310 DEVNAME(sc));
1311 goto fail;
1312 }
1313 ring->stat = ring->stat_dma.vaddr;
1314
1315 if (sc->sc_mqrx_supported) {
1316 size = count * sizeof(uint32_t);
1317 err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1318 size, 256);
1319 if (err) {
1320 printf("%s: could not allocate RX ring DMA memory\n",
1321 DEVNAME(sc));
1322 goto fail;
1323 }
1324 }
1325
1326 for (i = 0; i < count; i++) {
1327 struct iwm_rx_data *data = &ring->data[i];
1328
1329 memset(data, 0, sizeof(*data));
1330 err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
1331 IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1332 &data->map);
1333 if (err) {
1334 printf("%s: could not create RX buf DMA map\n",
1335 DEVNAME(sc));
1336 goto fail;
1337 }
1338
1339 err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
1340 if (err)
1341 goto fail;
1342 }
1343 return 0;
1344
1345fail: iwm_free_rx_ring(sc, ring);
1346 return err;
1347}
1348
1349void
1350iwm_disable_rx_dma(struct iwm_softc *sc)
1351{
1352 int ntries;
1353
1354 if (iwm_nic_lock(sc)) {
1355 if (sc->sc_mqrx_supported) {
1356 iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
1357 for (ntries = 0; ntries < 1000; ntries++) {
1358 if (iwm_read_prph(sc, IWM_RFH_GEN_STATUS) &
1359 IWM_RXF_DMA_IDLE)
1360 break;
1361 DELAY(10);
1362 }
1363 } else {
1364 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1365 for (ntries = 0; ntries < 1000; ntries++) {
1366 if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
1367 IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1368 break;
1369 DELAY(10);
1370 }
1371 }
1372 iwm_nic_unlock(sc);
1373 }
1374}
1375
1376 void
1377 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1378 {
1379 ring->cur = 0;
1380 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1381 ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1382 memset(ring->stat, 0, sizeof(*ring->stat));
1383 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1384 ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1385
1386 }
1387
1388 void
1389 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1390 {
1391 int count, i;
1392
1393 iwm_dma_contig_free(&ring->free_desc_dma);
1394 iwm_dma_contig_free(&ring->stat_dma);
1395 iwm_dma_contig_free(&ring->used_desc_dma);
1396
1397 if (sc->sc_mqrx_supported)
1398 count = IWM_RX_MQ_RING_COUNT;
1399 else
1400 count = IWM_RX_RING_COUNT;
1401
1402 for (i = 0; i < count; i++) {
1403 struct iwm_rx_data *data = &ring->data[i];
1404
1405 if (data->m != NULL) {
1406 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1407 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1408 bus_dmamap_unload(sc->sc_dmat, data->map);
1409 m_freem(data->m);
1410 data->m = NULL;
1411 }
1412 if (data->map != NULL)
1413 bus_dmamap_destroy(sc->sc_dmat, data->map);
1414 }
1415 }
1416
1417 int
1418 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1419 {
1420 bus_addr_t paddr;
1421 bus_size_t size;
1422 int i, err;
1423
1424 ring->qid = qid;
1425 ring->queued = 0;
1426 ring->cur = 0;
1427 ring->tail = 0;
1428
1429 /* Allocate TX descriptors (256-byte aligned). */
1430 size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1431 err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1432 if (err) {
1433 printf("%s: could not allocate TX ring DMA memory\n",
1434 DEVNAME(sc));
1435 goto fail;
1436 }
1437 ring->desc = ring->desc_dma.vaddr;
1438
1439 /*
1440 * There is no need to allocate DMA buffers for unused rings.
1441 * 7k/8k/9k hardware supports up to 31 Tx rings which is more
1442 * than we currently need.
1443 *
1444 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
1445 * The command queue is queue 0 (sc->txq[0]), and 4 mgmt/data frame queues
1446 * are sc->txq[IWM_DQA_MIN_MGMT_QUEUE + ac], i.e. sc->txq[5:8],
1447 * in order to provide one queue per EDCA category.
1448 * Tx aggregation requires additional queues, one queue per TID for
1449 * which aggregation is enabled. We map TID 0-7 to sc->txq[10:17].
1450 *
1451 * In non-DQA mode, we use rings 0 through 9 (0-3 are EDCA, 9 is cmd),
1452 * and Tx aggregation is not supported.
1453 *
1454 * Unfortunately, we cannot tell if DQA will be used until the
1455 * firmware gets loaded later, so just allocate sufficient rings
1456 * in order to satisfy both cases.
1457 */
1458 if (qid > IWM_LAST_AGG_TX_QUEUE)
1459 return 0;
1460
1461 size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1462 err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1463 if (err) {
1464 printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1465 goto fail;
1466 }
1467 ring->cmd = ring->cmd_dma.vaddr;
1468
1469 paddr = ring->cmd_dma.paddr;
1470 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1471 struct iwm_tx_data *data = &ring->data[i];
1472 size_t mapsize;
1473
1474 data->cmd_paddr = paddr;
1475 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1476 + offsetof(struct iwm_tx_cmd, scratch);
1477 paddr += sizeof(struct iwm_device_cmd);
1478
1479 /* FW commands may require more mapped space than packets. */
1480 if (qid == IWM_CMD_QUEUE || qid == IWM_DQA_CMD_QUEUE)
1481 mapsize = (sizeof(struct iwm_cmd_header) +
1482 IWM_MAX_CMD_PAYLOAD_SIZE);
1483 else
1484 mapsize = MCLBYTES;
1485 err = bus_dmamap_create(sc->sc_dmat, mapsize,
1486 IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1487 &data->map);
1488 if (err) {
1489 printf("%s: could not create TX buf DMA map\n",
1490 DEVNAME(sc));
1491 goto fail;
1492 }
1493 }
1494 KASSERT(paddr == ring->cmd_dma.paddr + size);
1495 return 0;
1496
1497 fail: iwm_free_tx_ring(sc, ring);
1498 return err;
1499 }
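
A sketch of the queue map described in the comment above (a hedged summary of this driver's DQA layout, not new driver logic): queue 0 carries commands, queues 5-8 carry one EDCA category each, and queues 10-17 carry aggregation TIDs 0-7, which is why rings past IWM_LAST_AGG_TX_QUEUE get no cmd/buffer DMA resources.

/* Hypothetical helpers mirroring the layout in the comment above. */
static inline int
dqa_edca_queue(int ac)		/* ac in [0, 3] */
{
	return 5 + ac;		/* IWM_DQA_MIN_MGMT_QUEUE + ac */
}

static inline int
dqa_agg_queue(int tid)		/* tid in [0, 7] */
{
	return 10 + tid;	/* sc->txq[10:17] */
}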
1500
1501 void
1502 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1503 {
1504 int i;
1505
1506 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1507 struct iwm_tx_data *data = &ring->data[i];
1508
1509 if (data->m != NULL) {
1510 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1511 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1512 bus_dmamap_unload(sc->sc_dmat, data->map);
1513 m_freem(data->m);
1514 data->m = NULL;
1515 }
1516 }
1517 /* Clear TX descriptors. */
1518 memset(ring->desc, 0, ring->desc_dma.size);
1519 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1520 ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1521 sc->qfullmsk &= ~(1 << ring->qid);
1522 sc->qenablemsk &= ~(1 << ring->qid);
1523 /* 7000 family NICs are locked while commands are in progress. */
1524 if (ring->qid == sc->cmdqid && ring->queued > 0) {
1525 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1526 iwm_nic_unlock(sc);
1527 }
1528 ring->queued = 0;
1529 ring->cur = 0;
1530 ring->tail = 0;
1531 }
1532
1533 void
1534 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1535 {
1536 int i;
1537
1538 iwm_dma_contig_free(&ring->desc_dma);
1539 iwm_dma_contig_free(&ring->cmd_dma);
1540
1541 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1542 struct iwm_tx_data *data = &ring->data[i];
1543
1544 if (data->m != NULL) {
1545 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1546 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1547 bus_dmamap_unload(sc->sc_dmat, data->map);
1548 m_freem(data->m);
1549 data->m = NULL;
1550 }
1551 if (data->map != NULL)
1552 bus_dmamap_destroy(sc->sc_dmat, data->map);
1553 }
1554 }
1555
1556 void
1557 iwm_enable_rfkill_int(struct iwm_softc *sc)
1558 {
1559 if (!sc->sc_msix) {
1560 sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
1561 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1562 } else {
1563 IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1564 sc->sc_fh_init_mask);
1565 IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1566 ~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1567 sc->sc_hw_mask = IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1568 }
1569
1570 if (sc->sc_device_family >= IWM_DEVICE_FAMILY_9000)
1571 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1572 IWM_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1573 }
1574
1575 int
1576 iwm_check_rfkill(struct iwm_softc *sc)
1577 {
1578 uint32_t v;
1579 int rv;
1580
1581 /*
1582 * "documentation" is not really helpful here:
1583 * 27: HW_RF_KILL_SW
1584 * Indicates state of (platform's) hardware RF-Kill switch
1585 *
1586 * But apparently when it's off, it's on ...
1587 */
1588 v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1589 rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1590 if (rv) {
1591 sc->sc_flags |= IWM_FLAG_RFKILL;
1592 } else {
1593 sc->sc_flags &= ~IWM_FLAG_RFKILL;
1594 }
1594 }
1595
1596 return rv;
1597 }
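
In other words, the bit reads inverted, as the comment above grumbles: HW_RF_KILL_SW (bit 27 of IWM_CSR_GP_CNTRL, mask 0x08000000) is clear when the platform kill switch is engaged. A minimal sketch of the predicate, assuming a raw register value v:

#include <stdint.h>

static inline int
rfkill_engaged(uint32_t v)
{
	/* Bit clear => radio disabled; matches rv in iwm_check_rfkill(). */
	return (v & 0x08000000U) == 0;
}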
1598
1599 void
1600 iwm_enable_interrupts(struct iwm_softc *sc)
1601 {
1602 if (!sc->sc_msix) {
1603 sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1604 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1605 } else {
1606 /*
1607 * fh/hw_mask keeps all the unmasked causes.
1608 * Unlike MSI, with MSI-X a cause is enabled when it is unset.
1609 */
1610 sc->sc_hw_mask = sc->sc_hw_init_mask;
1611 sc->sc_fh_mask = sc->sc_fh_init_mask;
1612 IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1613 ~sc->sc_fh_mask);
1614 IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1615 ~sc->sc_hw_mask);
1616 }
1617 }
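
The MSI-X polarity noted in the comment above is easy to get backwards: the driver caches the causes it wants in sc_fh_mask/sc_hw_mask and writes the complement to the mask registers, since a zero mask bit enables a cause. A one-line sketch:

#include <stdint.h>

static inline uint32_t
msix_mask_for(uint32_t wanted_causes)
{
	return ~wanted_causes;	/* 0 bits fire, 1 bits stay masked */
}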
1618
1619 void
1620 iwm_enable_fwload_interrupt(struct iwm_softc *sc)
1621 {
1622 if (!sc->sc_msix) {
1623 sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
1624 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1625 } else {
1626 IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1627 sc->sc_hw_init_mask);
1628 IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1629 ~IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
1630 sc->sc_fh_mask = IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM;
1631 }
1632 }
1633
1634 void
1635 iwm_restore_interrupts(struct iwm_softc *sc)
1636 {
1637 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1638 }
1639
1640 void
1641 iwm_disable_interrupts(struct iwm_softc *sc)
1642 {
1643 if (!sc->sc_msix) {
1644 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1645
1646 /* acknowledge all interrupts */
1647 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1648 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1649 } else {
1650 IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1651 sc->sc_fh_init_mask);
1652 IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1653 sc->sc_hw_init_mask);
1654 }
1655 }
1656
1657 void
1658 iwm_ict_reset(struct iwm_softc *sc)
1659 {
1660 iwm_disable_interrupts(sc);
1661
1662 memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1663 sc->ict_cur = 0;
1664
1665 /* Set physical address of ICT (4KB aligned). */
1666 IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1667 IWM_CSR_DRAM_INT_TBL_ENABLE
1668 | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1669 | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1670 | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1671
1672 /* Switch to ICT interrupt mode in driver. */
1673 sc->sc_flags |= IWM_FLAG_USE_ICT;
1674
1675 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1676 iwm_enable_interrupts(sc);
1677 }
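
The ICT base register packs control flags into the high bits and the table's physical page number into the low bits, which is why the 4096-byte table must be 4KB-aligned and its address shifted by IWM_ICT_PADDR_SHIFT (12). A sketch of the packing, assuming a page-aligned paddr:

#include <stdint.h>

static inline uint32_t
ict_table_reg(uint64_t paddr, uint32_t flags)
{
	return flags | (uint32_t)(paddr >> 12);	/* page number below the flags */
}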
1678
1679 #define IWM_HW_READY_TIMEOUT 50
1680 int
1681 iwm_set_hw_ready(struct iwm_softc *sc)
1682 {
1683 int ready;
1684
1685 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1686 IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1687
1688 ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1689 IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1690 IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1691 IWM_HW_READY_TIMEOUT);
1692 if (ready)
1693 IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1694 IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1695
1696 return ready;
1697 }
1698 #undef IWM_HW_READY_TIMEOUT
1699
1700 int
1701 iwm_prepare_card_hw(struct iwm_softc *sc)
1702 {
1703 int t = 0;
1704 int ntries;
1705
1706 if (iwm_set_hw_ready(sc))
1707 return 0;
1708
1709 IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1710 IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1711 DELAY(1000);
1712
1713 for (ntries = 0; ntries < 10; ntries++) {
1714 /* If HW is not ready, prepare the conditions to check again */
1715 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1716 IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1717
1718 do {
1719 if (iwm_set_hw_ready(sc))
1720 return 0;
1721 DELAY(200);
1722 t += 200;
1723 } while (t < 150000);
1724 DELAY(25000);
1725 }
1726
1727 return ETIMEDOUT;
1728 }
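
Worth noting: t is never reset, so the 150 ms polling budget in the inner do/while is shared across all ten outer attempts; after the first attempt exhausts it, each later attempt makes a single 200 us probe plus the 25 ms pause. A rough worst-case bound, as a sketch:

/* Approximate worst case before ETIMEDOUT, in microseconds. */
enum {
	PREP_WORST_CASE_US = 1000		/* initial DELAY(1000) */
	    + 150000				/* shared do/while budget */
	    + 10 * (200 + 25000)		/* one probe + pause per attempt */
};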
1729
1730 void
1731 iwm_apm_config(struct iwm_softc *sc)
1732 {
1733 pcireg_t lctl, cap;
1734
1735 /*
1736 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
1737 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1738 * If so (likely), disable L0S, so device moves directly L0->L1;
1739 * costs negligible amount of power savings.
1740 * If not (unlikely), enable L0S, so there is at least some
1741 * power savings, even without L1.
1742 */
1743 lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1744 sc->sc_cap_off + PCI_PCIE_LCSR);
1745 if (lctl & PCI_PCIE_LCSR_ASPM_L1) {
1746 IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1747 IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1748 } else {
1749 IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1750 IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1751 }
1752
1753 cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1754 sc->sc_cap_off + PCI_PCIE_DCSR2);
1755 sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
1756 DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
1757 DEVNAME(sc),
1758 (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
1759 sc->sc_ltr_enabled ? "En" : "Dis"));
1760 }
1761
1762/*
1763 * Start up NIC's basic functionality after it has been reset
1764 * e.g. after platform boot or shutdown.
1765 * NOTE: This does not load uCode nor start the embedded processor
1766 */
1767 int
1768 iwm_apm_init(struct iwm_softc *sc)
1769 {
1770 int err = 0;
1771
1772 /* Disable L0S exit timer (platform NMI workaround) */
1773 if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
1774 IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1775 IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1776
1777 /*
1778 * Disable L0s without affecting L1;
1779 * don't wait for ICH L0s (ICH bug W/A)
1780 */
1781 IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1782 IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1783
1784 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1785 IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1786
1787 /*
1788 * Enable HAP INTA (interrupt from management bus) to
1789 * wake device's PCI Express link L1a -> L0s
1790 */
1791 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1792 IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1793
1794 iwm_apm_config(sc);
1795
1796 #if 0 /* not for 7k/8k */
1797 /* Configure analog phase-lock-loop before activating to D0A */
1798 if (trans->cfg->base_params->pll_cfg_val)
1799 IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1800 trans->cfg->base_params->pll_cfg_val);
1801 #endif
1802
1803 /*
1804 * Set "initialization complete" bit to move adapter from
1805 * D0U* --> D0A* (powered-up active) state.
1806 */
1807 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1808
1809 /*
1810 * Wait for clock stabilization; once stabilized, access to
1811 * device-internal resources is supported, e.g. iwm_write_prph()
1812 * and accesses to uCode SRAM.
1813 */
1814 if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1815 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1816 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1817 printf("%s: timeout waiting for clock stabilization\n",
1818 DEVNAME(sc));
1819 err = ETIMEDOUT;
1820 goto out;
1821 }
1822
1823 if (sc->host_interrupt_operation_mode) {
1824 /*
1825 * This is a bit of an abuse - this is needed for 7260 / 3160
1826 * only, so we check host_interrupt_operation_mode even though
1827 * this workaround is not otherwise related to it.
1828 *
1829 * Enable the oscillator to count wake up time for L1 exit. This
1830 * consumes slightly more power (100uA) - but allows to be sure
1831 * that we wake up from L1 on time.
1832 *
1833 * This looks weird: read twice the same register, discard the
1834 * value, set a bit, and yet again, read that same register
1835 * just to discard the value. But that's the way the hardware
1836 * seems to like it.
1837 */
1838 if (iwm_nic_lock(sc)) {
1839 iwm_read_prph(sc, IWM_OSC_CLK);
1840 iwm_read_prph(sc, IWM_OSC_CLK);
1841 iwm_nic_unlock(sc);
1842 }
1843 err = iwm_set_bits_prph(sc, IWM_OSC_CLK,
1844 IWM_OSC_CLK_FORCE_CONTROL);
1845 if (err)
1846 goto out;
1847 if (iwm_nic_lock(sc)) {
1848 iwm_read_prph(sc, IWM_OSC_CLK);
1849 iwm_read_prph(sc, IWM_OSC_CLK);
1850 iwm_nic_unlock(sc);
1851 }
1852 }
1853
1854 /*
1855 * Enable DMA clock and wait for it to stabilize.
1856 *
1857 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1858 * do not disable clocks. This preserves any hardware bits already
1859 * set by default in "CLK_CTRL_REG" after reset.
1860 */
1861 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1862 if (iwm_nic_lock(sc)) {
1863 iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1864 IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1865 iwm_nic_unlock(sc);
1866 }
1867 DELAY(20);
1868
1869 /* Disable L1-Active */
1870 err = iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1871 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1872 if (err)
1873 goto out;
1874
1875 /* Clear the interrupt in APMG if the NIC is in RFKILL */
1876 if (iwm_nic_lock(sc)) {
1877 iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1878 IWM_APMG_RTC_INT_STT_RFKILL);
1879 iwm_nic_unlock(sc);
1880 }
1881 }
1882 out:
1883 if (err)
1884 printf("%s: apm init error %d\n", DEVNAME(sc), err);
1885 return err;
1886 }
1887
1888 void
1889 iwm_apm_stop(struct iwm_softc *sc)
1890 {
1891 IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1892 IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1893 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1894 IWM_CSR_HW_IF_CONFIG_REG_PREPARE |
1895 IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
1896 DELAY(1000);
1897 IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1898 IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1899 DELAY(5000);
1900
1901 /* stop device's busmaster DMA activity */
1902 IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1903
1904 if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1905 IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1906 IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1907 printf("%s: timeout waiting for master\n", DEVNAME(sc));
1908
1909 /*
1910 * Clear "initialization complete" bit to move adapter from
1911 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1912 */
1913 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1914 IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1915 }
1916
1917 void
1918 iwm_init_msix_hw(struct iwm_softc *sc)
1919 {
1920 iwm_conf_msix_hw(sc, 0);
1921
1922 if (!sc->sc_msix)
1923 return;
1924
1925 sc->sc_fh_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_FH_INT_MASK_AD);
1926 sc->sc_fh_mask = sc->sc_fh_init_mask;
1927 sc->sc_hw_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_HW_INT_MASK_AD);
1928 sc->sc_hw_mask = sc->sc_hw_init_mask;
1929 }
1930
1931 void
1932 iwm_conf_msix_hw(struct iwm_softc *sc, int stopped)
1933 {
1934 int vector = 0;
1935
1936 if (!sc->sc_msix) {
1937 /* Newer chips default to MSIX. */
1938 if (sc->sc_mqrx_supported && !stopped && iwm_nic_lock(sc)) {
1939 iwm_write_prph(sc, IWM_UREG_CHICK,
1940 IWM_UREG_CHICK_MSI_ENABLE);
1941 iwm_nic_unlock(sc);
1942 }
1943 return;
1944 }
1945
1946 if (!stopped && iwm_nic_lock(sc)) {
1947 iwm_write_prph(sc, IWM_UREG_CHICK, IWM_UREG_CHICK_MSIX_ENABLE);
1948 iwm_nic_unlock(sc);
1949 }
1950
1951 /* Disable all interrupts */
1952 IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD, ~0);
1953 IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD, ~0);
1954
1955 /* Map fallback-queue (command/mgmt) to a single vector */
1956 IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(0),
1957 vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1958 /* Map RSS queue (data) to the same vector */
1959 IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(1),
1960 vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1961
1962 /* Enable the RX queues cause interrupts */
1963 IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1964 IWM_MSIX_FH_INT_CAUSES_Q0 | IWM_MSIX_FH_INT_CAUSES_Q1);
1965
1966 /* Map non-RX causes to the same vector */
1967 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
1968 vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1969 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
1970 vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1971 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_S2D),
1972 vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1973 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_FH_ERR),
1974 vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1975 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_ALIVE),
1976 vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1977 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_WAKEUP),
1978 vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1979 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_IML),
1980 vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1981 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_CT_KILL),
1982 vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1983 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_RF_KILL),
1984 vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1985 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_PERIODIC),
1986 vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1987 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SW_ERR),
1988 vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1989 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SCD),
1990 vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1991 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_FH_TX),
1992 vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1993 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HW_ERR),
1994 vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1995 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HAP),
1996 vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1997
1998 /* Enable non-RX causes interrupts */
1999 IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
2000 IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2001 IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2002 IWM_MSIX_FH_INT_CAUSES_S2D |
2003 IWM_MSIX_FH_INT_CAUSES_FH_ERR);
2004 IWM_CLRBITS(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
2005 IWM_MSIX_HW_INT_CAUSES_REG_ALIVE |
2006 IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2007 IWM_MSIX_HW_INT_CAUSES_REG_IML |
2008 IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2009 IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2010 IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2011 IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2012 IWM_MSIX_HW_INT_CAUSES_REG_SCD |
2013 IWM_MSIX_HW_INT_CAUSES_REG_FH_TX |
2014 IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2015 IWM_MSIX_HW_INT_CAUSES_REG_HAP);
2016 }
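
All causes above land on a single MSI-X vector (vector 0), and every IVAR entry is the vector number with the non-auto-clear flag in bit 7. A sketch of the entry encoding, assuming an 8-bit IVAR slot:

#include <stdint.h>

static inline uint8_t
ivar_entry(uint8_t vector)
{
	return vector | (1U << 7);	/* IWM_MSIX_NON_AUTO_CLEAR_CAUSE */
}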
2017
2018 int
2019 iwm_clear_persistence_bit(struct iwm_softc *sc)
2020 {
2021 uint32_t hpm, wprot;
2022
2023 hpm = iwm_read_prph_unlocked(sc, IWM_HPM_DEBUG);
2024 if (hpm != 0xa5a5a5a0 && (hpm & IWM_HPM_PERSISTENCE_BIT)) {
2025 wprot = iwm_read_prph_unlocked(sc, IWM_PREG_PRPH_WPROT_9000);
2026 if (wprot & IWM_PREG_WFPM_ACCESS) {
2027 printf("%s: cannot clear persistence bit\n",
2028 DEVNAME(sc));
2029 return EPERM;
2030 }
2031 iwm_write_prph_unlocked(sc, IWM_HPM_DEBUG,
2032 hpm & ~IWM_HPM_PERSISTENCE_BIT);
2033 }
2034
2035 return 0;
2036 }
2037
2038 int
2039 iwm_start_hw(struct iwm_softc *sc)
2040 {
2041 int err;
2042
2043 err = iwm_prepare_card_hw(sc);
2044 if (err)
2045 return err;
2046
2047 if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000) {
2048 err = iwm_clear_persistence_bit(sc);
2049 if (err)
2050 return err;
2051 }
2052
2053 /* Reset the entire device */
2054 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2055 DELAY(5000);
2056
2057 err = iwm_apm_init(sc);
2058 if (err)
2059 return err;
2060
2061 iwm_init_msix_hw(sc);
2062
2063 iwm_enable_rfkill_int(sc);
2064 iwm_check_rfkill(sc);
2065
2066 return 0;
2067 }
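
The ordering in iwm_start_hw() matters: the 9000-family persistence bit is cleared before the soft reset, and the MSI-X and RF-kill configuration is redone after it, because the reset wipes interrupt routing. Condensed, the sequence is (a sketch of the calls above, error handling omitted):

/*
 * iwm_prepare_card_hw(sc);
 * iwm_clear_persistence_bit(sc);          -- 9000 family only
 * IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
 * iwm_apm_init(sc);
 * iwm_init_msix_hw(sc);
 * iwm_enable_rfkill_int(sc); iwm_check_rfkill(sc);
 */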
2068
2069
2070 void
2071 iwm_stop_device(struct iwm_softc *sc)
2072 {
2073 int chnl, ntries;
2074 int qid;
2075
2076 iwm_disable_interrupts(sc);
2077 sc->sc_flags &= ~IWM_FLAG_USE_ICT;
2078
2079 /* Stop all DMA channels. */
2080 if (iwm_nic_lock(sc)) {
2081 /* Deactivate TX scheduler. */
2082 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2083
2084 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2085 IWM_WRITE(sc,
2086 IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
2087 for (ntries = 0; ntries < 200; ntries++) {
2088 uint32_t r;
2089
2090 r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
2091 if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
2092 chnl))
2093 break;
2094 DELAY(20);
2095 }
2096 }
2097 iwm_nic_unlock(sc);
2098 }
2099 iwm_disable_rx_dma(sc);
2100
2101 iwm_reset_rx_ring(sc, &sc->rxq);
2102
2103 for (qid = 0; qid < nitems(sc->txq); qid++)
2104 iwm_reset_tx_ring(sc, &sc->txq[qid]);
2105
2106 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2107 if (iwm_nic_lock(sc)) {
2108 /* Power-down device's busmaster DMA clocks */
2109 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
2110 IWM_APMG_CLK_VAL_DMA_CLK_RQT);
2111 iwm_nic_unlock(sc);
2112 }
2113 DELAY(5);
2114 }
2115
2116 /* Make sure (redundant) we've released our request to stay awake */
2117 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
2118 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2119 if (sc->sc_nic_locks > 0)
2120 printf("%s: %d active NIC locks forcefully cleared\n",
2121 DEVNAME(sc), sc->sc_nic_locks);
2122 sc->sc_nic_locks = 0;
2123
2124 /* Stop the device, and put it in low power state */
2125 iwm_apm_stop(sc);
2126
2127 /* Reset the on-board processor. */
2128 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2129 DELAY(5000);
2130
2131 /*
2132 * Upon stop, the IVAR table gets erased, so msi-x won't
2133 * work. This causes a bug in RF-KILL flows, since the interrupt
2134 * that enables radio won't fire on the correct irq, and the
2135 * driver won't be able to handle the interrupt.
2136 * Configure the IVAR table again after reset.
2137 */
2138 iwm_conf_msix_hw(sc, 1);
2139
2140 /*
2141 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2142 * Clear the interrupt again.
2143 */
2144 iwm_disable_interrupts(sc);
2145
2146 /* Even though we stop the HW we still want the RF kill interrupt. */
2147 iwm_enable_rfkill_int(sc);
2148 iwm_check_rfkill(sc);
2149
2150 iwm_prepare_card_hw(sc);
2151 }
2152
2153 void
2154 iwm_nic_config(struct iwm_softc *sc)
2155 {
2156 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2157 uint32_t mask, val, reg_val = 0;
2158
2159 radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
2160 IWM_FW_PHY_CFG_RADIO_TYPE_POS;
2161 radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
2162 IWM_FW_PHY_CFG_RADIO_STEP_POS;
2163 radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
2164 IWM_FW_PHY_CFG_RADIO_DASH_POS;
2165
2166 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2167 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2168 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2169 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2170
2171 /* radio configuration */
2172 reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2173 reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2174 reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2175
2176 mask = IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2177 IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2178 IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2179 IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2180 IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2181 IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2182 IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2183
2184 val = IWM_READ(sc, IWM_CSR_HW_IF_CONFIG_REG);
2185 val &= ~mask;
2186 val |= reg_val;
2187 IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, val);
2188
2189 /*
2190 * W/A : NIC is stuck in a reset state after Early PCIe power off
2191 * (PCIe power is lost before PERST# is asserted), causing ME FW
2192 * to lose ownership and not being able to obtain it back.
2193 */
2194 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2195 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2196 IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
2197 ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
2198 }
2199
2200 int
2201 iwm_nic_rx_init(struct iwm_softc *sc)
2202 {
2203 if (sc->sc_mqrx_supported)
2204 return iwm_nic_rx_mq_init(sc);
2205 else
2206 return iwm_nic_rx_legacy_init(sc);
2207 }
2208
2209 int
2210 iwm_nic_rx_mq_init(struct iwm_softc *sc)
2211 {
2212 int enabled;
2213
2214 if (!iwm_nic_lock(sc))
2215 return EBUSY;
2216
2217 /* Stop RX DMA. */
2218 iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
2219 /* Disable RX used and free queue operation. */
2220 iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);
2221
2222 iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
2223 sc->rxq.free_desc_dma.paddr);
2224 iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
2225 sc->rxq.used_desc_dma.paddr);
2226 iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
2227 sc->rxq.stat_dma.paddr);
2228 iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
2229 iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
2230 iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);
2231
2232 /* We configure only queue 0 for now. */
2233 enabled = ((1 << 0) << 16) | (1 << 0);
2234
2235 /* Enable RX DMA, 4KB buffer size. */
2236 iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
2237 IWM_RFH_DMA_EN_ENABLE_VAL |
2238 IWM_RFH_RXF_DMA_RB_SIZE_4K |
2239 IWM_RFH_RXF_DMA_MIN_RB_4_8 |
2240 IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
2241 IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
2242
2243 /* Enable RX DMA snooping. */
2244 iwm_write_prph(sc, IWM_RFH_GEN_CFG,
2245 IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
2246 IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
2247 (sc->sc_integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
2248 IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));
2249
2250 /* Enable the configured queue(s). */
2251 iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
2252
2253 iwm_nic_unlock(sc);
2254
2255 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2256
2257 IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
2258
2259 return 0;
2260 }
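
The enabled word above sets the same queue bit in both halves of IWM_RFH_RXF_RXQ_ACTIVE, so queue 0 yields 0x00010001. Generalizing the bit pattern (a sketch; only queue 0 is configured by this driver):

#include <stdint.h>

static inline uint32_t
rfh_rxq_enable_word(int q)
{
	return ((1U << q) << 16) | (1U << q);	/* q == 0 -> 0x00010001 */
}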
2261
2262 int
2263 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
2264 {
2265 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
2266
2267 iwm_disable_rx_dma(sc);
2268
2269 if (!iwm_nic_lock(sc))
2270 return EBUSY;
2271
2272 /* reset and flush pointers */
2273 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
2274 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
2275 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
2276 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
2277
2278 /* Set physical address of RX ring (256-byte aligned). */
2279 IWM_WRITE(sc,
2280 IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.free_desc_dma.paddr >> 8);
2281
2282 /* Set physical address of RX status (16-byte aligned). */
2283 IWM_WRITE(sc,
2284 IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
2285
2286 /* Enable RX. */
2287 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
2288 IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
2289 IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
2290 IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
2291 (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
2292 IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
2293 IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
2294
2295 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2296
2297 /* W/A for interrupt coalescing bug in 7260 and 3160 */
2298 if (sc->host_interrupt_operation_mode)
2299 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
2300
2301 iwm_nic_unlock(sc);
2302
2303 /*
2304 * This value should initially be 0 (before preparing any RBs),
2305 * and should be 8 after preparing the first 8 RBs (for example).
2306 */
2307 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
2308
2309 return 0;
2310 }
2311
2312 int
2313 iwm_nic_tx_init(struct iwm_softc *sc)
2314 {
2315 int qid, err;
2316
2317 if (!iwm_nic_lock(sc))
2318 return EBUSY;
2319
2320 /* Deactivate TX scheduler. */
2321 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2322
2323 /* Set physical address of "keep warm" page (16-byte aligned). */
2324 IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
2325
2326 for (qid = 0; qid < nitems(sc->txq); qid++) {
2327 struct iwm_tx_ring *txq = &sc->txq[qid];
2328
2329 /* Set physical address of TX ring (256-byte aligned). */
2330 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
2331 txq->desc_dma.paddr >> 8);
2332 }
2333
2334 err = iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
2335 IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
2336 IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
2337
2338 iwm_nic_unlock(sc);
2339
2340 return err;
2341}
2342
2343int
2344iwm_nic_init(struct iwm_softc *sc)
2345{
2346 int err;
2347
2348 iwm_apm_init(sc);
2349 if (sc->sc_device_family == IWM_DEVICE_FAMILY_70001)
2350 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG(((0x00000) + 0x3000) + 0x000c),
2351 IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN(0x00000000),
2352 ~IWM_APMG_PS_CTRL_MSK_PWR_SRC(0x03000000));
2353
2354 iwm_nic_config(sc);
2355
2356 err = iwm_nic_rx_init(sc);
2357 if (err)
2358 return err;
2359
2360 err = iwm_nic_tx_init(sc);
2361 if (err)
2362 return err;
2363
2364 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff)(((sc)->sc_st)->write_4(((sc)->sc_sh), (((0x0A8))), (
((((sc)->sc_st)->read_4(((sc)->sc_sh), (((0x0A8)))))
| (0x800fffff)))))
;
2365
2366 return 0;
2367}
2368
2369/* Map a TID to an ieee80211_edca_ac category. */
2370 const uint8_t iwm_tid_to_ac[IWM_MAX_TID_COUNT] = {
2371 EDCA_AC_BE,
2372 EDCA_AC_BK,
2373 EDCA_AC_BK,
2374 EDCA_AC_BE,
2375 EDCA_AC_VI,
2376 EDCA_AC_VI,
2377 EDCA_AC_VO,
2378 EDCA_AC_VO,
2379};
2380
2381/* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2382const uint8_t iwm_ac_to_tx_fifo[] = {
2383 IWM_TX_FIFO_BE,
2384 IWM_TX_FIFO_BK,
2385 IWM_TX_FIFO_VI,
2386 IWM_TX_FIFO_VO,
2387};
2388
2389int
2390iwm_enable_ac_txq(struct iwm_softc *sc, int qid, int fifo)
2391{
2392 int err;
2393 iwm_nic_assert_locked(sc);
2394
2395 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2396
2397 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2398 (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
2399 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2400
2401 err = iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
2402 if (err) {
2403 return err;
2404 }
2405
2406 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
2407
2408 iwm_write_mem32(sc,
2409 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
2410
2411 /* Set scheduler window size and frame limit. */
2412 iwm_write_mem32(sc,
2413 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2414 sizeof(uint32_t),
2415 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2416 IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2417 ((IWM_FRAME_LIMIT
2418 << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2419 IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2420
2421 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2422 (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2423 (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2424 (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2425 IWM_SCD_QUEUE_STTS_REG_MSK);
2426
2427 if (qid == sc->cmdqid)
2428 iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2429 iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));
2430
2431 return 0;
2432}
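With the constants visible in this build (IWM_FRAME_LIMIT = 64, a window-size field in bits 0-6 and a frame-limit field in bits 16-22), the second scheduler context word written by iwm_write_mem32() above works out to:

    ((64 << 0)  & 0x0000007F) |     /* scheduler window size = 64 */
    ((64 << 16) & 0x007F0000)       /* frame limit = 64 */
    /* == 0x00400040 */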
2433
2434int
2435iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo,
2436 int aggregate, uint8_t tid, uint16_t ssn)
2437{
2438 struct iwm_tx_ring *ring = &sc->txq[qid];
2439 struct iwm_scd_txq_cfg_cmd cmd;
2440 int err, idx, scd_bug;
2441
2442 iwm_nic_assert_locked(sc);
2443
2444 /*
2445 * If we need to move the SCD write pointer by steps of
2446 * 0x40, 0x80 or 0xc0, it gets stuck.
2447 * This is really ugly, but this is the easiest way out for
2448 * this sad hardware issue.
2449 * This bug has been fixed on devices 9000 and up.
2450 */
2451 scd_bug = !sc->sc_mqrx_supported &&
2452 !((ssn - ring->cur) & 0x3f) &&
2453 (ssn != ring->cur);
2454 if (scd_bug)
2455 ssn = (ssn + 1) & 0xfff;
2456
2457 idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
2458 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | idx);
2459 ring->cur = idx;
2460 ring->tail = idx;
2461
2462 memset(&cmd, 0, sizeof(cmd));
2463 cmd.tid = tid;
2464 cmd.scd_queue = qid;
2465 cmd.enable = 1;
2466 cmd.sta_id = sta_id;
2467 cmd.tx_fifo = fifo;
2468 cmd.aggregate = aggregate;
2469 cmd.ssn = htole16(ssn);
2470 cmd.window = IWM_FRAME_LIMIT;
2471
2472 err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
2473 sizeof(cmd), &cmd);
2474 if (err)
2475 return err;
2476
2477 sc->qenablemsk |= (1 << qid);
2478 return 0;
2479}
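A concrete instance of the SCD write-pointer erratum handled above (numbers are illustrative):

    /* ring->cur == 0x100, ssn == 0x140 on a non-MQ-RX (pre-9000) device: */
    ((0x140 - 0x100) & 0x3f) == 0   /* pointer would move by 0x40: stuck */
    ssn = (0x140 + 1) & 0xfff;      /* so the driver nudges ssn to 0x141 */

Callers can detect the nudge by comparing ring->cur against IWM_AGG_SSN_TO_TXQ_IDX() of the original ssn, which is what iwm_sta_tx_agg() does further below.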
2480
2481int
2482iwm_disable_txq(struct iwm_softc *sc, int sta_id, int qid, uint8_t tid)
2483{
2484 struct iwm_scd_txq_cfg_cmd cmd;
2485 int err;
2486
2487 memset(&cmd, 0, sizeof(cmd));
2488 cmd.tid = tid;
2489 cmd.scd_queue = qid;
2490 cmd.enable = 0;
2491 cmd.sta_id = sta_id;
2492
2493 err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
2494 if (err)
2495 return err;
2496
2497 sc->qenablemsk &= ~(1 << qid);
2498 return 0;
2499}
2500
2501int
2502iwm_post_alive(struct iwm_softc *sc)
2503{
2504 int nwords;
2505 int err, chnl;
2506 uint32_t base;
2507
2508 if (!iwm_nic_lock(sc))
2509 return EBUSY;
2510
2511 base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2512
2513 iwm_ict_reset(sc);
2514
2515 iwm_nic_unlock(sc);
2516
2517 /* Clear TX scheduler state in SRAM. */
2518 nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2519 IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
2520 / sizeof(uint32_t);
2521 err = iwm_write_mem(sc,
2522 sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
2523 NULL, nwords);
2524 if (err)
2525 return err;
2526
2527 if (!iwm_nic_lock(sc))
2528 return EBUSY;
2529
2530 /* Set physical address of TX scheduler rings (1KB aligned). */
2531 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2532
2533 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2534
2535 /* enable command channel */
2536 err = iwm_enable_ac_txq(sc, sc->cmdqid, IWM_TX_FIFO_CMD);
2537 if (err) {
2538 iwm_nic_unlock(sc);
2539 return err;
2540 }
2541
2542 /* Activate TX scheduler. */
2543 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2544
2545 /* Enable DMA channels. */
2546 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2547 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2548 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2549 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2550 }
2551
2552 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2553 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2554
2555 iwm_nic_unlock(sc);
2556
2557 /* Enable L1-Active */
2558 if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
2559 err = iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2560 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2561 }
2562
2563 return err;
2564}
2565
2566struct iwm_phy_db_entry *
2567iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
2568{
2569 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2570
2571 if (type >= IWM_PHY_DB_MAX)
2572 return NULL;
2573
2574 switch (type) {
2575 case IWM_PHY_DB_CFG:
2576 return &phy_db->cfg;
2577 case IWM_PHY_DB_CALIB_NCH:
2578 return &phy_db->calib_nch;
2579 case IWM_PHY_DB_CALIB_CHG_PAPD:
2580 if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2581 return NULL;
2582 return &phy_db->calib_ch_group_papd[chg_id];
2583 case IWM_PHY_DB_CALIB_CHG_TXP:
2584 if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2585 return NULL;
2586 return &phy_db->calib_ch_group_txp[chg_id];
2587 default:
2588 return NULL;
2589 }
2590 return NULL;
2591}
2592
2593int
2594iwm_phy_db_set_section(struct iwm_softc *sc,
2595 struct iwm_calib_res_notif_phy_db *phy_db_notif)
2596{
2597 uint16_t type = le16toh(phy_db_notif->type);
2598 uint16_t size = le16toh(phy_db_notif->length);
2599 struct iwm_phy_db_entry *entry;
2600 uint16_t chg_id = 0;
2601
2602 if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2603 type == IWM_PHY_DB_CALIB_CHG_TXP)
2604 chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2605
2606 entry = iwm_phy_db_get_section(sc, type, chg_id);
2607 if (!entry)
2608 return EINVAL;
2609
2610 if (entry->data)
2611 free(entry->data, M_DEVBUF, entry->size);
2612 entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
2613 if (!entry->data) {
2614 entry->size = 0;
2615 return ENOMEM;
2616 }
2617 memcpy(entry->data, phy_db_notif->data, size);
2618 entry->size = size;
2619
2620 return 0;
2621}
2622
2623int
2624iwm_is_valid_channel(uint16_t ch_id)
2625{
2626 if (ch_id <= 14 ||
2627 (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2628 (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2629 (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2630 return 1;
2631 return 0;
2632}
2633
2634uint8_t
2635iwm_ch_id_to_ch_index(uint16_t ch_id)
2636{
2637 if (!iwm_is_valid_channel(ch_id))
2638 return 0xff;
2639
2640 if (ch_id <= 14)
2641 return ch_id - 1;
2642 if (ch_id <= 64)
2643 return (ch_id + 20) / 4;
2644 if (ch_id <= 140)
2645 return (ch_id - 12) / 4;
2646 return (ch_id - 13) / 4;
2647}
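Spot checks of the piecewise mapping above, showing the three 5 GHz sub-ranges continue the index sequence without gaps:

    /*
     * ch 1   -> 0,  ch 14 -> 13          (2 GHz)
     * ch 36  -> (36+20)/4   = 14,  ch 64  -> 21
     * ch 100 -> (100-12)/4  = 22,  ch 140 -> 32
     * ch 145 -> (145-13)/4  = 33,  ch 165 -> 38
     */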
2648
2649
2650uint16_t
2651iwm_channel_id_to_papd(uint16_t ch_id)
2652{
2653 if (!iwm_is_valid_channel(ch_id))
2654 return 0xff;
2655
2656 if (1 <= ch_id && ch_id <= 14)
2657 return 0;
2658 if (36 <= ch_id && ch_id <= 64)
2659 return 1;
2660 if (100 <= ch_id && ch_id <= 140)
2661 return 2;
2662 return 3;
2663}
2664
2665uint16_t
2666iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2667{
2668 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2669 struct iwm_phy_db_chg_txp *txp_chg;
2670 int i;
2671 uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2672
2673 if (ch_index == 0xff)
2674 return 0xff;
2675
2676 for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2677 txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2678 if (!txp_chg)
2679 return 0xff;
2680 /*
2681 * Look for the first channel group whose max channel
2682 * index is not below the requested channel's index.
2683 */
2684 if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2685 return i;
2686 }
2687 return 0xff;
2688}
2689
2690int
2691iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2692 uint16_t *size, uint16_t ch_id)
2693{
2694 struct iwm_phy_db_entry *entry;
2695 uint16_t ch_group_id = 0;
2696
2697 if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2698 ch_group_id = iwm_channel_id_to_papd(ch_id);
2699 else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2700 ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2701
2702 entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2703 if (!entry)
2704 return EINVAL;
2705
2706 *data = entry->data;
2707 *size = entry->size;
2708
2709 return 0;
2710}
2711
2712int
2713iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2714 void *data)
2715{
2716 struct iwm_phy_db_cmd phy_db_cmd;
2717 struct iwm_host_cmd cmd = {
2718 .id = IWM_PHY_DB_CMD,
2719 .flags = IWM_CMD_ASYNC,
2720 };
2721
2722 phy_db_cmd.type = le16toh(type);
2723 phy_db_cmd.length = le16toh(length);
2724
2725 cmd.data[0] = &phy_db_cmd;
2726 cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2727 cmd.data[1] = data;
2728 cmd.len[1] = length;
2729
2730 return iwm_send_cmd(sc, &cmd);
2731}
2732
2733int
2734iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
2735 uint8_t max_ch_groups)
2736{
2737 uint16_t i;
2738 int err;
2739 struct iwm_phy_db_entry *entry;
2740
2741 for (i = 0; i < max_ch_groups; i++) {
2742 entry = iwm_phy_db_get_section(sc, type, i);
2743 if (!entry)
2744 return EINVAL;
2745
2746 if (!entry->size)
2747 continue;
2748
2749 err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2750 if (err)
2751 return err;
2752
2753 DELAY(1000);
2754 }
2755
2756 return 0;
2757}
2758
2759int
2760iwm_send_phy_db_data(struct iwm_softc *sc)
2761{
2762 uint8_t *data = NULL;
2763 uint16_t size = 0;
2764 int err;
2765
2766 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2767 if (err)
2768 return err;
2769
2770 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2771 if (err)
2772 return err;
2773
2774 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2775 &data, &size, 0);
2776 if (err)
2777 return err;
2778
2779 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2780 if (err)
2781 return err;
2782
2783 err = iwm_phy_db_send_all_channel_groups(sc,
2784 IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2785 if (err)
2786 return err;
2787
2788 err = iwm_phy_db_send_all_channel_groups(sc,
2789 IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2790 if (err)
2791 return err;
2792
2793 return 0;
2794}
2795
2796/*
2797 * For the high priority TE use a time event type that has similar priority to
2798 * the FW's action scan priority.
2799 */
2800#define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2801#define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2802
2803int
2804iwm_send_time_event_cmd(struct iwm_softc *sc,
2805 const struct iwm_time_event_cmd *cmd)
2806{
2807 struct iwm_rx_packet *pkt;
2808 struct iwm_time_event_resp *resp;
2809 struct iwm_host_cmd hcmd = {
2810 .id = IWM_TIME_EVENT_CMD,
2811 .flags = IWM_CMD_WANT_RESP,
2812 .resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2813 };
2814 uint32_t resp_len;
2815 int err;
2816
2817 hcmd.data[0] = cmd;
2818 hcmd.len[0] = sizeof(*cmd);
2819 err = iwm_send_cmd(sc, &hcmd);
2820 if (err)
2821 return err;
2822
2823 pkt = hcmd.resp_pkt;
2824 if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
2825 err = EIO;
2826 goto out;
2827 }
2828
2829 resp_len = iwm_rx_packet_payload_len(pkt);
2830 if (resp_len != sizeof(*resp)) {
2831 err = EIO;
2832 goto out;
2833 }
2834
2835 resp = (void *)pkt->data;
2836 if (le32toh(resp->status) == 0)
2837 sc->sc_time_event_uid = le32toh(resp->unique_id);
2838 else
2839 err = EIO;
2840out:
2841 iwm_free_resp(sc, &hcmd);
2842 return err;
2843}
2844
2845void
2846iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2847 uint32_t duration, uint32_t max_delay)
2848{
2849 struct iwm_time_event_cmd time_cmd;
2850
2851 /* Do nothing if a time event is already scheduled. */
2852 if (sc->sc_flags & IWM_FLAG_TE_ACTIVE)
2853 return;
2854
2855 memset(&time_cmd, 0, sizeof(time_cmd));
2856
2857 time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2858 time_cmd.id_and_color =
2859 htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2860 time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2861
2862 time_cmd.apply_time = htole32(0);
2863
2864 time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2865 time_cmd.max_delay = htole32(max_delay);
2866 /* TODO: why do we need to interval = bi if it is not periodic? */
2867 time_cmd.interval = htole32(1);
2868 time_cmd.duration = htole32(duration);
2869 time_cmd.repeat = 1;
2870 time_cmd.policy
2871 = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2872 IWM_TE_V2_NOTIF_HOST_EVENT_END |
2873 IWM_T2_V2_START_IMMEDIATELY);
2874
2875 if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2876 sc->sc_flags |= IWM_FLAG_TE_ACTIVE;
2877
2878 DELAY(100);
2879}
2880
2881void
2882iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
2883{
2884 struct iwm_time_event_cmd time_cmd;
2885
2886 /* Do nothing if the time event has already ended. */
2887 if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE) == 0)
2888 return;
2889
2890 memset(&time_cmd, 0, sizeof(time_cmd));
2891
2892 time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
2893 time_cmd.id_and_color =
2894 htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2895 time_cmd.id = htole32(sc->sc_time_event_uid);
2896
2897 if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2898 sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
2899
2900 DELAY(100);
2901}
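A minimal usage sketch of this pair (hypothetical caller and placeholder values, not taken from this file): the driver brackets the association handshake so firmware keeps the channel available long enough for the exchange:

    /* hypothetical: before transmitting AUTH/ASSOC frames */
    iwm_protect_session(sc, in, duration_tu, max_delay_tu);
    /* ... handshake runs; once complete or abandoned: */
    iwm_unprotect_session(sc, in);

Here duration_tu and max_delay_tu are placeholders for firmware time units chosen by the caller.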
2902
2903/*
2904 * NVM read access and content parsing. We do not support
2905 * external NVM or writing NVM.
2906 */
2907
2908/* list of NVM sections we are allowed/need to read */
2909const int iwm_nvm_to_read[] = {
2910 IWM_NVM_SECTION_TYPE_HW,
2911 IWM_NVM_SECTION_TYPE_SW,
2912 IWM_NVM_SECTION_TYPE_REGULATORY,
2913 IWM_NVM_SECTION_TYPE_CALIBRATION,
2914 IWM_NVM_SECTION_TYPE_PRODUCTION,
2915 IWM_NVM_SECTION_TYPE_REGULATORY_SDP,
2916 IWM_NVM_SECTION_TYPE_HW_8000,
2917 IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2918 IWM_NVM_SECTION_TYPE_PHY_SKU,
2919};
2920
2921#define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
2922
2923#define IWM_NVM_WRITE_OPCODE 1
2924#define IWM_NVM_READ_OPCODE 0
2925
2926int
2927iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2928 uint16_t length, uint8_t *data, uint16_t *len)
2929{
2930
2931 struct iwm_nvm_access_cmd nvm_access_cmd = {
2932 .offset = htole16(offset),
2933 .length = htole16(length),
2934 .type = htole16(section),
2935 .op_code = IWM_NVM_READ_OPCODE,
2936 };
2937 struct iwm_nvm_access_resp *nvm_resp;
2938 struct iwm_rx_packet *pkt;
2939 struct iwm_host_cmd cmd = {
2940 .id = IWM_NVM_ACCESS_CMD,
2941 .flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL),
2942 .resp_pkt_len = IWM_CMD_RESP_MAX,
2943 .data = { &nvm_access_cmd, },
2944 };
2945 int err, offset_read;
2946 size_t bytes_read;
2947 uint8_t *resp_data;
2948
2949 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2950
2951 err = iwm_send_cmd(sc, &cmd);
2952 if (err)
2953 return err;
2954
2955 pkt = cmd.resp_pkt;
2956 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2957 err = EIO;
2958 goto exit;
2959 }
2960
2961 /* Extract NVM response */
2962 nvm_resp = (void *)pkt->data;
2963 if (nvm_resp == NULL)
2964 return EIO;
2965
2966 err = le16toh(nvm_resp->status);
2967 bytes_read = le16toh(nvm_resp->length);
2968 offset_read = le16toh(nvm_resp->offset);
2969 resp_data = nvm_resp->data;
2970 if (err) {
2971 err = EINVAL;
2972 goto exit;
2973 }
2974
2975 if (offset_read != offset) {
2976 err = EINVAL;
2977 goto exit;
2978 }
2979
2980 if (bytes_read > length) {
2981 err = EINVAL;
2982 goto exit;
2983 }
2984
2985 memcpy(data + offset, resp_data, bytes_read);
2986 *len = bytes_read;
2987
2988 exit:
2989 iwm_free_resp(sc, &cmd);
2990 return err;
2991}
2992
2993/*
2994 * Reads an NVM section completely.
2995 * NICs prior to the 7000 family don't have a real NVM, but just read
2996 * section 0, which is the EEPROM. Because the EEPROM reading is unlimited
2997 * by uCode, we need to manually check in this case that we don't
2998 * overflow and try to read more than the EEPROM size.
2999 */
3000int
3001iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
3002 uint16_t *len, size_t max_len)
3003{
3004 uint16_t chunklen, seglen;
3005 int err = 0;
3006
3007 chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
3008 *len = 0;
3009
3010 /* Read NVM chunks until exhausted (reading less than requested) */
3011 while (seglen == chunklen && *len < max_len) {
3012 err = iwm_nvm_read_chunk(sc,
3013 section, *len, chunklen, data, &seglen);
3014 if (err)
3015 return err;
3016
3017 *len += seglen;
3018 }
3019
3020 return err;
3021}
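For example, with the 2 KiB default chunk size defined above, a 5000-byte section comes back as reads of 2048, 2048 and 904 bytes; the short final read makes seglen != chunklen and ends the loop with *len == 5000.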
3022
3023uint8_t
3024iwm_fw_valid_tx_ant(struct iwm_softc *sc)
3025{
3026 uint8_t tx_ant;
3027
3028 tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
3029 >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
3030
3031 if (sc->sc_nvm.valid_tx_ant)
3032 tx_ant &= sc->sc_nvm.valid_tx_ant;
3033
3034 return tx_ant;
3035}
3036
3037uint8_t
3038iwm_fw_valid_rx_ant(struct iwm_softc *sc)
3039{
3040 uint8_t rx_ant;
3041
3042 rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
3043 >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
3044
3045 if (sc->sc_nvm.valid_rx_ant)
3046 rx_ant &= sc->sc_nvm.valid_rx_ant;
3047
3048 return rx_ant;
3049}
3050
3051int
3052iwm_valid_siso_ant_rate_mask(struct iwm_softc *sc)
3053{
3054 uint8_t valid_tx_ant = iwm_fw_valid_tx_ant(sc);
3055
3056 /*
3057 * According to the Linux driver, antenna B should be preferred
3058 * on 9k devices since it is not shared with bluetooth. However,
3059 * there are 9k devices which do not support antenna B at all.
3060 */
3061 if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000 &&
3062 (valid_tx_ant & IWM_ANT_B))
3063 return IWM_RATE_MCS_ANT_B_MSK;
3064
3065 return IWM_RATE_MCS_ANT_A_MSK;
3066}
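Example of the PHY config decoding above: a sc_fw_phy_config value of 0x00030000 yields (0x00030000 >> 16) & 0xf == 0x3, i.e. IWM_ANT_A | IWM_ANT_B valid for TX; on a 9000-family device that mask therefore selects IWM_RATE_MCS_ANT_B_MSK for SISO rates.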
3067
3068void
3069iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
3070 const uint8_t *nvm_channels, int nchan)
3071{
3072 struct ieee80211com *ic = &sc->sc_ic;
3073 struct iwm_nvm_data *data = &sc->sc_nvm;
3074 int ch_idx;
3075 struct ieee80211_channel *channel;
3076 uint16_t ch_flags;
3077 int is_5ghz;
3078 int flags, hw_value;
3079
3080 for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
3081 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
3082
3083 if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
3084 !data->sku_cap_band_52GHz_enable)
3085 ch_flags &= ~IWM_NVM_CHANNEL_VALID;
3086
3087 if (!(ch_flags & IWM_NVM_CHANNEL_VALID))
3088 continue;
3089
3090 hw_value = nvm_channels[ch_idx];
3091 channel = &ic->ic_channels[hw_value];
3092
3093 is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
3094 if (!is_5ghz) {
3095 flags = IEEE80211_CHAN_2GHZ;
3096 channel->ic_flags
3097 = IEEE80211_CHAN_CCK
3098 | IEEE80211_CHAN_OFDM
3099 | IEEE80211_CHAN_DYN
3100 | IEEE80211_CHAN_2GHZ;
3101 } else {
3102 flags = IEEE80211_CHAN_5GHZ;
3103 channel->ic_flags =
3104 IEEE80211_CHAN_A;
3105 }
3106 channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
3107
3108 if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
3109 channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
3110
3111 if (data->sku_cap_11n_enable) {
3112 channel->ic_flags |= IEEE80211_CHAN_HT;
3113 if (ch_flags & IWM_NVM_CHANNEL_40MHZ)
3114 channel->ic_flags |= IEEE80211_CHAN_40MHZ;
3115 }
3116
3117 if (is_5ghz && data->sku_cap_11ac_enable) {
3118 channel->ic_flags |= IEEE80211_CHAN_VHT;
3119 if (ch_flags & IWM_NVM_CHANNEL_80MHZ)
3120 channel->ic_xflags |= IEEE80211_CHANX_80MHZ;
3121 }
3122 }
3123}
3124
3125int
3126iwm_mimo_enabled(struct iwm_softc *sc)
3127{
3128 struct ieee80211com *ic = &sc->sc_ic;
3129
3130 return !sc->sc_nvm.sku_cap_mimo_disable &&
3131 (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
3132}
3133
3134void
3135iwm_setup_ht_rates(struct iwm_softc *sc)
3136{
3137 struct ieee80211com *ic = &sc->sc_ic;
3138 uint8_t rx_ant;
3139
3140 /* TX is supported with the same MCS as RX. */
3141 ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
3142
3143 memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
3144 ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */
3145
3146 if (!iwm_mimo_enabled(sc))
3147 return;
3148
3149 rx_ant = iwm_fw_valid_rx_ant(sc);
3150 if ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
3151 (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)
3152 ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */
3153}
3154
3155void
3156iwm_setup_vht_rates(struct iwm_softc *sc)
3157{
3158 struct ieee80211com *ic = &sc->sc_ic;
3159 uint8_t rx_ant = iwm_fw_valid_rx_ant(sc);
3160 int n;
3161
3162 ic->ic_vht_rxmcs = (IEEE80211_VHT_MCS_0_9 <<
3163 IEEE80211_VHT_MCS_FOR_SS_SHIFT(1));
3164
3165 if (iwm_mimo_enabled(sc) &&
3166 ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
3167 (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)) {
3168 ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_0_9 <<
3169 IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3170 } else {
3171 ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3172 IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3173 }
3174
3175 for (n = 3; n <= IEEE80211_VHT_NUM_SS; n++) {
3176 ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3177 IEEE80211_VHT_MCS_FOR_SS_SHIFT(n));
3178 }
3179
3180 ic->ic_vht_txmcs = ic->ic_vht_rxmcs;
3181}
3182
3183void
3184iwm_init_reorder_buffer(struct iwm_reorder_buffer *reorder_buf,
3185 uint16_t ssn, uint16_t buf_size)
3186{
3187 reorder_buf->head_sn = ssn;
3188 reorder_buf->num_stored = 0;
3189 reorder_buf->buf_size = buf_size;
3190 reorder_buf->last_amsdu = 0;
3191 reorder_buf->last_sub_index = 0;
3192 reorder_buf->removed = 0;
3193 reorder_buf->valid = 0;
3194 reorder_buf->consec_oldsn_drops = 0;
3195 reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3196 reorder_buf->consec_oldsn_prev_drop = 0;
3197}
3198
3199void
3200iwm_clear_reorder_buffer(struct iwm_softc *sc, struct iwm_rxba_data *rxba)
3201{
3202 int i;
3203 struct iwm_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3204 struct iwm_reorder_buf_entry *entry;
3205
3206 for (i = 0; i < reorder_buf->buf_size; i++) {
3207 entry = &rxba->entries[i];
3208 ml_purge(&entry->frames);
3209 timerclear(&entry->reorder_time);
3210 }
3211
3212 reorder_buf->removed = 1;
3213 timeout_del(&reorder_buf->reorder_timer);
3214 timerclear(&rxba->last_rx);
3215 timeout_del(&rxba->session_timer);
3216 rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
3217}
3218
3219#define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
3220
3221void
3222iwm_rx_ba_session_expired(void *arg)
3223{
3224 struct iwm_rxba_data *rxba = arg;
3225 struct iwm_softc *sc = rxba->sc;
3226 struct ieee80211com *ic = &sc->sc_ic;
3227 struct ieee80211_node *ni = ic->ic_bss;
3228 struct timeval now, timeout, expiry;
3229 int s;
3230
3231 s = splnet();
3232 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0 &&
3233 ic->ic_state == IEEE80211_S_RUN &&
3234 rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3235 getmicrouptime(&now);
3236 USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3237 timeradd(&rxba->last_rx, &timeout, &expiry);
3238 if (timercmp(&now, &expiry, <)) {
3239 timeout_add_usec(&rxba->session_timer, rxba->timeout);
3240 } else {
3241 ic->ic_stats.is_ht_rx_ba_timeout++;
3242 ieee80211_delba_request(ic, ni,
3243 IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
3244 }
3245 }
3246 splx(s);
3247}
3248
3249void
3250iwm_reorder_timer_expired(void *arg)
3251{
3252 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3253 struct iwm_reorder_buffer *buf = arg;
3254 struct iwm_rxba_data *rxba = iwm_rxba_data_from_reorder_buf(buf);
3255 struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
3256 struct iwm_softc *sc = rxba->sc;
3257 struct ieee80211com *ic = &sc->sc_ic;
3258 struct ieee80211_node *ni = ic->ic_bss;
3259 int i, s;
3260 uint16_t sn = 0, index = 0;
3261 int expired = 0;
3262 int cont = 0;
3263 struct timeval now, timeout, expiry;
3264
3265 if (!buf->num_stored || buf->removed)
3266 return;
3267
3268 s = splnet();
3269 getmicrouptime(&now);
3270 USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3271
3272 for (i = 0; i < buf->buf_size; i++) {
3273 index = (buf->head_sn + i) % buf->buf_size;
3274
3275 if (ml_empty(&entries[index].frames)) {
3276 /*
3277 * If there is a hole and the next frame didn't expire
3278 * we want to break and not advance SN.
3279 */
3280 cont = 0;
3281 continue;
3282 }
3283 timeradd(&entries[index].reorder_time, &timeout, &expiry);
3284 if (!cont && timercmp(&now, &expiry, <))
3285 break;
3286
3287 expired = 1;
3288 /* continue until next hole after this expired frame */
3289 cont = 1;
3290 sn = (buf->head_sn + (i + 1)) & 0xfff;
3291 }
3292
3293 if (expired) {
3294 /* SN is set to the last expired frame + 1 */
3295 iwm_release_frames(sc, ni, rxba, buf, sn, &ml);
3296 if_input(&sc->sc_ic.ic_if, &ml);
3297 ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
3298 } else {
3299 /*
3300 * If no frame expired and there are stored frames, index is now
3301 * pointing to the first unexpired frame - modify reorder timeout
3302 * accordingly.
3303 */
3304 timeout_add_usec(&buf->reorder_timer,
3305 RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3306 }
3307
3308 splx(s);
3309}
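For instance, suppose frames with SNs head_sn and head_sn+1 have sat in the buffer beyond the 100 ms RX_REORDER_BUF_TIMEOUT_MQ_USEC while head_sn+2 is a hole: the loop marks both as expired, computes sn = (head_sn + 2) & 0xfff, and iwm_release_frames() flushes them up to net80211 via if_input(); the hole then becomes the new head of the window.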
3310
3311#define IWM_MAX_RX_BA_SESSIONS 16
3312
3313int
3314iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3315 uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3316{
3317 struct ieee80211com *ic = &sc->sc_ic;
3318 struct iwm_add_sta_cmd cmd;
3319 struct iwm_node *in = (void *)ni;
3320 int err, s;
3321 uint32_t status;
3322 size_t cmdsize;
3323 struct iwm_rxba_data *rxba = NULL;
3324 uint8_t baid = 0;
3325
3326 s = splnet();
3327
3328 if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
3329 ieee80211_addba_req_refuse(ic, ni, tid);
3330 splx(s);
3331 return 0;
3332 }
3333
3334 memset(&cmd, 0, sizeof(cmd));
3335
3336 cmd.sta_id = IWM_STATION_ID;
3337 cmd.mac_id_n_color
3338 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3339 cmd.add_modify = IWM_STA_MODE_MODIFY;
3340
3341 if (start) {
3342 cmd.add_immediate_ba_tid = (uint8_t)tid;
3343 cmd.add_immediate_ba_ssn = ssn;
3344 cmd.rx_ba_window = winsize;
3345 } else {
3346 cmd.remove_immediate_ba_tid = (uint8_t)tid;
3347 }
3348 cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
3349 IWM_STA_MODIFY_REMOVE_BA_TID;
3350
3351 status = IWM_ADD_STA_SUCCESS;
3352 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3353 cmdsize = sizeof(cmd);
3354 else
3355 cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3356 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
3357 &status);
3358 if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3359 err = EIO;
3360 if (err) {
3361 if (start)
3362 ieee80211_addba_req_refuse(ic, ni, tid);
3363 splx(s);
3364 return err;
3365 }
3366
3367 if (sc->sc_mqrx_supported) {
3368 /* Deaggregation is done in hardware. */
3369 if (start) {
3370 if (!(status & IWM_ADD_STA_BAID_VALID_MASK)) {
3371 ieee80211_addba_req_refuse(ic, ni, tid);
3372 splx(s);
3373 return EIO;
3374 }
3375 baid = (status & IWM_ADD_STA_BAID_MASK) >>
3376 IWM_ADD_STA_BAID_SHIFT;
3377 if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
3378 baid >= nitems(sc->sc_rxba_data)) {
3379 ieee80211_addba_req_refuse(ic, ni, tid);
3380 splx(s);
3381 return EIO;
3382 }
3383 rxba = &sc->sc_rxba_data[baid];
3384 if (rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3385 ieee80211_addba_req_refuse(ic, ni, tid);
3386 splx(s);
3387 return 0;
3388 }
3389 rxba->sta_id = IWM_STATION_ID;
3390 rxba->tid = tid;
3391 rxba->baid = baid;
3392 rxba->timeout = timeout_val;
3393 getmicrouptime(&rxba->last_rx);
3394 iwm_init_reorder_buffer(&rxba->reorder_buf, ssn,
3395 winsize);
3396 if (timeout_val != 0) {
3397 struct ieee80211_rx_ba *ba;
3398 timeout_add_usec(&rxba->session_timer,
3399 timeout_val);
3400 /* XXX disable net80211's BA timeout handler */
3401 ba = &ni->ni_rx_ba[tid];
3402 ba->ba_timeout_val = 0;
3403 }
3404 } else {
3405 int i;
3406 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3407 rxba = &sc->sc_rxba_data[i];
3408 if (rxba->baid ==
3409 IWM_RX_REORDER_DATA_INVALID_BAID)
3410 continue;
3411 if (rxba->tid != tid)
3412 continue;
3413 iwm_clear_reorder_buffer(sc, rxba);
3414 break;
3415 }
3416 }
3417 }
3418
3419 if (start) {
3420 sc->sc_rx_ba_sessions++;
3421 ieee80211_addba_req_accept(ic, ni, tid);
3422 } else if (sc->sc_rx_ba_sessions > 0)
3423 sc->sc_rx_ba_sessions--;
3424
3425 splx(s);
3426 return 0;
3427}
3428
3429void
3430iwm_mac_ctxt_task(void *arg)
3431{
3432 struct iwm_softc *sc = arg;
3433 struct ieee80211com *ic = &sc->sc_ic;
3434 struct iwm_node *in = (void *)ic->ic_bss;
3435 int err, s = splnet();
3436
3437 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3438 ic->ic_state != IEEE80211_S_RUN) {
3439 refcnt_rele_wake(&sc->task_refs);
3440 splx(s);
3441 return;
3442 }
3443
3444 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
3445 if (err)
3446 printf("%s: failed to update MAC\n", DEVNAME(sc));
3447
3448 iwm_unprotect_session(sc, in);
3449
3450 refcnt_rele_wake(&sc->task_refs);
3451 splx(s);
3452}
3453
3454void
3455iwm_updateprot(struct ieee80211com *ic)
3456{
3457 struct iwm_softc *sc = ic->ic_softc;
3458
3459 if (ic->ic_state == IEEE80211_S_RUN &&
3460 !task_pending(&sc->newstate_task))
3461 iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3462}
3463
3464void
3465iwm_updateslot(struct ieee80211com *ic)
3466{
3467 struct iwm_softc *sc = ic->ic_softc;
3468
3469 if (ic->ic_state == IEEE80211_S_RUN &&
3470 !task_pending(&sc->newstate_task))
3471 iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3472}
3473
3474void
3475iwm_updateedca(struct ieee80211com *ic)
3476{
3477 struct iwm_softc *sc = ic->ic_softc;
3478
3479 if (ic->ic_state == IEEE80211_S_RUN &&
3480 !task_pending(&sc->newstate_task))
3481 iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3482}
3483
3484void
3485iwm_phy_ctxt_task(void *arg)
3486{
3487 struct iwm_softc *sc = arg;
3488 struct ieee80211com *ic = &sc->sc_ic;
3489 struct iwm_node *in = (void *)ic->ic_bss;
3490 struct ieee80211_node *ni = &in->in_ni;
3491 uint8_t chains, sco, vht_chan_width;
3492 int err, s = splnet();
3493
3494 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3495 ic->ic_state != IEEE80211_S_RUN ||
3496 in->in_phyctxt == NULL) {
3497 refcnt_rele_wake(&sc->task_refs);
3498 splx(s);
3499 return;
3500 }
3501
3502 chains = iwm_mimo_enabled(sc) ? 2 : 1;
3503 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3504 IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
3505 ieee80211_node_supports_ht_chan40(ni))
3506 sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3507 else
3508 sco = IEEE80211_HTOP0_SCO_SCN;
3509 if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
3510 IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
3511 ieee80211_node_supports_vht_chan80(ni))
3512 vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
3513 else
3514 vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
3515 if (in->in_phyctxt->sco != sco ||
3516 in->in_phyctxt->vht_chan_width != vht_chan_width) {
3517 err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
3518 in->in_phyctxt->channel, chains, chains, 0, sco,
3519 vht_chan_width);
3520 if (err)
3521 printf("%s: failed to update PHY\n", DEVNAME(sc));
3522 iwm_setrates(in, 0);
3523 }
3524
3525 refcnt_rele_wake(&sc->task_refs);
3526 splx(s);
3527}
3528
3529void
3530iwm_updatechan(struct ieee80211com *ic)
3531{
3532 struct iwm_softc *sc = ic->ic_softc;
3533
3534 if (ic->ic_state == IEEE80211_S_RUN &&
3535 !task_pending(&sc->newstate_task))
3536 iwm_add_task(sc, systq, &sc->phy_ctxt_task);
3537}
3538
3539void
3540iwm_updatedtim(struct ieee80211com *ic)
3541{
3542 struct iwm_softc *sc = ic->ic_softc;
3543
3544 if (ic->ic_state == IEEE80211_S_RUN &&
3545 !task_pending(&sc->newstate_task))
3546 iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3547}
3548
3549int
3550iwm_sta_tx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3551 uint16_t ssn, uint16_t winsize, int start)
3552{
3553 struct iwm_add_sta_cmd cmd;
3554 struct ieee80211com *ic = &sc->sc_ic;
3555 struct iwm_node *in = (void *)ni;
3556 int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3557 struct iwm_tx_ring *ring;
3558 enum ieee80211_edca_ac ac;
3559 int fifo;
3560 uint32_t status;
3561 int err;
3562 size_t cmdsize;
3563
3564 /* Ensure we can map this TID to an aggregation queue. */
3565 if (tid >= IWM_MAX_TID_COUNT || qid > IWM_LAST_AGG_TX_QUEUE)
3566 return ENOSPC;
3567
3568 if (start) {
3569 if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3570 return 0;
3571 } else {
3572 if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3573 return 0;
3574 }
3575
3576 ring = &sc->txq[qid];
3577 ac = iwm_tid_to_ac[tid];
3578 fifo = iwm_ac_to_tx_fifo[ac];
3579
3580 memset(&cmd, 0, sizeof(cmd));
3581
3582 cmd.sta_id = IWM_STATION_ID;
3583 cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
3584 in->in_color));
3585 cmd.add_modify = IWM_STA_MODE_MODIFY;
3586
3587 if (start) {
3588 /* Enable Tx aggregation for this queue. */
3589 in->tid_disable_ampdu &= ~(1 << tid);
3590 in->tfd_queue_msk |= (1 << qid);
3591 } else {
3592 in->tid_disable_ampdu |= (1 << tid);
3593 /*
3594 * Queue remains enabled in the TFD queue mask
3595 * until we leave RUN state.
3596 */
3597 err = iwm_flush_sta(sc, in);
3598 if (err)
3599 return err;
3600 }
3601
3602 cmd.tfd_queue_msk |= htole32(in->tfd_queue_msk);
3603 cmd.tid_disable_tx = htole16(in->tid_disable_ampdu);
3604 cmd.modify_mask = (IWM_STA_MODIFY_QUEUES |
3605 IWM_STA_MODIFY_TID_DISABLE_TX);
3606
3607 if (start && (sc->qenablemsk & (1 << qid)) == 0) {
3608 if (!iwm_nic_lock(sc)) {
3609 if (start)
3610 ieee80211_addba_resp_refuse(ic, ni, tid,
3611 IEEE80211_STATUS_UNSPECIFIED);
3612 return EBUSY;
3613 }
3614 err = iwm_enable_txq(sc, IWM_STATION_ID, qid, fifo, 1, tid,
3615 ssn);
3616 iwm_nic_unlock(sc);
3617 if (err) {
3618 printf("%s: could not enable Tx queue %d (error %d)\n",
3619 DEVNAME(sc), qid, err);
3620 if (start)
3621 ieee80211_addba_resp_refuse(ic, ni, tid,
3622 IEEE80211_STATUS_UNSPECIFIED);
3623 return err;
3624 }
3625 /*
3626 * If iwm_enable_txq() employed the SCD hardware bug
3627 * workaround we must skip the frame with seqnum SSN.
3628 */
3629 if (ring->cur != IWM_AGG_SSN_TO_TXQ_IDX(ssn)) {
3630 ssn = (ssn + 1) & 0xfff;
3631 KASSERT(ring->cur == IWM_AGG_SSN_TO_TXQ_IDX(ssn));
3632 ieee80211_output_ba_move_window(ic, ni, tid, ssn);
3633 ni->ni_qos_txseqs[tid] = ssn;
3634 }
3635 }
3636
3637 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3638 cmdsize = sizeof(cmd);
3639 else
3640 cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3641
3642 status = 0;
3643 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd, &status);
3644 if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3645 err = EIO;
3646 if (err) {
3647 printf("%s: could not update sta (error %d)\n",
3648 DEVNAME(sc), err);
3649 if (start)
3650 ieee80211_addba_resp_refuse(ic, ni, tid,
3651 IEEE80211_STATUS_UNSPECIFIED);
3652 return err;
3653 }
3654
3655 if (start) {
3656 sc->tx_ba_queue_mask |= (1 << qid);
3657 ieee80211_addba_resp_accept(ic, ni, tid);
3658 } else {
3659 sc->tx_ba_queue_mask &= ~(1 << qid);
3660
3661 /*
3662 * Clear pending frames but keep the queue enabled.
3663 * Firmware panics if we disable the queue here.
3664 */
3665 iwm_txq_advance(sc, ring, ring->cur);
3666 iwm_clear_oactive(sc, ring);
3667 }
3668
3669 return 0;
3670}
3671
3672void
3673iwm_ba_task(void *arg)
3674{
3675 struct iwm_softc *sc = arg;
3676 struct ieee80211com *ic = &sc->sc_ic;
3677 struct ieee80211_node *ni = ic->ic_bss;
3678 int s = splnet();
3679 int tid, err = 0;
3680
3681 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3682 ic->ic_state != IEEE80211_S_RUN) {
3683 refcnt_rele_wake(&sc->task_refs);
3684 splx(s);
3685 return;
3686 }
3687
3688 for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3689 if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3690 break;
3691 if (sc->ba_rx.start_tidmask & (1 << tid)) {
3692 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3693 err = iwm_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3694 ba->ba_winsize, ba->ba_timeout_val, 1);
3695 sc->ba_rx.start_tidmask &= ~(1 << tid);
3696 } else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3697 err = iwm_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3698 sc->ba_rx.stop_tidmask &= ~(1 << tid);
3699 }
3700 }
3701
3702 for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3703 if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3704 break;
3705 if (sc->ba_tx.start_tidmask & (1 << tid)) {
3706 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3707 err = iwm_sta_tx_agg(sc, ni, tid, ba->ba_winstart,
3708 ba->ba_winsize, 1);
3709 sc->ba_tx.start_tidmask &= ~(1 << tid);
3710 } else if (sc->ba_tx.stop_tidmask & (1 << tid)) {
3711 err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
3712 sc->ba_tx.stop_tidmask &= ~(1 << tid);
3713 }
3714 }
3715
3716 /*
3717 * We "recover" from failure to start or stop a BA session
3718 * by resetting the device.
3719 */
3720 if (err && (sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
3721 task_add(systq, &sc->init_task);
3722
3723 refcnt_rele_wake(&sc->task_refs);
3724 splx(s);
3725}
3726
3727/*
3728 * This function is called by upper layer when an ADDBA request is received
3729 * from another STA and before the ADDBA response is sent.
3730 */
3731int
3732iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3733 uint8_t tid)
3734{
3735 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3736
3737 if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS ||
3738 tid > IWM_MAX_TID_COUNT)
3739 return ENOSPC;
3740
3741 if (sc->ba_rx.start_tidmask & (1 << tid))
3742 return EBUSY;
3743
3744 sc->ba_rx.start_tidmask |= (1 << tid);
3745 iwm_add_task(sc, systq, &sc->ba_task);
3746
3747 return EBUSY;
3748}
3749
3750/*
3751 * This function is called by upper layer on teardown of an HT-immediate
3752 * Block Ack agreement (eg. upon receipt of a DELBA frame).
3753 */
3754void
3755iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3756 uint8_t tid)
3757{
3758 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3759
3760 if (tid > IWM_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3761 return;
3762
3763 sc->ba_rx.stop_tidmask |= (1 << tid);
3764 iwm_add_task(sc, systq, &sc->ba_task);
3765}
3766
3767int
3768iwm_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3769 uint8_t tid)
3770{
3771 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3772 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3773 int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3774
3775 /* We only implement Tx aggregation with DQA-capable firmware. */
3776 if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
3777 return ENOTSUP;
3778
3779 /* Ensure we can map this TID to an aggregation queue. */
3780 if (tid >= IWM_MAX_TID_COUNT)
3781 return EINVAL;
3782
3783 /* We only support a fixed Tx aggregation window size, for now. */
3784 if (ba->ba_winsize != IWM_FRAME_LIMIT)
3785 return ENOTSUP;
3786
3787 /* Is firmware already using Tx aggregation on this queue? */
3788 if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3789 return ENOSPC;
3790
3791 /* Are we already processing an ADDBA request? */
3792 if (sc->ba_tx.start_tidmask & (1 << tid))
3793 return EBUSY;
3794
3795 sc->ba_tx.start_tidmask |= (1 << tid);
3796 iwm_add_task(sc, systq, &sc->ba_task);
3797
3798 return EBUSY;
3799}
3800
3801void
3802iwm_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3803 uint8_t tid)
3804{
3805 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3806 int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3807
3808 if (tid > IWM_MAX_TID_COUNT || sc->ba_tx.stop_tidmask & (1 << tid))
3809 return;
3810
3811 /* Is firmware currently using Tx aggregation on this queue? */
3812 if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3813 return;
3814
3815 sc->ba_tx.stop_tidmask |= (1 << tid);
3816 iwm_add_task(sc, systq, &sc->ba_task);
3817}
3818
3819void
3820iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3821 const uint16_t *mac_override, const uint16_t *nvm_hw)
3822{
3823 const uint8_t *hw_addr;
3824
3825 if (mac_override) {
3826 static const uint8_t reserved_mac[] = {
3827 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3828 };
3829
3830 hw_addr = (const uint8_t *)(mac_override +
3831 IWM_MAC_ADDRESS_OVERRIDE_8000);
3832
3833 /*
3834 * Store the MAC address from MAO section.
3835 * No byte swapping is required in MAO section
3836 */
3837 memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3838
3839 /*
3840 * Force the use of the OTP MAC address in case of reserved MAC
3841 * address in the NVM, or if address is given but invalid.
3842 */
3843 if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3844 (memcmp(etherbroadcastaddr, data->hw_addr,
3845 sizeof(etherbroadcastaddr)) != 0) &&
3846 (memcmp(etheranyaddr, data->hw_addr,
3847 sizeof(etheranyaddr)) != 0) &&
3848 !ETHER_IS_MULTICAST(data->hw_addr))
3849 return;
3850 }
3851
3852 if (nvm_hw) {
3853 /* Read the mac address from WFMP registers. */
3854 uint32_t mac_addr0, mac_addr1;
3855
3856 if (!iwm_nic_lock(sc))
3857 goto out;
3858 mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3859 mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3860 iwm_nic_unlock(sc);
3861
3862 hw_addr = (const uint8_t *)&mac_addr0;
3863 data->hw_addr[0] = hw_addr[3];
3864 data->hw_addr[1] = hw_addr[2];
3865 data->hw_addr[2] = hw_addr[1];
3866 data->hw_addr[3] = hw_addr[0];
3867
3868 hw_addr = (const uint8_t *)&mac_addr1;
3869 data->hw_addr[4] = hw_addr[1];
3870 data->hw_addr[5] = hw_addr[0];
3871
3872 return;
3873 }
3874out:
3875 printf("%s: mac address not found\n", DEVNAME(sc));
3876 memset(data->hw_addr, 0, sizeof(data->hw_addr));
3877}
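A sketch of the WFMP byte order handled above (illustrative values): for MAC address aa:bb:cc:dd:ee:ff the registers would read

    /* IWM_WFMP_MAC_ADDR_0 == 0xaabbccdd, IWM_WFMP_MAC_ADDR_1 == 0x0000eeff */

so viewing each little-endian word as a byte array and copying the bytes in reverse, as done above, reassembles aa:bb:cc:dd:ee:ff.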
3878
3879int
3880iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3881 const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3882 const uint16_t *mac_override, const uint16_t *phy_sku,
3883 const uint16_t *regulatory, int n_regulatory)
3884{
3885 struct iwm_nvm_data *data = &sc->sc_nvm;
3886 uint8_t hw_addr[ETHER_ADDR_LEN];
3887 uint32_t sku;
3888 uint16_t lar_config;
3889
3890 data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3891
3892 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3893 uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3894 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3895 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3896 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3897 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3898
3899 sku = le16_to_cpup(nvm_sw + IWM_SKU);
3900 } else {
3901 uint32_t radio_cfg =
3902 le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
3903 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3904 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3905 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3906 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3907 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3908 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3909
3910 sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000));
3911 }
3912
3913 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3914 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3915 data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3916 data->sku_cap_11ac_enable = sku & IWM_NVM_SKU_CAP_11AC_ENABLE;
3917 data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3918
3919 if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3920 uint16_t lar_offset = data->nvm_version < 0xE39 ?
3921 IWM_NVM_LAR_OFFSET_8000_OLD :
3922 IWM_NVM_LAR_OFFSET_8000;
3923
3924 lar_config = le16_to_cpup(regulatory + lar_offset);
3925 data->lar_enabled = !!(lar_config &
3926 IWM_NVM_LAR_ENABLED_8000);
3927 data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS_8000);
3928 } else
3929 data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
3930
3931
3932 /* The byte order is little endian 16 bit, meaning 214365 */
3933 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3934 memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3935 data->hw_addr[0] = hw_addr[1];
3936 data->hw_addr[1] = hw_addr[0];
3937 data->hw_addr[2] = hw_addr[3];
3938 data->hw_addr[3] = hw_addr[2];
3939 data->hw_addr[4] = hw_addr[5];
3940 data->hw_addr[5] = hw_addr[4];
3941 } else
3942 iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
3943
3944 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3945 if (sc->nvm_type == IWM_NVM_SDP) {
3946 iwm_init_channel_map(sc, regulatory, iwm_nvm_channels,
3947 MIN(n_regulatory, nitems(iwm_nvm_channels)));
3948 } else {
3949 iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3950 iwm_nvm_channels, nitems(iwm_nvm_channels));
3951 }
3952 } else
3953 iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3954 iwm_nvm_channels_8000,
3955 MIN(n_regulatory, nitems(iwm_nvm_channels_8000)));
3956
3957 data->calib_version = 255; /* TODO:
3958 this value will prevent some checks from
3959 failing, we need to check if this
3960 field is still needed, and if it does,
3961 where is it in the NVM */
3962
3963 return 0;
3964}
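The "214365" byte-order comment above, concretely: a MAC of 11:22:33:44:55:66 is stored in NVM as the little-endian 16-bit words 0x2211, 0x4433, 0x6655, so a plain byte copy reads 22:11:44:33:66:55 and the pairwise swap performed for 7000-family devices restores 11:22:33:44:55:66.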
3965
3966int
3967iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3968{
3969 const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3970 const uint16_t *regulatory = NULL;
3971 int n_regulatory = 0;
3972
3973 /* Checking for required sections */
3974 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3975 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3976 !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3977 return ENOENT;
3978 }
3979
3980 hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3981
3982 if (sc->nvm_type == IWM_NVM_SDP) {
3983 if (!sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data)
3984 return ENOENT;
3985 regulatory = (const uint16_t *)
3986 sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data;
3987 n_regulatory =
3988 sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].length;
3989 }
3990 } else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3991 /* SW and REGULATORY sections are mandatory */
3992 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3993 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3994 return ENOENT;
3995 }
3996 /* MAC_OVERRIDE or at least HW section must exist */
3997 if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3998 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3999 return ENOENT;
4000 }
4001
4002 /* PHY_SKU section is mandatory in B0 */
4003 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
4004 return ENOENT;
4005 }
4006
4007 regulatory = (const uint16_t *)
4008 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
4009 n_regulatory = sections[IWM_NVM_SECTION_TYPE_REGULATORY].length;
4010 hw = (const uint16_t *)
4011 sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
4012 mac_override =
4013 (const uint16_t *)
4014 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
4015 phy_sku = (const uint16_t *)
4016 sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
4017 } else {
4018 panic("unknown device family %d", sc->sc_device_family);
4019 }
4020
4021 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
4022 calib = (const uint16_t *)
4023 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
4024
4025 /* XXX should pass in the length of every section */
4026 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
4027 phy_sku, regulatory, n_regulatory);
4028}
4029
4030int
4031iwm_nvm_init(struct iwm_softc *sc)
4032{
4033 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
4034 int i, section, err;
4035 uint16_t len;
4036 uint8_t *buf;
4037 const size_t bufsz = sc->sc_nvm_max_section_size;
4038
4039 memset(nvm_sections, 0, sizeof(nvm_sections));
4040
4041 buf = malloc(bufsz, M_DEVBUF, M_WAIT);
4042 if (buf == NULL)
4043 return ENOMEM;
4044
4045 for (i = 0; i < nitems(iwm_nvm_to_read); i++) {
4046 section = iwm_nvm_to_read[i];
4047 KASSERT(section <= nitems(nvm_sections));
4048
4049 err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
4050 if (err) {
4051 err = 0;
4052 continue;
4053 }
4054 nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
4055 if (nvm_sections[section].data == NULL) {
4056 err = ENOMEM;
4057 break;
4058 }
4059 memcpy(nvm_sections[section].data, buf, len);
4060 nvm_sections[section].length = len;
4061 }
4062 free(buf, M_DEVBUF, bufsz);
4063 if (err == 0)
4064 err = iwm_parse_nvm_sections(sc, nvm_sections);
4065
4066 for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
4067 if (nvm_sections[i].data != NULL)
4068 free(nvm_sections[i].data, M_DEVBUF,
4069 nvm_sections[i].length);
4070 }
4071
4072 return err;
4073}
4074
4075int
4076iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
4077 const uint8_t *section, uint32_t byte_cnt)
4078{
4079 int err = EINVAL;
4080 uint32_t chunk_sz, offset;
4081
4082 chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
4083
4084 for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
4085 uint32_t addr, len;
4086 const uint8_t *data;
4087
4088 addr = dst_addr + offset;
4089 len = MIN(chunk_sz, byte_cnt - offset);
4090 data = section + offset;
4091
4092 err = iwm_firmware_load_chunk(sc, addr, data, len);
4093 if (err)
4094 break;
4095 }
4096
4097 return err;
4098}
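
Worked example of the chunking loop above, taking IWM_FH_MEM_TB_MAX_LENGTH to be 0x20000 as the inline expansion shows: a 0x50000-byte section goes to the device in three calls to iwm_firmware_load_chunk().

/*
 * byte_cnt = 0x50000, chunk_sz = MIN(0x20000, 0x50000) = 0x20000
 *   offset 0x00000: len = MIN(0x20000, 0x50000) = 0x20000
 *   offset 0x20000: len = MIN(0x20000, 0x30000) = 0x20000
 *   offset 0x40000: len = MIN(0x20000, 0x10000) = 0x10000
 */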
4099
4100int
4101iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
4102 const uint8_t *chunk, uint32_t byte_cnt)
4103{
4104 struct iwm_dma_info *dma = &sc->fw_dma;
4105 int err;
4106
4107 /* Copy firmware chunk into pre-allocated DMA-safe memory. */
4108 memcpy(dma->vaddr, chunk, byte_cnt);
4109 bus_dmamap_sync(sc->sc_dmat,
4110 dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
4111
4112 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
4113 dst_addr <= IWM_FW_MEM_EXTENDED_END) {
4114 err = iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
4115 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
4116 if (err)
4117 return err;
4118 }
4119
4120 sc->sc_fw_chunk_done = 0;
4121
4122 if (!iwm_nic_lock(sc))
4123 return EBUSY;
4124
4125 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4126 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
4127 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
4128 dst_addr);
4129 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
4130 dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
4131 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
4132 (iwm_get_dma_hi_addr(dma->paddr)
4133 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
4134 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
4135 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
4136 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
4137 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
4138 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4139 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
4140 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
4141 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
4142
4143 iwm_nic_unlock(sc);
4144
4145 /* Wait for this segment to load. */
4146 err = 0;
4147 while (!sc->sc_fw_chunk_done) {
4148 err = tsleep_nsec(&sc->sc_fw, 0, "iwmfw", SEC_TO_NSEC(1));
4149 if (err)
4150 break;
4151 }
4152
4153 if (!sc->sc_fw_chunk_done)
4154 printf("%s: fw chunk addr 0x%x len %d failed to load\n",
4155 DEVNAME(sc), dst_addr, byte_cnt);
4156
4157 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
4158 dst_addr <= IWM_FW_MEM_EXTENDED_END) {
4159 int err2 = iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
4160 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
4161 if (!err)
4162 err = err2;
4163 }
4164
4165 return err;
4166}
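
The function above follows a common bounce-buffer pattern: copy into pre-allocated DMA-safe memory, sync for the device, program the DMA engine via register writes, then sleep until the interrupt handler reports completion (sc_fw_chunk_done is set and the sleep channel woken elsewhere in the driver). A condensed sketch of the pattern, with hypothetical dev_-prefixed names standing in for the IWM_WRITE doorbell sequence and completion flag:

/* Sketch only; struct dev_softc and dev_kick_dma() are hypothetical. */
struct dev_softc {
	bus_dma_tag_t	dmat;
	bus_dmamap_t	dma_map;
	void		*dma_vaddr;
	volatile int	done;		/* set by the interrupt handler */
};

void dev_kick_dma(struct dev_softc *, size_t);	/* program + start engine */

int
dev_dma_load_and_wait(struct dev_softc *dev, const void *buf, size_t len)
{
	int err = 0;

	memcpy(dev->dma_vaddr, buf, len);	/* copy into DMA-safe memory */
	bus_dmamap_sync(dev->dmat, dev->dma_map, 0, len,
	    BUS_DMASYNC_PREWRITE);		/* flush CPU writes for device */
	dev->done = 0;
	dev_kick_dma(dev, len);			/* ring the doorbell */
	while (!dev->done) {			/* interrupt handler wakes us */
		err = tsleep_nsec(&dev->done, 0, "dmawt", SEC_TO_NSEC(1));
		if (err)
			break;
	}
	return err;
}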
4167
4168int
4169iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4170{
4171 struct iwm_fw_sects *fws;
4172 int err, i;
4173 void *data;
4174 uint32_t dlen;
4175 uint32_t offset;
4176
4177 fws = &sc->sc_fw.fw_sects[ucode_type];
4178 for (i = 0; i < fws->fw_count; i++) {
4179 data = fws->fw_sect[i].fws_data;
4180 dlen = fws->fw_sect[i].fws_len;
4181 offset = fws->fw_sect[i].fws_devoff;
4182 if (dlen > sc->sc_fwdmasegsz) {
4183 err = EFBIG;
4184 } else
4185 err = iwm_firmware_load_sect(sc, offset, data, dlen);
4186 if (err) {
4187 printf("%s: could not load firmware chunk %u of %u\n",
4188 DEVNAME(sc), i, fws->fw_count);
4189 return err;
4190 }
4191 }
4192
4193 iwm_enable_interrupts(sc);
4194
4195 IWM_WRITE(sc, IWM_CSR_RESET, 0);
4196
4197 return 0;
4198}
4199
4200int
4201iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
4202 int cpu, int *first_ucode_section)
4203{
4204 int shift_param;
4205 int i, err = 0, sec_num = 0x1;
4206 uint32_t val, last_read_idx = 0;
4207 void *data;
4208 uint32_t dlen;
4209 uint32_t offset;
4210
4211 if (cpu == 1) {
4212 shift_param = 0;
4213 *first_ucode_section = 0;
4214 } else {
4215 shift_param = 16;
4216 (*first_ucode_section)++;
4217 }
4218
4219 for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
4220 last_read_idx = i;
4221 data = fws->fw_sect[i].fws_data;
4222 dlen = fws->fw_sect[i].fws_len;
4223 offset = fws->fw_sect[i].fws_devoff;
4224
4225 /*
4226 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
4227 * CPU1 to CPU2.
4228 * PAGING_SEPARATOR_SECTION delimiter - separate between
4229 * CPU2 non paged to CPU2 paging sec.
4230 */
4231 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
4232 offset == IWM_PAGING_SEPARATOR_SECTION)
4233 break;
4234
4235 if (dlen > sc->sc_fwdmasegsz) {
4236 err = EFBIG;
4237 } else
4238 err = iwm_firmware_load_sect(sc, offset, data, dlen);
4239 if (err) {
4240 printf("%s: could not load firmware chunk %d "
4241 "(error %d)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), i, err);
4242 return err;
4243 }
4244
4245 /* Notify the ucode of the loaded section number and status */
4246 if (iwm_nic_lock(sc)) {
4247 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
4248 val = val | (sec_num << shift_param);
4249 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
4250 sec_num = (sec_num << 1) | 0x1;
4251 iwm_nic_unlock(sc);
4252 } else {
4253 err = EBUSY;
4254 printf("%s: could not load firmware chunk %d "
4255 "(error %d)\n", DEVNAME(sc), i, err);
4256 return err;
4257 }
4258 }
4259
4260 *first_ucode_section = last_read_idx;
4261
4262 if (iwm_nic_lock(sc)) {
4263 if (cpu == 1)
4264 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
4265 else
4266 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
4267 iwm_nic_unlock(sc);
4268 } else {
4269 err = EBUSY;
4270 printf("%s: could not finalize firmware loading (error %d)\n",
4271 DEVNAME(sc), err);
4272 return err;
4273 }
4274
4275 return 0;
4276}
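
The sec_num arithmetic above builds a contiguous ones-mask in IWM_FH_UCODE_LOAD_STATUS, one bit per loaded section, shifted into the upper half-word for CPU2:

/*
 * sec_num starts at 0x1 and evolves as (sec_num << 1) | 0x1:
 *   after section 0: register |= 0x0001, sec_num -> 0x3
 *   after section 1: register |= 0x0003, sec_num -> 0x7
 *   after section 2: register |= 0x0007, sec_num -> 0xf
 * For cpu == 2, shift_param == 16, so the same mask lands in the
 * upper half: 0x00010000, 0x00030000, ...
 */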
4277
4278int
4279iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4280{
4281 struct iwm_fw_sects *fws;
4282 int err = 0;
4283 int first_ucode_section;
4284
4285 fws = &sc->sc_fw.fw_sects[ucode_type];
4286
4287 /* configure the ucode to be ready to get the secured image */
4288 /* release CPU reset */
4289 if (iwm_nic_lock(sc)) {
4290 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
4291 IWM_RELEASE_CPU_RESET_BIT);
4292 iwm_nic_unlock(sc);
4293 }
4294
4295 /* load to FW the binary Secured sections of CPU1 */
4296 err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
4297 if (err)
4298 return err;
4299
4300 /* load to FW the binary sections of CPU2 */
4301 err = iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
4302 if (err)
4303 return err;
4304
4305 iwm_enable_interrupts(sc);
4306 return 0;
4307}
4308
4309int
4310iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4311{
4312 int err;
4313
4314 splassert(IPL_NET);
4315
4316 sc->sc_uc.uc_intr = 0;
4317 sc->sc_uc.uc_ok = 0;
4318
4319 if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
4320 err = iwm_load_firmware_8000(sc, ucode_type);
4321 else
4322 err = iwm_load_firmware_7000(sc, ucode_type);
4323
4324 if (err)
4325 return err;
4326
4327 /* wait for the firmware to load */
4328 err = tsleep_nsec(&sc->sc_uc, 0, "iwmuc", SEC_TO_NSEC(1));
4329 if (err || !sc->sc_uc.uc_ok)
4330 printf("%s: could not load firmware\n", DEVNAME(sc));
4331
4332 return err;
4333}
4334
4335int
4336iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4337{
4338 int err;
4339
4340 IWM_WRITE(sc, IWM_CSR_INT, ~0);
4341
4342 err = iwm_nic_init(sc);
4343 if (err) {
4344 printf("%s: unable to init nic\n", DEVNAME(sc));
4345 return err;
4346 }
4347
4348 /* make sure rfkill handshake bits are cleared */
4349 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4350 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
4351 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4352
4353 /* clear (again), then enable firmware load interrupt */
4354 IWM_WRITE(sc, IWM_CSR_INT, ~0);
4355 iwm_enable_fwload_interrupt(sc);
4356
4357 /* really make sure rfkill handshake bits are cleared */
4358 /* maybe we should write a few times more? just to make sure */
4359 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4360 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4361
4362 return iwm_load_firmware(sc, ucode_type);
4363}
4364
4365int
4366iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
4367{
4368 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
4369 .valid = htole32(valid_tx_ant),
4370 };
4371
4372 return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
4373 0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4374}
4375
4376int
4377iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
4378{
4379 struct iwm_phy_cfg_cmd phy_cfg_cmd;
4380 enum iwm_ucode_type ucode_type = sc->sc_uc_current;
4381
4382 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config |
4383 sc->sc_extra_phy_config);
4384 phy_cfg_cmd.calib_control.event_trigger =
4385 sc->sc_default_calib[ucode_type].event_trigger;
4386 phy_cfg_cmd.calib_control.flow_trigger =
4387 sc->sc_default_calib[ucode_type].flow_trigger;
4388
4389 return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
4390 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4391}
4392
4393int
4394iwm_send_dqa_cmd(struct iwm_softc *sc)
4395{
4396 struct iwm_dqa_enable_cmd dqa_cmd = {
4397 .cmd_queue = htole32(IWM_DQA_CMD_QUEUE),
4398 };
4399 uint32_t cmd_id;
4400
4401 cmd_id = iwm_cmd_id(IWM_DQA_ENABLE_CMD, IWM_DATA_PATH_GROUP, 0);
4402 return iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4403}
4404
4405int
4406iwm_load_ucode_wait_alive(struct iwm_softc *sc,
4407 enum iwm_ucode_type ucode_type)
4408{
4409 enum iwm_ucode_type old_type = sc->sc_uc_current;
4410 struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type];
4411 int err;
4412
4413 err = iwm_read_firmware(sc);
4414 if (err)
4415 return err;
4416
4417 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
4418 sc->cmdqid = IWM_DQA_CMD_QUEUE;
4419 else
4420 sc->cmdqid = IWM_CMD_QUEUE;
4421
4422 sc->sc_uc_current = ucode_type;
4423 err = iwm_start_fw(sc, ucode_type);
4424 if (err) {
4425 sc->sc_uc_current = old_type;
4426 return err;
4427 }
4428
4429 err = iwm_post_alive(sc);
4430 if (err)
4431 return err;
4432
4433 /*
4434 * configure and operate fw paging mechanism.
4435 * driver configures the paging flow only once, CPU2 paging image
4436 * included in the IWM_UCODE_INIT image.
4437 */
4438 if (fw->paging_mem_size) {
4439 err = iwm_save_fw_paging(sc, fw);
4440 if (err) {
4441 printf("%s: failed to save the FW paging image\n",
4442 DEVNAME(sc));
4443 return err;
4444 }
4445
4446 err = iwm_send_paging_cmd(sc, fw);
4447 if (err) {
4448 printf("%s: failed to send the paging cmd\n",
4449 DEVNAME(sc));
4450 iwm_free_fw_paging(sc);
4451 return err;
4452 }
4453 }
4454
4455 return 0;
4456}
4457
4458int
4459iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
4460{
4461 const int wait_flags = (IWM_INIT_COMPLETE | IWM_CALIB_COMPLETE);
4462 int err, s;
4463
4464 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
4465 printf("%s: radio is disabled by hardware switch\n",
4466 DEVNAME(sc));
4467 return EPERM;
4468 }
4469
4470 s = splnet();
4471 sc->sc_init_complete = 0;
4472 err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
4473 if (err) {
4474 printf("%s: failed to load init firmware\n", DEVNAME(sc));
4475 splx(s);
4476 return err;
4477 }
4478
4479 if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
4480 err = iwm_send_bt_init_conf(sc);
4481 if (err) {
4482 printf("%s: could not init bt coex (error %d)\n",
4483 DEVNAME(sc), err);
4484 splx(s);
4485 return err;
4486 }
4487 }
4488
4489 if (justnvm) {
4490 err = iwm_nvm_init(sc);
4491 if (err) {
4492 printf("%s: failed to read nvm\n", DEVNAME(sc));
4493 splx(s);
4494 return err;
4495 }
4496
4497 if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
4498 IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
4499 sc->sc_nvm.hw_addr);
4500
4501 splx(s);
4502 return 0;
4503 }
4504
4505 err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
4506 if (err) {
4507 splx(s);
4508 return err;
4509 }
4510
4511 /* Send TX valid antennas before triggering calibrations */
4512 err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
4513 if (err) {
4514 splx(s);
4515 return err;
4516 }
4517
4518 /*
4519 * Send phy configurations command to init uCode
4520 * to start the 16.0 uCode init image internal calibrations.
4521 */
4522 err = iwm_send_phy_cfg_cmd(sc);
4523 if (err) {
4524 splx(s);
4525 return err;
4526 }
4527
4528 /*
4529 * Nothing to do but wait for the init complete and phy DB
4530 * notifications from the firmware.
4531 */
4532 while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4533 err = tsleep_nsec(&sc->sc_init_complete, 0, "iwminit",
4534 SEC_TO_NSEC(2));
4535 if (err)
4536 break;
4537 }
4538
4539 splx(s);
4540 return err;
4541}
4542
4543int
4544iwm_config_ltr(struct iwm_softc *sc)
4545{
4546 struct iwm_ltr_config_cmd cmd = {
4547 .flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
4548 };
4549
4550 if (!sc->sc_ltr_enabled)
4551 return 0;
4552
4553 return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4554}
4555
4556int
4557iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
4558{
4559 struct iwm_rx_ring *ring = &sc->rxq;
4560 struct iwm_rx_data *data = &ring->data[idx];
4561 struct mbuf *m;
4562 int err;
4563 int fatal = 0;
4564
4565 m = m_gethdr(M_DONTWAIT, MT_DATA);
4566 if (m == NULL)
4567 return ENOBUFS;
4568
4569 if (size <= MCLBYTES) {
4570 MCLGET(m, M_DONTWAIT);
4571 } else {
4572 MCLGETL(m, M_DONTWAIT, IWM_RBUF_SIZE);
4573 }
4574 if ((m->m_flags & M_EXT) == 0) {
4575 m_freem(m);
4576 return ENOBUFS;
4577 }
4578
4579 if (data->m != NULL) {
4580 bus_dmamap_unload(sc->sc_dmat, data->map);
4581 fatal = 1;
4582 }
4583
4584 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4585 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4586 BUS_DMA_READ|BUS_DMA_NOWAIT);
4587 if (err) {
4588 /* XXX */
4589 if (fatal)
4590 panic("iwm: could not load RX mbuf");
4591 m_freem(m);
4592 return err;
4593 }
4594 data->m = m;
4595 bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
4596
4597 /* Update RX descriptor. */
4598 if (sc->sc_mqrx_supported) {
4599 ((uint64_t *)ring->desc)[idx] =
4600 htole64(data->map->dm_segs[0].ds_addr);
4601 bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4602 idx * sizeof(uint64_t), sizeof(uint64_t),
4603 BUS_DMASYNC_PREWRITE);
4604 } else {
4605 ((uint32_t *)ring->desc)[idx] =
4606 htole32(data->map->dm_segs[0].ds_addr >> 8);
4607 bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4608 idx * sizeof(uint32_t), sizeof(uint32_t),
4609 BUS_DMASYNC_PREWRITE);
4610 }
4611
4612 return 0;
4613}
4614
4615/*
4616 * RSSI values are reported by the FW as positive values - need to negate
4617 * to obtain their dBm. Account for missing antennas by replacing 0
4618 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
4619 */
4620int
4621iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
4622{
4623 int energy_a, energy_b, energy_c, max_energy;
4624 uint32_t val;
4625
4626 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
4627 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
4628 IWM_RX_INFO_ENERGY_ANT_A_POS;
4629 energy_a = energy_a ? -energy_a : -256;
4630 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
4631 IWM_RX_INFO_ENERGY_ANT_B_POS;
4632 energy_b = energy_b ? -energy_b : -256;
4633 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
4634 IWM_RX_INFO_ENERGY_ANT_C_POS;
4635 energy_c = energy_c ? -energy_c : -256;
4636 max_energy = MAX(energy_a, energy_b);
4637 max_energy = MAX(max_energy, energy_c);
4638
4639 return max_energy;
4640}
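
For example, with val == 0x00232a1f the three per-antenna fields decode as below; the strongest antenna wins:

/*
 * energy_a = 0x1f = 31 -> -31 dBm
 * energy_b = 0x2a = 42 -> -42 dBm
 * energy_c = 0x23 = 35 -> -35 dBm
 * max_energy = MAX(MAX(-31, -42), -35) = -31 dBm
 * A zero field means a missing antenna and becomes -256.
 */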
4641
4642int
4643iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
4644 struct iwm_rx_mpdu_desc *desc)
4645{
4646 int energy_a, energy_b;
4647
4648 energy_a = desc->v1.energy_a;
4649 energy_b = desc->v1.energy_b;
4650 energy_a = energy_a ? -energy_a : -256;
4651 energy_b = energy_b ? -energy_b : -256;
4652 return MAX(energy_a, energy_b);
4653}
4654
4655void
4656iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4657 struct iwm_rx_data *data)
4658{
4659 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
4660
4661 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4662 sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
4663
4664 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
4665}
4666
4667/*
4668 * Retrieve the average noise (in dBm) among receivers.
4669 */
4670int
4671iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
4672{
4673 int i, total, nbant, noise;
4674
4675 total = nbant = noise = 0;
4676 for (i = 0; i < 3; i++) {
4677 noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
4678 if (noise) {
4679 total += noise;
4680 nbant++;
4681 }
4682 }
4683
4684 /* There should be at least one antenna but check anyway. */
4685 return (nbant == 0) ? -127 : (total / nbant) - 107;
4686}
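
A quick worked example of the averaging above:

/*
 * beacon_silence_rssi = { 55, 60, 0 }:
 * the zero entry is skipped, total = 115, nbant = 2,
 * noise = 115 / 2 - 107 = -50 dBm.
 * With no usable antennas the function falls back to -127 dBm.
 */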
4687
4688int
4689iwm_ccmp_decap(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4690 struct ieee80211_rxinfo *rxi)
4691{
4692 struct ieee80211com *ic = &sc->sc_ic;
4693 struct ieee80211_key *k = &ni->ni_pairwise_key;
4694 struct ieee80211_frame *wh;
4695 uint64_t pn, *prsc;
4696 uint8_t *ivp;
4697 uint8_t tid;
4698 int hdrlen, hasqos;
4699
4700 wh = mtod(m, struct ieee80211_frame *);
4701 hdrlen = ieee80211_get_hdrlen(wh);
4702 ivp = (uint8_t *)wh + hdrlen;
4703
4704 /* Check that ExtIV bit is set. */
4705 if (!(ivp[3] & IEEE80211_WEP_EXTIV))
4706 return 1;
4707
4708 hasqos = ieee80211_has_qos(wh);
4709 tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4710 prsc = &k->k_rsc[tid];
4711
4712 /* Extract the 48-bit PN from the CCMP header. */
4713 pn = (uint64_t)ivp[0] |
4714 (uint64_t)ivp[1] << 8 |
4715 (uint64_t)ivp[4] << 16 |
4716 (uint64_t)ivp[5] << 24 |
4717 (uint64_t)ivp[6] << 32 |
4718 (uint64_t)ivp[7] << 40;
4719 if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
4720 if (pn < *prsc) {
4721 ic->ic_stats.is_ccmp_replays++;
4722 return 1;
4723 }
4724 } else if (pn <= *prsc) {
4725 ic->ic_stats.is_ccmp_replays++;
4726 return 1;
4727 }
4728 /* Last seen packet number is updated in ieee80211_inputm(). */
4729
4730 /*
4731 * Some firmware versions strip the MIC, and some don't. It is not
4732 * clear which of the capability flags could tell us what to expect.
4733 * For now, keep things simple and just leave the MIC in place if
4734 * it is present.
4735 *
4736 * The IV will be stripped by ieee80211_inputm().
4737 */
4738 return 0;
4739}
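
The scattered ivp[] indices above follow the standard 8-byte CCMP header layout, which places the key-ID/ExtIV octet between PN1 and PN2; a worked example:

/*
 * CCMP header behind ivp[] (IEEE 802.11-2012, 11.4.3):
 *   ivp[0] PN0   ivp[1] PN1   ivp[2] reserved   ivp[3] keyid | ExtIV
 *   ivp[4] PN2   ivp[5] PN3   ivp[6] PN4        ivp[7] PN5
 * e.g. ivp = { 0x02, 0x01, 0x00, 0x20, 0x03, 0x04, 0x00, 0x00 }
 * assembles to pn = 0x0000000004030102.
 */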
4740
4741int
4742iwm_rx_hwdecrypt(struct iwm_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4743 struct ieee80211_rxinfo *rxi)
4744{
4745 struct ieee80211com *ic = &sc->sc_ic;
4746 struct ifnet *ifp = IC2IFP(ic);
4747 struct ieee80211_frame *wh;
4748 struct ieee80211_node *ni;
4749 int ret = 0;
4750 uint8_t type, subtype;
4751
4752 wh = mtod(m, struct ieee80211_frame *);
4753
4754 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4755 if (type == IEEE80211_FC0_TYPE_CTL)
4756 return 0;
4757
4758 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4759 if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
4760 return 0;
4761
4762 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4763 !(wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
4764 return 0;
4765
4766 ni = ieee80211_find_rxnode(ic, wh);
4767 /* Handle hardware decryption. */
4768 if ((ni->ni_flags & IEEE80211_NODE_RXPROT) &&
4769 ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
4770 if ((rx_pkt_status & IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4771 IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4772 ic->ic_stats.is_ccmp_dec_errs++;
4773 ret = 1;
4774 goto out;
4775 }
4776 /* Check whether decryption was successful or not. */
4777 if ((rx_pkt_status &
4778 (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4779 IWM_RX_MPDU_RES_STATUS_MIC_OK)) !=
4780 (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4781 IWM_RX_MPDU_RES_STATUS_MIC_OK)) {
4782 ic->ic_stats.is_ccmp_dec_errs++;
4783 ret = 1;
4784 goto out;
4785 }
4786 rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4787 }
4788out:
4789 if (ret)
4790 ifp->if_ierrors++;
4791 ieee80211_release_node(ic, ni);
4792 return ret;
4793}
4794
4795void
4796iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m, int chanidx,
4797 uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
4798 uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4799 struct mbuf_list *ml)
4800{
4801 struct ieee80211com *ic = &sc->sc_ic;
4802 struct ifnet *ifp = IC2IFP(ic);
4803 struct ieee80211_frame *wh;
4804 struct ieee80211_node *ni;
4805
4806 if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
4807 chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4808
4809 wh = mtod(m, struct ieee80211_frame *);
4810 ni = ieee80211_find_rxnode(ic, wh);
4811 if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
4812 iwm_ccmp_decap(sc, m, ni, rxi) != 0) {
4813 ifp->if_ierrors++;
4814 m_freem(m);
4815 ieee80211_release_node(ic, ni);
4816 return;
4817 }
4818
4819 #if NBPFILTER > 0
4820 if (sc->sc_drvbpf != NULL) {
4821 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
4822 uint16_t chan_flags;
4823
4824 tap->wr_flags = 0;
4825 if (is_shortpre)
4826 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4827 tap->wr_chan_freq =
4828 htole16(ic->ic_channels[chanidx].ic_freq);
4829 chan_flags = ic->ic_channels[chanidx].ic_flags;
4830 if (ic->ic_curmode != IEEE80211_MODE_11N &&
4831 ic->ic_curmode != IEEE80211_MODE_11AC) {
4832 chan_flags &= ~IEEE80211_CHAN_HT;
4833 chan_flags &= ~IEEE80211_CHAN_40MHZ;
4834 }
4835 if (ic->ic_curmode != IEEE80211_MODE_11AC)
4836 chan_flags &= ~IEEE80211_CHAN_VHT;
4837 tap->wr_chan_flags = htole16(chan_flags);
4838 tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4839 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4840 tap->wr_tsft = device_timestamp;
4841 if (rate_n_flags & IWM_RATE_MCS_HT_MSK) {
4842 uint8_t mcs = (rate_n_flags &
4843 (IWM_RATE_HT_MCS_RATE_CODE_MSK |
4844 IWM_RATE_HT_MCS_NSS_MSK));
4845 tap->wr_rate = (0x80 | mcs);
4846 } else {
4847 uint8_t rate = (rate_n_flags &
4848 IWM_RATE_LEGACY_RATE_MSK);
4849 switch (rate) {
4850 /* CCK rates. */
4851 case 10: tap->wr_rate = 2; break;
4852 case 20: tap->wr_rate = 4; break;
4853 case 55: tap->wr_rate = 11; break;
4854 case 110: tap->wr_rate = 22; break;
4855 /* OFDM rates. */
4856 case 0xd: tap->wr_rate = 12; break;
4857 case 0xf: tap->wr_rate = 18; break;
4858 case 0x5: tap->wr_rate = 24; break;
4859 case 0x7: tap->wr_rate = 36; break;
4860 case 0x9: tap->wr_rate = 48; break;
4861 case 0xb: tap->wr_rate = 72; break;
4862 case 0x1: tap->wr_rate = 96; break;
4863 case 0x3: tap->wr_rate = 108; break;
4864 /* Unknown rate: should not happen. */
4865 default: tap->wr_rate = 0;
4866 }
4867 }
4868
4869 bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4870 m, BPF_DIRECTION_IN);
4871 }
4872#endif
4873 ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4874 ieee80211_release_node(ic, ni);
4875}
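
The rate switch above converts firmware rate codes into radiotap's 500 kb/s units: the CCK codes are in 100 kb/s steps, the OFDM values are PLCP signal codes, and HT frames are tagged 0x80 | MCS. For example:

/*
 * CCK:  fw code 110 -> 11 Mb/s -> wr_rate 22 (units of 500 kb/s)
 * OFDM: fw code 0xd ->  6 Mb/s -> wr_rate 12
 * HT:   MCS 5       -> wr_rate 0x85
 */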
4876
4877void
4878iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
4879 size_t maxlen, struct mbuf_list *ml)
4880{
4881 struct ieee80211com *ic = &sc->sc_ic;
4882 struct ieee80211_rxinfo rxi;
4883 struct iwm_rx_phy_info *phy_info;
4884 struct iwm_rx_mpdu_res_start *rx_res;
4885 int device_timestamp;
4886 uint16_t phy_flags;
4887 uint32_t len;
4888 uint32_t rx_pkt_status;
4889 int rssi, chanidx, rate_n_flags;
4890
4891 memset(&rxi, 0, sizeof(rxi));
4892
4893 phy_info = &sc->sc_last_phy_info;
4894 rx_res = (struct iwm_rx_mpdu_res_start *)pktdata;
4895 len = le16toh(rx_res->byte_count);
4896 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4897 /* Allow control frames in monitor mode. */
4898 if (len < sizeof(struct ieee80211_frame_cts)) {
4899 ic->ic_stats.is_rx_tooshort++;
4900 IC2IFP(ic)->if_ierrors++;
4901 m_freem(m);
4902 return;
4903 }
4904 } else if (len < sizeof(struct ieee80211_frame)) {
4905 ic->ic_stats.is_rx_tooshort++;
4906 IC2IFP(ic)->if_ierrors++;
4907 m_freem(m);
4908 return;
4909 }
4910 if (len > maxlen - sizeof(*rx_res)) {
4911 IC2IFP(ic)->if_ierrors++;
4912 m_freem(m);
4913 return;
4914 }
4915
4916 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
4917 m_freem(m);
4918 return;
4919 }
4920
4921 rx_pkt_status = le32toh(*(uint32_t *)(pktdata + sizeof(*rx_res) + len));
4922 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
4923 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
4924 m_freem(m);
4925 return; /* drop */
4926 }
4927
4928 m->m_data = pktdata + sizeof(*rx_res);
4929 m->m_pkthdr.len = m->m_len = len;
4930
4931 if (iwm_rx_hwdecrypt(sc, m, rx_pkt_status, &rxi)) {
4932 m_freem(m);
4933 return;
4934 }
4935
4936 chanidx = letoh32(phy_info->channel);
4937 device_timestamp = le32toh(phy_info->system_timestamp);
4938 phy_flags = letoh16(phy_info->phy_flags);
4939 rate_n_flags = le32toh(phy_info->rate_n_flags);
4940
4941 rssi = iwm_get_signal_strength(sc, phy_info);
4942 rssi = (0 - IWM_MIN_DBM) + rssi; /* normalize */
4943 rssi = MIN(rssi, ic->ic_max_rssi); /* clip to max. 100% */
4944
4945 rxi.rxi_rssi = rssi;
4946 rxi.rxi_tstamp = device_timestamp;
4947 rxi.rxi_chan = chanidx;
4948
4949 iwm_rx_frame(sc, m, chanidx, rx_pkt_status,
4950 (phy_flags & IWM_PHY_INFO_FLAG_SHPREAMBLE),
4951 rate_n_flags, device_timestamp, &rxi, ml);
4952}
4953
4954void
4955iwm_flip_address(uint8_t *addr)
4956{
4957 int i;
4958 uint8_t mac_addr[ETHER_ADDR_LEN];
4959
4960 for (i = 0; i < ETHER_ADDR_LEN; i++)
4961 mac_addr[i] = addr[ETHER_ADDR_LEN - i - 1];
4962 IEEE80211_ADDR_COPY(addr, mac_addr);
4963}
4964
4965/*
4966 * Drop duplicate 802.11 retransmissions
4967 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4968 * and handle pseudo-duplicate frames which result from deaggregation
4969 * of A-MSDU frames in hardware.
4970 */
4971int
4972iwm_detect_duplicate(struct iwm_softc *sc, struct mbuf *m,
4973 struct iwm_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4974{
4975 struct ieee80211com *ic = &sc->sc_ic;
4976 struct iwm_node *in = (void *)ic->ic_bss;
4977 struct iwm_rxq_dup_data *dup_data = &in->dup_data;
4978 uint8_t tid = IWM_MAX_TID_COUNT, subframe_idx;
4979 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4980 uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4981 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4982 int hasqos = ieee80211_has_qos(wh);
4983 uint16_t seq;
4984
4985 if (type == IEEE80211_FC0_TYPE_CTL ||
4986 (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4987 IEEE80211_IS_MULTICAST(wh->i_addr1))
4988 return 0;
4989
4990 if (hasqos) {
4991 tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4992 if (tid > IWM_MAX_TID_COUNT)
4993 tid = IWM_MAX_TID_COUNT;
4994 }
4995
4996 /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
4997 subframe_idx = desc->amsdu_info &
4998 IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4999
5000 seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
5001 if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
5002 dup_data->last_seq[tid] == seq &&
5003 dup_data->last_sub_frame[tid] >= subframe_idx)
5004 return 1;
5005
5006 /*
5007 * Allow the same frame sequence number for all A-MSDU subframes
5008 * following the first subframe.
5009 * Otherwise these subframes would be discarded as replays.
5010 */
5011 if (dup_data->last_seq[tid] == seq &&
5012 subframe_idx > dup_data->last_sub_frame[tid] &&
5013 (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU)) {
5014 rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
5015 }
5016
5017 dup_data->last_seq[tid] = seq;
5018 dup_data->last_sub_frame[tid] = subframe_idx;
5019
5020 return 0;
5021}
5022
5023/*
5024 * Returns true if sn2 - buffer_size < sn1 < sn2.
5025 * To be used only in order to compare reorder buffer head with NSSN.
5026 * We fully trust NSSN unless it is behind us due to reorder timeout.
5027 * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
5028 */
5029int
5030iwm_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
5031{
5032 return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
5033}
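
Sequence numbers here are 12-bit, so SEQ_LT() compares modulo 4096 with a half-space threshold of 2048. A worked example of the helper above:

/*
 * iwm_is_sn_less(4090, 5, 64):
 *   SEQ_LT(4090, 5):      (4090 - 5) & 0xfff = 4085 > 2048     -> true
 *   SEQ_LT(4090, 5 - 64): (4090 + 59) & 0xfff = 53, not > 2048 -> false
 * 4090 therefore lies within (5 - 64, 5) modulo 4096 and the
 * function returns 1.
 */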
5034
5035void
5036iwm_release_frames(struct iwm_softc *sc, struct ieee80211_node *ni,
5037 struct iwm_rxba_data *rxba, struct iwm_reorder_buffer *reorder_buf,
5038 uint16_t nssn, struct mbuf_list *ml)
5039{
5040 struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
5041 uint16_t ssn = reorder_buf->head_sn;
5042
5043 /* ignore nssn smaller than head sn - this can happen due to timeout */
5044 if (iwm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
5045 goto set_timer;
5046
5047 while (iwm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
5048 int index = ssn % reorder_buf->buf_size;
5049 struct mbuf *m;
5050 int chanidx, is_shortpre;
5051 uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
5052 struct ieee80211_rxinfo *rxi;
5053
5054 /* This data is the same for all A-MSDU subframes. */
5055 chanidx = entries[index].chanidx;
5056 rx_pkt_status = entries[index].rx_pkt_status;
5057 is_shortpre = entries[index].is_shortpre;
5058 rate_n_flags = entries[index].rate_n_flags;
5059 device_timestamp = entries[index].device_timestamp;
5060 rxi = &entries[index].rxi;
5061
5062 /*
5063 * Empty the list. Will have more than one frame for A-MSDU.
5064 * Empty list is valid as well since nssn indicates frames were
5065 * received.
5066 */
5067 while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
5068 iwm_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
5069 rate_n_flags, device_timestamp, rxi, ml);
5070 reorder_buf->num_stored--;
5071
5072 /*
5073 * Allow the same frame sequence number and CCMP PN for
5074 * all A-MSDU subframes following the first subframe.
5075 * Otherwise they would be discarded as replays.
5076 */
5077 rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
5078 rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5079 }
5080
5081 ssn = (ssn + 1) & 0xfff;
5082 }
5083 reorder_buf->head_sn = nssn;
5084
5085set_timer:
5086 if (reorder_buf->num_stored && !reorder_buf->removed) {
5087 timeout_add_usec(&reorder_buf->reorder_timer,
5088 RX_REORDER_BUF_TIMEOUT_MQ_USEC);
5089 } else
5090 timeout_del(&reorder_buf->reorder_timer);
5091}
5092
5093int
5094iwm_oldsn_workaround(struct iwm_softc *sc, struct ieee80211_node *ni, int tid,
5095 struct iwm_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
5096{
5097 struct ieee80211com *ic = &sc->sc_ic;
5098
5099 if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
5100 /* we have a new (A-)MPDU ... */
5101
5102 /*
5103 * reset counter to 0 if we didn't have any oldsn in
5104 * the last A-MPDU (as detected by GP2 being identical)
5105 */
5106 if (!buffer->consec_oldsn_prev_drop)
5107 buffer->consec_oldsn_drops = 0;
5108
5109 /* either way, update our tracking state */
5110 buffer->consec_oldsn_ampdu_gp2 = gp2;
5111 } else if (buffer->consec_oldsn_prev_drop) {
5112 /*
5113 * tracking state didn't change, and we had an old SN
5114 * indication before - do nothing in this case, we
5115 * already noted this one down and are waiting for the
5116 * next A-MPDU (by GP2)
5117 */
5118 return 0;
5119 }
5120
5121 /* return unless this MPDU has old SN */
5122 if (!(reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN))
5123 return 0;
5124
5125 /* update state */
5126 buffer->consec_oldsn_prev_drop = 1;
5127 buffer->consec_oldsn_drops++;
5128
5129 /* if limit is reached, send del BA and reset state */
5130 if (buffer->consec_oldsn_drops == IWM_AMPDU_CONSEC_DROPS_DELBA) {
5131 ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
5132 0, tid);
5133 buffer->consec_oldsn_prev_drop = 0;
5134 buffer->consec_oldsn_drops = 0;
5135 return 1;
5136 }
5137
5138 return 0;
5139}
5140
5141/*
5142 * Handle re-ordering of frames which were de-aggregated in hardware.
5143 * Returns 1 if the MPDU was consumed (buffered or dropped).
5144 * Returns 0 if the MPDU should be passed to upper layer.
5145 */
5146int
5147iwm_rx_reorder(struct iwm_softc *sc, struct mbuf *m, int chanidx,
5148 struct iwm_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
5149 uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
5150 struct mbuf_list *ml)
5151{
5152 struct ieee80211com *ic = &sc->sc_ic;
5153 struct ieee80211_frame *wh;
5154 struct ieee80211_node *ni;
5155 struct iwm_rxba_data *rxba;
5156 struct iwm_reorder_buffer *buffer;
5157 uint32_t reorder_data = le32toh(desc->reorder_data);
5158 int is_amsdu = (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU);
5159 int last_subframe =
5160 (desc->amsdu_info & IWM_RX_MPDU_AMSDU_LAST_SUBFRAME);
5161 uint8_t tid;
5162 uint8_t subframe_idx = (desc->amsdu_info &
5163 IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5164 struct iwm_reorder_buf_entry *entries;
5165 int index;
5166 uint16_t nssn, sn;
5167 uint8_t baid, type, subtype;
5168 int hasqos;
5169
5170 wh = mtod(m, struct ieee80211_frame *);
5171 hasqos = ieee80211_has_qos(wh);
5172 tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
5173
5174 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5175 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5176
5177 /*
5178 * We are only interested in Block Ack requests and unicast QoS data.
5179 */
5180 if (IEEE80211_IS_MULTICAST(wh->i_addr1))
5181 return 0;
5182 if (hasqos) {
5183 if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
5184 return 0;
5185 } else {
5186 if (type != IEEE80211_FC0_TYPE_CTL ||
5187 subtype != IEEE80211_FC0_SUBTYPE_BAR)
5188 return 0;
5189 }
5190
5191 baid = (reorder_data & IWM_RX_MPDU_REORDER_BAID_MASK) >>
5192 IWM_RX_MPDU_REORDER_BAID_SHIFT;
5193 if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
5194 baid >= nitems(sc->sc_rxba_data))
5195 return 0;
5196
5197 rxba = &sc->sc_rxba_data[baid];
5198 if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
5199 tid != rxba->tid || rxba->sta_id != IWM_STATION_ID)
5200 return 0;
5201
5202 if (rxba->timeout != 0)
5203 getmicrouptime(&rxba->last_rx);
5204
5205 /* Bypass A-MPDU re-ordering in net80211. */
5206 rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
5207
5208 nssn = reorder_data & IWM_RX_MPDU_REORDER_NSSN_MASK;
5209 sn = (reorder_data & IWM_RX_MPDU_REORDER_SN_MASK) >>
5210 IWM_RX_MPDU_REORDER_SN_SHIFT;
5211
5212 buffer = &rxba->reorder_buf;
5213 entries = &rxba->entries[0];
5214
5215 if (!buffer->valid) {
5216 if (reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN)
5217 return 0;
5218 buffer->valid = 1;
5219 }
5220
5221 ni = ieee80211_find_rxnode(ic, wh);
5222 if (type == IEEE80211_FC0_TYPE_CTL &&
5223 subtype == IEEE80211_FC0_SUBTYPE_BAR) {
5224 iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5225 goto drop;
5226 }
5227
5228 /*
5229 * If there was a significant jump in the nssn - adjust.
5230 * If the SN is smaller than the NSSN it might need to first go into
5231 * the reorder buffer, in which case we just release up to it and the
5232 * rest of the function will take care of storing it and releasing up to
5233 * the nssn.
5234 */
5235 if (!iwm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
5236 buffer->buf_size) ||
5237 !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
5238 uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
5239 ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
5240 iwm_release_frames(sc, ni, rxba, buffer, min_sn, ml);
5241 }
5242
5243 if (iwm_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
5244 device_timestamp)) {
5245 /* BA session will be torn down. */
5246 ic->ic_stats.is_ht_rx_ba_window_jump++;
5247 goto drop;
5248
5249 }
5250
5251 /* drop any outdated packets */
5252 if (SEQ_LT(sn, buffer->head_sn)) {
5253 ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
5254 goto drop;
5255 }
5256
5257 /* release immediately if allowed by nssn and no stored frames */
5258 if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
5259 if (iwm_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
5260 (!is_amsdu || last_subframe))
5261 buffer->head_sn = nssn;
5262 ieee80211_release_node(ic, ni);
5263 return 0;
5264 }
5265
5266 /*
5267 * release immediately if there are no stored frames, and the sn is
5268 * equal to the head.
5269 * This can happen due to reorder timer, where NSSN is behind head_sn.
5270 * When we released everything, and we got the next frame in the
5271 * sequence, according to the NSSN we can't release immediately,
5272 * while technically there is no hole and we can move forward.
5273 */
5274 if (!buffer->num_stored && sn == buffer->head_sn) {
5275 if (!is_amsdu || last_subframe)
5276 buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
5277 ieee80211_release_node(ic, ni);
5278 return 0;
5279 }
5280
5281 index = sn % buffer->buf_size;
5282
5283 /*
5284 * Check if we already stored this frame
5285 * As AMSDU is either received or not as whole, logic is simple:
5286 * If we have frames in that position in the buffer and the last frame
5287 * originated from AMSDU had a different SN then it is a retransmission.
5288 * If it is the same SN then if the subframe index is incrementing it
5289 * is the same AMSDU - otherwise it is a retransmission.
5290 */
5291 if (!ml_empty(&entries[index].frames)) {
5292 if (!is_amsdu) {
5293 ic->ic_stats.is_ht_rx_ba_no_buf++;
5294 goto drop;
5295 } else if (sn != buffer->last_amsdu ||
5296 buffer->last_sub_index >= subframe_idx) {
5297 ic->ic_stats.is_ht_rx_ba_no_buf++;
5298 goto drop;
5299 }
5300 } else {
5301 /* This data is the same for all A-MSDU subframes. */
5302 entries[index].chanidx = chanidx;
5303 entries[index].is_shortpre = is_shortpre;
5304 entries[index].rate_n_flags = rate_n_flags;
5305 entries[index].device_timestamp = device_timestamp;
5306 memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
5307 }
5308
5309 /* put in reorder buffer */
5310 ml_enqueue(&entries[index].frames, m);
5311 buffer->num_stored++;
5312 getmicrouptime(&entries[index].reorder_time);
5313
5314 if (is_amsdu) {
5315 buffer->last_amsdu = sn;
5316 buffer->last_sub_index = subframe_idx;
5317 }
5318
5319 /*
5320 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
5321 * The reason is that NSSN advances on the first sub-frame, and may
5322 * cause the reorder buffer to advance before all the sub-frames arrive.
5323 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
5324 * SN 1. NSSN for first sub frame will be 3 with the result of driver
5325 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
5326 * already ahead and it will be dropped.
5327 * If the last sub-frame is not on this queue - we will get frame
5328 * release notification with up to date NSSN.
5329 */
5330 if (!is_amsdu || last_subframe)
5331 iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5332
5333 ieee80211_release_node(ic, ni);
5334 return 1;
5335
5336drop:
5337 m_freem(m);
5338 ieee80211_release_node(ic, ni);
5339 return 1;
5340}
5341
5342void
5343iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
5344 size_t maxlen, struct mbuf_list *ml)
5345{
5346 struct ieee80211com *ic = &sc->sc_ic;
5347 struct ieee80211_rxinfo rxi;
5348 struct iwm_rx_mpdu_desc *desc;
5349 uint32_t len, hdrlen, rate_n_flags, device_timestamp;
5350 int rssi;
5351 uint8_t chanidx;
5352 uint16_t phy_info;
5353
5354 memset(&rxi, 0, sizeof(rxi));
5355
5356 desc = (struct iwm_rx_mpdu_desc *)pktdata;
5357
5358 if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
5359 !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
5360 m_freem(m);
5361 return; /* drop */
5362 }
5363
5364 len = le16toh(desc->mpdu_len);
5365 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5366 /* Allow control frames in monitor mode. */
5367 if (len < sizeof(struct ieee80211_frame_cts)) {
5368 ic->ic_stats.is_rx_tooshort++;
5369 IC2IFP(ic)->if_ierrors++;
5370 m_freem(m);
5371 return;
5372 }
5373 } else if (len < sizeof(struct ieee80211_frame)) {
5374 ic->ic_stats.is_rx_tooshort++;
5375 IC2IFP(ic)->if_ierrors++;
5376 m_freem(m);
5377 return;
5378 }
5379 if (len > maxlen - sizeof(*desc)) {
5380 IC2IFP(ic)->if_ierrors++;
5381 m_freem(m);
5382 return;
5383 }
5384
5385 m->m_data = pktdata + sizeof(*desc);
5386 m->m_pkthdr.len = m->m_len = len;
5387
5388 /* Account for padding following the frame header. */
5389 if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD) {
5390 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5391 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5392 if (type == IEEE80211_FC0_TYPE_CTL) {
5393 switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
5394 case IEEE80211_FC0_SUBTYPE_CTS:
5395 hdrlen = sizeof(struct ieee80211_frame_cts);
5396 break;
5397 case IEEE80211_FC0_SUBTYPE_ACK:
5398 hdrlen = sizeof(struct ieee80211_frame_ack);
5399 break;
5400 default:
5401 hdrlen = sizeof(struct ieee80211_frame_min);
5402 break;
5403 }
5404 } else
5405 hdrlen = ieee80211_get_hdrlen(wh);
5406
5407 if ((le16toh(desc->status) &
5408 IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
5409 IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
5410 /* Padding is inserted after the IV. */
5411 hdrlen += IEEE80211_CCMP_HDRLEN;
5412 }
5413
5414 memmove(m->m_data + 2, m->m_data, hdrlen);
5415 m_adj(m, 2);
5416 }
5417
5418 /*
5419 * Hardware de-aggregates A-MSDUs and copies the same MAC header
5420 * in place for each subframe. But it leaves the 'A-MSDU present'
5421 * bit set in the frame header. We need to clear this bit ourselves.
5422 *
5423 * And we must allow the same CCMP PN for subframes following the
5424 * first subframe. Otherwise they would be discarded as replays.
5425 */
5426 if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU) {
5427 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5428 uint8_t subframe_idx = (desc->amsdu_info &
5429 IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5430 if (subframe_idx > 0)
5431 rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5432 if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
5433 m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
5434 struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
5435 struct ieee80211_qosframe_addr4 *);
5436 qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5437
5438 /* HW reverses addr3 and addr4. */
5439 iwm_flip_address(qwh4->i_addr3);
5440 iwm_flip_address(qwh4->i_addr4);
5441 } else if (ieee80211_has_qos(wh) &&
5442 m->m_len >= sizeof(struct ieee80211_qosframe)) {
5443 struct ieee80211_qosframe *qwh = mtod(m,
5444 struct ieee80211_qosframe *);
5445 qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5446
5447 /* HW reverses addr3. */
5448 iwm_flip_address(qwh->i_addr3);
5449 }
5450 }
5451
5452 /*
5453 * Verify decryption before duplicate detection. The latter uses
5454 * the TID supplied in QoS frame headers and this TID is implicitly
5455 * verified as part of the CCMP nonce.
5456 */
5457 if (iwm_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
5458 m_freem(m);
5459 return;
5460 }
5461
5462 if (iwm_detect_duplicate(sc, m, desc, &rxi)) {
5463 m_freem(m);
5464 return;
5465 }
5466
5467 phy_info = le16toh(desc->phy_info);
5468 rate_n_flags = le32toh(desc->v1.rate_n_flags);
5469 chanidx = desc->v1.channel;
5470 device_timestamp = desc->v1.gp2_on_air_rise;
5471
5472 rssi = iwm_rxmq_get_signal_strength(sc, desc);
5473 rssi = (0 - IWM_MIN_DBM) + rssi; /* normalize */
5474 rssi = MIN(rssi, ic->ic_max_rssi); /* clip to max. 100% */
5475
5476 rxi.rxi_rssi = rssi;
5477 rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
5478 rxi.rxi_chan = chanidx;
5479
5480 if (iwm_rx_reorder(sc, m, chanidx, desc,
5481 (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5482 rate_n_flags, device_timestamp, &rxi, ml))
5483 return;
5484
5485 iwm_rx_frame(sc, m, chanidx, le16toh(desc->status),
5486 (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5487 rate_n_flags, device_timestamp, &rxi, ml);
5488}
5489
5490void
5491iwm_ra_choose(struct iwm_softc *sc, struct ieee80211_node *ni)
5492{
5493 struct ieee80211com *ic = &sc->sc_ic;
5494 struct iwm_node *in = (void *)ni;
5495 int old_txmcs = ni->ni_txmcs;
5496 int old_nss = ni->ni_vht_ss;
5497
5498 if (ni->ni_flags & IEEE80211_NODE_VHT)
5499 ieee80211_ra_vht_choose(&in->in_rn_vht, ic, ni);
5500 else
5501 ieee80211_ra_choose(&in->in_rn, ic, ni);
5502
5503 /*
5504 * If RA has chosen a new TX rate we must update
5505 * the firmware's LQ rate table.
5506 */
5507 if (ni->ni_txmcs != old_txmcs || ni->ni_vht_ss != old_nss)
5508 iwm_setrates(in, 1);
5509}
5510
5511void
5512iwm_ht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5513 int txmcs, uint8_t failure_frame, int txfail)
5514{
5515 struct ieee80211com *ic = &sc->sc_ic;
5516 struct iwm_node *in = (void *)ni;
5517
5518 /* Ignore Tx reports which don't match our last LQ command. */
5519 if (txmcs != ni->ni_txmcs) {
5520 if (++in->lq_rate_mismatch > 15) {
5521 /* Try to sync firmware with the driver... */
5522 iwm_setrates(in, 1);
5523 in->lq_rate_mismatch = 0;
5524 }
5525 } else {
5526 int mcs = txmcs;
5527 const struct ieee80211_ht_rateset *rs =
5528 ieee80211_ra_get_ht_rateset(txmcs,
5529 ieee80211_node_supports_ht_chan40(ni),
5530 ieee80211_ra_use_ht_sgi(ni));
5531 unsigned int retries = 0, i;
5532
5533 in->lq_rate_mismatch = 0;
5534
5535 for (i = 0; i < failure_frame; i++) {
5536 if (mcs > rs->min_mcs) {
5537 ieee80211_ra_add_stats_ht(&in->in_rn,
5538 ic, ni, mcs, 1, 1);
5539 mcs--;
5540 } else
5541 retries++;
5542 }
5543
5544 if (txfail && failure_frame == 0) {
5545 ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5546 txmcs, 1, 1);
5547 } else {
5548 ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5549 mcs, retries + 1, retries);
5550 }
5551
5552 iwm_ra_choose(sc, ni);
5553 }
5554}
5555
5556void
5557iwm_vht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5558 int txmcs, int nss, uint8_t failure_frame, int txfail)
5559{
5560 struct ieee80211com *ic = &sc->sc_ic;
5561 struct iwm_node *in = (void *)ni;
5562 uint8_t vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
5563 uint8_t sco = IEEE80211_HTOP0_SCO_SCN;
5564
5565 /* Ignore Tx reports which don't match our last LQ command. */
5566 if (txmcs != ni->ni_txmcs || nss != ni->ni_vht_ss) {
5567 if (++in->lq_rate_mismatch > 15) {
5568 /* Try to sync firmware with the driver... */
5569 iwm_setrates(in, 1);
5570 in->lq_rate_mismatch = 0;
5571 }
5572 } else {
5573 int mcs = txmcs;
5574 unsigned int retries = 0, i;
5575
5576 if (in->in_phyctxt) {
5577 vht_chan_width = in->in_phyctxt->vht_chan_width;
5578 sco = in->in_phyctxt->sco;
5579 }
5580 in->lq_rate_mismatch = 0;
5581
5582 for (i = 0; i < failure_frame; i++) {
5583 if (mcs > 0) {
5584 ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5585 ic, ni, mcs, nss, 1, 1);
5586 if (vht_chan_width >=
5587 IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5588 /*
5589 * First 4 Tx attempts used same MCS,
5590 * twice at 80MHz and twice at 40MHz.
5591 */
5592 if (i >= 4)
5593 mcs--;
5594 } else if (sco == IEEE80211_HTOP0_SCO_SCA ||
5595 sco == IEEE80211_HTOP0_SCO_SCB) {
5596 /*
5597 * First 4 Tx attempts used same MCS,
5598 * four times at 40MHz.
5599 */
5600 if (i >= 4)
5601 mcs--;
5602 } else
5603 mcs--;
5604 } else
5605 retries++;
5606 }
5607
5608 if (txfail && failure_frame == 0) {
5609 ieee80211_ra_vht_add_stats(&in->in_rn_vht, ic, ni,
5610 txmcs, nss, 1, 1);
5611 } else {
5612 ieee80211_ra_vht_add_stats(&in->in_rn_vht, ic, ni,
5613 mcs, nss, retries + 1, retries);
5614 }
5615
5616 iwm_ra_choose(sc, ni);
5617 }
5618}
5619
5620void
5621iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5622 struct iwm_node *in, int txmcs, int txrate)
5623{
5624 struct ieee80211com *ic = &sc->sc_ic;
5625 struct ieee80211_node *ni = &in->in_ni;
5626 struct ifnet *ifp = IC2IFP(ic);
5627 struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5628 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5629 uint32_t initial_rate = le32toh(tx_resp->initial_rate);
5630 int txfail;
5631
5632 KASSERT(tx_resp->frame_count == 1);
5633
5634 txfail = (status != IWM_TX_STATUS_SUCCESS &&
5635 status != IWM_TX_STATUS_DIRECT_DONE);
5636
5637 /*
5638 * Update rate control statistics.
5639 * Only report frames which were actually queued with the currently
5640 * selected Tx rate. Because Tx queues are relatively long we may
5641 * encounter previously selected rates here during Tx bursts.
5642 * Providing feedback based on such frames can lead to suboptimal
5643 * Tx rate control decisions.
5644 */
5645 if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
5646 if (txrate != ni->ni_txrate) {
5647 if (++in->lq_rate_mismatch > 15) {
5648 /* Try to sync firmware with the driver... */
5649 iwm_setrates(in, 1);
5650 in->lq_rate_mismatch = 0;
5651 }
5652 } else {
5653 in->lq_rate_mismatch = 0;
5654
5655 in->in_amn.amn_txcnt++;
5656 if (txfail)
5657 in->in_amn.amn_retrycnt++;
5658 if (tx_resp->failure_frame > 0)
5659 in->in_amn.amn_retrycnt++;
5660 }
5661 } else if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5662 ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
5663 (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5664 int txmcs = initial_rate & IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5665 int nss = ((initial_rate & IWM_RATE_VHT_MCS_NSS_MSK) >>
5666 IWM_RATE_VHT_MCS_NSS_POS) + 1;
5667 iwm_vht_single_rate_control(sc, ni, txmcs, nss,
5668 tx_resp->failure_frame, txfail);
5669 } else if (ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
5670 (initial_rate & IWM_RATE_MCS_HT_MSK)) {
5671 int txmcs = initial_rate &
5672 (IWM_RATE_HT_MCS_RATE_CODE_MSK | IWM_RATE_HT_MCS_NSS_MSK);
5673 iwm_ht_single_rate_control(sc, ni, txmcs,
5674 tx_resp->failure_frame, txfail);
5675 }
5676
5677 if (txfail)
5678 ifp->if_oerrors++;
5679}
5680
5681void
5682iwm_txd_done(struct iwm_softc *sc, struct iwm_tx_data *txd)
5683{
5684 struct ieee80211com *ic = &sc->sc_ic;
5685
5686 bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5687 BUS_DMASYNC_POSTWRITE);
5688 bus_dmamap_unload(sc->sc_dmat, txd->map);
5689 m_freem(txd->m);
5690 txd->m = NULL;
5691
5692 KASSERT(txd->in);
5693 ieee80211_release_node(ic, &txd->in->in_ni);
5694 txd->in = NULL;
5695 txd->ampdu_nframes = 0;
5696 txd->ampdu_txmcs = 0;
5697 txd->ampdu_txnss = 0;
5698}
5699
5700void
5701iwm_txq_advance(struct iwm_softc *sc, struct iwm_tx_ring *ring, int idx)
5702{
5703 struct iwm_tx_data *txd;
5704
5705 while (ring->tail != idx) {
5706 txd = &ring->data[ring->tail];
5707 if (txd->m != NULL) {
5708 iwm_reset_sched(sc, ring->qid, ring->tail, IWM_STATION_ID);
5709 iwm_txd_done(sc, txd);
5710 ring->queued--;
5711 }
5712 ring->tail = (ring->tail + 1) % IWM_TX_RING_COUNT;
5713 }
5714
5715 wakeup(ring);
5716}
5717
5718void
5719iwm_ampdu_tx_done(struct iwm_softc *sc, struct iwm_cmd_header *cmd_hdr,
5720 struct iwm_node *in, struct iwm_tx_ring *txq, uint32_t initial_rate,
5721 uint8_t nframes, uint8_t failure_frame, uint16_t ssn, int status,
5722 struct iwm_agg_tx_status *agg_status)
5723{
5724 struct ieee80211com *ic = &sc->sc_ic;
5725 int tid = cmd_hdr->qid - IWM_FIRST_AGG_TX_QUEUE;
5726 struct iwm_tx_data *txdata = &txq->data[cmd_hdr->idx];
5727 struct ieee80211_node *ni = &in->in_ni;
5728 struct ieee80211_tx_ba *ba;
5729 int txfail = (status != IWM_TX_STATUS_SUCCESS &&
5730 status != IWM_TX_STATUS_DIRECT_DONE);
5731 uint16_t seq;
5732
5733 if (ic->ic_state != IEEE80211_S_RUN)
5734 return;
5735
5736 if (nframes > 1) {
5737 int i;
5738 /*
5739 * Collect information about this A-MPDU.
5740 */
5741
5742 for (i = 0; i < nframes; i++) {
5743 uint8_t qid = agg_status[i].qid;
5744 uint8_t idx = agg_status[i].idx;
5745 uint16_t txstatus = (le16toh(agg_status[i].status) &
5746 IWM_AGG_TX_STATE_STATUS_MSK);
5747
5748 if (txstatus != IWM_AGG_TX_STATE_TRANSMITTED)
5749 continue;
5750
5751 if (qid != cmd_hdr->qid)
5752 continue;
5753
5754 txdata = &txq->data[idx];
5755 if (txdata->m == NULL)
5756 continue;
5757
5758 /* The Tx rate was the same for all subframes. */
5759 if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5760 (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5761 txdata->ampdu_txmcs = initial_rate &
5762 IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5763 txdata->ampdu_txnss = ((initial_rate &
5764 IWM_RATE_VHT_MCS_NSS_MSK) >>
5765 IWM_RATE_VHT_MCS_NSS_POS) + 1;
5766 txdata->ampdu_nframes = nframes;
5767 } else if (initial_rate & IWM_RATE_MCS_HT_MSK) {
5768 txdata->ampdu_txmcs = initial_rate &
5769 (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5770 IWM_RATE_HT_MCS_NSS_MSK);
5771 txdata->ampdu_nframes = nframes;
5772 }
5773 }
5774 return;
5775 }
5776
5777 ba = &ni->ni_tx_ba[tid];
5778 if (ba->ba_state != IEEE80211_BA_AGREED)
5779 return;
5780 if (SEQ_LT(ssn, ba->ba_winstart))
5781 return;
5782
5783 /* This was a final single-frame Tx attempt for frame SSN-1. */
5784 seq = (ssn - 1) & 0xfff;
5785
5786 /*
5787 * Skip rate control if our Tx rate is fixed.
5788 * Don't report frames to MiRA which were sent at a different
5789 * Tx rate than ni->ni_txmcs.
5790 */
5791 if (ic->ic_fixed_mcs == -1) {
5792 if (txdata->ampdu_nframes > 1) {
5793 /*
5794 * This frame was once part of an A-MPDU.
5795 * Report one failed A-MPDU Tx attempt.
5796 * The firmware might have made several such
5797 * attempts but we don't keep track of this.
5798 */
5799 if (ni->ni_flags & IEEE80211_NODE_VHT) {
5800 ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5801 ic, ni, txdata->ampdu_txmcs,
5802 txdata->ampdu_txnss, 1, 1);
5803 } else {
5804 ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5805 txdata->ampdu_txmcs, 1, 1);
5806 }
5807 }
5808
5809 /* Report the final single-frame Tx attempt. */
5810 if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5811 (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5812 int txmcs = initial_rate &
5813 IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5814 int nss = ((initial_rate &
5815 IWM_RATE_VHT_MCS_NSS_MSK) >>
5816 IWM_RATE_VHT_MCS_NSS_POS) + 1;
5817 iwm_vht_single_rate_control(sc, ni, txmcs, nss,
5818 failure_frame, txfail);
5819 } else if (initial_rate & IWM_RATE_MCS_HT_MSK) {
5820 int txmcs = initial_rate &
5821 (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5822 IWM_RATE_HT_MCS_NSS_MSK);
5823 iwm_ht_single_rate_control(sc, ni, txmcs,
5824 failure_frame, txfail);
5825 }
5826 }
5827
5828 if (txfail)
5829 ieee80211_tx_compressed_bar(ic, ni, tid, ssn);
5830
5831 /*
5832 * SSN corresponds to the first (perhaps not yet transmitted) frame
5833 * in firmware's BA window. Firmware is not going to retransmit any
5834 * frames before its BA window so mark them all as done.
5835 */
5836 ieee80211_output_ba_move_window(ic, ni, tid, ssn);
5837 iwm_txq_advance(sc, txq, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5838 iwm_clear_oactive(sc, txq);
5839}
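/*
 * Sequence numbers here are 12-bit values which wrap at 4096, hence
 * the "& 0xfff" above: for ssn == 0 the final single-frame attempt
 * was for frame 4095. SEQ_LT() compares modulo 4096, and
 * IWM_AGG_SSN_TO_TXQ_IDX() (ssn & 255, per the expansion shown in
 * this listing) maps a sequence number onto one of the 256 Tx ring
 * slots, so moving the window forward by N frames advances the ring
 * tail by N slots in iwm_txq_advance().
 */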
5840
5841void
5842iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5843 struct iwm_rx_data *data)
5844{
5845 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
5846 int idx = cmd_hdr->idx;
5847 int qid = cmd_hdr->qid;
5848 struct iwm_tx_ring *ring = &sc->txq[qid];
5849 struct iwm_tx_data *txd;
5850 struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5851 uint32_t ssn;
5852 uint32_t len = iwm_rx_packet_len(pkt);
5853
5854 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
5855 BUS_DMASYNC_POSTREAD);
5856
5857 /* Sanity checks. */
5858 if (sizeof(*tx_resp) > len)
5859 return;
5860 if (qid < IWM_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
5861 return;
5862 if (qid > IWM_LAST_AGG_TX_QUEUE)
5863 return;
5864 if (sizeof(*tx_resp) + sizeof(ssn) +
5865 tx_resp->frame_count * sizeof(tx_resp->status) > len)
5866 return;
5867
5868 sc->sc_tx_timer[qid] = 0;
5869
5870 txd = &ring->data[idx];
5871 if (txd->m == NULL)
5872 return;
5873
5874 memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
5875 ssn = le32toh(ssn) & 0xfff;
5876 if (qid >= IWM_FIRST_AGG_TX_QUEUE) {
5877 int status;
5878 status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5879 iwm_ampdu_tx_done(sc, cmd_hdr, txd->in, ring,
5880 le32toh(tx_resp->initial_rate), tx_resp->frame_count,
5881 tx_resp->failure_frame, ssn, status, &tx_resp->status);
5882 } else {
5883 /*
5884 * Even though this is not an agg queue, we must only free
5885 * frames before the firmware's starting sequence number.
5886 */
5887 iwm_rx_tx_cmd_single(sc, pkt, txd->in, txd->txmcs, txd->txrate);
5888 iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5889 iwm_clear_oactive(sc, ring);
5890 }
5891}
5892
5893void
5894iwm_clear_oactive(struct iwm_softc *sc, struct iwm_tx_ring *ring)
5895{
5896 struct ieee80211com *ic = &sc->sc_ic;
5897 struct ifnet *ifp = IC2IFP(ic);
5898
5899 if (ring->queued < IWM_TX_RING_LOMARK) {
5900 sc->qfullmsk &= ~(1 << ring->qid);
5901 if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
5902 ifq_clr_oactive(&ifp->if_snd);
5903 /*
5904 * Well, we're in interrupt context, but then again
5905 * I guess net80211 does all sorts of stunts in
5906 * interrupt context, so maybe this is no biggie.
5907 */
5908 (*ifp->if_start)(ifp);
5909 }
5910 }
5911}
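/*
 * Note the hysteresis: iwm_tx() marks a queue full once more than
 * IWM_TX_RING_HIMARK (224) frames are queued, while the bit is only
 * cleared here after the ring drains below IWM_TX_RING_LOMARK (192).
 * The gap keeps the interface from toggling oactive on every
 * completed frame.
 */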
5912
5913void
5914iwm_ampdu_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5915 struct iwm_tx_ring *txq, int tid, uint16_t seq, uint16_t ssn)
5916{
5917 struct ieee80211com *ic = &sc->sc_ic;
5918 struct iwm_node *in = (void *)ni;
5919 int idx, end_idx;
5920
5921 /*
5922 * Update Tx rate statistics for A-MPDUs before firmware's BA window.
5923 */
5924 idx = IWM_AGG_SSN_TO_TXQ_IDX(seq);
5925 end_idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
5926 while (idx != end_idx) {
5927 struct iwm_tx_data *txdata = &txq->data[idx];
5928 if (txdata->m != NULL && txdata->ampdu_nframes > 1) {
5929 /*
5930 * We can assume that this subframe has been ACKed
5931 * because ACK failures come as single frames and
5932 * before failing an A-MPDU subframe the firmware
5933 * sends it as a single frame at least once.
5934 */
5935 if (ni->ni_flags & IEEE80211_NODE_VHT) {
5936 ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5937 ic, ni, txdata->ampdu_txmcs,
5938 txdata->ampdu_txnss, 1, 0);
5939 } else {
5940 ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5941 txdata->ampdu_txmcs, 1, 0);
5942 }
5943 /* Report this frame only once. */
5944 txdata->ampdu_nframes = 0;
5945 }
5946
5947 idx = (idx + 1) % IWM_TX_RING_COUNT;
5948 }
5949
5950 iwm_ra_choose(sc, ni);
5951}
5952
5953void
5954iwm_rx_compressed_ba(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
5955{
5956 struct iwm_ba_notif *ban = (void *)pkt->data;
5957 struct ieee80211com *ic = &sc->sc_ic;
5958 struct ieee80211_node *ni = ic->ic_bss;
5959 struct iwm_node *in = (void *)ni;
5960 struct ieee80211_tx_ba *ba;
5961 struct iwm_tx_ring *ring;
5962 uint16_t seq, ssn;
5963 int qid;
5964
5965 if (ic->ic_state != IEEE80211_S_RUN)
5966 return;
5967
5968 if (iwm_rx_packet_payload_len(pkt) < sizeof(*ban))
5969 return;
5970
5971 if (ban->sta_id != IWM_STATION_ID ||
5972 !IEEE80211_ADDR_EQ(in->in_macaddr, ban->sta_addr))
5973 return;
5974
5975 qid = le16toh(ban->scd_flow);
5976 if (qid < IWM_FIRST_AGG_TX_QUEUE || qid > IWM_LAST_AGG_TX_QUEUE)
5977 return;
5978
5979 /* Protect against a firmware bug where the queue/TID are off. */
5980 if (qid != IWM_FIRST_AGG_TX_QUEUE + ban->tid)
5981 return;
5982
5983 sc->sc_tx_timer[qid] = 0;
5984
5985 ba = &ni->ni_tx_ba[ban->tid];
5986 if (ba->ba_state != IEEE80211_BA_AGREED)
5987 return;
5988
5989 ring = &sc->txq[qid];
5990
5991 /*
5992 * The first bit in ban->bitmap corresponds to the sequence number
5993 * stored in the sequence control field ban->seq_ctl.
5994 * Multiple BA notifications in a row may be using this number, with
5995 * additional bits being set in ban->bitmap. It is unclear how the
5996 * firmware decides to shift this window forward.
5997 * We rely on ba->ba_winstart instead.
5998 */
5999 seq = le16toh(ban->seq_ctl) >> IEEE80211_SEQ_SEQ_SHIFT;
6000
6001 /*
6002 * The firmware's new BA window starting sequence number
6003 * corresponds to the first hole in ban->scd_ssn, implying
6004 * that all frames between 'seq' and 'ssn' (non-inclusive)
6005 * have been acked.
6006 */
6007 ssn = le16toh(ban->scd_ssn);
6008
6009 if (SEQ_LT(ssn, ba->ba_winstart))
6010 return;
6011
6012 /* Skip rate control if our Tx rate is fixed. */
6013 if (ic->ic_fixed_mcs == -1)
6014 iwm_ampdu_rate_control(sc, ni, ring, ban->tid,
6015 ba->ba_winstart, ssn);
6016
6017 /*
6018 * SSN corresponds to the first (perhaps not yet transmitted) frame
6019 * in firmware's BA window. Firmware is not going to retransmit any
6020 * frames before its BA window so mark them all as done.
6021 */
6022 ieee80211_output_ba_move_window(ic, ni, ban->tid, ssn);
6023 iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
6024 iwm_clear_oactive(sc, ring);
6025}
6026
6027void
6028iwm_rx_bmiss(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
6029 struct iwm_rx_data *data)
6030{
6031 struct ieee80211com *ic = &sc->sc_ic;
6032 struct iwm_missed_beacons_notif *mbn = (void *)pkt->data;
6033 uint32_t missed;
6034
6035 if ((ic->ic_opmode != IEEE80211_M_STA) ||
6036 (ic->ic_state != IEEE80211_S_RUN))
6037 return;
6038
6039 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
6040 sizeof(*mbn), BUS_DMASYNC_POSTREAD);
6041
6042 missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
6043 if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
6044 if (ic->ic_if.if_flags & IFF_DEBUG)
6045 printf("%s: receiving no beacons from %s; checking if "
6046 "this AP is still responding to probe requests\n",
6047 DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
6048 /*
6049 * Rather than go directly to scan state, try to send a
6050 * directed probe request first. If that fails then the
6051 * state machine will drop us into scanning after timing
6052 * out waiting for a probe response.
6053 */
6054 IEEE80211_SEND_MGMT(ic, ic->ic_bss,
6055 IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
6056 }
6057
6058}
6059
6060int
6061iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
6062{
6063 struct iwm_binding_cmd cmd;
6064 struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
6065 uint32_t mac_id = IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
6066 int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
6067 uint32_t status;
6068 size_t len;
6069
6070 if (action == IWM_FW_CTXT_ACTION_ADD && active)
6071 panic("binding already added");
6072 if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
6073 panic("binding already removed");
6074
6075 if (phyctxt == NULL) /* XXX race with iwm_stop() */
6076 return EINVAL;
6077
6078 memset(&cmd, 0, sizeof(cmd));
6079
6080 cmd.id_and_color
6081 = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
6082 cmd.action = htole32(action);
6083 cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
6084
6085 cmd.macs[0] = htole32(mac_id);
6086 for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
6087 cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
6088
6089 if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
6090 !isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CDB_SUPPORT))
6091 cmd.lmac_id = htole32(IWM_LMAC_24G_INDEX);
6092 else
6093 cmd.lmac_id = htole32(IWM_LMAC_5G_INDEX);
6094
6095 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT))
6096 len = sizeof(cmd);
6097 else
6098 len = sizeof(struct iwm_binding_cmd_v1);
6099 status = 0;
6100 err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD, len, &cmd,
6101 &status);
6102 if (err == 0 && status != 0)
6103 err = EIO;
6104
6105 return err;
6106}
6107
6108void
6109iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6110 struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
6111{
6112 memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
6113
6114 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
6115 ctxt->color));
6116 cmd->action = htole32(action);
6117 cmd->apply_time = htole32(apply_time);
6118}
6119
6120void
6121iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
6122 struct ieee80211_channel *chan, uint8_t chains_static,
6123 uint8_t chains_dynamic, uint8_t sco, uint8_t vht_chan_width)
6124{
6125 struct ieee80211com *ic = &sc->sc_ic;
6126 uint8_t active_cnt, idle_cnt;
6127
6128 cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
6129 IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
6130 cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
6131 if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
6132 cmd->ci.ctrl_pos = iwm_get_vht_ctrl_pos(ic, chan);
6133 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE80;
6134 } else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
6135 if (sco == IEEE80211_HTOP0_SCO_SCA) {
6136 /* secondary chan above -> control chan below */
6137 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6138 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6139 } else if (sco == IEEE80211_HTOP0_SCO_SCB) {
6140 /* secondary chan below -> control chan above */
6141 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6142 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6143 } else {
6144 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6145 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6146 }
6147 } else {
6148 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6149 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6150 }
6151
6152 /* Set the Rx chains. */
6153 idle_cnt = chains_static;
6154 active_cnt = chains_dynamic;
6155
6156 cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
6157 IWM_PHY_RX_CHAIN_VALID_POS);
6158 cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
6159 cmd->rxchain_info |= htole32(active_cnt <<
6160 IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
6161
6162 cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
6163}
6164
6165uint8_t
6166iwm_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
6167{
6168 int center_idx = ic->ic_bss->ni_vht_chan_center_freq_idx0;
6169 int primary_idx = ic->ic_bss->ni_primary_chan;
6170 /*
6171 * The FW is expected to check the control channel position only
6172 * when in HT/VHT and the channel width is not 20MHz. Return
6173 * this value as the default one:
6174 */
6175 uint8_t pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6176
6177 switch (primary_idx - center_idx) {
6178 case -6:
6179 pos = IWM_PHY_VHT_CTRL_POS_2_BELOW;
6180 break;
6181 case -2:
6182 pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6183 break;
6184 case 2:
6185 pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6186 break;
6187 case 6:
6188 pos = IWM_PHY_VHT_CTRL_POS_2_ABOVE;
6189 break;
6190 default:
6191 break;
6192 }
6193
6194 return pos;
6195}
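/*
 * Illustrative example: on the 80MHz block spanning channels 36-48
 * the center frequency index is 42, and 20MHz channel numbers differ
 * by 4. A primary channel of 36 gives 36 - 42 = -6 (second channel
 * below the center), 40 gives -2, 44 gives +2, and 48 gives +6,
 * matching the four cases above.
 */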
6196
6197int
6198iwm_phy_ctxt_cmd_uhb(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6199 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
6200 uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
6201{
6202 struct ieee80211com *ic = &sc->sc_ic;
6203 struct iwm_phy_context_cmd_uhb cmd;
6204 uint8_t active_cnt, idle_cnt;
6205 struct ieee80211_channel *chan = ctxt->channel;
6206
6207 memset(&cmd, 0, sizeof(cmd));
6208 cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
6209 ctxt->color));
6210 cmd.action = htole32(action);
6211 cmd.apply_time = htole32(apply_time);
6212
6213 cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
6214 IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
6215 cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
6216 if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
6217 cmd.ci.ctrl_pos = iwm_get_vht_ctrl_pos(ic, chan);
6218 cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE80;
6219 } else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
6220 if (sco == IEEE80211_HTOP0_SCO_SCA) {
6221 /* secondary chan above -> control chan below */
6222 cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6223 cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6224 } else if (sco == IEEE80211_HTOP0_SCO_SCB) {
6225 /* secondary chan below -> control chan above */
6226 cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6227 cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6228 } else {
6229 cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6230 cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6231 }
6232 } else {
6233 cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6234 cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6235 }
6236
6237 idle_cnt = chains_static;
6238 active_cnt = chains_dynamic;
6239 cmd.rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
6240 IWM_PHY_RX_CHAIN_VALID_POS);
6241 cmd.rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
6242 cmd.rxchain_info |= htole32(active_cnt <<
6243 IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
6244 cmd.txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
6245
6246 return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6247}
6248
6249int
6250iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6251 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
6252 uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
6253{
6254 struct iwm_phy_context_cmd cmd;
6255
6256 /*
6257 * Intel increased the size of the fw_channel_info struct and neglected
6258 * to bump the phy_context_cmd struct, which contains an fw_channel_info
6259 * member in the middle.
6260 * To keep things simple we use a separate function to handle the larger
6261 * variant of the phy context command.
6262 */
6263 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS))
6264 return iwm_phy_ctxt_cmd_uhb(sc, ctxt, chains_static,
6265 chains_dynamic, action, apply_time, sco, vht_chan_width);
6266
6267 iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
6268
6269 iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
6270 chains_static, chains_dynamic, sco, vht_chan_width);
6271
6272 return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
6273 sizeof(struct iwm_phy_context_cmd), &cmd);
6274}
6275
6276int
6277iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6278{
6279 struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6280 struct iwm_tfd *desc;
6281 struct iwm_tx_data *txdata;
6282 struct iwm_device_cmd *cmd;
6283 struct mbuf *m;
6284 bus_addr_t paddr;
6285 uint32_t addr_lo;
6286 int err = 0, i, paylen, off, s;
6287 int idx, code, async, group_id;
6288 size_t hdrlen, datasz;
6289 uint8_t *data;
6290 int generation = sc->sc_generation;
6291
6292 code = hcmd->id;
6293 async = hcmd->flags & IWM_CMD_ASYNC;
6294 idx = ring->cur;
6295
6296 for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
6297 paylen += hcmd->len[i];
6298 }
6299
6300 /* If this command waits for a response, allocate response buffer. */
6301 hcmd->resp_pkt = NULL;
6302 if (hcmd->flags & IWM_CMD_WANT_RESP) {
6303 uint8_t *resp_buf;
6304 KASSERT(!async);
6305 KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwm_rx_packet));
6306 KASSERT(hcmd->resp_pkt_len <= IWM_CMD_RESP_MAX);
6307 if (sc->sc_cmd_resp_pkt[idx] != NULL)
6308 return ENOSPC;
6309 resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
6310 M_NOWAIT | M_ZERO);
6311 if (resp_buf == NULL)
6312 return ENOMEM;
6313 sc->sc_cmd_resp_pkt[idx] = resp_buf;
6314 sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
6315 } else {
6316 sc->sc_cmd_resp_pkt[idx] = NULL;
6317 }
6318
6319 s = splnet();
6320
6321 desc = &ring->desc[idx];
6322 txdata = &ring->data[idx];
6323
6324 group_id = iwm_cmd_groupid(code);
6325 if (group_id != 0) {
6326 hdrlen = sizeof(cmd->hdr_wide);
6327 datasz = sizeof(cmd->data_wide);
6328 } else {
6329 hdrlen = sizeof(cmd->hdr);
6330 datasz = sizeof(cmd->data);
6331 }
6332
6333 if (paylen > datasz) {
6334 /* Command is too large to fit in pre-allocated space. */
6335 size_t totlen = hdrlen + paylen;
6336 if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
6337 printf("%s: firmware command too long (%zd bytes)\n",
6338 DEVNAME(sc), totlen);
6339 err = EINVAL;
6340 goto out;
6341 }
6342 m = MCLGETL(NULL, M_DONTWAIT, totlen);
6343 if (m == NULL) {
6344 printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
6345 DEVNAME(sc), totlen);
6346 err = ENOMEM;
6347 goto out;
6348 }
6349 cmd = mtod(m, struct iwm_device_cmd *);
6350 err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
6351 totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6352 if (err) {
6353 printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
6354 DEVNAME(sc), totlen);
6355 m_freem(m);
6356 goto out;
6357 }
6358 txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
6359 paddr = txdata->map->dm_segs[0].ds_addr;
6360 } else {
6361 cmd = &ring->cmd[idx];
6362 paddr = txdata->cmd_paddr;
6363 }
6364
6365 if (group_id != 0) {
6366 cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
6367 cmd->hdr_wide.group_id = group_id;
6368 cmd->hdr_wide.qid = ring->qid;
6369 cmd->hdr_wide.idx = idx;
6370 cmd->hdr_wide.length = htole16(paylen);
6371 cmd->hdr_wide.version = iwm_cmd_version(code);
6372 data = cmd->data_wide;
6373 } else {
6374 cmd->hdr.code = code;
6375 cmd->hdr.flags = 0;
6376 cmd->hdr.qid = ring->qid;
6377 cmd->hdr.idx = idx;
6378 data = cmd->data;
6379 }
6380
6381 for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
6382 if (hcmd->len[i] == 0)
6383 continue;
6384 memcpy(data + off, hcmd->data[i], hcmd->len[i]);
6385 off += hcmd->len[i];
6386 }
6387 KASSERT(off == paylen);
6388
6389 /* lo field is not aligned */
6390 addr_lo = htole32((uint32_t)paddr);
6391 memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
6392 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
6393 | ((hdrlen + paylen) << 4));
6394 desc->num_tbs = 1;
6395
6396 if (paylen > datasz) {
6397 bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
6398 hdrlen + paylen, BUS_DMASYNC_PREWRITE);
6399 } else {
6400 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6401 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6402 hdrlen + paylen, BUS_DMASYNC_PREWRITE);
6403 }
6404 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6405 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6406 sizeof (*desc), BUS_DMASYNC_PREWRITE);
6407
6408 /*
6409 * Wake up the NIC to make sure that the firmware will see the host
6410 * command - we will let the NIC sleep once all the host commands
6411 * returned. This needs to be done only on 7000 family NICs.
6412 */
6413 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
6414 if (ring->queued == 0 && !iwm_nic_lock(sc)) {
6415 err = EBUSY;
6416 goto out;
6417 }
6418 }
6419
6420 iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
6421
6422 /* Kick command ring. */
6423 ring->queued++;
6424 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
6425 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
6426
6427 if (!async) {
6428 err = tsleep_nsec(desc, PCATCH, "iwmcmd", SEC_TO_NSEC(1));
6429 if (err == 0) {
6430 /* if hardware is no longer up, return error */
6431 if (generation != sc->sc_generation) {
6432 err = ENXIO;
6433 goto out;
6434 }
6435
6436 /* Response buffer will be freed in iwm_free_resp(). */
6437 hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
6438 sc->sc_cmd_resp_pkt[idx] = NULL;
6439 } else if (generation == sc->sc_generation) {
6440 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
6441 sc->sc_cmd_resp_len[idx]);
6442 sc->sc_cmd_resp_pkt[idx] = NULL;
6443 }
6444 }
6445 out:
6446 splx(s);
6447
6448 return err;
6449}
6450
6451int
6452iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
6453 uint16_t len, const void *data)
6454{
6455 struct iwm_host_cmd cmd = {
6456 .id = id,
6457 .len = { len, },
6458 .data = { data, },
6459 .flags = flags,
6460 };
6461
6462 return iwm_send_cmd(sc, &cmd);
6463}
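/*
 * An illustrative calling pattern (a sketch mirroring what
 * iwm_send_cmd_status() below does, not additional driver code):
 * fire-and-forget commands go through iwm_send_cmd_pdu() as above,
 * while a caller which needs the firmware's answer sets
 * IWM_CMD_WANT_RESP and frees the response afterwards:
 *
 *	struct iwm_host_cmd hcmd = {
 *		.id = id,
 *		.len = { len, },
 *		.data = { data, },
 *		.flags = IWM_CMD_WANT_RESP,
 *		.resp_pkt_len = sizeof(struct iwm_rx_packet) +
 *		    sizeof(struct iwm_cmd_response),
 *	};
 *	err = iwm_send_cmd(sc, &hcmd);
 *	if (err == 0) {
 *		... inspect hcmd.resp_pkt ...
 *		iwm_free_resp(sc, &hcmd);
 *	}
 */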
6464
6465int
6466iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
6467 uint32_t *status)
6468{
6469 struct iwm_rx_packet *pkt;
6470 struct iwm_cmd_response *resp;
6471 int err, resp_len;
6472
6473 KASSERT((cmd->flags & IWM_CMD_WANT_RESP) == 0);
6474 cmd->flags |= IWM_CMD_WANT_RESP;
6475 cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
6476
6477 err = iwm_send_cmd(sc, cmd);
6478 if (err)
6479 return err;
6480
6481 pkt = cmd->resp_pkt;
6482 if (pkt == NULL || (pkt->hdr.flags & IWM_CMD_FAILED_MSK))
6483 return EIO;
6484
6485 resp_len = iwm_rx_packet_payload_len(pkt);
6486 if (resp_len != sizeof(*resp)) {
6487 iwm_free_resp(sc, cmd);
6488 return EIO;
6489 }
6490
6491 resp = (void *)pkt->data;
6492 *status = le32toh(resp->status);
6493 iwm_free_resp(sc, cmd);
6494 return err;
6495}
6496
6497int
6498iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
6499 const void *data, uint32_t *status)
6500{
6501 struct iwm_host_cmd cmd = {
6502 .id = id,
6503 .len = { len, },
6504 .data = { data, },
6505 };
6506
6507 return iwm_send_cmd_status(sc, &cmd, status);
6508}
6509
6510void
6511iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6512{
6513 KASSERT((hcmd->flags & (IWM_CMD_WANT_RESP)) == IWM_CMD_WANT_RESP);
6514 free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
6515 hcmd->resp_pkt = NULL;
6516}
6517
6518void
6519iwm_cmd_done(struct iwm_softc *sc, int qid, int idx, int code)
6520{
6521 struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6522 struct iwm_tx_data *data;
6523
6524 if (qid != sc->cmdqid) {
6525 return; /* Not a command ack. */
6526 }
6527
6528 data = &ring->data[idx];
6529
6530 if (data->m != NULL) {
6531 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
6532 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6533 bus_dmamap_unload(sc->sc_dmat, data->map);
6534 m_freem(data->m);
6535 data->m = NULL;
6536 }
6537 wakeup(&ring->desc[idx]);
6538
6539 if (ring->queued == 0) {
6540 DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
6541 DEVNAME(sc), code));
6542 } else if (--ring->queued == 0) {
6543 /*
6544 * 7000 family NICs are locked while commands are in progress.
6545 * All commands are now done so we may unlock the NIC again.
6546 */
6547 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6548 iwm_nic_unlock(sc);
6549 }
6550}
6551
6552void
6553iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
6554 uint16_t len)
6555{
6556 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6557 uint16_t val;
6558
6559 scd_bc_tbl = sc->sched_dma.vaddr;
6560
6561 len += IWM_TX_CRC_SIZE + IWM_TX_DELIMITER_SIZE;
6562 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
6563 len = roundup(len, 4) / 4;
6564
6565 val = htole16(sta_id << 12 | len);
6566
6567 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6568 0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6569
6570 /* Update TX scheduler. */
6571 scd_bc_tbl[qid].tfd_offset[idx] = val;
6572 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6573 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6574 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6575 0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6576}
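/*
 * Worked example for the byte count table entry above: a 100-byte
 * frame becomes len = 100 + IWM_TX_CRC_SIZE (4) +
 * IWM_TX_DELIMITER_SIZE (4) = 108; with
 * IWM_UCODE_TLV_FLAGS_DW_BC_TABLE set this is stored as
 * roundup(108, 4) / 4 = 27 dwords, and val = sta_id << 12 | 27
 * packs the station id into the upper four bits of the 16-bit entry.
 */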
6577
6578void
6579iwm_reset_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id)
6580{
6581 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6582 uint16_t val;
6583
6584 scd_bc_tbl = sc->sched_dma.vaddr;
6585
6586 val = htole16(1 | (sta_id << 12));
6587
6588 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6589 0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6590
6591 /* Update TX scheduler. */
6592 scd_bc_tbl[qid].tfd_offset[idx] = val;
6593 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6594 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6595
6596 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6597 0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6598}
6599
6600/*
6601 * Fill in various bits for management frames, and leave them
6602 * unfilled for data frames (firmware takes care of that).
6603 * Return the selected legacy TX rate, or zero if HT/VHT is used.
6604 */
6605uint8_t
6606iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
6607 struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
6608{
6609 struct ieee80211com *ic = &sc->sc_ic;
6610 struct ieee80211_node *ni = &in->in_ni;
6611 const struct iwm_rate *rinfo;
6612 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6613 int min_ridx = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
6614 int ridx, rate_flags;
6615 uint8_t rate = 0;
6616
6617 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
6618 tx->data_retry_limit = IWM_LOW_RETRY_LIMIT;
6619
6620 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
6621 type != IEEE80211_FC0_TYPE_DATA) {
6622 /* for non-data, use the lowest supported rate */
6623 ridx = min_ridx;
6624 tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
6625 } else if (ic->ic_fixed_mcs != -1) {
6626 if (ni->ni_flags & IEEE80211_NODE_VHT)
6627 ridx = IWM_FIRST_OFDM_RATE;
6628 else
6629 ridx = sc->sc_fixed_ridx;
6630 } else if (ic->ic_fixed_rate != -1) {
6631 ridx = sc->sc_fixed_ridx;
6632 } else {
6633 int i;
6634 /* Use firmware rateset retry table. */
6635 tx->initial_rate_index = 0;
6636 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
6637 if (ni->ni_flags & IEEE80211_NODE_HT) /* VHT implies HT */
6638 return 0;
6639 ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
6640 IWM_RIDX_OFDM : IWM_RIDX_CCK;
6641 for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
6642 if (iwm_rates[i].rate == (ni->ni_txrate &
6643 IEEE80211_RATE_VAL)) {
6644 ridx = i;
6645 break;
6646 }
6647 }
6648 return iwm_rates[ridx].rate & 0xff;
6649 }
6650
6651 rinfo = &iwm_rates[ridx];
6652 if ((ni->ni_flags & IEEE80211_NODE_VHT) == 0 &&
6653 iwm_is_mimo_ht_plcp(rinfo->ht_plcp))
6654 rate_flags = IWM_RATE_MCS_ANT_AB_MSK;
6655 else
6656 rate_flags = iwm_valid_siso_ant_rate_mask(sc);
6657 if (IWM_RIDX_IS_CCK(ridx))
6658 rate_flags |= IWM_RATE_MCS_CCK_MSK;
6659 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6660 type == IEEE80211_FC0_TYPE_DATA &&
6661 rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
6662 uint8_t sco = IEEE80211_HTOP0_SCO_SCN;
6663 uint8_t vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
6664 if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
6665 IEEE80211_CHAN_80MHZ_ALLOWED(ni->ni_chan) &&
6666 ieee80211_node_supports_vht_chan80(ni))
6667 vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
6668 else if (IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
6669 ieee80211_node_supports_ht_chan40(ni))
6670 sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
6671 if (ni->ni_flags & IEEE80211_NODE_VHT)
6672 rate_flags |= IWM_RATE_MCS_VHT_MSK;
6673 else
6674 rate_flags |= IWM_RATE_MCS_HT_MSK;
6675 if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80 &&
6676 in->in_phyctxt != NULL &&
6677 in->in_phyctxt->vht_chan_width == vht_chan_width) {
6678 rate_flags |= IWM_RATE_MCS_CHAN_WIDTH_80;
6679 if (ieee80211_node_supports_vht_sgi80(ni))
6680 rate_flags |= IWM_RATE_MCS_SGI_MSK;
6681 } else if ((sco == IEEE80211_HTOP0_SCO_SCA ||
6682 sco == IEEE80211_HTOP0_SCO_SCB) &&
6683 in->in_phyctxt != NULL && in->in_phyctxt->sco == sco) {
6684 rate_flags |= IWM_RATE_MCS_CHAN_WIDTH_40;
6685 if (ieee80211_node_supports_ht_sgi40(ni))
6686 rate_flags |= IWM_RATE_MCS_SGI_MSK;
6687 } else if (ieee80211_node_supports_ht_sgi20(ni))
6688 rate_flags |= IWM_RATE_MCS_SGI_MSK;
6689 if (ni->ni_flags & IEEE80211_NODE_VHT) {
6690 /*
6691 * ifmedia only provides an MCS index, no NSS.
6692 * Use a fixed SISO rate.
6693 */
6694 tx->rate_n_flags = htole32(rate_flags |
6695 (ic->ic_fixed_mcs &
6696 IWM_RATE_VHT_MCS_RATE_CODE_MSK));
6697 } else
6698 tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
6699 } else
6700 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
6701
6702 return rate;
6703}
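/*
 * Illustrative rate_n_flags composition, using the bit values shown
 * by the expansions in this listing: an HT MCS 7 data frame on a
 * 40MHz channel with short GI would set rinfo->ht_plcp (the MCS
 * code) | IWM_RATE_MCS_HT_MSK (1 << 8) |
 * IWM_RATE_MCS_CHAN_WIDTH_40 (1 << 11) |
 * IWM_RATE_MCS_SGI_MSK (1 << 13), plus the antenna bits chosen at
 * the top of this function.
 */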
6704
6705#define TB0_SIZE 16
6706int
6707iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
6708{
6709 struct ieee80211com *ic = &sc->sc_ic;
6710 struct iwm_node *in = (void *)ni;
6711 struct iwm_tx_ring *ring;
6712 struct iwm_tx_data *data;
6713 struct iwm_tfd *desc;
6714 struct iwm_device_cmd *cmd;
6715 struct iwm_tx_cmd *tx;
6716 struct ieee80211_frame *wh;
6717 struct ieee80211_key *k = NULL;
6718 uint8_t rate;
6719 uint8_t *ivp;
6720 uint32_t flags;
6721 u_int hdrlen;
6722 bus_dma_segment_t *seg;
6723 uint8_t tid, type, subtype;
6724 int i, totlen, err, pad;
6725 int qid, hasqos;
6726
6727 wh = mtod(m, struct ieee80211_frame *);
6728 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6729 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
6730 if (type == IEEE80211_FC0_TYPE_CTL)
6731 hdrlen = sizeof(struct ieee80211_frame_min);
6732 else
6733 hdrlen = ieee80211_get_hdrlen(wh);
6734
6735 hasqos = ieee80211_has_qos(wh);
6736 if (type == IEEE80211_FC0_TYPE_DATA)
6737 tid = IWM_TID_NON_QOS;
6738 else
6739 tid = IWM_MAX_TID_COUNT;
6740
6741 /*
6742 * Map EDCA categories to Tx data queues.
6743 *
6744 * We use static data queue assignments even in DQA mode. We do not
6745 * need to share Tx queues between stations because we only implement
6746 * client mode; the firmware's station table contains only one entry
6747 * which represents our access point.
6748 */
6749 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6750 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6751 qid = IWM_DQA_INJECT_MONITOR_QUEUE;
6752 else
6753 qid = IWM_AUX_QUEUE;
6754 } else if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6755 qid = IWM_DQA_MIN_MGMT_QUEUE + ac;
6756 else
6757 qid = ac;
6758
6759 /* If possible, put this frame on an aggregation queue. */
6760 if (hasqos) {
6761 struct ieee80211_tx_ba *ba;
6762 uint16_t qos = ieee80211_get_qos(wh);
6763 int qostid = qos & IEEE80211_QOS_TID;
6764 int agg_qid = IWM_FIRST_AGG_TX_QUEUE + qostid;
6765
6766 ba = &ni->ni_tx_ba[qostid];
6767 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6768 type == IEEE80211_FC0_TYPE_DATA &&
6769 subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
6770 (sc->tx_ba_queue_mask & (1 << agg_qid)) &&
6771 ba->ba_state == IEEE80211_BA_AGREED) {
6772 qid = agg_qid;
6773 tid = qostid;
6774 ac = ieee80211_up_to_ac(ic, qostid);
6775 }
6776 }
6777
6778 ring = &sc->txq[qid];
6779 desc = &ring->desc[ring->cur];
6780 memset(desc, 0, sizeof(*desc));
6781 data = &ring->data[ring->cur];
6782
6783 cmd = &ring->cmd[ring->cur];
6784 cmd->hdr.code = IWM_TX_CMD;
6785 cmd->hdr.flags = 0;
6786 cmd->hdr.qid = ring->qid;
6787 cmd->hdr.idx = ring->cur;
6788
6789 tx = (void *)cmd->data;
6790 memset(tx, 0, sizeof(*tx));
6791
6792 rate = iwm_tx_fill_cmd(sc, in, wh, tx);
6793
6794#if NBPFILTER > 0
6795 if (sc->sc_drvbpf != NULL) {
6796 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
6797 uint16_t chan_flags;
6798
6799 tap->wt_flags = 0;
6800 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
6801 chan_flags = ni->ni_chan->ic_flags;
6802 if (ic->ic_curmode != IEEE80211_MODE_11N &&
6803 ic->ic_curmode != IEEE80211_MODE_11AC) {
6804 chan_flags &= ~IEEE80211_CHAN_HT;
6805 chan_flags &= ~IEEE80211_CHAN_40MHZ;
6806 }
6807 if (ic->ic_curmode != IEEE80211_MODE_11AC)
6808 chan_flags &= ~IEEE80211_CHAN_VHT;
6809 tap->wt_chan_flags = htole16(chan_flags);
6810 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6811 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6812 type == IEEE80211_FC0_TYPE_DATA) {
6813 tap->wt_rate = (0x80 | ni->ni_txmcs);
6814 } else
6815 tap->wt_rate = rate;
6816 if ((ic->ic_flags & IEEE80211_F_WEPON) &&
6817 (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
6818 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
6819
6820 bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
6821 m, BPF_DIRECTION_OUT);
6822 }
6823#endif
6824 totlen = m->m_pkthdr.len;
6825
6826 if (ic->ic_opmode != IEEE80211_M_MONITOR &&
6827 (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) {
6828 k = ieee80211_get_txkey(ic, wh, ni);
6829 if ((k->k_flags & IEEE80211_KEY_GROUP) ||
6830 (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
6831 if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
6832 return ENOBUFS;
6833 /* 802.11 header may have moved. */
6834 wh = mtod(m, struct ieee80211_frame *);
6835 totlen = m->m_pkthdr.len;
6836 k = NULL; /* skip hardware crypto below */
6837 } else {
6838 /* HW appends CCMP MIC */
6839 totlen += IEEE80211_CCMP_HDRLEN;
6840 }
6841 }
6842
6843 flags = 0;
6844 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
6845 flags |= IWM_TX_CMD_FLG_ACK;
6846 }
6847
6848 if (type == IEEE80211_FC0_TYPE_DATA &&
6849 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6850 (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
6851 (ic->ic_flags & IEEE80211_F_USEPROT)))
6852 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
6853
6854 if (ic->ic_opmode == IEEE80211_M_MONITOR)
6855 tx->sta_id = IWM_MONITOR_STA_ID;
6856 else
6857 tx->sta_id = IWM_STATION_ID;
6858
6859 if (type == IEEE80211_FC0_TYPE_MGT) {
6860 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
6861 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
6862 tx->pm_frame_timeout = htole16(3);
6863 else
6864 tx->pm_frame_timeout = htole16(2);
6865 } else {
6866 if (type == IEEE80211_FC0_TYPE_CTL &&
6867 subtype == IEEE80211_FC0_SUBTYPE_BAR) {
6868 struct ieee80211_frame_min *mwh;
6869 uint8_t *barfrm;
6870 uint16_t ctl;
6871 mwh = mtod(m, struct ieee80211_frame_min *);
6872 barfrm = (uint8_t *)&mwh[1];
6873 ctl = LE_READ_2(barfrm);
6874 tid = (ctl & IEEE80211_BA_TID_INFO_MASK) >>
6875 IEEE80211_BA_TID_INFO_SHIFT;
6876 flags |= IWM_TX_CMD_FLG_ACK | IWM_TX_CMD_FLG_BAR;
6877 tx->data_retry_limit = IWM_BAR_DFAULT_RETRY_LIMIT;
6878 }
6879
6880 tx->pm_frame_timeout = htole16(0);
6881 }
6882
6883 if (hdrlen & 3) {
6884 /* First segment length must be a multiple of 4. */
6885 flags |= IWM_TX_CMD_FLG_MH_PAD;
6886 tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD);
6887 pad = 4 - (hdrlen & 3);
6888 } else
6889 pad = 0;
6890
6891 tx->len = htole16(totlen);
6892 tx->tid_tspec = tid;
6893 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
6894
6895 /* Set physical address of "scratch area". */
6896 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
6897 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
6898
6899 /* Copy 802.11 header in TX command. */
6900 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
6901
6902 if (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) {
6903 /* Trim 802.11 header and prepend CCMP IV. */
6904 m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN);
6905 ivp = mtod(m, u_int8_t *);
6906 k->k_tsc++; /* increment the 48-bit PN */
6907 ivp[0] = k->k_tsc; /* PN0 */
6908 ivp[1] = k->k_tsc >> 8; /* PN1 */
6909 ivp[2] = 0; /* Rsvd */
6910 ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV;
6911 ivp[4] = k->k_tsc >> 16; /* PN2 */
6912 ivp[5] = k->k_tsc >> 24; /* PN3 */
6913 ivp[6] = k->k_tsc >> 32; /* PN4 */
6914 ivp[7] = k->k_tsc >> 40; /* PN5 */
6915
6916 tx->sec_ctl = IWM_TX_CMD_SEC_CCM;
6917 memcpy(tx->key, k->k_key, MIN(sizeof(tx->key), k->k_len));
6918 /* TX scheduler includes CCMP MIC length. */
6919 totlen += IEEE80211_CCMP_MICLEN;
6920 } else {
6921 /* Trim 802.11 header. */
6922 m_adj(m, hdrlen);
6923 tx->sec_ctl = 0;
6924 }
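/*
 * Sketch of the resulting 8-byte CCMP header, e.g. for
 * k->k_tsc == 0x012345 and key id 0:
 * ivp[] = { 0x45, 0x23, 0x00, 0x20, 0x01, 0x00, 0x00, 0x00 },
 * i.e. PN0, PN1, a reserved byte, key id << 6 with the ExtIV bit
 * (0x20) set, then PN2..PN5 of the 48-bit packet number.
 */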
6925
6926 flags |= IWM_TX_CMD_FLG_BT_DIS;
6927 if (!hasqos)
6928 flags |= IWM_TX_CMD_FLG_SEQ_CTL;
6929
6930 tx->tx_flags |= htole32(flags);
6931
6932 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6933 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6934 if (err && err != EFBIG) {
6935 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
6936 m_freem(m);
6937 return err;
6938 }
6939 if (err) {
6940 /* Too many DMA segments, linearize mbuf. */
6941 if (m_defrag(m, M_DONTWAIT)) {
6942 m_freem(m);
6943 return ENOBUFS;
6944 }
6945 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6946 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6947 if (err) {
6948 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
6949 err);
6950 m_freem(m);
6951 return err;
6952 }
6953 }
6954 data->m = m;
6955 data->in = in;
6956 data->txmcs = ni->ni_txmcs;
6957 data->txrate = ni->ni_txrate;
6958 data->ampdu_txmcs = ni->ni_txmcs; /* updated upon Tx interrupt */
6959 data->ampdu_txnss = ni->ni_vht_ss; /* updated upon Tx interrupt */
6960
6961 /* Fill TX descriptor. */
6962 desc->num_tbs = 2 + data->map->dm_nsegs;
6963
6964 desc->tbs[0].lo = htole32(data->cmd_paddr);
6965 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6966 (TB0_SIZE << 4));
6967 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
6968 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6969 ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
6970 + hdrlen + pad - TB0_SIZE) << 4));
6971
6972 /* Other DMA segments are for data payload. */
6973 seg = data->map->dm_segs;
6974 for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
6975 desc->tbs[i+2].lo = htole32(seg->ds_addr);
6976 desc->tbs[i+2].hi_n_len = \
6977 htole16(iwm_get_dma_hi_addr(seg->ds_addr)
6978 | ((seg->ds_len) << 4));
6979 }
6980
6981 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data->
map), (0), (data->map->dm_mapsize), (0x04))
6982 BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (data->
map), (0), (data->map->dm_mapsize), (0x04))
;
6983 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring->
cmd_dma.map), ((char *)(void *)cmd - (char *)(void *)ring->
cmd_dma.vaddr), (sizeof (*cmd)), (0x04))
6984 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring->
cmd_dma.map), ((char *)(void *)cmd - (char *)(void *)ring->
cmd_dma.vaddr), (sizeof (*cmd)), (0x04))
6985 sizeof (*cmd), BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring->
cmd_dma.map), ((char *)(void *)cmd - (char *)(void *)ring->
cmd_dma.vaddr), (sizeof (*cmd)), (0x04))
;
6986 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring->
desc_dma.map), ((char *)(void *)desc - (char *)(void *)ring->
desc_dma.vaddr), (sizeof (*desc)), (0x04))
6987 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring->
desc_dma.map), ((char *)(void *)desc - (char *)(void *)ring->
desc_dma.vaddr), (sizeof (*desc)), (0x04))
6988 sizeof (*desc), BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ring->
desc_dma.map), ((char *)(void *)desc - (char *)(void *)ring->
desc_dma.vaddr), (sizeof (*desc)), (0x04))
;
6989
6990 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, totlen);
6991
6992 /* Kick TX ring. */
6993 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT256;
6994 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur)(((sc)->sc_st)->write_4(((sc)->sc_sh), ((((0x400)+0x060
))), ((ring->qid << 8 | ring->cur))))
;
6995
6996 /* Mark TX ring as full if we reach a certain threshold. */
6997 if (++ring->queued > IWM_TX_RING_HIMARK224) {
6998 sc->qfullmsk |= 1 << ring->qid;
6999 }
7000
7001 if (ic->ic_ific_ac.ac_if.if_flags & IFF_UP0x1)
7002 sc->sc_tx_timer[ring->qid] = 15;
7003
7004 return 0;
7005}
7006
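/*
 * Ask the firmware to flush any frames still queued for our station.
 * A tid_mask of 0xffff covers all TIDs at once.
 */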
int
iwm_flush_tx_path(struct iwm_softc *sc, int tfd_queue_msk)
{
    struct iwm_tx_path_flush_cmd flush_cmd = {
        .sta_id = htole32(IWM_STATION_ID),
        .tid_mask = htole16(0xffff),
    };
    int err;

    err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, 0,
        sizeof(flush_cmd), &flush_cmd);
    if (err)
        printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
    return err;
}

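/*
 * Wait for all Tx rings except the command queue to drain. The sleep
 * channel is the ring itself, so the Tx completion path is presumably
 * what issues the wakeup(9); each sleep is bounded by IWM_FLUSH_WAIT_MS.
 */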
#define IWM_FLUSH_WAIT_MS	2000

int
iwm_wait_tx_queues_empty(struct iwm_softc *sc)
{
    int i, err;

    for (i = 0; i < IWM_MAX_QUEUES; i++) {
        struct iwm_tx_ring *ring = &sc->txq[i];

        if (i == sc->cmdqid)
            continue;

        while (ring->queued > 0) {
            err = tsleep_nsec(ring, 0, "iwmflush",
                MSEC_TO_NSEC(IWM_FLUSH_WAIT_MS));
            if (err)
                return err;
        }
    }

    return 0;
}

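/*
 * LED helpers: the LED is driven directly via the CSR LED register;
 * blinking is done in software with a timeout(9) that toggles it.
 */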
void
iwm_led_enable(struct iwm_softc *sc)
{
    IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
}

void
iwm_led_disable(struct iwm_softc *sc)
{
    IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
}

int
iwm_led_is_enabled(struct iwm_softc *sc)
{
    return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
}

#define IWM_LED_BLINK_TIMEOUT_MSEC	200

void
iwm_led_blink_timeout(void *arg)
{
    struct iwm_softc *sc = arg;

    if (iwm_led_is_enabled(sc))
        iwm_led_disable(sc);
    else
        iwm_led_enable(sc);

    timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
}

void
iwm_led_blink_start(struct iwm_softc *sc)
{
    timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
    iwm_led_enable(sc);
}

void
iwm_led_blink_stop(struct iwm_softc *sc)
{
    timeout_del(&sc->sc_led_blink_to);
    iwm_led_disable(sc);
}

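/*
 * Power management and beacon filtering. When beacon filtering is
 * enabled the firmware processes beacons on our behalf; "beacon abort"
 * additionally lets it skip beacon handling while saving power.
 */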
#define IWM_POWER_KEEP_ALIVE_PERIOD_SEC	25

int
iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
    struct iwm_beacon_filter_cmd *cmd)
{
    return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
        0, sizeof(struct iwm_beacon_filter_cmd), cmd);
}

void
iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_beacon_filter_cmd *cmd)
{
    cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}

int
iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
{
    struct iwm_beacon_filter_cmd cmd = {
        IWM_BF_CMD_CONFIG_DEFAULTS,
        .bf_enable_beacon_filter = htole32(1),
        .ba_enable_beacon_abort = htole32(enable),
    };

    if (!sc->sc_bf.bf_enabled)
        return 0;

    sc->sc_bf.ba_enabled = enable;
    iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
    return iwm_beacon_filter_send_cmd(sc, &cmd);
}

void
iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_power_cmd *cmd)
{
    struct ieee80211com *ic = &sc->sc_ic;
    struct ieee80211_node *ni = &in->in_ni;
    int dtim_period, dtim_msec, keep_alive;

    cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
        in->in_color));
    if (ni->ni_dtimperiod)
        dtim_period = ni->ni_dtimperiod;
    else
        dtim_period = 1;

    /*
     * Regardless of power management state the driver must set
     * keep alive period. FW will use it for sending keep alive NDPs
     * immediately after association. Check that keep alive period
     * is at least 3 * DTIM.
     */
    dtim_msec = dtim_period * ni->ni_intval;
    keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
    keep_alive = roundup(keep_alive, 1000) / 1000;
    cmd->keep_alive_seconds = htole16(keep_alive);
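    /*
     * Worked example: with a DTIM period of 3 and a beacon interval of
     * 100, dtim_msec is 300 and MAX(3 * 300, 25000) = 25000, which the
     * roundup-and-divide above turns into a 25 second keep-alive period.
     */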

    if (ic->ic_opmode != IEEE80211_M_MONITOR)
        cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
}

int
iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
{
    int err;
    int ba_enable;
    struct iwm_mac_power_cmd cmd;

    memset(&cmd, 0, sizeof(cmd));

    iwm_power_build_cmd(sc, in, &cmd);

    err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
        sizeof(cmd), &cmd);
    if (err != 0)
        return err;

    ba_enable = !!(cmd.flags &
        htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
    return iwm_update_beacon_abort(sc, in, ba_enable);
}

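/*
 * IWM_MAC_PM_POWER_TABLE above configures power save for one MAC
 * context; IWM_POWER_TABLE_CMD below is the device-wide equivalent.
 * In monitor mode neither command enables power save.
 */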
int
iwm_power_update_device(struct iwm_softc *sc)
{
    struct iwm_device_power_cmd cmd = { };
    struct ieee80211com *ic = &sc->sc_ic;

    if (ic->ic_opmode != IEEE80211_M_MONITOR)
        cmd.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);

    return iwm_send_cmd_pdu(sc,
        IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
}

int
iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
    struct iwm_beacon_filter_cmd cmd = {
        IWM_BF_CMD_CONFIG_DEFAULTS,
        .bf_enable_beacon_filter = htole32(1),
    };
    int err;

    iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
    err = iwm_beacon_filter_send_cmd(sc, &cmd);

    if (err == 0)
        sc->sc_bf.bf_enabled = 1;

    return err;
}

int
iwm_disable_beacon_filter(struct iwm_softc *sc)
{
    struct iwm_beacon_filter_cmd cmd;
    int err;

    memset(&cmd, 0, sizeof(cmd));

    err = iwm_beacon_filter_send_cmd(sc, &cmd);
    if (err == 0)
        sc->sc_bf.bf_enabled = 0;

    return err;
}

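/*
 * Add or update the firmware's station table entry for our AP (or a
 * dummy entry in monitor mode). Firmware with the STA_TYPE API flag
 * takes the full command; older firmware expects the shorter v7
 * layout, hence the two command sizes used below.
 */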
int
iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
{
    struct iwm_add_sta_cmd add_sta_cmd;
    int err;
    uint32_t status, aggsize;
    const uint32_t max_aggsize = (IWM_STA_FLG_MAX_AGG_SIZE_64K >>
        IWM_STA_FLG_MAX_AGG_SIZE_SHIFT);
    size_t cmdsize;
    struct ieee80211com *ic = &sc->sc_ic;

    if (!update && (sc->sc_flags & IWM_FLAG_STA_ACTIVE))
        panic("STA already added");

    memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

    if (ic->ic_opmode == IEEE80211_M_MONITOR)
        add_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
    else
        add_sta_cmd.sta_id = IWM_STATION_ID;
    if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)) {
        if (ic->ic_opmode == IEEE80211_M_MONITOR)
            add_sta_cmd.station_type = IWM_STA_GENERAL_PURPOSE;
        else
            add_sta_cmd.station_type = IWM_STA_LINK;
    }
    add_sta_cmd.mac_id_n_color
        = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
    if (ic->ic_opmode == IEEE80211_M_MONITOR) {
        int qid;
        IEEE80211_ADDR_COPY(&add_sta_cmd.addr, etheranyaddr);
        if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
            qid = IWM_DQA_INJECT_MONITOR_QUEUE;
        else
            qid = IWM_AUX_QUEUE;
        in->tfd_queue_msk |= (1 << qid);
    } else {
        int ac;
        for (ac = 0; ac < EDCA_NUM_AC; ac++) {
            int qid = ac;
            if (isset(sc->sc_enabled_capa,
                IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
                qid += IWM_DQA_MIN_MGMT_QUEUE;
            in->tfd_queue_msk |= (1 << qid);
        }
    }
    if (!update) {
        if (ic->ic_opmode == IEEE80211_M_MONITOR)
            IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
                etherbroadcastaddr);
        else
            IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
                in->in_macaddr);
    }
    add_sta_cmd.add_modify = update ? 1 : 0;
    add_sta_cmd.station_flags_msk
        |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
    if (update) {
        add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_QUEUES |
            IWM_STA_MODIFY_TID_DISABLE_TX);
    }
    add_sta_cmd.tid_disable_tx = htole16(in->tid_disable_ampdu);
    add_sta_cmd.tfd_queue_msk = htole32(in->tfd_queue_msk);

    if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
        add_sta_cmd.station_flags_msk
            |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
            IWM_STA_FLG_AGG_MPDU_DENS_MSK);

        if (iwm_mimo_enabled(sc)) {
            if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
                uint16_t rx_mcs = (in->in_ni.ni_vht_rxmcs &
                    IEEE80211_VHT_MCS_FOR_SS_MASK(2)) >>
                    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2);
                if (rx_mcs != IEEE80211_VHT_MCS_SS_NOT_SUPP) {
                    add_sta_cmd.station_flags |=
                        htole32(IWM_STA_FLG_MIMO_EN_MIMO2);
                }
            } else {
                if (in->in_ni.ni_rxmcs[1] != 0) {
                    add_sta_cmd.station_flags |=
                        htole32(IWM_STA_FLG_MIMO_EN_MIMO2);
                }
                if (in->in_ni.ni_rxmcs[2] != 0) {
                    add_sta_cmd.station_flags |=
                        htole32(IWM_STA_FLG_MIMO_EN_MIMO3);
                }
            }
        }

        if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
            ieee80211_node_supports_ht_chan40(&in->in_ni)) {
            add_sta_cmd.station_flags |= htole32(
                IWM_STA_FLG_FAT_EN_40MHZ);
        }

        if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
            if (IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
                ieee80211_node_supports_vht_chan80(&in->in_ni)) {
                add_sta_cmd.station_flags |= htole32(
                    IWM_STA_FLG_FAT_EN_80MHZ);
            }
            aggsize = (in->in_ni.ni_vhtcaps &
                IEEE80211_VHTCAP_MAX_AMPDU_LEN_MASK) >>
                IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT;
        } else {
            aggsize = (in->in_ni.ni_ampdu_param &
                IEEE80211_AMPDU_PARAM_LE);
        }
        if (aggsize > max_aggsize)
            aggsize = max_aggsize;
        add_sta_cmd.station_flags |= htole32((aggsize <<
            IWM_STA_FLG_MAX_AGG_SIZE_SHIFT) &
            IWM_STA_FLG_MAX_AGG_SIZE_MSK);

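        /*
         * Map the peer's A-MPDU density (the minimum required spacing
         * between subframes within an aggregate) onto the firmware's
         * density flags.
         */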
        switch (in->in_ni.ni_ampdu_param & IEEE80211_AMPDU_PARAM_SS) {
        case IEEE80211_AMPDU_PARAM_SS_2:
            add_sta_cmd.station_flags
                |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
            break;
        case IEEE80211_AMPDU_PARAM_SS_4:
            add_sta_cmd.station_flags
                |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
            break;
        case IEEE80211_AMPDU_PARAM_SS_8:
            add_sta_cmd.station_flags
                |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
            break;
        case IEEE80211_AMPDU_PARAM_SS_16:
            add_sta_cmd.station_flags
                |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
            break;
        default:
            break;
        }
    }

    status = IWM_ADD_STA_SUCCESS;
    if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
        cmdsize = sizeof(add_sta_cmd);
    else
        cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
    err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize,
        &add_sta_cmd, &status);
    if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
        err = EIO;

    return err;
}

int
iwm_add_aux_sta(struct iwm_softc *sc)
{
    struct iwm_add_sta_cmd cmd;
    int err, qid;
    uint32_t status;
    size_t cmdsize;

    if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
        qid = IWM_DQA_AUX_QUEUE;
        err = iwm_enable_txq(sc, IWM_AUX_STA_ID, qid,
            IWM_TX_FIFO_MCAST, 0, IWM_MAX_TID_COUNT, 0);
    } else {
        qid = IWM_AUX_QUEUE;
        err = iwm_enable_ac_txq(sc, qid, IWM_TX_FIFO_MCAST);
    }
    if (err)
        return err;

    memset(&cmd, 0, sizeof(cmd));
    cmd.sta_id = IWM_AUX_STA_ID;
    if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
        cmd.station_type = IWM_STA_AUX_ACTIVITY;
    cmd.mac_id_n_color =
        htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
    cmd.tfd_queue_msk = htole32(1 << qid);
    cmd.tid_disable_tx = htole16(0xffff);

    status = IWM_ADD_STA_SUCCESS;
    if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
        cmdsize = sizeof(cmd);
    else
        cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
    err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
        &status);
    if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
        err = EIO;

    return err;
}

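/*
 * Toggle the "drain" flag on our station. While draining, the firmware
 * should stop serving the station so that queued frames can be flushed;
 * iwm_flush_sta() below drains, flushes, then un-drains.
 */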
int
iwm_drain_sta(struct iwm_softc *sc, struct iwm_node* in, int drain)
{
    struct iwm_add_sta_cmd cmd;
    int err;
    uint32_t status;
    size_t cmdsize;

    memset(&cmd, 0, sizeof(cmd));
    cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
        in->in_color));
    cmd.sta_id = IWM_STATION_ID;
    cmd.add_modify = IWM_STA_MODE_MODIFY;
    cmd.station_flags = drain ? htole32(IWM_STA_FLG_DRAIN_FLOW) : 0;
    cmd.station_flags_msk = htole32(IWM_STA_FLG_DRAIN_FLOW);

    if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
        cmdsize = sizeof(cmd);
    else
        cmdsize = sizeof(struct iwm_add_sta_cmd_v7);

    status = IWM_ADD_STA_SUCCESS;
    err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA,
        cmdsize, &cmd, &status);
    if (err) {
        printf("%s: could not update sta (error %d)\n",
            DEVNAME(sc), err);
        return err;
    }

    switch (status & IWM_ADD_STA_STATUS_MASK) {
    case IWM_ADD_STA_SUCCESS:
        break;
    default:
        err = EIO;
        printf("%s: Couldn't %s draining for station\n",
            DEVNAME(sc), drain ? "enable" : "disable");
        break;
    }

    return err;
}

int
iwm_flush_sta(struct iwm_softc *sc, struct iwm_node *in)
{
    int err;

    sc->sc_flags |= IWM_FLAG_TXFLUSH;

    err = iwm_drain_sta(sc, in, 1);
    if (err)
        goto done;

    err = iwm_flush_tx_path(sc, in->tfd_queue_msk);
    if (err) {
        printf("%s: could not flush Tx path (error %d)\n",
            DEVNAME(sc), err);
        goto done;
    }

    /*
     * Flushing Tx rings may fail if the AP has disappeared.
     * We can rely on iwm_newstate_task() to reset everything and begin
     * scanning again if we are left with outstanding frames on queues.
     */
    err = iwm_wait_tx_queues_empty(sc);
    if (err)
        goto done;

    err = iwm_drain_sta(sc, in, 0);
done:
    sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
    return err;
}

int
iwm_rm_sta_cmd(struct iwm_softc *sc, struct iwm_node *in)
{
    struct ieee80211com *ic = &sc->sc_ic;
    struct iwm_rm_sta_cmd rm_sta_cmd;
    int err;

    if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
        panic("sta already removed");

    memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
    if (ic->ic_opmode == IEEE80211_M_MONITOR)
        rm_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
    else
        rm_sta_cmd.sta_id = IWM_STATION_ID;

    err = iwm_send_cmd_pdu(sc, IWM_REMOVE_STA, 0, sizeof(rm_sta_cmd),
        &rm_sta_cmd);

    return err;
}

uint16_t
iwm_scan_rx_chain(struct iwm_softc *sc)
{
    uint16_t rx_chain;
    uint8_t rx_ant;

    rx_ant = iwm_fw_valid_rx_ant(sc);
    rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
    rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
    rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
    rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
    return htole16(rx_chain);
}

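/*
 * Pick the Tx rate and antenna for scan probe requests. Successive
 * calls rotate through the valid Tx antennas; 1 Mbit/s CCK is used
 * on 2 GHz unless CCK is disallowed, 6 Mbit/s OFDM otherwise.
 */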
uint32_t
iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
{
    uint32_t tx_ant;
    int i, ind;

    for (i = 0, ind = sc->sc_scan_last_antenna;
        i < IWM_RATE_MCS_ANT_NUM; i++) {
        ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
        if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
            sc->sc_scan_last_antenna = ind;
            break;
        }
    }
    tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;

    if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
        return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
            tx_ant);
    else
        return htole32(IWM_RATE_6M_PLCP | tx_ant);
}

uint8_t
iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_lmac *chan, int n_ssids, int bgscan)
{
    struct ieee80211com *ic = &sc->sc_ic;
    struct ieee80211_channel *c;
    uint8_t nchan;

    for (nchan = 0, c = &ic->ic_channels[1];
        c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
        nchan < sc->sc_capa_n_scan_channels;
        c++) {
        if (c->ic_flags == 0)
            continue;

        chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
        chan->iter_count = htole16(1);
        chan->iter_interval = 0;
        chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
        if (n_ssids != 0 && !bgscan)
            chan->flags |= htole32(1 << 1); /* select SSID 0 */
        chan++;
        nchan++;
    }

    return nchan;
}

uint8_t
iwm_umac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
{
    struct ieee80211com *ic = &sc->sc_ic;
    struct ieee80211_channel *c;
    uint8_t nchan;

    for (nchan = 0, c = &ic->ic_channels[1];
        c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
        nchan < sc->sc_capa_n_scan_channels;
        c++) {
        if (c->ic_flags == 0)
            continue;

        chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
        chan->iter_count = 1;
        chan->iter_interval = htole16(0);
        if (n_ssids != 0 && !bgscan)
            chan->flags = htole32(1 << 0); /* select SSID 0 */
        chan++;
        nchan++;
    }

    return nchan;
}

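/*
 * The firmware takes a pre-built probe request template containing
 * offsets to the MAC header, the per-band IEs, and the common IEs.
 * The v1 template only differs in buffer layout, so it is derived
 * from the current layout built by iwm_fill_probe_req().
 */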
int
iwm_fill_probe_req_v1(struct iwm_softc *sc, struct iwm_scan_probe_req_v1 *preq1)
{
    struct iwm_scan_probe_req preq2;
    int err, i;

    err = iwm_fill_probe_req(sc, &preq2);
    if (err)
        return err;

    preq1->mac_header = preq2.mac_header;
    for (i = 0; i < nitems(preq1->band_data); i++)
        preq1->band_data[i] = preq2.band_data[i];
    preq1->common_data = preq2.common_data;
    memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
    return 0;
}

int
iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
{
    struct ieee80211com *ic = &sc->sc_ic;
    struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
    struct ieee80211_rateset *rs;
    size_t remain = sizeof(preq->buf);
    uint8_t *frm, *pos;

    memset(preq, 0, sizeof(*preq));

    if (remain < sizeof(*wh) + 2)
        return ENOBUFS;

    /*
     * Build a probe request frame. Most of the following code is a
     * copy & paste of what is done in net80211.
     */
    wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
        IEEE80211_FC0_SUBTYPE_PROBE_REQ;
    wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
    IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
    IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
    IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
    *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
    *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */

    frm = (uint8_t *)(wh + 1);

    *frm++ = IEEE80211_ELEMID_SSID;
    *frm++ = 0;
    /* hardware inserts SSID */

    /* Tell firmware where the MAC header and SSID IE are. */
    preq->mac_header.offset = 0;
    preq->mac_header.len = htole16(frm - (uint8_t *)wh);
    remain -= frm - (uint8_t *)wh;

    /* Fill in 2GHz IEs and tell firmware where they are. */
    rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
    if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
        if (remain < 4 + rs->rs_nrates)
            return ENOBUFS;
    } else if (remain < 2 + rs->rs_nrates)
        return ENOBUFS;
    preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
    pos = frm;
    frm = ieee80211_add_rates(frm, rs);
    if (rs->rs_nrates > IEEE80211_RATE_SIZE)
        frm = ieee80211_add_xrates(frm, rs);
    remain -= frm - pos;

    if (isset(sc->sc_enabled_capa,
        IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
        if (remain < 3)
            return ENOBUFS;
        *frm++ = IEEE80211_ELEMID_DSPARMS;
        *frm++ = 1;
        *frm++ = 0;
        remain -= 3;
    }
    preq->band_data[0].len = htole16(frm - pos);

    if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
        /* Fill in 5GHz IEs. */
        rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
        if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
            if (remain < 4 + rs->rs_nrates)
                return ENOBUFS;
        } else if (remain < 2 + rs->rs_nrates)
            return ENOBUFS;
        preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
        pos = frm;
        frm = ieee80211_add_rates(frm, rs);
        if (rs->rs_nrates > IEEE80211_RATE_SIZE)
            frm = ieee80211_add_xrates(frm, rs);
        preq->band_data[1].len = htole16(frm - pos);
        remain -= frm - pos;
        if (ic->ic_flags & IEEE80211_F_VHTON) {
            if (remain < 14)
                return ENOBUFS;
            frm = ieee80211_add_vhtcaps(frm, ic);
            remain -= frm - pos;
            preq->band_data[1].len = htole16(frm - pos);
        }
    }

    /* Send 11n IEs on both 2GHz and 5GHz bands. */
    preq->common_data.offset = htole16(frm - (uint8_t *)wh);
    pos = frm;
    if (ic->ic_flags & IEEE80211_F_HTON) {
        if (remain < 28)
            return ENOBUFS;
        frm = ieee80211_add_htcaps(frm, ic);
        /* XXX add WME info? */
        remain -= frm - pos;
    }

    preq->common_data.len = htole16(frm - pos);

    return 0;
}

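/*
 * A scan is requested either through the older LMAC scan API below or
 * the newer UMAC scan API further down; which one applies is presumably
 * decided elsewhere based on the capabilities the loaded firmware
 * advertises. Both build a single command holding dwell times, a
 * channel list, and the probe request template.
 */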
int
iwm_lmac_scan(struct iwm_softc *sc, int bgscan)
{
    struct ieee80211com *ic = &sc->sc_ic;
    struct iwm_host_cmd hcmd = {
        .id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
        .len = { 0, },
        .data = { NULL, },
        .flags = 0,
    };
    struct iwm_scan_req_lmac *req;
    struct iwm_scan_probe_req_v1 *preq;
    size_t req_len;
    int err, async = bgscan;

    req_len = sizeof(struct iwm_scan_req_lmac) +
        (sizeof(struct iwm_scan_channel_cfg_lmac) *
        sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req_v1);
    if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
        return ENOMEM;
    req = malloc(req_len, M_DEVBUF,
        (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
    if (req == NULL)
        return ENOMEM;

    hcmd.len[0] = (uint16_t)req_len;
    hcmd.data[0] = (void *)req;
    hcmd.flags |= async ? IWM_CMD_ASYNC : 0;

    /* These timings correspond to iwlwifi's UNASSOC scan. */
    req->active_dwell = 10;
    req->passive_dwell = 110;
    req->fragmented_dwell = 44;
    req->extended_dwell = 90;
    if (bgscan) {
        req->max_out_time = htole32(120);
        req->suspend_time = htole32(120);
    } else {
        req->max_out_time = htole32(0);
        req->suspend_time = htole32(0);
    }
    req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
    req->rx_chain_select = iwm_scan_rx_chain(sc);
    req->iter_num = htole32(1);
    req->delay = 0;

    req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
        IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
        IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
    if (ic->ic_des_esslen == 0)
        req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
    else
        req->scan_flags |=
            htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
    if (isset(sc->sc_enabled_capa,
        IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
        isset(sc->sc_enabled_capa,
        IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
        req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);

    req->flags = htole32(IWM_PHY_BAND_24);
    if (sc->sc_nvm.sku_cap_band_52GHz_enable)
        req->flags |= htole32(IWM_PHY_BAND_5);
    req->filter_flags =
        htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);

    /* Tx flags 2 GHz. */
    req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
        IWM_TX_CMD_FLG_BT_DIS);
    req->tx_cmd[0].rate_n_flags =
        iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
    req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;

    /* Tx flags 5 GHz. */
    req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
        IWM_TX_CMD_FLG_BT_DIS);
    req->tx_cmd[1].rate_n_flags =
        iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
    req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;

    /* Check if we're doing an active directed scan. */
    if (ic->ic_des_esslen != 0) {
        req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
        req->direct_scan[0].len = ic->ic_des_esslen;
        memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
            ic->ic_des_esslen);
    }

    req->n_channels = iwm_lmac_scan_fill_channels(sc,
        (struct iwm_scan_channel_cfg_lmac *)req->data,
        ic->ic_des_esslen != 0, bgscan);

    preq = (struct iwm_scan_probe_req_v1 *)(req->data +
        (sizeof(struct iwm_scan_channel_cfg_lmac) *
        sc->sc_capa_n_scan_channels));
    err = iwm_fill_probe_req_v1(sc, preq);
    if (err) {
        free(req, M_DEVBUF, req_len);
        return err;
    }

    /* Specify the scan plan: We'll do one iteration. */
    req->schedule[0].iterations = 1;
    req->schedule[0].full_scan_mul = 1;

    /* Disable EBS. */
    req->channel_opt[0].non_ebs_ratio = 1;
    req->channel_opt[1].non_ebs_ratio = 1;

    err = iwm_send_cmd(sc, &hcmd);
    free(req, M_DEVBUF, req_len);
    return err;
}

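/*
 * One-time UMAC scan configuration: this tells the firmware our MAC
 * address, the valid Tx/Rx antenna chains, the legacy rate set, dwell
 * times, and the list of channels it is allowed to scan.
 */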
int
iwm_config_umac_scan(struct iwm_softc *sc)
{
    struct ieee80211com *ic = &sc->sc_ic;
    struct iwm_scan_config *scan_config;
    int err, nchan;
    size_t cmd_size;
    struct ieee80211_channel *c;
    struct iwm_host_cmd hcmd = {
        .id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_LONG_GROUP, 0),
        .flags = 0,
    };
    static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
        IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
        IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
        IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
        IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
        IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
        IWM_SCAN_CONFIG_RATE_54M);

    cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;

    scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
    if (scan_config == NULL)
        return ENOMEM;

    scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
    scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
    scan_config->legacy_rates = htole32(rates |
        IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));

    /* These timings correspond to iwlwifi's UNASSOC scan. */
    scan_config->dwell_active = 10;
    scan_config->dwell_passive = 110;
    scan_config->dwell_fragmented = 44;
    scan_config->dwell_extended = 90;
    scan_config->out_of_channel_time = htole32(0);
    scan_config->suspend_time = htole32(0);

    IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);

    scan_config->bcast_sta_id = IWM_AUX_STA_ID;
    scan_config->channel_flags = 0;

    for (c = &ic->ic_channels[1], nchan = 0;
        c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
        nchan < sc->sc_capa_n_scan_channels; c++) {
        if (c->ic_flags == 0)
            continue;
        scan_config->channel_array[nchan++] =
            ieee80211_mhz2ieee(c->ic_freq, 0);
    }

    scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
        IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
        IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
        IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
        IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
        IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
        IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
        IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
        IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
        IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
        IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

    hcmd.data[0] = scan_config;
    hcmd.len[0] = cmd_size;

    err = iwm_send_cmd(sc, &hcmd);
    free(scan_config, M_DEVBUF, cmd_size);
    return err;
}

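/*
 * The UMAC scan request layout has been revised several times; both
 * the request size and the location of the channel config and tail
 * data depend on which adaptive-dwell and extended-channel API bits
 * the firmware sets.
 */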
int
iwm_umac_scan_size(struct iwm_softc *sc)
{
    int base_size = IWM_SCAN_REQ_UMAC_SIZE_V1;
    int tail_size;

    if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
        base_size = IWM_SCAN_REQ_UMAC_SIZE_V8;
    else if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
        base_size = IWM_SCAN_REQ_UMAC_SIZE_V7;
    if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
        tail_size = sizeof(struct iwm_scan_req_umac_tail_v2);
    else
        tail_size = sizeof(struct iwm_scan_req_umac_tail_v1);

    return base_size + sizeof(struct iwm_scan_channel_cfg_umac) *
        sc->sc_capa_n_scan_channels + tail_size;
}

struct iwm_scan_umac_chan_param *
iwm_get_scan_req_umac_chan_param(struct iwm_softc *sc,
    struct iwm_scan_req_umac *req)
{
    if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
        return &req->v8.channel;

    if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
        return &req->v7.channel;

    return &req->v1.channel;
}

void *
iwm_get_scan_req_umac_data(struct iwm_softc *sc, struct iwm_scan_req_umac *req)
{
    if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
        return (void *)&req->v8.data;

    if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
        return (void *)&req->v7.data;

    return (void *)&req->v1.data;
}

/* adaptive dwell max budget time [TU] for full scan */
#define IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN	300
/* adaptive dwell max budget time [TU] for directed scan */
#define IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN	100
/* adaptive dwell default high band APs number */
#define IWM_SCAN_ADWELL_DEFAULT_HB_N_APS	8
/* adaptive dwell default low band APs number */
#define IWM_SCAN_ADWELL_DEFAULT_LB_N_APS	2
/* adaptive dwell default APs number in social channels (1, 6, 11) */
#define IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL	10

int
iwm_umac_scan(struct iwm_softc *sc, int bgscan)
{
    struct ieee80211com *ic = &sc->sc_ic;
    struct iwm_host_cmd hcmd = {
        .id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_LONG_GROUP, 0),
        .len = { 0, },
        .data = { NULL, },
        .flags = 0,
    };
    struct iwm_scan_req_umac *req;
    void *cmd_data, *tail_data;
    struct iwm_scan_req_umac_tail_v2 *tail;
    struct iwm_scan_req_umac_tail_v1 *tailv1;
    struct iwm_scan_umac_chan_param *chanparam;
    size_t req_len;
    int err, async = bgscan;

    req_len = iwm_umac_scan_size(sc);
    if ((req_len < IWM_SCAN_REQ_UMAC_SIZE_V1 +
        sizeof(struct iwm_scan_req_umac_tail_v1)) ||
        req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
        return ERANGE;
    req = malloc(req_len, M_DEVBUF,
        (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
    if (req == NULL)
        return ENOMEM;

    hcmd.len[0] = (uint16_t)req_len;
    hcmd.data[0] = (void *)req;
    hcmd.flags |= async ? IWM_CMD_ASYNC : 0;

    if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
        req->v7.adwell_default_n_aps_social =
            IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
        req->v7.adwell_default_n_aps =
            IWM_SCAN_ADWELL_DEFAULT_LB_N_APS;

        if (ic->ic_des_esslen != 0)
            req->v7.adwell_max_budget =
                htole16(IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
        else
            req->v7.adwell_max_budget =
                htole16(IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);

        req->v7.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
        req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = 0;
        req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = 0;

        if (isset(sc->sc_ucode_api,
            IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
            req->v8.active_dwell[IWM_SCAN_LB_LMAC_IDX] = 10;
            req->v8.passive_dwell[IWM_SCAN_LB_LMAC_IDX] = 110;
        } else {
            req->v7.active_dwell = 10;
            req->v7.passive_dwell = 110;
            req->v7.fragmented_dwell = 44;
        }
    } else {
        /* These timings correspond to iwlwifi's UNASSOC scan. */
        req->v1.active_dwell = 10;
        req->v1.passive_dwell = 110;
        req->v1.fragmented_dwell = 44;
        req->v1.extended_dwell = 90;

        req->v1.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
    }

    if (bgscan) {
        const uint32_t timeout = htole32(120);
        if (isset(sc->sc_ucode_api,
            IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
            req->v8.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
            req->v8.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
        } else if (isset(sc->sc_ucode_api,
            IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
            req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
            req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
        } else {
            req->v1.max_out_time = timeout;
            req->v1.suspend_time = timeout;
        }
    }

    req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);

    cmd_data = iwm_get_scan_req_umac_data(sc, req);
    chanparam = iwm_get_scan_req_umac_chan_param(sc, req);
    chanparam->count = iwm_umac_scan_fill_channels(sc,
        (struct iwm_scan_channel_cfg_umac *)cmd_data,
        ic->ic_des_esslen != 0, bgscan);
    chanparam->flags = 0;

    tail_data = cmd_data + sizeof(struct iwm_scan_channel_cfg_umac) *
        sc->sc_capa_n_scan_channels;
    tail = tail_data;
    /* tail v1 layout differs in preq and direct_scan member fields. */
    tailv1 = tail_data;

    req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
        IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
    if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
        req->v8.general_flags2 =
            IWM_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
    }

    if (ic->ic_des_esslen != 0) {
        if (isset(sc->sc_ucode_api,
            IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
            tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
            tail->direct_scan[0].len = ic->ic_des_esslen;
            memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
                ic->ic_des_esslen);
        } else {
            tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
            tailv1->direct_scan[0].len = ic->ic_des_esslen;
            memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
                ic->ic_des_esslen);
        }
        req->general_flags |=
            htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
    } else
        req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);

    if (isset(sc->sc_enabled_capa,
        IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
        isset(sc->sc_enabled_capa,
        IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
        req->general_flags |=
            htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);

    if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
        req->general_flags |=
            htole32(IWM_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
    } else {
        req->general_flags |=
            htole32(IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
    }

    if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
        err = iwm_fill_probe_req(sc, &tail->preq);
    else
        err = iwm_fill_probe_req_v1(sc, &tailv1->preq);
    if (err) {
        free(req, M_DEVBUF, req_len);
        return err;
    }

    /* Specify the scan plan: We'll do one iteration. */
    tail->schedule[0].interval = 0;
    tail->schedule[0].iter_count = 1;

    err = iwm_send_cmd(sc, &hcmd);
    free(req, M_DEVBUF, req_len);
    return err;
}

void
iwm_mcc_update(struct iwm_softc *sc, struct iwm_mcc_chub_notif *notif)
{
    struct ieee80211com *ic = &sc->sc_ic;
    struct ifnet *ifp = IC2IFP(ic);
    char alpha2[3];

    snprintf(alpha2, sizeof(alpha2), "%c%c",
        (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);

    if (ifp->if_flags & IFF_DEBUG) {
        printf("%s: firmware has detected regulatory domain '%s' "
            "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
    }

    /* TODO: Schedule a task to send MCC_UPDATE_CMD? */
}

uint8_t
iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
{
    int i;
    uint8_t rval;

    for (i = 0; i < rs->rs_nrates; i++) {
        rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
        if (rval == iwm_rates[ridx].rate)
            return rs->rs_rates[i];
    }

    return 0;
}

int
iwm_rval2ridx(int rval)
{
    int ridx;

    for (ridx = 0; ridx < nitems(iwm_rates); ridx++) {
        if (iwm_rates[ridx].plcp == IWM_RATE_INVM_PLCP)
            continue;
        if (rval == iwm_rates[ridx].rate)
            break;
    }

    return ridx;
}

void
iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
    int *ofdm_rates)
{
    struct ieee80211_node *ni = &in->in_ni;
    struct ieee80211_rateset *rs = &ni->ni_rates;
    int lowest_present_ofdm = -1;
    int lowest_present_cck = -1;
    uint8_t cck = 0;
    uint8_t ofdm = 0;
    int i;

    if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
        IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
        for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
            if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
                continue;
            cck |= (1 << i);
            if (lowest_present_cck == -1 || lowest_present_cck > i)
                lowest_present_cck = i;
        }
    }
    for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
        if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
            continue;
        ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
        if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
            lowest_present_ofdm = i;
    }

    /*
     * Now we've got the basic rates as bitmaps in the ofdm and cck
     * variables. This isn't sufficient though, as there might not
     * be all the right rates in the bitmap. E.g. if the only basic
     * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
     * and 6 Mbps because the 802.11-2007 standard says in 9.6:
     *
     *    [...] a STA responding to a received frame shall transmit
     *    its Control Response frame [...] at the highest rate in the
     *    BSSBasicRateSet parameter that is less than or equal to the
     *    rate of the immediately previous frame in the frame exchange
     *    sequence ([...]) and that is of the same modulation class
     *    ([...]) as the received frame. If no rate contained in the
     *    BSSBasicRateSet parameter meets these conditions, then the
     *    control frame sent in response to a received frame shall be
     *    transmitted at the highest mandatory rate of the PHY that is
     *    less than or equal to the rate of the received frame, and
     *    that is of the same modulation class as the received frame.
     *
     * As a consequence, we need to add all mandatory rates that are
     * lower than all of the basic rates to these bitmaps.
     */

    if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
        ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
    if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
        ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
    /* 6M already there or needed so always add */
    ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

    /*
     * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
     * Note, however:
     *  - if no CCK rates are basic, it must be ERP since there must
     *    be some basic rates at all, so they're OFDM => ERP PHY
     *    (or we're in 5 GHz, and the cck bitmap will never be used)
     *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
     *  - if 5.5M is basic, 1M and 2M are mandatory
     *  - if 2M is basic, 1M is mandatory
     *  - if 1M is basic, that's the only valid ACK rate.
     * As a consequence, it's not as complicated as it sounds, just add
     * any lower rates to the ACK rate bitmap.
     */
    if (IWM_RATE_11M_INDEX < lowest_present_cck)
        cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
    if (IWM_RATE_5M_INDEX < lowest_present_cck)
        cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
    if (IWM_RATE_2M_INDEX < lowest_present_cck)
        cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
    /* 1M already there or needed so always add */
    cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

    *cck_rates = cck;
    *ofdm_rates = ofdm;
}

void
iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_ctx_cmd *cmd, uint32_t action)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
    struct ieee80211com *ic = &sc->sc_ic;
    struct ieee80211_node *ni = ic->ic_bss;
    int cck_ack_rates, ofdm_ack_rates;
    int i;

    cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
        in->in_color));
    cmd->action = htole32(action);

    if (ic->ic_opmode == IEEE80211_M_MONITOR)
        cmd->mac_type = htole32(IWM_FW_MAC_TYPE_LISTENER);
    else if (ic->ic_opmode == IEEE80211_M_STA)
        cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
    else
        panic("unsupported operating mode %d", ic->ic_opmode);
    cmd->tsf_id = htole32(IWM_TSF_ID_A);

    IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
    if (ic->ic_opmode == IEEE80211_M_MONITOR) {
        IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
        return;
    }

    IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
    iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
    cmd->cck_rates = htole32(cck_ack_rates);
    cmd->ofdm_rates = htole32(ofdm_ack_rates);

    cmd->cck_short_preamble
        = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
        ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
    cmd->short_slot
        = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
        ? IWM_MAC_FLG_SHORT_SLOT : 0);

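    /*
     * Translate the per-access-category EDCA parameters into the
     * firmware's per-FIFO config: CWmin/CWmax come from their exponent
     * form via IWM_EXP2, and the TXOP limit is converted from its
     * 802.11 units of 32 microseconds into microseconds.
     */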
    for (i = 0; i < EDCA_NUM_AC; i++) {
        struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
        int txf = iwm_ac_to_tx_fifo[i];

        cmd->ac[txf].cw_min = htole16(IWM_EXP2(ac->ac_ecwmin));
        cmd->ac[txf].cw_max = htole16(IWM_EXP2(ac->ac_ecwmax));
        cmd->ac[txf].aifsn = ac->ac_aifsn;
        cmd->ac[txf].fifos_mask = (1 << txf);
        cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
    }
    if (ni->ni_flags & IEEE80211_NODE_QOS)
        cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);

    if (ni->ni_flags & IEEE80211_NODE_HT) {
        enum ieee80211_htprot htprot =
            (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
        switch (htprot) {
        case IEEE80211_HTPROT_NONE:
            break;
        case IEEE80211_HTPROT_NONMEMBER:
        case IEEE80211_HTPROT_NONHT_MIXED:
            cmd->protection_flags |=
                htole32(IWM_MAC_PROT_FLG_HT_PROT |
                IWM_MAC_PROT_FLG_FAT_PROT);
            break;
        case IEEE80211_HTPROT_20MHZ:
            if (in->in_phyctxt &&
                (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
                in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
                cmd->protection_flags |=
                    htole32(IWM_MAC_PROT_FLG_HT_PROT |
                    IWM_MAC_PROT_FLG_FAT_PROT);
            }
            break;
        default:
            break;
        }

        cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
    }
    if (ic->ic_flags & IEEE80211_F_USEPROT)
        cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

    cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
#undef IWM_EXP2
}

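/*
 * Fill in association state and beacon/DTIM timing so the firmware
 * can wake up in time for DTIM beacons while in power save. The
 * reciprocal fields appear to spare the firmware a division.
 */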
8348 void
8349 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
8350     struct iwm_mac_data_sta *sta, int assoc)
8351 {
8352     struct ieee80211_node *ni = &in->in_ni;
8353     uint32_t dtim_off;
8354     uint64_t tsf;
8355
8356     dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
8357     memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
8358     tsf = letoh64(tsf);
8359
8360     sta->is_assoc = htole32(assoc);
8361     sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
8362     sta->dtim_tsf = htole64(tsf + dtim_off);
8363     sta->bi = htole32(ni->ni_intval);
8364     sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
8365     sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
8366     sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
8367     sta->listen_interval = htole32(10);
8368     sta->assoc_id = htole32(ni->ni_associd);
8369     sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
8370 }
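/*
 * [editor's note] A sketch of the DTIM arithmetic above, assuming (as the
 * macro expansions in this report show) IEEE80211_DUR_TU == 1024, i.e.
 * one time unit (TU) is 1024 microseconds: with a hypothetical beacon
 * interval ni_intval = 100 TU and ni_dtimcount = 2 beacons remaining
 * until the next DTIM,
 *
 *     dtim_off = 2 * 100 * 1024 = 204800 microseconds,
 *
 * which is added to the beacon arrival timestamp (ni_rstamp) and to the
 * beacon TSF to tell the firmware when the next DTIM beacon is expected.
 */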
8371
8372 int
8373 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
8374     int assoc)
8375 {
8376     struct ieee80211com *ic = &sc->sc_ic;
8377     struct ieee80211_node *ni = &in->in_ni;
8378     struct iwm_mac_ctx_cmd cmd;
8379     int active = (sc->sc_flags & IWM_FLAG_MAC_ACTIVE);
8380
8381     if (action == IWM_FW_CTXT_ACTION_ADD && active)
8382         panic("MAC already added");
8383     if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
8384         panic("MAC already removed");
8385
8386     memset(&cmd, 0, sizeof(cmd));
8387
8388     iwm_mac_ctxt_cmd_common(sc, in, &cmd, action);
8389
8390     if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8391         cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_PROMISC |
8392             IWM_MAC_FILTER_IN_CONTROL_AND_MGMT |
8393             IWM_MAC_FILTER_ACCEPT_GRP |
8394             IWM_MAC_FILTER_IN_BEACON |
8395             IWM_MAC_FILTER_IN_PROBE_REQUEST |
8396             IWM_MAC_FILTER_IN_CRC32);
8397     } else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
8398         /*
8399          * Allow beacons to pass through as long as we are not
8400          * associated or we do not have dtim period information.
8401          */
8402         cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
8403     else
8404         iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
8405
8406     return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
8407 }
8408
8409 int
8410 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in, int running)
8411 {
8412     struct iwm_time_quota_cmd_v1 cmd;
8413     int i, idx, num_active_macs, quota, quota_rem;
8414     int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
8415     int n_ifs[IWM_MAX_BINDINGS] = {0, };
8416     uint16_t id;
8417
8418     memset(&cmd, 0, sizeof(cmd));
8419
8420     /* currently, PHY ID == binding ID */
8421     if (in && in->in_phyctxt) {
8422         id = in->in_phyctxt->id;
8423         KASSERT(id < IWM_MAX_BINDINGS);
8424         colors[id] = in->in_phyctxt->color;
8425         if (running)
8426             n_ifs[id] = 1;
8427     }
8428
8429     /*
8430      * The FW's scheduling session consists of
8431      * IWM_MAX_QUOTA fragments. Divide these fragments
8432      * equally between all the bindings that require quota
8433      */
8434     num_active_macs = 0;
8435     for (i = 0; i < IWM_MAX_BINDINGS; i++) {
8436         cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
8437         num_active_macs += n_ifs[i];
8438     }
8439
8440     quota = 0;
8441     quota_rem = 0;
8442     if (num_active_macs) {
8443         quota = IWM_MAX_QUOTA / num_active_macs;
8444         quota_rem = IWM_MAX_QUOTA % num_active_macs;
8445     }
8446
8447     for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
8448         if (colors[i] < 0)
8449             continue;
8450
8451         cmd.quotas[idx].id_and_color =
8452             htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
8453
8454         if (n_ifs[i] <= 0) {
8455             cmd.quotas[idx].quota = htole32(0);
8456             cmd.quotas[idx].max_duration = htole32(0);
8457         } else {
8458             cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
8459             cmd.quotas[idx].max_duration = htole32(0);
8460         }
8461         idx++;
8462     }
8463
8464     /* Give the remainder of the session to the first binding */
8465     cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
8466
8467     if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_QUOTA_LOW_LATENCY)) {
8468         struct iwm_time_quota_cmd cmd_v2;
8469
8470         memset(&cmd_v2, 0, sizeof(cmd_v2));
8471         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
8472             cmd_v2.quotas[i].id_and_color =
8473                 cmd.quotas[i].id_and_color;
8474             cmd_v2.quotas[i].quota = cmd.quotas[i].quota;
8475             cmd_v2.quotas[i].max_duration =
8476                 cmd.quotas[i].max_duration;
8477         }
8478         return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
8479             sizeof(cmd_v2), &cmd_v2);
8480     }
8481
8482     return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
8483 }
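/*
 * [editor's note] Worked example of the split above, using the value
 * IWM_MAX_QUOTA == 128 shown by the macro expansions in this report:
 * with one running MAC, quota = 128 / 1 = 128 and quota_rem = 0; in a
 * hypothetical three-binding configuration, quota = 128 / 3 = 42 and
 * quota_rem = 128 % 3 = 2, and the two leftover fragments are added to
 * the first binding's quota so the fragments always sum to 128.
 */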
8484
8485 void
8486 iwm_add_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
8487 {
8488     int s = splnet();
8489
8490     if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
8491         splx(s);
8492         return;
8493     }
8494
8495     refcnt_take(&sc->task_refs);
8496     if (!task_add(taskq, task))
8497         refcnt_rele_wake(&sc->task_refs);
8498     splx(s);
8499 }
8500
8501 void
8502 iwm_del_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
8503 {
8504     if (task_del(taskq, task))
8505         refcnt_rele(&sc->task_refs);
8506 }
8507
8508 int
8509 iwm_scan(struct iwm_softc *sc)
8510 {
8511     struct ieee80211com *ic = &sc->sc_ic;
8512     struct ifnet *ifp = IC2IFP(ic);
8513     int err;
8514
8515     if (sc->sc_flags & IWM_FLAG_BGSCAN) {
8516         err = iwm_scan_abort(sc);
8517         if (err) {
8518             printf("%s: could not abort background scan\n",
8519                 DEVNAME(sc));
8520             return err;
8521         }
8522     }
8523
8524     if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8525         err = iwm_umac_scan(sc, 0);
8526     else
8527         err = iwm_lmac_scan(sc, 0);
8528     if (err) {
8529         printf("%s: could not initiate scan\n", DEVNAME(sc));
8530         return err;
8531     }
8532
8533     /*
8534      * The current mode might have been fixed during association.
8535      * Ensure all channels get scanned.
8536      */
8537     if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
8538         ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
8539
8540     sc->sc_flags |= IWM_FLAG_SCANNING;
8541     if (ifp->if_flags & IFF_DEBUG)
8542         printf("%s: %s -> %s\n", ifp->if_xname,
8543             ieee80211_state_name[ic->ic_state],
8544             ieee80211_state_name[IEEE80211_S_SCAN]);
8545     if ((sc->sc_flags & IWM_FLAG_BGSCAN) == 0) {
8546         ieee80211_set_link_state(ic, LINK_STATE_DOWN);
8547         ieee80211_node_cleanup(ic, ic->ic_bss);
8548     }
8549     ic->ic_state = IEEE80211_S_SCAN;
8550     iwm_led_blink_start(sc);
8551     wakeup(&ic->ic_state); /* wake iwm_init() */
8552
8553     return 0;
8554 }
8555
8556 int
8557 iwm_bgscan(struct ieee80211com *ic)
8558 {
8559     struct iwm_softc *sc = IC2IFP(ic)->if_softc;
8560     int err;
8561
8562     if (sc->sc_flags & IWM_FLAG_SCANNING)
8563         return 0;
8564
8565     if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8566         err = iwm_umac_scan(sc, 1);
8567     else
8568         err = iwm_lmac_scan(sc, 1);
8569     if (err) {
8570         printf("%s: could not initiate scan\n", DEVNAME(sc));
8571         return err;
8572     }
8573
8574     sc->sc_flags |= IWM_FLAG_BGSCAN;
8575     return 0;
8576 }
8577
8578 void
8579 iwm_bgscan_done(struct ieee80211com *ic,
8580     struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
8581 {
8582     struct iwm_softc *sc = ic->ic_softc;
8583
8584     free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8585     sc->bgscan_unref_arg = arg;
8586     sc->bgscan_unref_arg_size = arg_size;
8587     iwm_add_task(sc, systq, &sc->bgscan_done_task);
8588 }
8589
8590 void
8591 iwm_bgscan_done_task(void *arg)
8592 {
8593     struct iwm_softc *sc = arg;
8594     struct ieee80211com *ic = &sc->sc_ic;
8595     struct iwm_node *in = (void *)ic->ic_bss;
8596     struct ieee80211_node *ni = &in->in_ni;
8597     int tid, err = 0, s = splnet();
8598
8599     if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
8600         (ic->ic_flags & IEEE80211_F_BGSCAN) == 0 ||
8601         ic->ic_state != IEEE80211_S_RUN) {
8602         err = ENXIO;
8603         goto done;
8604     }
8605
8606     for (tid = 0; tid < IWM_MAX_TID_COUNT; tid++) {
8607         int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
8608
8609         if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
8610             continue;
8611
8612         err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
8613         if (err)
8614             goto done;
8615         err = iwm_disable_txq(sc, IWM_STATION_ID, qid, tid);
8616         if (err)
8617             goto done;
8618         in->tfd_queue_msk &= ~(1 << qid);
8619 #if 0 /* disabled for now; we are going to DEAUTH soon anyway */
8620         IEEE80211_SEND_ACTION(ic, ni, IEEE80211_CATEG_BA,
8621             IEEE80211_ACTION_DELBA,
8622             IEEE80211_REASON_AUTH_LEAVE << 16 |
8623             IEEE80211_FC1_DIR_TODS << 8 | tid);
8624 #endif
8625         ieee80211_node_tx_ba_clear(ni, tid);
8626     }
8627
8628     err = iwm_flush_sta(sc, in);
8629     if (err)
8630         goto done;
8631
8632     /*
8633      * Tx queues have been flushed and Tx agg has been stopped.
8634      * Allow roaming to proceed.
8635      */
8636     ni->ni_unref_arg = sc->bgscan_unref_arg;
8637     ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
8638     sc->bgscan_unref_arg = NULL;
8639     sc->bgscan_unref_arg_size = 0;
8640     ieee80211_node_tx_stopped(ic, &in->in_ni);
8641 done:
8642     if (err) {
8643         free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8644         sc->bgscan_unref_arg = NULL;
8645         sc->bgscan_unref_arg_size = 0;
8646         if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
8647             task_add(systq, &sc->init_task);
8648     }
8649     refcnt_rele_wake(&sc->task_refs);
8650     splx(s);
8651 }
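/*
 * [editor's note] The qid arithmetic above pins each traffic identifier
 * to a fixed aggregation Tx queue: qid = IWM_FIRST_AGG_TX_QUEUE + tid.
 * With the constants visible in this report (IWM_FIRST_AGG_TX_QUEUE == 10,
 * IWM_MAX_TID_COUNT == 8), TIDs 0..7 map to queues 10..17, and
 * tx_ba_queue_mask records which of those queues currently carry an
 * active Tx block-ack agreement.
 */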
8652
8653 int
8654 iwm_umac_scan_abort(struct iwm_softc *sc)
8655 {
8656     struct iwm_umac_scan_abort cmd = { 0 };
8657
8658     return iwm_send_cmd_pdu(sc,
8659         IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC),
8660         0, sizeof(cmd), &cmd);
8661 }
8662
8663 int
8664 iwm_lmac_scan_abort(struct iwm_softc *sc)
8665 {
8666     struct iwm_host_cmd cmd = {
8667         .id = IWM_SCAN_OFFLOAD_ABORT_CMD,
8668     };
8669     int err, status;
8670
8671     err = iwm_send_cmd_status(sc, &cmd, &status);
8672     if (err)
8673         return err;
8674
8675     if (status != IWM_CAN_ABORT_STATUS) {
8676         /*
8677          * The scan abort will return 1 for success or
8678          * 2 for "failure". A failure condition can be
8679          * due to simply not being in an active scan which
8680          * can occur if we send the scan abort before the
8681          * microcode has notified us that a scan is completed.
8682          */
8683         return EBUSY;
8684     }
8685
8686     return 0;
8687 }
8688
8689 int
8690 iwm_scan_abort(struct iwm_softc *sc)
8691 {
8692     int err;
8693
8694     if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8695         err = iwm_umac_scan_abort(sc);
8696     else
8697         err = iwm_lmac_scan_abort(sc);
8698
8699     if (err == 0)
8700         sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
8701     return err;
8702 }
8703
8704 int
8705 iwm_phy_ctxt_update(struct iwm_softc *sc, struct iwm_phy_ctxt *phyctxt,
8706     struct ieee80211_channel *chan, uint8_t chains_static,
8707     uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
8708     uint8_t vht_chan_width)
8709 {
8710     uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
8711     int err;
8712
8713     if (isset(sc->sc_enabled_capa,
8714         IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
8715         (phyctxt->channel->ic_flags & band_flags) !=
8716         (chan->ic_flags & band_flags)) {
8717         err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8718             chains_dynamic, IWM_FW_CTXT_ACTION_REMOVE, apply_time, sco,
8719             vht_chan_width);
8720         if (err) {
8721             printf("%s: could not remove PHY context "
8722                 "(error %d)\n", DEVNAME(sc), err);
8723             return err;
8724         }
8725         phyctxt->channel = chan;
8726         err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8727             chains_dynamic, IWM_FW_CTXT_ACTION_ADD, apply_time, sco,
8728             vht_chan_width);
8729         if (err) {
8730             printf("%s: could not add PHY context "
8731                 "(error %d)\n", DEVNAME(sc), err);
8732             return err;
8733         }
8734     } else {
8735         phyctxt->channel = chan;
8736         err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8737             chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, apply_time, sco,
8738             vht_chan_width);
8739         if (err) {
8740             printf("%s: could not update PHY context (error %d)\n",
8741                 DEVNAME(sc), err);
8742             return err;
8743         }
8744     }
8745
8746     phyctxt->sco = sco;
8747     phyctxt->vht_chan_width = vht_chan_width;
8748     return 0;
8749 }
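/*
 * [editor's note] The asymmetry above appears deliberate: a MODIFY is
 * enough while the context stays on its original band, but when the
 * firmware advertises IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT it seems a
 * PHY context cannot simply be modified across the 2 GHz/5 GHz boundary,
 * so the context is removed and re-added on the new channel instead.
 */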
8750
8751 int
8752 iwm_auth(struct iwm_softc *sc)
8753 {
8754     struct ieee80211com *ic = &sc->sc_ic;
8755     struct iwm_node *in = (void *)ic->ic_bss;
8756     uint32_t duration;
8757     int generation = sc->sc_generation, err;
8758
8759     splassert(IPL_NET);
8760
8761     if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8762         err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8763             ic->ic_ibss_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8764             IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8765         if (err)
8766             return err;
8767     } else {
8768         err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8769             in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8770             IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8771         if (err)
8772             return err;
8773     }
8774     in->in_phyctxt = &sc->sc_phyctxt[0];
8775     IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
8776     iwm_setrates(in, 0);
8777
8778     err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
8779     if (err) {
8780         printf("%s: could not add MAC context (error %d)\n",
8781             DEVNAME(sc), err);
8782         return err;
8783     }
8784     sc->sc_flags |= IWM_FLAG_MAC_ACTIVE;
8785
8786     err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
8787     if (err) {
8788         printf("%s: could not add binding (error %d)\n",
8789             DEVNAME(sc), err);
8790         goto rm_mac_ctxt;
8791     }
8792     sc->sc_flags |= IWM_FLAG_BINDING_ACTIVE;
8793
8794     in->tid_disable_ampdu = 0xffff;
8795     err = iwm_add_sta_cmd(sc, in, 0);
8796     if (err) {
8797         printf("%s: could not add sta (error %d)\n",
8798             DEVNAME(sc), err);
8799         goto rm_binding;
8800     }
8801     sc->sc_flags |= IWM_FLAG_STA_ACTIVE;
8802
8803     if (ic->ic_opmode == IEEE80211_M_MONITOR)
8804         return 0;
8805
8806     /*
8807      * Prevent the FW from wandering off channel during association
8808      * by "protecting" the session with a time event.
8809      */
8810     if (in->in_ni.ni_intval)
8811         duration = in->in_ni.ni_intval * 2;
8812     else
8813         duration = IEEE80211_DUR_TU;
8814     iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
8815
8816     return 0;
8817
8818 rm_binding:
8819     if (generation == sc->sc_generation) {
8820         iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8821         sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8822     }
8823 rm_mac_ctxt:
8824     if (generation == sc->sc_generation) {
8825         iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8826         sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8827     }
8828     return err;
8829 }
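/*
 * [editor's note] Illustration of the session protection above, with a
 * hypothetical beacon interval of ni_intval = 100 TU: the call becomes
 * iwm_protect_session(sc, in, 200, 50), i.e. the firmware is asked to
 * stay on-channel for two beacon intervals, with half a beacon interval
 * passed as the final argument; if the AP's beacon interval is unknown
 * (ni_intval == 0), the constant IEEE80211_DUR_TU (1024) is used as a
 * fallback duration.
 */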
8830
8831 int
8832 iwm_deauth(struct iwm_softc *sc)
8833 {
8834     struct ieee80211com *ic = &sc->sc_ic;
8835     struct iwm_node *in = (void *)ic->ic_bss;
8836     int err;
8837
8838     splassert(IPL_NET);
8839
8840     iwm_unprotect_session(sc, in);
8841
8842     if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
8843         err = iwm_flush_sta(sc, in);
8844         if (err)
8845             return err;
8846         err = iwm_rm_sta_cmd(sc, in);
8847         if (err) {
8848             printf("%s: could not remove STA (error %d)\n",
8849                 DEVNAME(sc), err);
8850             return err;
8851         }
8852         in->tid_disable_ampdu = 0xffff;
8853         sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
8854         sc->sc_rx_ba_sessions = 0;
8855         sc->ba_rx.start_tidmask = 0;
8856         sc->ba_rx.stop_tidmask = 0;
8857         sc->tx_ba_queue_mask = 0;
8858         sc->ba_tx.start_tidmask = 0;
8859         sc->ba_tx.stop_tidmask = 0;
8860     }
8861
8862     if (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE) {
8863         err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8864         if (err) {
8865             printf("%s: could not remove binding (error %d)\n",
8866                 DEVNAME(sc), err);
8867             return err;
8868         }
8869         sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8870     }
8871
8872     if (sc->sc_flags & IWM_FLAG_MAC_ACTIVE) {
8873         err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8874         if (err) {
8875             printf("%s: could not remove MAC context (error %d)\n",
8876                 DEVNAME(sc), err);
8877             return err;
8878         }
8879         sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8880     }
8881
8882     /* Move unused PHY context to a default channel. */
8883     err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8884         &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8885         IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8886     if (err)
8887         return err;
8888
8889     return 0;
8890 }
8891
8892 int
8893 iwm_run(struct iwm_softc *sc)
8894 {
8895     struct ieee80211com *ic = &sc->sc_ic;
8896     struct iwm_node *in = (void *)ic->ic_bss;
8897     struct ieee80211_node *ni = &in->in_ni;
8898     int err;
8899
8900     splassert(IPL_NET);
8901
8902     if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8903         /* Add a MAC context and a sniffing STA. */
8904         err = iwm_auth(sc);
8905         if (err)
8906             return err;
8907     }
8908
8909     /* Configure Rx chains for MIMO and configure 40 MHz channel. */
8910     if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8911         uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8912         err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8913             in->in_phyctxt->channel, chains, chains,
8914             0, IEEE80211_HTOP0_SCO_SCN,
8915             IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8916         if (err) {
8917             printf("%s: failed to update PHY\n", DEVNAME(sc));
8918             return err;
8919         }
8920     } else if (ni->ni_flags & IEEE80211_NODE_HT) {
8921         uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8922         uint8_t sco, vht_chan_width;
8923         if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
8924             ieee80211_node_supports_ht_chan40(ni))
8925             sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
8926         else
8927             sco = IEEE80211_HTOP0_SCO_SCN;
8928         if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
8929             IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
8930             ieee80211_node_supports_vht_chan80(ni))
8931             vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
8932         else
8933             vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
8934         err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8935             in->in_phyctxt->channel, chains, chains,
8936             0, sco, vht_chan_width);
8937         if (err) {
8938             printf("%s: failed to update PHY\n", DEVNAME(sc));
8939             return err;
8940         }
8941     }
8942
8943     /* Update STA again to apply HT and VHT settings. */
8944     err = iwm_add_sta_cmd(sc, in, 1);
8945     if (err) {
8946         printf("%s: could not update STA (error %d)\n",
8947             DEVNAME(sc), err);
8948         return err;
8949     }
8950
8951     /* We have now been assigned an associd by the AP. */
8952     err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
8953     if (err) {
8954         printf("%s: failed to update MAC\n", DEVNAME(sc));
8955         return err;
8956     }
8957
8958     err = iwm_sf_config(sc, IWM_SF_FULL_ON);
8959     if (err) {
8960         printf("%s: could not set sf full on (error %d)\n",
8961             DEVNAME(sc), err);
8962         return err;
8963     }
8964
8965     err = iwm_allow_mcast(sc);
8966     if (err) {
8967         printf("%s: could not allow mcast (error %d)\n",
8968             DEVNAME(sc), err);
8969         return err;
8970     }
8971
8972     err = iwm_power_update_device(sc);
8973     if (err) {
8974         printf("%s: could not send power command (error %d)\n",
8975             DEVNAME(sc), err);
8976         return err;
8977     }
8978 #ifdef notyet
8979     /*
8980      * Disabled for now. Default beacon filter settings
8981      * prevent net80211 from getting ERP and HT protection
8982      * updates from beacons.
8983      */
8984     err = iwm_enable_beacon_filter(sc, in);
8985     if (err) {
8986         printf("%s: could not enable beacon filter\n",
8987             DEVNAME(sc));
8988         return err;
8989     }
8990 #endif
8991     err = iwm_power_mac_update_mode(sc, in);
8992     if (err) {
8993         printf("%s: could not update MAC power (error %d)\n",
8994             DEVNAME(sc), err);
8995         return err;
8996     }
8997
8998     if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
8999         err = iwm_update_quotas(sc, in, 1);
9000         if (err) {
9001             printf("%s: could not update quotas (error %d)\n",
9002                 DEVNAME(sc), err);
9003             return err;
9004         }
9005     }
9006
9007     ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
9008     ieee80211_ra_node_init(&in->in_rn);
9009     ieee80211_ra_vht_node_init(&in->in_rn_vht);
9010
9011     if (ic->ic_opmode == IEEE80211_M_MONITOR) {
9012         iwm_led_blink_start(sc);
9013         return 0;
9014     }
9015
9016     /* Start at lowest available bit-rate, AMRR will raise. */
9017     in->in_ni.ni_txrate = 0;
9018     in->in_ni.ni_txmcs = 0;
9019     in->in_ni.ni_vht_ss = 1;
9020     iwm_setrates(in, 0);
9021
9022     timeout_add_msec(&sc->sc_calib_to, 500);
9023     iwm_led_enable(sc);
9024
9025     return 0;
9026 }
9027
9028 int
9029 iwm_run_stop(struct iwm_softc *sc)
9030 {
9031     struct ieee80211com *ic = &sc->sc_ic;
9032     struct iwm_node *in = (void *)ic->ic_bss;
9033     struct ieee80211_node *ni = &in->in_ni;
9034     int err, i, tid;
9035
9036     splassert(IPL_NET);
9037
9038     /*
9039      * Stop Tx/Rx BA sessions now. We cannot rely on the BA task
9040      * for this when moving out of RUN state since it runs in a
9041      * separate thread.
9042      * Note that in->in_ni (struct ieee80211_node) already represents
9043      * our new access point in case we are roaming between APs.
9044      * This means we cannot rely on struct ieee80211_node to tell
9045      * us which BA sessions exist.
9046      */
9047     for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
9048         struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
9049         if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID)
9050             continue;
9051         err = iwm_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
9052         if (err)
9053             return err;
9054         iwm_clear_reorder_buffer(sc, rxba);
9055         if (sc->sc_rx_ba_sessions > 0)
9056             sc->sc_rx_ba_sessions--;
9057     }
9058     for (tid = 0; tid < IWM_MAX_TID_COUNT; tid++) {
9059         int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
9060         if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
9061             continue;
9062         err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
9063         if (err)
9064             return err;
9065         err = iwm_disable_txq(sc, IWM_STATION_ID, qid, tid);
9066         if (err)
9067             return err;
9068         in->tfd_queue_msk &= ~(1 << qid);
9069     }
9070     ieee80211_ba_del(ni);
9071
9072     if (ic->ic_opmode == IEEE80211_M_MONITOR)
9073         iwm_led_blink_stop(sc);
9074
9075     err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
9076     if (err)
9077         return err;
9078
9079     iwm_disable_beacon_filter(sc);
9080
9081     if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
9082         err = iwm_update_quotas(sc, in, 0);
9083         if (err) {
9084             printf("%s: could not update quotas (error %d)\n",
9085                 DEVNAME(sc), err);
9086             return err;
9087         }
9088     }
9089
9090     /* Mark station as disassociated. */
9091     err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
9092     if (err) {
9093         printf("%s: failed to update MAC\n", DEVNAME(sc));
9094         return err;
9095     }
9096
9097     /* Reset Tx chains in case MIMO or 40 MHz channels were enabled. */
9098     if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
9099         err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
9100             in->in_phyctxt->channel, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
9101             IEEE80211_VHTOP0_CHAN_WIDTH_HT);
9102         if (err) {
9103             printf("%s: failed to update PHY\n", DEVNAME(sc));
9104             return err;
9105         }
9106     }
9107
9108     return 0;
9109 }
9110
9111 struct ieee80211_node *
9112 iwm_node_alloc(struct ieee80211com *ic)
9113 {
9114     return malloc(sizeof (struct iwm_node), M_DEVBUF, M_NOWAIT | M_ZERO);
9115 }
9116
9117 int
9118 iwm_set_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
9119     struct ieee80211_key *k)
9120 {
9121     struct iwm_softc *sc = ic->ic_softc;
9122     struct iwm_add_sta_key_cmd_v1 cmd;
9123
9124     memset(&cmd, 0, sizeof(cmd));
9125
9126     cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
9127         IWM_STA_KEY_FLG_WEP_KEY_MAP |
9128         ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9129         IWM_STA_KEY_FLG_KEYID_MSK));
9130     if (k->k_flags & IEEE80211_KEY_GROUP)
9131         cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
9132
9133     memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9134     cmd.common.key_offset = 0;
9135     cmd.common.sta_id = IWM_STATION_ID;
9136
9137     return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
9138         sizeof(cmd), &cmd);
9139 }
9140
9141 int
9142 iwm_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
9143     struct ieee80211_key *k)
9144 {
9145     struct iwm_softc *sc = ic->ic_softc;
9146     struct iwm_add_sta_key_cmd cmd;
9147
9148     if ((k->k_flags & IEEE80211_KEY_GROUP) ||
9149         k->k_cipher != IEEE80211_CIPHER_CCMP) {
9150         /* Fallback to software crypto for other ciphers. */
9151         return (ieee80211_set_key(ic, ni, k));
9152     }
9153
9154     if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
9155         return iwm_set_key_v1(ic, ni, k);
9156
9157     memset(&cmd, 0, sizeof(cmd));
9158
9159     cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
9160         IWM_STA_KEY_FLG_WEP_KEY_MAP |
9161         ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9162         IWM_STA_KEY_FLG_KEYID_MSK));
9163     if (k->k_flags & IEEE80211_KEY_GROUP)
9164         cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
9165
9166     memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9167     cmd.common.key_offset = 0;
9168     cmd.common.sta_id = IWM_STATION_ID;
9169
9170     cmd.transmit_seq_cnt = htole64(k->k_tsc);
9171
9172     return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
9173         sizeof(cmd), &cmd);
9174 }
9175
9176 void
9177 iwm_delete_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
9178     struct ieee80211_key *k)
9179 {
9180     struct iwm_softc *sc = ic->ic_softc;
9181     struct iwm_add_sta_key_cmd_v1 cmd;
9182
9183     memset(&cmd, 0, sizeof(cmd));
9184
9185     cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
9186         IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
9187         ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9188         IWM_STA_KEY_FLG_KEYID_MSK));
9189     memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9190     cmd.common.key_offset = 0;
9191     cmd.common.sta_id = IWM_STATION_ID;
9192
9193     iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
9194 }
9195
9196 void
9197 iwm_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
9198     struct ieee80211_key *k)
9199 {
9200     struct iwm_softc *sc = ic->ic_softc;
9201     struct iwm_add_sta_key_cmd cmd;
9202
9203     if ((k->k_flags & IEEE80211_KEY_GROUP) ||
9204         (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
9205         /* Fallback to software crypto for other ciphers. */
9206         ieee80211_delete_key(ic, ni, k);
9207         return;
9208     }
9209
9210     if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
9211         return;
9212
9213     if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
9214         return iwm_delete_key_v1(ic, ni, k);
9215
9216     memset(&cmd, 0, sizeof(cmd));
9217
9218     cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
9219         IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
9220         ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9221         IWM_STA_KEY_FLG_KEYID_MSK));
9222     memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9223     cmd.common.key_offset = 0;
9224     cmd.common.sta_id = IWM_STATION_ID;
9225
9226     iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
9227 }
9228
9229 void
9230 iwm_calib_timeout(void *arg)
9231 {
9232     struct iwm_softc *sc = arg;
9233     struct ieee80211com *ic = &sc->sc_ic;
9234     struct iwm_node *in = (void *)ic->ic_bss;
9235     struct ieee80211_node *ni = &in->in_ni;
9236     int s;
9237
9238     s = splnet();
9239     if ((ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
9240         (ni->ni_flags & IEEE80211_NODE_HT) == 0 &&
9241         ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
9242         int old_txrate = ni->ni_txrate;
9243         ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
9244         /*
9245          * If AMRR has chosen a new TX rate we must update
9246          * the firmware's LQ rate table.
9247          * ni_txrate may change again before the task runs so
9248          * cache the chosen rate in the iwm_node structure.
9249          */
9250         if (ni->ni_txrate != old_txrate)
9251             iwm_setrates(in, 1);
9252     }
9253
9254     splx(s);
9255
9256     timeout_add_msec(&sc->sc_calib_to, 500);
9257 }
9258
9259 void
9260 iwm_set_rate_table_vht(struct iwm_node *in, struct iwm_lq_cmd *lqcmd)
9261 {
9262     struct ieee80211_node *ni = &in->in_ni;
9263     struct ieee80211com *ic = ni->ni_ic;
9264     struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9265     int ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
9266     int i, tab, txmcs;
9267
9268     /*
9269      * Fill the LQ rate selection table with VHT rates in descending
9270      * order, i.e. with the node's current TX rate first. Keep reducing
9271      * channel width during later Tx attempts, and eventually fall back
9272      * to legacy OFDM. Do not mix SISO and MIMO rates.
9273      */
9274     lqcmd->mimo_delim = 0;
9275     txmcs = ni->ni_txmcs;
9276     for (i = 0; i < nitems(lqcmd->rs_table); i++) {
9277         if (txmcs >= 0) {
9278             tab = IWM_RATE_MCS_VHT_MSK;
9279             tab |= txmcs & IWM_RATE_VHT_MCS_RATE_CODE_MSK;
9280             tab |= ((ni->ni_vht_ss - 1) <<
9281                 IWM_RATE_VHT_MCS_NSS_POS) &
9282                 IWM_RATE_VHT_MCS_NSS_MSK;
9283             if (ni->ni_vht_ss > 1)
9284                 tab |= IWM_RATE_MCS_ANT_AB_MSK;
9285             else
9286                 tab |= iwm_valid_siso_ant_rate_mask(sc);
9287
9288             /*
9289              * First two Tx attempts may use 80MHz/40MHz/SGI.
9290              * Next two Tx attempts may use 40MHz/SGI.
9291              * Beyond that use 20 MHz and decrease the rate.
9292              * As a special case, MCS 9 is invalid on 20 MHz.
9293              */
9294             if (txmcs == 9) {
9295                 if (i < 2 && in->in_phyctxt->vht_chan_width >=
9296                     IEEE80211_VHTOP0_CHAN_WIDTH_80)
9297                     tab |= IWM_RATE_MCS_CHAN_WIDTH_80;
9298                 else if (in->in_phyctxt->sco ==
9299                     IEEE80211_HTOP0_SCO_SCA ||
9300                     in->in_phyctxt->sco ==
9301                     IEEE80211_HTOP0_SCO_SCB)
9302                     tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9303                 else {
9304                     /* no 40 MHz, fall back on MCS 8 */
9305                     tab &= ~IWM_RATE_VHT_MCS_RATE_CODE_MSK;
9306                     tab |= 8;
9307                 }
9308
9309                 tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9310                 if (i < 4) {
9311                     if (ieee80211_ra_vht_use_sgi(ni))
9312                         tab |= IWM_RATE_MCS_SGI_MSK;
9313                 } else
9314                     txmcs--;
9315             } else if (i < 2 && in->in_phyctxt->vht_chan_width >=
9316                 IEEE80211_VHTOP0_CHAN_WIDTH_80) {
9317                 tab |= IWM_RATE_MCS_CHAN_WIDTH_80;
9318                 tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9319                 if (ieee80211_ra_vht_use_sgi(ni))
9320                     tab |= IWM_RATE_MCS_SGI_MSK;
9321             } else if (i < 4 &&
9322                 in->in_phyctxt->vht_chan_width >=
9323                 IEEE80211_VHTOP0_CHAN_WIDTH_HT &&
9324                 (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
9325                 in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
9326                 tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9327                 tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9328                 if (ieee80211_ra_vht_use_sgi(ni))
9329                     tab |= IWM_RATE_MCS_SGI_MSK;
9330             } else if (txmcs >= 0)
9331                 txmcs--;
9332         } else {
9333             /* Fill the rest with the lowest possible rate. */
9334             tab = iwm_rates[ridx_min].plcp;
9335             tab |= iwm_valid_siso_ant_rate_mask(sc);
9336             if (ni->ni_vht_ss > 1 && lqcmd->mimo_delim == 0)
9337                 lqcmd->mimo_delim = i;
9338         }
9339
9340         lqcmd->rs_table[i] = htole32(tab);
9341     }
9342 }
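/*
 * [editor's note] Rough shape of the resulting table for a hypothetical
 * 2-stream peer at VHT MCS 7 on an 80 MHz channel with SGI enabled
 * (entry numbers approximate; see the conditions above for the exact
 * behaviour):
 *     rs_table[0..1]: MCS 7, 2SS, 80 MHz, SGI, RTS required
 *     rs_table[2..3]: MCS 7, 2SS, 40 MHz, SGI, RTS required
 *     rs_table[4...]: MCS 7, 6, 5, ... at 20 MHz, one step per entry
 *     remaining entries: the lowest basic OFDM rate, with mimo_delim
 *     marking where the MIMO portion of the table ends
 */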
9343
9344 void
9345 iwm_set_rate_table(struct iwm_node *in, struct iwm_lq_cmd *lqcmd)
9346 {
9347     struct ieee80211_node *ni = &in->in_ni;
9348     struct ieee80211com *ic = ni->ni_ic;
9349     struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9350     struct ieee80211_rateset *rs = &ni->ni_rates;
9351     int i, ridx, ridx_min, ridx_max, j, mimo, tab = 0;
9352
9353     /*
9354      * Fill the LQ rate selection table with legacy and/or HT rates
9355      * in descending order, i.e. with the node's current TX rate first.
9356      * In cases where throughput of an HT rate corresponds to a legacy
9357      * rate it makes no sense to add both. We rely on the fact that
9358      * iwm_rates is laid out such that equivalent HT/legacy rates share
9359      * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
9360      * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
9361      */
9362     j = 0;
9363     ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
9364     mimo = iwm_is_mimo_ht_mcs(ni->ni_txmcs);
9365     ridx_max = (mimo ? IWM_RIDX_MAX : IWM_LAST_HT_SISO_RATE);
9366     for (ridx = ridx_max; ridx >= ridx_min; ridx--) {
9367         uint8_t plcp = iwm_rates[ridx].plcp;
9368         uint8_t ht_plcp = iwm_rates[ridx].ht_plcp;
9369
9370         if (j >= nitems(lqcmd->rs_table))
9371             break;
9372         tab = 0;
9373         if (ni->ni_flags & IEEE80211_NODE_HT) {
9374             if (ht_plcp == IWM_RATE_HT_SISO_MCS_INV_PLCP)
9375                 continue;
9376             /* Do not mix SISO and MIMO HT rates. */
9377             if ((mimo && !iwm_is_mimo_ht_plcp(ht_plcp)) ||
9378                 (!mimo && iwm_is_mimo_ht_plcp(ht_plcp)))
9379                 continue;
9380             for (i = ni->ni_txmcs; i >= 0; i--) {
9381                 if (isclr(ni->ni_rxmcs, i))
9382                     continue;
9383                 if (ridx != iwm_ht_mcs2ridx[i])
9384                     continue;
9385                 tab = ht_plcp;
9386                 tab |= IWM_RATE_MCS_HT_MSK;
9387                 /* First two Tx attempts may use 40MHz/SGI. */
9388                 if (j > 1)
9389                     break;
9390                 if (in->in_phyctxt->sco ==
9391                     IEEE80211_HTOP0_SCO_SCA ||
9392                     in->in_phyctxt->sco ==
9393                     IEEE80211_HTOP0_SCO_SCB) {
9394                     tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9395                     tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9396                 }
9397                 if (ieee80211_ra_use_ht_sgi(ni))
9398                     tab |= IWM_RATE_MCS_SGI_MSK;
9399                 break;
9400             }
9401         } else if (plcp != IWM_RATE_INVM_PLCP) {
9402             for (i = ni->ni_txrate; i >= 0; i--) {
9403                 if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
9404                     IEEE80211_RATE_VAL)) {
9405                     tab = plcp;
9406                     break;
9407                 }
9408             }
9409         }
9410
9411         if (tab == 0)
9412             continue;
9413
9414         if (iwm_is_mimo_ht_plcp(ht_plcp))
9415             tab |= IWM_RATE_MCS_ANT_AB_MSK;
9416         else
9417             tab |= iwm_valid_siso_ant_rate_mask(sc);
9418
9419         if (IWM_RIDX_IS_CCK(ridx))
9420             tab |= IWM_RATE_MCS_CCK_MSK;
9421         lqcmd->rs_table[j++] = htole32(tab);
9422     }
9423
9424     lqcmd->mimo_delim = (mimo ? j : 0);
9425
9426     /* Fill the rest with the lowest possible rate */
9427     while (j < nitems(lqcmd->rs_table)) {
9428         tab = iwm_rates[ridx_min].plcp;
9429         if (IWM_RIDX_IS_CCK(ridx_min))
9430             tab |= IWM_RATE_MCS_CCK_MSK;
9431         tab |= iwm_valid_siso_ant_rate_mask(sc);
9432         lqcmd->rs_table[j++] = htole32(tab);
9433     }
9434 }
9435
9436 void
9437 iwm_setrates(struct iwm_node *in, int async)
9438 {
9439     struct ieee80211_node *ni = &in->in_ni;
9440     struct ieee80211com *ic = ni->ni_ic;
9441     struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9442     struct iwm_lq_cmd lqcmd;
9443     struct iwm_host_cmd cmd = {
9444         .id = IWM_LQ_CMD,
9445         .len = { sizeof(lqcmd), },
9446     };
9447
9448     cmd.flags = async ? IWM_CMD_ASYNC : 0;
9449
9450     memset(&lqcmd, 0, sizeof(lqcmd));
9451     lqcmd.sta_id = IWM_STATION_ID;
9452
9453     if (ic->ic_flags & IEEE80211_F_USEPROT)
9454         lqcmd.flags |= IWM_LQ_FLAG_USE_RTS_MSK;
9455
9456     if (ni->ni_flags & IEEE80211_NODE_VHT)
9457         iwm_set_rate_table_vht(in, &lqcmd);
9458     else
9459         iwm_set_rate_table(in, &lqcmd);
9460
9461     if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000 &&
9462         (iwm_fw_valid_tx_ant(sc) & IWM_ANT_B))
9463         lqcmd.single_stream_ant_msk = IWM_ANT_B;
9464     else
9465         lqcmd.single_stream_ant_msk = IWM_ANT_A;
9466     lqcmd.dual_stream_ant_msk = IWM_ANT_AB;
9467
9468     lqcmd.agg_time_limit = htole16(4000); /* 4ms */
9469     lqcmd.agg_disable_start_th = 3;
9470     lqcmd.agg_frame_cnt_limit = 0x3f;
9471
9472     cmd.data[0] = &lqcmd;
9473     iwm_send_cmd(sc, &cmd);
9474 }
9475
9476 int
9477 iwm_media_change(struct ifnet *ifp)
9478 {
9479     struct iwm_softc *sc = ifp->if_softc;
9480     struct ieee80211com *ic = &sc->sc_ic;
9481     uint8_t rate, ridx;
9482     int err;
9483
9484     err = ieee80211_media_change(ifp);
9485     if (err != ENETRESET)
9486         return err;
9487
9488     if (ic->ic_fixed_mcs != -1)
9489         sc->sc_fixed_ridx = iwm_ht_mcs2ridx[ic->ic_fixed_mcs];
9490     else if (ic->ic_fixed_rate != -1) {
9491         rate = ic->ic_sup_rates[ic->ic_curmode].
9492             rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
9493         /* Map 802.11 rate to HW rate index. */
9494         for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
9495             if (iwm_rates[ridx].rate == rate)
9496                 break;
9497         sc->sc_fixed_ridx = ridx;
9498     }
9499
9500     if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
9501         (IFF_UP | IFF_RUNNING)) {
9502         iwm_stop(ifp);
9503         err = iwm_init(ifp);
9504     }
9505     return err;
9506 }
9507
9508 void
9509 iwm_newstate_task(void *psc)
9510 {
9511     struct iwm_softc *sc = (struct iwm_softc *)psc;
9512     struct ieee80211com *ic = &sc->sc_ic;
9513     enum ieee80211_state nstate = sc->ns_nstate;
9514     enum ieee80211_state ostate = ic->ic_state;
9515     int arg = sc->ns_arg;
9516     int err = 0, s = splnet();
9517
9518     if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
9519         /* iwm_stop() is waiting for us. */
9520         refcnt_rele_wake(&sc->task_refs);
9521         splx(s);
9522         return;
9523     }
9524
9525     if (ostate == IEEE80211_S_SCAN) {
9526         if (nstate == ostate) {
9527             if (sc->sc_flags & IWM_FLAG_SCANNING) {
9528                 refcnt_rele_wake(&sc->task_refs);
9529                 splx(s);
9530                 return;
9531             }
9532             /* Firmware is no longer scanning. Do another scan. */
9533             goto next_scan;
9534         } else
9535             iwm_led_blink_stop(sc);
9536     }
9537
9538     if (nstate <= ostate) {
9539         switch (ostate) {
9540         case IEEE80211_S_RUN:
9541             err = iwm_run_stop(sc);
9542             if (err)
9543                 goto out;
9544             /* FALLTHROUGH */
9545         case IEEE80211_S_ASSOC:
9546         case IEEE80211_S_AUTH:
9547             if (nstate <= IEEE80211_S_AUTH) {
9548                 err = iwm_deauth(sc);
9549                 if (err)
9550                     goto out;
9551             }
9552             /* FALLTHROUGH */
9553         case IEEE80211_S_SCAN:
9554         case IEEE80211_S_INIT:
9555             break;
9556         }
9557
9558         /* Die now if iwm_stop() was called while we were sleeping. */
9559         if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
9560             refcnt_rele_wake(&sc->task_refs);
9561             splx(s);
9562             return;
9563         }
9564     }
9565
9566     switch (nstate) {
9567     case IEEE80211_S_INIT:
9568         break;
9569
9570     case IEEE80211_S_SCAN:
9571 next_scan:
9572         err = iwm_scan(sc);
9573         if (err)
9574             break;
9575         refcnt_rele_wake(&sc->task_refs);
9576         splx(s);
9577         return;
9578
9579     case IEEE80211_S_AUTH:
9580         err = iwm_auth(sc);
9581         break;
9582
9583     case IEEE80211_S_ASSOC:
9584         break;
9585
9586     case IEEE80211_S_RUN:
9587         err = iwm_run(sc);
9588         break;
9589     }
9590
9591 out:
9592     if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
9593         if (err)
9594             task_add(systq, &sc->init_task);
9595         else
9596             sc->sc_newstate(ic, nstate, arg);
9597     }
9598     refcnt_rele_wake(&sc->task_refs);
9599     splx(s);
9600 }
9601
9602 int
9603 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
9604 {
9605     struct ifnet *ifp = IC2IFP(ic);
9606     struct iwm_softc *sc = ifp->if_softc;
9607
9608     /*
9609      * Prevent attempts to transition towards the same state, unless
9610      * we are scanning, in which case a SCAN -> SCAN transition
9611      * triggers another scan iteration. An AUTH -> AUTH transition
9612      * is also allowed since it is needed to support band-steering.
9613      */
9614     if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
9615         nstate != IEEE80211_S_AUTH)
9616         return 0;
9617
9618     if (ic->ic_state == IEEE80211_S_RUN) {
9619         timeout_del(&sc->sc_calib_to);
9620         iwm_del_task(sc, systq, &sc->ba_task);
9621         iwm_del_task(sc, systq, &sc->mac_ctxt_task);
9622         iwm_del_task(sc, systq, &sc->phy_ctxt_task);
9623         iwm_del_task(sc, systq, &sc->bgscan_done_task);
9624     }
9625
9626     sc->ns_nstate = nstate;
9627     sc->ns_arg = arg;
9628
9629     iwm_add_task(sc, sc->sc_nswq, &sc->newstate_task);
9630
9631     return 0;
9632 }
9633
9634 void
9635 iwm_endscan(struct iwm_softc *sc)
9636 {
9637     struct ieee80211com *ic = &sc->sc_ic;
9638
9639     if ((sc->sc_flags & (IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN)) == 0)
9640         return;
9641
9642     sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
9643     ieee80211_end_scan(&ic->ic_if);
9644 }
9645
9646 /*
9647  * Aging and idle timeouts for the different possible scenarios
9648  * in default configuration.
9649  */
9650 static const uint32_t
9651 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
9652     {
9653         htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
9654         htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
9655     },
9656     {
9657         htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
9658         htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
9659     },
9660     {
9661         htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
9662         htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
9663     },
9664     {
9665         htole32(IWM_SF_BA_AGING_TIMER_DEF),
9666         htole32(IWM_SF_BA_IDLE_TIMER_DEF)
9667     },
9668     {
9669         htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
9670         htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
9671     },
9672 };
9673
9674 /*
9675  * Aging and idle timeouts for the different possible scenarios
9676  * in single BSS MAC configuration.
9677  */
9678 static const uint32_t
9679 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
9680     {
9681         htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
9682         htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
9683     },
9684     {
9685         htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
9686         htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
9687     },
9688     {
9689         htole32(IWM_SF_MCAST_AGING_TIMER),
9690         htole32(IWM_SF_MCAST_IDLE_TIMER)
9691     },
9692     {
9693         htole32(IWM_SF_BA_AGING_TIMER),
9694         htole32(IWM_SF_BA_IDLE_TIMER)
9695     },
9696     {
9697         htole32(IWM_SF_TX_RE_AGING_TIMER),
9698         htole32(IWM_SF_TX_RE_IDLE_TIMER)
9699     },
9700 };
9701
9702 void
9703 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
9704     struct ieee80211_node *ni)
9705 {
9706     int i, j, watermark;
9707
9708     sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
9709
9710     /*
9711      * If we are in the association flow, check the antenna configuration
9712      * capabilities of the AP station and choose the watermark accordingly.
9713      */
9714     if (ni) {
9715         if (ni->ni_flags & IEEE80211_NODE_HT) {
9716             if (ni->ni_rxmcs[1] != 0)
9717                 watermark = IWM_SF_W_MARK_MIMO2;
9718             else
9719                 watermark = IWM_SF_W_MARK_SISO;
9720         } else {
9721             watermark = IWM_SF_W_MARK_LEGACY;
9722         }
9723     /* default watermark value for unassociated mode. */
9724     } else {
9725         watermark = IWM_SF_W_MARK_MIMO2;
9726     }
9727     sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
9728
9729     for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
9730         for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
9731             sf_cmd->long_delay_timeouts[i][j] =
9732                 htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
9733         }
9734     }
9735
9736     if (ni) {
9737         memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
9738             sizeof(iwm_sf_full_timeout));
9739     } else {
9740         memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
9741             sizeof(iwm_sf_full_timeout_def));
9742     }
9743
9744 }
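/*
 * [editor's note] ni_rxmcs[1] above is the second octet of the peer's
 * HT Rx MCS bitmap; a nonzero value means the AP can receive two spatial
 * streams, so the larger MIMO2 watermark is chosen. Per the macro
 * expansions in this report, IWM_SF_W_MARK_MIMO2 is 8192 while the SISO
 * and legacy watermarks are 4096; unassociated mode defaults to the
 * larger MIMO2 value as the conservative choice.
 */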
9745
9746 int
9747 iwm_sf_config(struct iwm_softc *sc, int new_state)
9748 {
9749     struct ieee80211com *ic = &sc->sc_ic;
9750     struct iwm_sf_cfg_cmd sf_cmd = {
9751         .state = htole32(new_state),
9752     };
9753     int err = 0;
9754
9755 #if 0 /* only used for models with sdio interface, in iwlwifi */
9756     if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
9757         sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
9758 #endif
9759
9760     switch (new_state) {
9761     case IWM_SF_UNINIT:
9762     case IWM_SF_INIT_OFF:
9763         iwm_fill_sf_command(sc, &sf_cmd, NULL);
9764         break;
9765     case IWM_SF_FULL_ON:
9766         iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
9767         break;
9768     default:
9769         return EINVAL;
9770     }
9771
9772     err = iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
9773         sizeof(sf_cmd), &sf_cmd);
9774     return err;
9775 }
9776
9777 int
9778 iwm_send_bt_init_conf(struct iwm_softc *sc)
9779 {
9780     struct iwm_bt_coex_cmd bt_cmd;
9781
9782     bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
9783     bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
9784
9785     return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
9786         &bt_cmd);
9787 }
9788
9789 int
9790 iwm_send_soc_conf(struct iwm_softc *sc)
9791 {
9792     struct iwm_soc_configuration_cmd cmd;
9793     int err;
9794     uint32_t cmd_id, flags = 0;
9795
9796     memset(&cmd, 0, sizeof(cmd));
9797
9798     /*
9799      * In VER_1 of this command, the discrete value is considered
9800      * an integer; in VER_2, it's a bitmask. Since we have only 2
9801      * values in VER_1, this is backwards-compatible with VER_2,
9802      * as long as we don't set any other flag bits.
9803      */
9804     if (!sc->sc_integrated) { /* VER_1 */
9805         flags = IWM_SOC_CONFIG_CMD_FLAGS_DISCRETE;
9806     } else { /* VER_2 */
9807         uint8_t scan_cmd_ver;
9808         if (sc->sc_ltr_delay != IWM_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
9809             flags |= (sc->sc_ltr_delay &
9810                 IWM_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
9811         scan_cmd_ver = iwm_lookup_cmd_ver(sc, IWM_LONG_GROUP,
9812             IWM_SCAN_REQ_UMAC);
9813         if (scan_cmd_ver != IWM_FW_CMD_VER_UNKNOWN &&
9814             scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
9815             flags |= IWM_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
9816     }
9817     cmd.flags = htole32(flags);
9818
9819     cmd.latency = htole32(sc->sc_xtal_latency);
9820
9821     cmd_id = iwm_cmd_id(IWM_SOC_CONFIGURATION_CMD, IWM_SYSTEM_GROUP, 0);
9822     err = iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
9823     if (err)
9824         printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
9825     return err;
9826 }
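/*
 * [editor's note] Bit-level view of the compatibility argument in the
 * comment above: VER_1 firmware reads cmd.flags as an integer where 1
 * means "discrete SoC", while VER_2 firmware reads it as a bitmask whose
 * bit 0 is IWM_SOC_CONFIG_CMD_FLAGS_DISCRETE (1 << 0). Setting only that
 * bit therefore produces the same on-the-wire value (0x1) under either
 * interpretation; the LOW_LATENCY bit (1 << 1) and the LTR delay bits
 * (mask 0xc) are only added on the VER_2 (integrated SoC) path.
 */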
9827
9828 int
9829 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
9830 {
9831     struct iwm_mcc_update_cmd mcc_cmd;
9832     struct iwm_host_cmd hcmd = {
9833         .id = IWM_MCC_UPDATE_CMD,
9834         .flags = IWM_CMD_WANT_RESP,
9835         .resp_pkt_len = IWM_CMD_RESP_MAX,
9836         .data = { &mcc_cmd },
9837     };
9838     struct iwm_rx_packet *pkt;
9839     size_t resp_len;
9840     int err;
9841     int resp_v3 = isset(sc->sc_enabled_capa,
9842         IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V3);
9843
9844     if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000 &&
9845         !sc->sc_nvm.lar_enabled) {
9846         return 0;
9847     }
9848
9849     memset(&mcc_cmd, 0, sizeof(mcc_cmd));
9850     mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
9851     if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
9852         isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
9853         mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
9854     else
9855         mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
9856
9857     if (resp_v3) { /* same size as resp_v2 */
9858         hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
9859     } else {
9860         hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
9861     }
9862
9863     err = iwm_send_cmd(sc, &hcmd);
9864     if (err)
9865         return err;
9866
9867     pkt = hcmd.resp_pkt;
9868     if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
9869         err = EIO;
9870         goto out;
9871     }
9872
9873     if (resp_v3) {
9874         struct iwm_mcc_update_resp_v3 *resp;
9875         resp_len = iwm_rx_packet_payload_len(pkt);
9876         if (resp_len < sizeof(*resp)) {
9877             err = EIO;
9878             goto out;
9879         }
9880
9881         resp = (void *)pkt->data;
9882         if (resp_len != sizeof(*resp) +
9883             resp->n_channels * sizeof(resp->channels[0])) {
9884             err = EIO;
9885             goto out;
9886         }
9887     } else {
9888         struct iwm_mcc_update_resp_v1 *resp_v1;
9889         resp_len = iwm_rx_packet_payload_len(pkt);
9890         if (resp_len < sizeof(*resp_v1)) {
9891             err = EIO;
9892             goto out;
9893         }
9894
9895         resp_v1 = (void *)pkt->data;
9896         if (resp_len != sizeof(*resp_v1) +
9897             resp_v1->n_channels * sizeof(resp_v1->channels[0])) {
9898             err = EIO;
9899             goto out;
9900         }
9901     }
9902 out:
9903     iwm_free_resp(sc, &hcmd);
9904     return err;
9905 }
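/*
 * [editor's note] The MCC above is simply the two-letter ISO country
 * code packed into 16 bits before byte-order conversion; for a
 * hypothetical alpha2 of "DE":
 *     mcc = 'D' << 8 | 'E' = 0x44 << 8 | 0x45 = 0x4445
 */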
9906
9907 int
9908 iwm_send_temp_report_ths_cmd(struct iwm_softc *sc)
9909 {
9910     struct iwm_temp_report_ths_cmd cmd;
9911     int err;
9912
9913     /*
9914      * In order to give responsibility for critical-temperature-kill
9915      * and TX backoff to FW we need to send an empty temperature
9916      * reporting command at init time.
9917      */
9918     memset(&cmd, 0, sizeof(cmd));
9919
9920     err = iwm_send_cmd_pdu(sc,
9921         IWM_WIDE_ID(IWM_PHY_OPS_GROUP, IWM_TEMP_REPORTING_THRESHOLDS_CMD),
9922         0, sizeof(cmd), &cmd);
9923     if (err)
9924         printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
9925             DEVNAME(sc), err);
9926
9927     return err;
9928 }
9929
9930 void
9931 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
9932 {
9933     struct iwm_host_cmd cmd = {
9934         .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
9935         .len = { sizeof(uint32_t), },
9936         .data = { &backoff, },
9937     };
9938
9939     iwm_send_cmd(sc, &cmd);
9940 }
9941
9942 void
9943 iwm_free_fw_paging(struct iwm_softc *sc)
9944 {
9945     int i;
9946
9947     if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
9948         return;
9949
9950     for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
9951         iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
9952     }
9953
9954     memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
9955 }
9956
9957int
9958iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
9959{
9960 int sec_idx, idx;
9961 uint32_t offset = 0;
9962
9963 /*
9964 * find where is the paging image start point:
9965 * if CPU2 exist and it's in paging format, then the image looks like:
9966 * CPU1 sections (2 or more)
9967 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
9968 * CPU2 sections (not paged)
9969 * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
9970 * non paged to CPU2 paging sec
9971 * CPU2 paging CSS
9972 * CPU2 paging image (including instruction and data)
9973 */
9974 for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX16; sec_idx++) {
9975 if (image->fw_sect[sec_idx].fws_devoff ==
9976 IWM_PAGING_SEPARATOR_SECTION0xAAAABBBB) {
9977 sec_idx++;
9978 break;
9979 }
9980 }
9981
9982 /*
9983 * If paging is enabled there should be at least 2 more sections left
9984 * (one for CSS and one for Paging data)
9985 */
9986 if (sec_idx >= nitems(image->fw_sect)(sizeof((image->fw_sect)) / sizeof((image->fw_sect)[0])
)
- 1) {
9987 printf("%s: Paging: Missing CSS and/or paging sections\n",
9988 DEVNAME(sc)((sc)->sc_dev.dv_xname));
9989 iwm_free_fw_paging(sc);
9990 return EINVAL22;
9991 }
9992
9993 /* copy the CSS block to the dram */
9994 DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n",do { ; } while (0)
9995 DEVNAME(sc), sec_idx))do { ; } while (0);
9996
9997 memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,__builtin_memcpy((sc->fw_paging_db[0].fw_paging_block.vaddr
), (image->fw_sect[sec_idx].fws_data), (sc->fw_paging_db
[0].fw_paging_size))
9998 image->fw_sect[sec_idx].fws_data,__builtin_memcpy((sc->fw_paging_db[0].fw_paging_block.vaddr
), (image->fw_sect[sec_idx].fws_data), (sc->fw_paging_db
[0].fw_paging_size))
9999 sc->fw_paging_db[0].fw_paging_size)__builtin_memcpy((sc->fw_paging_db[0].fw_paging_block.vaddr
), (image->fw_sect[sec_idx].fws_data), (sc->fw_paging_db
[0].fw_paging_size))
;
10000
10001 DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",do { ; } while (0)
10002 DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size))do { ; } while (0);
10003
10004 sec_idx++;
10005
10006 /*
10007 * Copy the paging blocks to DRAM. The loop index starts at 1
10008 * because the CSS block (index 0) has already been copied to
10009 * DRAM.
10010 * The loop stops at num_of_paging_blk since the last block is not full.
10011 */
10012 for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
10013 memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
10014 (const char *)image->fw_sect[sec_idx].fws_data + offset,
10015 sc->fw_paging_db[idx].fw_paging_size);
10016
10017 DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
10018 DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
10019
10020 offset += sc->fw_paging_db[idx].fw_paging_size;
10021 }
10022
10023 /* copy the last paging block */
10024 if (sc->num_of_pages_in_last_blk > 0) {
10025 memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
10026 (const char *)image->fw_sect[sec_idx].fws_data + offset,
10027 IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
10028
10029 DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
10030 DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
10031 }
10032
10033 return 0;
10034}
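
Taken together, iwm_fill_paging_mem() copies one 4 KB CSS block, then num_of_paging_blk - 1 full blocks, then a partial block holding num_of_pages_in_last_blk pages. A minimal standalone sketch of that copy geometry (simplified: it assumes the CSS and the paging data sit in two separate source buffers handed in directly, unlike the fw_sect indexing in the real driver; sizes come from the macro expansions above):

#include <string.h>

#define FW_PAGING_SIZE (1 << 12)		/* 4 KB page */
#define NUM_OF_PAGE_PER_GROUP (1 << 3)		/* 8 pages per block */

/* dst[0] is the CSS block; dst[1..nblk] are the paging blocks */
static void
fill_paging_sketch(char *dst[], const char *css, const char *data,
    int nblk, int last_pages)
{
	size_t offset = 0;
	int idx;

	memcpy(dst[0], css, FW_PAGING_SIZE);		/* CSS block */
	for (idx = 1; idx < nblk; idx++) {		/* full 32 KB blocks */
		memcpy(dst[idx], data + offset,
		    NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE);
		offset += NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE;
	}
	if (last_pages > 0)				/* partial last block */
		memcpy(dst[idx], data + offset,
		    (size_t)FW_PAGING_SIZE * last_pages);
}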
10035
10036int
10037iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
10038{
10039 int blk_idx = 0;
10040 int error, num_of_pages;
10041
10042 if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
10043 int i;
10044 /* Device got reset, and we setup firmware paging again */
10045 bus_dmamap_sync(sc->sc_dmat,
10046 sc->fw_paging_db[0].fw_paging_block.map,
10047 0, IWM_FW_PAGING_SIZE,
10048 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
10049 for (i = 1; i < sc->num_of_paging_blk + 1; i++) {
10050 bus_dmamap_sync(sc->sc_dmat,
10051 sc->fw_paging_db[i].fw_paging_block.map,
10052 0, IWM_PAGING_BLOCK_SIZE,
10053 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
10054 }
10055 return 0;
10056 }
10057
10058 /* ensure IWM_BLOCK_2_EXP_SIZE is the log2 of IWM_PAGING_BLOCK_SIZE */
10059#if (1 << IWM_BLOCK_2_EXP_SIZE) != IWM_PAGING_BLOCK_SIZE
10060#error IWM_BLOCK_2_EXP_SIZE must be the log2 of IWM_PAGING_BLOCK_SIZE
10061#endif
10062
10063 num_of_pages = image->paging_mem_size / IWM_FW_PAGING_SIZE;
10064 sc->num_of_paging_blk =
10065 ((num_of_pages - 1) / IWM_NUM_OF_PAGE_PER_GROUP) + 1;
10066
10067 sc->num_of_pages_in_last_blk =
10068 num_of_pages -
10069 IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
10070
10071 DPRINTF(("%s: Paging: allocating mem for %d paging blocks, each block"
10072 " holds 8 pages, last block holds %d pages\n", DEVNAME(sc),
10073 sc->num_of_paging_blk,
10074 sc->num_of_pages_in_last_blk));
10075
10076 /* allocate block of 4Kbytes for paging CSS */
10077 error = iwm_dma_contig_alloc(sc->sc_dmat,
10078 &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
10079 4096);
10080 if (error) {
10081 /* free all the previous pages since we failed */
10082 iwm_free_fw_paging(sc);
10083 return ENOMEM;
10084 }
10085
10086 sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
10087
10088 DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
10089 DEVNAME(sc)));
10090
10091 /*
10092 * Allocate blocks in DRAM. Since the CSS lives in
10093 * fw_paging_db[0], the loop starts at index 1.
10094 */
10095 for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
10096 /* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
10097 /* XXX Use iwm_dma_contig_alloc for allocating */
10098 error = iwm_dma_contig_alloc(sc->sc_dmat,
10099 &sc->fw_paging_db[blk_idx].fw_paging_block,
10100 IWM_PAGING_BLOCK_SIZE, 4096);
10101 if (error) {
10102 /* free all the previous pages since we failed */
10103 iwm_free_fw_paging(sc);
10104 return ENOMEM;
10105 }
10106
10107 sc->fw_paging_db[blk_idx].fw_paging_size =
10108 IWM_PAGING_BLOCK_SIZE;
10109
10110 DPRINTF((
10111 "%s: Paging: allocated 32K bytes for firmware paging.\n",
10112 DEVNAME(sc)));
10113 }
10114
10115 return 0;
10116}
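
The arithmetic above groups pages into blocks of 8, with any remainder landing in a partial last block. A quick self-checking example (the 75-page image size is made up for illustration):

#include <assert.h>
#include <stdio.h>

int
main(void)
{
	int paging_mem_size = 75 * (1 << 12);	/* hypothetical: 75 pages */
	int num_of_pages = paging_mem_size / (1 << 12);
	int num_of_paging_blk = ((num_of_pages - 1) / (1 << 3)) + 1;
	int num_of_pages_in_last_blk = num_of_pages -
	    (1 << 3) * (num_of_paging_blk - 1);

	assert(num_of_paging_blk == 10);	/* 9 full blocks + 1 partial */
	assert(num_of_pages_in_last_blk == 3);	/* 75 - 8*9 = 3 */
	printf("%d blocks, %d pages in last block\n",
	    num_of_paging_blk, num_of_pages_in_last_blk);
	return 0;
}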
10117
10118int
10119iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
10120{
10121 int ret;
10122
10123 ret = iwm_alloc_fw_paging_mem(sc, fw);
10124 if (ret)
10125 return ret;
10126
10127 return iwm_fill_paging_mem(sc, fw);
10128}
10129
10130/* send paging cmd to FW in case CPU2 has paging image */
10131int
10132iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
10133{
10134 int blk_idx;
10135 uint32_t dev_phy_addr;
10136 struct iwm_fw_paging_cmd fw_paging_cmd = {
10137 .flags =
10138 htole32(IWM_PAGING_CMD_IS_SECURED |
10139 IWM_PAGING_CMD_IS_ENABLED |
10140 (sc->num_of_pages_in_last_blk <<
10141 IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
10142 .block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
10143 .block_num = htole32(sc->num_of_paging_blk),
10144 };
10145
10146 /* loop for all paging blocks + CSS block */
10147 for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
10148 dev_phy_addr = htole32(
10149 sc->fw_paging_db[blk_idx].fw_paging_block.paddr >>
10150 IWM_PAGE_2_EXP_SIZE);
10151 fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
10152 bus_dmamap_sync(sc->sc_dmat,
10153 sc->fw_paging_db[blk_idx].fw_paging_block.map, 0,
10154 blk_idx == 0 ? IWM_FW_PAGING_SIZE : IWM_PAGING_BLOCK_SIZE,
10155 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
10156 }
10157
10158 return iwm_send_cmd_pdu(sc, iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD,
10159 IWM_LONG_GROUP, 0),
10160 0, sizeof(fw_paging_cmd), &fw_paging_cmd);
10161}
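
Per the IWM_PAGE_2_EXP_SIZE expansion above (a shift by 12), the command carries 4 KB page-frame numbers rather than byte addresses. A trivial illustration with a made-up DMA address:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t paddr = 0x12345000ULL;		/* hypothetical block address */
	uint32_t dev_phy_addr = (uint32_t)(paddr >> 12);

	printf("paddr 0x%llx -> page frame 0x%x\n",
	    (unsigned long long)paddr, dev_phy_addr);
	return 0;
}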
10162
10163int
10164iwm_init_hw(struct iwm_softc *sc)
10165{
10166 struct ieee80211com *ic = &sc->sc_ic;
10167 int err, i, ac, qid, s;
10168
10169 err = iwm_run_init_mvm_ucode(sc, 0);
10170 if (err)
10171 return err;
10172
10173 /* Should stop and start HW since INIT image just loaded. */
10174 iwm_stop_device(sc);
10175 err = iwm_start_hw(sc);
10176 if (err) {
10177 printf("%s: could not initialize hardware\n", DEVNAME(sc));
10178 return err;
10179 }
10180
10181 /* Restart, this time with the regular firmware */
10182 s = splnet();
10183 err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
10184 if (err) {
10185 printf("%s: could not load firmware\n", DEVNAME(sc));
10186 splx(s);
10187 return err;
10188 }
10189
10190 if (!iwm_nic_lock(sc)) {
10191 splx(s);
10192 return EBUSY;
10193 }
10194
10195 err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
10196 if (err) {
10197 printf("%s: could not init tx ant config (error %d)\n",
10198 DEVNAME(sc), err);
10199 goto err;
10200 }
10201
10202 err = iwm_send_phy_db_data(sc);
10203 if (err) {
10204 printf("%s: could not init phy db (error %d)\n",
10205 DEVNAME(sc), err);
10206 goto err;
10207 }
10208
10209 err = iwm_send_phy_cfg_cmd(sc);
10210 if (err) {
10211 printf("%s: could not send phy config (error %d)\n",
10212 DEVNAME(sc), err);
10213 goto err;
10214 }
10215
10216 err = iwm_send_bt_init_conf(sc);
10217 if (err) {
10218 printf("%s: could not init bt coex (error %d)\n",
10219 DEVNAME(sc), err);
10220 goto err;
10221 }
10222
10223 if (isset(sc->sc_enabled_capa,
10224 IWM_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
10225 err = iwm_send_soc_conf(sc);
10226 if (err)
10227 goto err;
10228 }
10229
10230 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
10231 err = iwm_send_dqa_cmd(sc);
10232 if (err)
10233 goto err;
10234 }
10235
10236 /* Add auxiliary station for scanning */
10237 err = iwm_add_aux_sta(sc);
10238 if (err) {
10239 printf("%s: could not add aux station (error %d)\n",
10240 DEVNAME(sc), err);
10241 goto err;
10242 }
10243
10244 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
10245 /*
10246 * The channel used here isn't relevant as it's
10247 * going to be overwritten in the other flows.
10248 * For now use the first channel we have.
10249 */
10250 sc->sc_phyctxt[i].id = i;
10251 sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
10252 err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
10253 IWM_FW_CTXT_ACTION_ADD, 0, IEEE80211_HTOP0_SCO_SCN,
10254 IEEE80211_VHTOP0_CHAN_WIDTH_HT);
10255 if (err) {
10256 printf("%s: could not add phy context %d (error %d)\n",
10257 DEVNAME(sc), i, err);
10258 goto err;
10259 }
10260 }
10261
10262 /* Initialize tx backoffs to the minimum. */
10263 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
10264 iwm_tt_tx_backoff(sc, 0);
10265
10266
10267 err = iwm_config_ltr(sc);
10268 if (err) {
10269 printf("%s: PCIe LTR configuration failed (error %d)\n",
10270 DEVNAME(sc), err);
10271 }
10272
10273 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
10274 err = iwm_send_temp_report_ths_cmd(sc);
10275 if (err)
10276 goto err;
10277 }
10278
10279 err = iwm_power_update_device(sc);
10280 if (err) {
10281 printf("%s: could not send power command (error %d)\n",
10282 DEVNAME(sc), err);
10283 goto err;
10284 }
10285
10286 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
10287 err = iwm_send_update_mcc_cmd(sc, "ZZ");
10288 if (err) {
10289 printf("%s: could not init LAR (error %d)\n",
10290 DEVNAME(sc), err);
10291 goto err;
10292 }
10293 }
10294
10295 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
10296 err = iwm_config_umac_scan(sc);
10297 if (err) {
10298 printf("%s: could not configure scan (error %d)\n",
10299 DEVNAME(sc), err);
10300 goto err;
10301 }
10302 }
10303
10304 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
10305 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
10306 qid = IWM_DQA_INJECT_MONITOR_QUEUE;
10307 else
10308 qid = IWM_AUX_QUEUE;
10309 err = iwm_enable_txq(sc, IWM_MONITOR_STA_ID, qid,
10310 iwm_ac_to_tx_fifo[EDCA_AC_BE], 0, IWM_MAX_TID_COUNT, 0);
10311 if (err) {
10312 printf("%s: could not enable monitor inject Tx queue "
10313 "(error %d)\n", DEVNAME(sc), err);
10314 goto err;
10315 }
10316 } else {
10317 for (ac = 0; ac < EDCA_NUM_AC; ac++) {
10318 if (isset(sc->sc_enabled_capa,
10319 IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
10320 qid = ac + IWM_DQA_MIN_MGMT_QUEUE;
10321 else
10322 qid = ac;
10323 err = iwm_enable_txq(sc, IWM_STATION_ID, qid,
10324 iwm_ac_to_tx_fifo[ac], 0, IWM_TID_NON_QOS, 0);
10325 if (err) {
10326 printf("%s: could not enable Tx queue %d "
10327 "(error %d)\n", DEVNAME(sc), ac, err);
10328 goto err;
10329 }
10330 }
10331 }
10332
10333 err = iwm_disable_beacon_filter(sc);
10334 if (err) {
10335 printf("%s: could not disable beacon filter (error %d)\n",
10336 DEVNAME(sc), err);
10337 goto err;
10338 }
10339
10340err:
10341 iwm_nic_unlock(sc);
10342 splx(s);
10343 return err;
10344}
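
Note that the success path falls through the err: label as well, so iwm_nic_unlock() and splx() run on both paths and err is 0 on success. A minimal generic sketch of this unwind idiom (helper names are made up):

static int step1(void) { return 0; }	/* stand-ins for the setup calls */
static int step2(void) { return 0; }
static void unlock(void) { }

static int
init_sketch(void)
{
	int err;

	err = step1();
	if (err)
		goto out;
	err = step2();
	if (err)
		goto out;
	/* success also reaches the label; err is 0 here */
out:
	unlock();
	return err;
}

int main(void) { return init_sketch(); }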
10345
10346/* Allow multicast from our BSSID. */
10347int
10348iwm_allow_mcast(struct iwm_softc *sc)
10349{
10350 struct ieee80211com *ic = &sc->sc_ic;
10351 struct iwm_node *in = (void *)ic->ic_bss;
10352 struct iwm_mcast_filter_cmd *cmd;
10353 size_t size;
10354 int err;
10355
10356 size = roundup(sizeof(*cmd), 4);
10357 cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
10358 if (cmd == NULL)
10359 return ENOMEM;
10360 cmd->filter_own = 1;
10361 cmd->port_id = 0;
10362 cmd->count = 0;
10363 cmd->pass_all = 1;
10364 IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
10365
10366 err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
10367 0, size, cmd);
10368 free(cmd, M_DEVBUF, size);
10369 return err;
10370}
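
The roundup(sizeof(*cmd), 4) above pads the command length to a 4-byte multiple before it is allocated and sent; the expansion shown is the usual (((x)+((y)-1))/(y))*(y) form. A quick check:

#include <assert.h>

#define ROUNDUP(x, y) ((((x)+((y)-1))/(y))*(y))

int
main(void)
{
	assert(ROUNDUP(1, 4) == 4);
	assert(ROUNDUP(4, 4) == 4);
	assert(ROUNDUP(5, 4) == 8);
	return 0;
}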
10371
10372int
10373iwm_init(struct ifnet *ifp)
10374{
10375 struct iwm_softc *sc = ifp->if_softc;
10376 struct ieee80211com *ic = &sc->sc_ic;
10377 int err, generation;
10378
10379 rw_assert_wrlock(&sc->ioctl_rwl);
10380
10381 generation = ++sc->sc_generation;
10382
10383 err = iwm_preinit(sc);
10384 if (err)
10385 return err;
10386
10387 err = iwm_start_hw(sc);
10388 if (err) {
10389 printf("%s: could not initialize hardware\n", DEVNAME(sc));
10390 return err;
10391 }
10392
10393 err = iwm_init_hw(sc);
10394 if (err) {
10395 if (generation == sc->sc_generation)
10396 iwm_stop_device(sc);
10397 return err;
10398 }
10399
10400 if (sc->sc_nvm.sku_cap_11n_enable)
10401 iwm_setup_ht_rates(sc);
10402 if (sc->sc_nvm.sku_cap_11ac_enable)
10403 iwm_setup_vht_rates(sc);
10404
10405 KASSERT(sc->task_refs.r_refs == 0);
10406 refcnt_init(&sc->task_refs);
10407 ifq_clr_oactive(&ifp->if_snd);
10408 ifp->if_flags |= IFF_RUNNING;
10409
10410 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
10411 ic->ic_bss->ni_chan = ic->ic_ibss_chan;
10412 ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
10413 return 0;
10414 }
10415
10416 ieee80211_begin_scan(ifp);
10417
10418 /*
10419 * ieee80211_begin_scan() ends up scheduling iwm_newstate_task().
10420 * Wait until the transition to SCAN state has completed.
10421 */
10422 do {
10423 err = tsleep_nsec(&ic->ic_state, PCATCH, "iwminit",
10424 SEC_TO_NSEC(1));
10425 if (generation != sc->sc_generation)
10426 return ENXIO;
10427 if (err) {
10428 iwm_stop(ifp);
10429 return err;
10430 }
10431 } while (ic->ic_state != IEEE80211_S_SCAN);
10432
10433 return 0;
10434}
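
The generation check after tsleep_nsec() is what makes the sleep safe: if the device was torn down and reinitialized while this thread slept, sc_generation no longer matches and the function returns instead of touching reinitialized state. The shape of the pattern, reduced to a sketch (names are illustrative):

#include <errno.h>

struct dev {
	int generation;		/* bumped on every device reset */
};

/* after any sleep, re-check before trusting cached device state */
static int
still_same_device(const struct dev *d, int my_generation)
{
	return (d->generation == my_generation) ? 0 : ENXIO;
}

int
main(void)
{
	struct dev d = { .generation = 1 };
	int my_generation = d.generation;

	d.generation++;		/* simulate a reset during sleep */
	return still_same_device(&d, my_generation) == ENXIO ? 0 : 1;
}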
10435
10436void
10437iwm_start(struct ifnet *ifp)
10438{
10439 struct iwm_softc *sc = ifp->if_softc;
10440 struct ieee80211com *ic = &sc->sc_ic;
10441 struct ieee80211_node *ni;
10442 struct ether_header *eh;
10443 struct mbuf *m;
10444 int ac = EDCA_AC_BE; /* XXX */
10445
10446 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
10447 return;
10448
10449 for (;;) {
10450 /* why isn't this done per-queue? */
10451 if (sc->qfullmsk != 0) {
10452 ifq_set_oactive(&ifp->if_snd);
10453 break;
10454 }
10455
10456 /* Don't queue additional frames while flushing Tx queues. */
10457 if (sc->sc_flags & IWM_FLAG_TXFLUSH)
10458 break;
10459
10460 /* need to send management frames even if we're not RUNning */
10461 m = mq_dequeue(&ic->ic_mgtq);
10462 if (m) {
10463 ni = m->m_pkthdr.ph_cookie;
10464 goto sendit;
10465 }
10466
10467 if (ic->ic_state != IEEE80211_S_RUN ||
10468 (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
10469 break;
10470
10471 m = ifq_dequeue(&ifp->if_snd);
10472 if (!m)
10473 break;
10474 if (m->m_len < sizeof (*eh) &&
10475 (m = m_pullup(m, sizeof (*eh))) == NULL) {
10476 ifp->if_oerrors++;
10477 continue;
10478 }
10479#if NBPFILTER > 0
10480 if (ifp->if_bpf != NULL)
10481 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
10482#endif
10483 if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
10484 ifp->if_oerrors++;
10485 continue;
10486 }
10487
10488 sendit:
10489#if NBPFILTER > 0
10490 if (ic->ic_rawbpf != NULL)
10491 bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
10492#endif
10493 if (iwm_tx(sc, m, ni, ac) != 0) {
10494 ieee80211_release_node(ic, ni);
10495 ifp->if_oerrors++;
10496 continue;
10497 }
10498
10499 if (ifp->if_flags & IFF_UP)
10500 ifp->if_timer = 1;
10501 }
10502
10503 return;
10504}
10505
10506void
10507iwm_stop(struct ifnet *ifp)
10508{
10509 struct iwm_softc *sc = ifp->if_softc;
10510 struct ieee80211com *ic = &sc->sc_ic;
10511 struct iwm_node *in = (void *)ic->ic_bss;
10512 int i, s = splnet();
10513
10514 rw_assert_wrlock(&sc->ioctl_rwl);
10515
10516 sc->sc_flags |= IWM_FLAG_SHUTDOWN; /* Disallow new tasks. */
10517
10518 /* Cancel scheduled tasks and let any stale tasks finish up. */
10519 task_del(systq, &sc->init_task);
10520 iwm_del_task(sc, sc->sc_nswq, &sc->newstate_task);
10521 iwm_del_task(sc, systq, &sc->ba_task);
10522 iwm_del_task(sc, systq, &sc->mac_ctxt_task);
10523 iwm_del_task(sc, systq, &sc->phy_ctxt_task);
10524 iwm_del_task(sc, systq, &sc->bgscan_done_task);
10525 KASSERT(sc->task_refs.r_refs >= 1);
10526 refcnt_finalize(&sc->task_refs, "iwmstop");
10527
10528 iwm_stop_device(sc);
10529
10530 free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
10531 sc->bgscan_unref_arg = NULL;
10532 sc->bgscan_unref_arg_size = 0;
10533
10534 /* Reset soft state. */
10535
10536 sc->sc_generation++;
10537 for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
10538 free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
10539 sc->sc_cmd_resp_pkt[i] = NULL;
10540 sc->sc_cmd_resp_len[i] = 0;
10541 }
10542 ifp->if_flags &= ~IFF_RUNNING;
10543 ifq_clr_oactive(&ifp->if_snd);
10544
10545 in->in_phyctxt = NULL;
10546 in->tid_disable_ampdu = 0xffff;
10547 in->tfd_queue_msk = 0;
10548 IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
10549
10550 sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
10551 sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
10552 sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
10553 sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
10554 sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
10555 sc->sc_flags &= ~IWM_FLAG_HW_ERR;
10556 sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;
10557 sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
10558
10559 sc->sc_rx_ba_sessions = 0;
10560 sc->ba_rx.start_tidmask = 0;
10561 sc->ba_rx.stop_tidmask = 0;
10562 sc->tx_ba_queue_mask = 0;
10563 sc->ba_tx.start_tidmask = 0;
10564 sc->ba_tx.stop_tidmask = 0;
10565
10566 sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
10567 sc->ns_nstate = IEEE80211_S_INIT;
10568
10569 timeout_del(&sc->sc_calib_to); /* XXX refcount? */
10570 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10571 struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
10572 iwm_clear_reorder_buffer(sc, rxba);
10573 }
10574 iwm_led_blink_stop(sc);
10575 memset(sc->sc_tx_timer, 0, sizeof(sc->sc_tx_timer));
10576 ifp->if_timer = 0;
10577
10578 splx(s);
10579}
10580
10581void
10582iwm_watchdog(struct ifnet *ifp)
10583{
10584 struct iwm_softc *sc = ifp->if_softc;
10585 int i;
10586
10587 ifp->if_timer = 0;
10588
10589 /*
10590 * We maintain a separate timer for each Tx queue because
10591 * Tx aggregation queues can get "stuck" while other queues
10592 * keep working. The Linux driver uses a similar workaround.
10593 */
10594 for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
10595 if (sc->sc_tx_timer[i] > 0) {
10596 if (--sc->sc_tx_timer[i] == 0) {
10597 printf("%s: device timeout\n", DEVNAME(sc));
10598 if (ifp->if_flags & IFF_DEBUG) {
10599 iwm_nic_error(sc);
10600 iwm_dump_driver_status(sc);
10601 }
10602 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
10603 task_add(systq, &sc->init_task);
10604 ifp->if_oerrors++;
10605 return;
10606 }
10607 ifp->if_timer = 1;
10608 }
10609 }
10610
10611 ieee80211_watchdog(ifp);
10612}
10613
10614int
10615iwm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
10616{
10617 struct iwm_softc *sc = ifp->if_softc;
10618 int s, err = 0, generation = sc->sc_generation;
10619
10620 /*
10621 * Prevent processes from entering this function while another
10622 * process is tsleep'ing in it.
10623 */
10624 err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
10625 if (err == 0 && generation != sc->sc_generation) {
10626 rw_exit(&sc->ioctl_rwl);
10627 return ENXIO;
10628 }
10629 if (err)
10630 return err;
10631 s = splnet();
10632
10633 switch (cmd) {
10634 case SIOCSIFADDR:
10635 ifp->if_flags |= IFF_UP;
10636 /* FALLTHROUGH */
10637 case SIOCSIFFLAGS:
10638 if (ifp->if_flags & IFF_UP) {
10639 if (!(ifp->if_flags & IFF_RUNNING)) {
10640 /* Force reload of firmware image from disk. */
10641 sc->sc_fw.fw_status = IWM_FW_STATUS_NONE;
10642 err = iwm_init(ifp);
10643 }
10644 } else {
10645 if (ifp->if_flags & IFF_RUNNING)
10646 iwm_stop(ifp);
10647 }
10648 break;
10649
10650 default:
10651 err = ieee80211_ioctl(ifp, cmd, data);
10652 }
10653
10654 if (err == ENETRESET) {
10655 err = 0;
10656 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
10657 (IFF_UP | IFF_RUNNING)) {
10658 iwm_stop(ifp);
10659 err = iwm_init(ifp);
10660 }
10661 }
10662
10663 splx(s);
10664 rw_exit(&sc->ioctl_rwl);
10665
10666 return err;
10667}
10668
10669/*
10670 * Note: This structure is read from the device with IO accesses,
10671 * and the reading already does the endian conversion. As it is
10672 * read with uint32_t-sized accesses, any members with a different size
10673 * need to be ordered correctly though!
10674 */
10675struct iwm_error_event_table {
10676 uint32_t valid; /* (nonzero) valid, (0) log is empty */
10677 uint32_t error_id; /* type of error */
10678 uint32_t trm_hw_status0; /* TRM HW status */
10679 uint32_t trm_hw_status1; /* TRM HW status */
10680 uint32_t blink2; /* branch link */
10681 uint32_t ilink1; /* interrupt link */
10682 uint32_t ilink2; /* interrupt link */
10683 uint32_t data1; /* error-specific data */
10684 uint32_t data2; /* error-specific data */
10685 uint32_t data3; /* error-specific data */
10686 uint32_t bcon_time; /* beacon timer */
10687 uint32_t tsf_low; /* network timestamp function timer */
10688 uint32_t tsf_hi; /* network timestamp function timer */
10689 uint32_t gp1; /* GP1 timer register */
10690 uint32_t gp2; /* GP2 timer register */
10691 uint32_t fw_rev_type; /* firmware revision type */
10692 uint32_t major; /* uCode version major */
10693 uint32_t minor; /* uCode version minor */
10694 uint32_t hw_ver; /* HW Silicon version */
10695 uint32_t brd_ver; /* HW board version */
10696 uint32_t log_pc; /* log program counter */
10697 uint32_t frame_ptr; /* frame pointer */
10698 uint32_t stack_ptr; /* stack pointer */
10699 uint32_t hcmd; /* last host command header */
10700 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
10701 * rxtx_flag */
10702 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
10703 * host_flag */
10704 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
10705 * enc_flag */
10706 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
10707 * time_flag */
10708 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
10709 * wico interrupt */
10710 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
10711 uint32_t wait_event; /* wait event() caller address */
10712 uint32_t l2p_control; /* L2pControlField */
10713 uint32_t l2p_duration; /* L2pDurationField */
10714 uint32_t l2p_mhvalid; /* L2pMhValidBits */
10715 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
10716 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
10717 * (LMPM_PMG_SEL) */
10718 uint32_t u_timestamp; /* date and time of the
10719 * compilation */
10720 uint32_t flow_handler; /* FH read/write pointers, RX credit */
10721} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
10722
10723/*
10724 * UMAC error struct - relevant starting from family 8000 chip.
10725 * Note: This structure is read from the device with IO accesses,
10726 * and the reading already does the endian conversion. As it is
10727 * read with u32-sized accesses, any members with a different size
10728 * need to be ordered correctly though!
10729 */
10730struct iwm_umac_error_event_table {
10731 uint32_t valid; /* (nonzero) valid, (0) log is empty */
10732 uint32_t error_id; /* type of error */
10733 uint32_t blink1; /* branch link */
10734 uint32_t blink2; /* branch link */
10735 uint32_t ilink1; /* interrupt link */
10736 uint32_t ilink2; /* interrupt link */
10737 uint32_t data1; /* error-specific data */
10738 uint32_t data2; /* error-specific data */
10739 uint32_t data3; /* error-specific data */
10740 uint32_t umac_major;
10741 uint32_t umac_minor;
10742 uint32_t frame_pointer; /* core register 27*/
10743 uint32_t stack_pointer; /* core register 28 */
10744 uint32_t cmd_header; /* latest host cmd sent to UMAC */
10745 uint32_t nic_isr_pref; /* ISR status register */
10746} __packed;
10747
10748#define ERROR_START_OFFSET (1 * sizeof(uint32_t))
10749#define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
10750
10751void
10752iwm_nic_umac_error(struct iwm_softc *sc)
10753{
10754 struct iwm_umac_error_event_table table;
10755 uint32_t base;
10756
10757 base = sc->sc_uc.uc_umac_error_event_table;
10758
10759 if (base < 0x800000) {
10760 printf("%s: Invalid error log pointer 0x%08x\n",
10761 DEVNAME(sc), base);
10762 return;
10763 }
10764
10765 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10766 printf("%s: reading errlog failed\n", DEVNAME(sc));
10767 return;
10768 }
10769
10770 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
10771 printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
10772 printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10773 sc->sc_flags, table.valid);
10774 }
10775
10776 printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
10777 iwm_desc_lookup(table.error_id));
10778 printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
10779 printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
10780 printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
10781 printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
10782 printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
10783 printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
10784 printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
10785 printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
10786 printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
10787 printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
10788 table.frame_pointer);
10789 printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
10790 table.stack_pointer);
10791 printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
10792 printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
10793 table.nic_isr_pref);
10794}
10795
10796#define IWM_FW_SYSASSERT_CPU_MASK 0xf0000000
10797static struct {
10798 const char *name;
10799 uint8_t num;
10800} advanced_lookup[] = {
10801 { "NMI_INTERRUPT_WDG", 0x34 },
10802 { "SYSASSERT", 0x35 },
10803 { "UCODE_VERSION_MISMATCH", 0x37 },
10804 { "BAD_COMMAND", 0x38 },
10805 { "BAD_COMMAND", 0x39 },
10806 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
10807 { "FATAL_ERROR", 0x3D },
10808 { "NMI_TRM_HW_ERR", 0x46 },
10809 { "NMI_INTERRUPT_TRM", 0x4C },
10810 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
10811 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
10812 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
10813 { "NMI_INTERRUPT_HOST", 0x66 },
10814 { "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
10815 { "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
10816 { "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
10817 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
10818 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
10819 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
10820 { "ADVANCED_SYSASSERT", 0 },
10821};
10822
10823const char *
10824iwm_desc_lookup(uint32_t num)
10825{
10826 int i;
10827
10828 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
10829 if (advanced_lookup[i].num ==
10830 (num & ~IWM_FW_SYSASSERT_CPU_MASK))
10831 return advanced_lookup[i].name;
10832
10833 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
10834 return advanced_lookup[i].name;
10835}
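
iwm_desc_lookup() masks off the top CPU-identifier nibble before comparing, and relies on the terminating ADVANCED_SYSASSERT entry (num 0) as a catch-all. A compact standalone version of the same lookup:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CPU_MASK 0xf0000000

static const struct { const char *name; uint8_t num; } tbl[] = {
	{ "SYSASSERT", 0x35 },
	{ "BAD_COMMAND", 0x38 },
	{ "ADVANCED_SYSASSERT", 0 },	/* catch-all sentinel */
};

static const char *
desc_lookup(uint32_t num)
{
	size_t i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]) - 1; i++)
		if (tbl[i].num == (num & ~CPU_MASK))
			return tbl[i].name;
	return tbl[i].name;	/* sentinel entry */
}

int
main(void)
{
	printf("%s\n", desc_lookup(0x10000035));	/* prints SYSASSERT */
	return 0;
}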
10836
10837/*
10838 * Support for dumping the error log seemed like a good idea ...
10839 * but it's mostly hex junk and the only sensible thing is the
10840 * hw/ucode revision (which we know anyway). Since it's here,
10841 * I'll just leave it in, just in case e.g. the Intel guys want to
10842 * help us decipher some "ADVANCED_SYSASSERT" later.
10843 */
10844void
10845iwm_nic_error(struct iwm_softc *sc)
10846{
10847 struct iwm_error_event_table table;
10848 uint32_t base;
10849
10850 printf("%s: dumping device error log\n", DEVNAME(sc));
10851 base = sc->sc_uc.uc_error_event_table;
10852 if (base < 0x800000) {
10853 printf("%s: Invalid error log pointer 0x%08x\n",
10854 DEVNAME(sc), base);
10855 return;
10856 }
10857
10858 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10859 printf("%s: reading errlog failed\n", DEVNAME(sc));
10860 return;
10861 }
10862
10863 if (!table.valid) {
10864 printf("%s: errlog not found, skipping\n", DEVNAME(sc));
10865 return;
10866 }
10867
10868 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
10869 printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
10870 printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10871 sc->sc_flags, table.valid);
10872 }
10873
10874 printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
10875 iwm_desc_lookup(table.error_id));
10876 printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
10877 table.trm_hw_status0);
10878 printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
10879 table.trm_hw_status1);
10880 printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
10881 printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
10882 printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
10883 printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
10884 printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
10885 printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
10886 printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
10887 printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
10888 printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
10889 printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
10890 printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
10891 printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
10892 table.fw_rev_type);
10893 printf("%s: %08X | uCode version major\n", DEVNAME(sc),
10894 table.major);
10895 printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
10896 table.minor);
10897 printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
10898 printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
10899 printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
10900 printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
10901 printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
10902 printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
10903 printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
10904 printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
10905 printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
10906 printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
10907 printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
10908 printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
10909 printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
10910 printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
10911 printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
10912 printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
10913 printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
10914
10915 if (sc->sc_uc.uc_umac_error_event_table)
10916 iwm_nic_umac_error(sc);
10917}
10918
10919void
10920iwm_dump_driver_status(struct iwm_softc *sc)
10921{
10922 int i;
10923
10924 printf("driver status:\n");
10925 for (i = 0; i < IWM_MAX_QUEUES; i++) {
10926 struct iwm_tx_ring *ring = &sc->txq[i];
10927 printf(" tx ring %2d: qid=%-2d cur=%-3d "
10928 "queued=%-3d\n",
10929 i, ring->qid, ring->cur, ring->queued);
10930 }
10931 printf(" rx ring: cur=%d\n", sc->rxq.cur);
10932 printf(" 802.11 state %s\n",
10933 ieee80211_state_name[sc->sc_ic.ic_state]);
10934}
10935
10936#define SYNC_RESP_STRUCT(_var_, _pkt_) \
10937do { \
10938 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
10939 sizeof(*(_var_)), BUS_DMASYNC_POSTREAD); \
10940 _var_ = (void *)((_pkt_)+1); \
10941} while (/*CONSTCOND*/0)
10942
10943#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_) \
10944do { \
10945 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
10946 sizeof(len), BUS_DMASYNC_POSTREAD); \
10947 _ptr_ = (void *)((_pkt_)+1); \
10948} while (/*CONSTCOND*/0)
10949
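Both macros assume a local struct iwm_rx_data *data in the caller's scope. Note also that, per its expansion above, SYNC_RESP_PTR syncs sizeof(len) bytes: it ignores its _len_ argument and relies on a variable named len existing at the use site (iwm_rx_pkt() below declares one).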
10950#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % count);
10951
10952int
10953iwm_rx_pkt_valid(struct iwm_rx_packet *pkt)
10954{
10955 int qid, idx, code;
10956
10957 qid = pkt->hdr.qid & ~0x80;
10958 idx = pkt->hdr.idx;
10959 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
10960
10961 return (!(qid == 0 && idx == 0 && code == 0) &&
10962 pkt->len_n_flags != htole32(IWM_FH_RSCSR_FRAME_INVALID));
10963}
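
A packet slot is treated as valid unless its qid/idx/code are all zero or its len_n_flags carries the 0x55550000 invalid-frame marker (IWM_FH_RSCSR_FRAME_INVALID per the expansion above). A standalone rendering of the same predicate, assuming a little-endian host so htole32 is the identity:

#include <stdint.h>

struct rx_hdr { uint8_t code, flags, idx, qid; };	/* illustrative layout */

static int
rx_pkt_valid(const struct rx_hdr *hdr, uint32_t len_n_flags)
{
	int qid = hdr->qid & ~0x80;
	int code = (hdr->flags << 8) | hdr->code;

	return (!(qid == 0 && hdr->idx == 0 && code == 0) &&
	    len_n_flags != 0x55550000);
}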
10964
10965void
10966iwm_rx_pkt(struct iwm_softc *sc, struct iwm_rx_data *data, struct mbuf_list *ml)
10967{
10968 struct ifnet *ifp = IC2IFP(&sc->sc_ic);
10969 struct iwm_rx_packet *pkt, *nextpkt;
10970 uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
10971 struct mbuf *m0, *m;
10972 const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
10973 int qid, idx, code, handled = 1;
10974
10975 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
10976 BUS_DMASYNC_POSTREAD);
10977
10978 m0 = data->m;
10979 while (m0 && offset + minsz < IWM_RBUF_SIZE) {
10980 pkt = (struct iwm_rx_packet *)(m0->m_data + offset);
10981 qid = pkt->hdr.qid;
10982 idx = pkt->hdr.idx;
10983
10984 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
10985
10986 if (!iwm_rx_pkt_valid(pkt))
10987 break;
10988
10989 len = sizeof(pkt->len_n_flags) + iwm_rx_packet_len(pkt);
10990 if (len < minsz || len > (IWM_RBUF_SIZE - offset))
10991 break;
10992
10993 if (code == IWM_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
10994 /* Take mbuf m0 off the RX ring. */
10995 if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur)) {
10996 ifp->if_ierrors++;
10997 break;
10998 }
10999 KASSERT(data->m != m0);
11000 }
11001
11002 switch (code) {
11003 case IWM_REPLY_RX_PHY_CMD:
11004 iwm_rx_rx_phy_cmd(sc, pkt, data);
11005 break;
11006
11007 case IWM_REPLY_RX_MPDU_CMD: {
11008 size_t maxlen = IWM_RBUF_SIZE - offset - minsz;
11009 nextoff = offset +
11010 roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
11011 nextpkt = (struct iwm_rx_packet *)
11012 (m0->m_data + nextoff);
11013 if (nextoff + minsz >= IWM_RBUF_SIZE ||
11014 !iwm_rx_pkt_valid(nextpkt)) {
11015 /* No need to copy last frame in buffer. */
11016 if (offset > 0)
11017 m_adj(m0, offset);
11018 if (sc->sc_mqrx_supported)
11019 iwm_rx_mpdu_mq(sc, m0, pkt->data,
11020 maxlen, ml);
11021 else
11022 iwm_rx_mpdu(sc, m0, pkt->data,
11023 maxlen, ml);
11024 m0 = NULL; /* stack owns m0 now; abort loop */
11025 } else {
11026 /*
11027 * Create an mbuf which points to the current
11028 * packet. Always copy from offset zero to
11029 * preserve m_pkthdr.
11030 */
11031 m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
11032 if (m == NULL) {
11033 ifp->if_ierrors++;
11034 m_freem(m0);
11035 m0 = NULL;
11036 break;
11037 }
11038 m_adj(m, offset);
11039 if (sc->sc_mqrx_supported)
11040 iwm_rx_mpdu_mq(sc, m, pkt->data,
11041 maxlen, ml);
11042 else
11043 iwm_rx_mpdu(sc, m, pkt->data,
11044 maxlen, ml);
11045 }
11046 break;
11047 }
11048
11049 case IWM_TX_CMD:
11050 iwm_rx_tx_cmd(sc, pkt, data);
11051 break;
11052
11053 case IWM_BA_NOTIF:
11054 iwm_rx_compressed_ba(sc, pkt);
11055 break;
11056
11057 case IWM_MISSED_BEACONS_NOTIFICATION:
11058 iwm_rx_bmiss(sc, pkt, data);
11059 break;
11060
11061 case IWM_MFUART_LOAD_NOTIFICATION:
11062 break;
11063
11064 case IWM_ALIVE: {
11065 struct iwm_alive_resp_v1 *resp1;
11066 struct iwm_alive_resp_v2 *resp2;
11067 struct iwm_alive_resp_v3 *resp3;
11068
11069 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
11070 SYNC_RESP_STRUCT(resp1, pkt);
11071 sc->sc_uc.uc_error_event_table
11072 = le32toh(resp1->error_event_table_ptr);
11073 sc->sc_uc.uc_log_event_table
11074 = le32toh(resp1->log_event_table_ptr);
11075 sc->sched_base = le32toh(resp1->scd_base_ptr);
11076 if (resp1->status == IWM_ALIVE_STATUS_OK)
11077 sc->sc_uc.uc_ok = 1;
11078 else
11079 sc->sc_uc.uc_ok = 0;
11080 }
11081
11082 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
11083 SYNC_RESP_STRUCT(resp2, pkt);
11084 sc->sc_uc.uc_error_event_table
11085 = le32toh(resp2->error_event_table_ptr);
11086 sc->sc_uc.uc_log_event_table
11087 = le32toh(resp2->log_event_table_ptr);
11088 sc->sched_base = le32toh(resp2->scd_base_ptr);
11089 sc->sc_uc.uc_umac_error_event_table
11090 = le32toh(resp2->error_info_addr);
11091 if (resp2->status == IWM_ALIVE_STATUS_OK)
11092 sc->sc_uc.uc_ok = 1;
11093 else
11094 sc->sc_uc.uc_ok = 0;
11095 }
11096
11097 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
11098 SYNC_RESP_STRUCT(resp3, pkt);
11099 sc->sc_uc.uc_error_event_table
11100 = le32toh(resp3->error_event_table_ptr);
11101 sc->sc_uc.uc_log_event_table
11102 = le32toh(resp3->log_event_table_ptr);
11103 sc->sched_base = le32toh(resp3->scd_base_ptr);
11104 sc->sc_uc.uc_umac_error_event_table
11105 = le32toh(resp3->error_info_addr);
11106 if (resp3->status == IWM_ALIVE_STATUS_OK)
11107 sc->sc_uc.uc_ok = 1;
11108 else
11109 sc->sc_uc.uc_ok = 0;
11110 }
11111
11112 sc->sc_uc.uc_intr = 1;
11113 wakeup(&sc->sc_uc);
11114 break;
11115 }
11116
11117 case IWM_CALIB_RES_NOTIF_PHY_DB: {
11118 struct iwm_calib_res_notif_phy_db *phy_db_notif;
11119 SYNC_RESP_STRUCT(phy_db_notif, pkt);
11120 iwm_phy_db_set_section(sc, phy_db_notif);
11121 sc->sc_init_complete |= IWM_CALIB_COMPLETE;
11122 wakeup(&sc->sc_init_complete);
11123 break;
11124 }
11125
11126 case IWM_STATISTICS_NOTIFICATION: {
11127 struct iwm_notif_statistics *stats;
11128 SYNC_RESP_STRUCT(stats, pkt);
11129 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
11130 sc->sc_noise = iwm_get_noise(&stats->rx.general);
11131 break;
11132 }
11133
11134 case IWM_MCC_CHUB_UPDATE_CMD: {
11135 struct iwm_mcc_chub_notif *notif;
11136 SYNC_RESP_STRUCT(notif, pkt);
11137 iwm_mcc_update(sc, notif);
11138 break;
11139 }
11140
11141 case IWM_DTS_MEASUREMENT_NOTIFICATION:
11142 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11143 IWM_DTS_MEASUREMENT_NOTIF_WIDE):
11144 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11145 IWM_TEMP_REPORTING_THRESHOLDS_CMD):
11146 break;
11147
11148 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11149 IWM_CT_KILL_NOTIFICATION): {
11150 struct iwm_ct_kill_notif *notif;
11151 SYNC_RESP_STRUCT(notif, pkt);
11152 printf("%s: device at critical temperature (%u degC), "
11153 "stopping device\n",
11154 DEVNAME(sc), le16toh(notif->temperature));
11155 sc->sc_flags |= IWM_FLAG_HW_ERR;
11156 task_add(systq, &sc->init_task);
11157 break;
11158 }
11159
11160 case IWM_ADD_STA_KEY:
11161 case IWM_PHY_CONFIGURATION_CMD:
11162 case IWM_TX_ANT_CONFIGURATION_CMD:
11163 case IWM_ADD_STA:
11164 case IWM_MAC_CONTEXT_CMD:
11165 case IWM_REPLY_SF_CFG_CMD:
11166 case IWM_POWER_TABLE_CMD:
11167 case IWM_LTR_CONFIG:
11168 case IWM_PHY_CONTEXT_CMD:
11169 case IWM_BINDING_CONTEXT_CMD:
11170 case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_CFG_CMD):
11171 case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_REQ_UMAC):
11172 case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
11173 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
11174 case IWM_SCAN_OFFLOAD_ABORT_CMD:
11175 case IWM_REPLY_BEACON_FILTERING_CMD:
11176 case IWM_MAC_PM_POWER_TABLE:
11177 case IWM_TIME_QUOTA_CMD:
11178 case IWM_REMOVE_STA:
11179 case IWM_TXPATH_FLUSH:
11180 case IWM_LQ_CMD:
11181 case IWM_WIDE_ID(IWM_LONG_GROUP,
11182 IWM_FW_PAGING_BLOCK_CMD):
11183 case IWM_BT_CONFIG:
11184 case IWM_REPLY_THERMAL_MNG_BACKOFF:
11185 case IWM_NVM_ACCESS_CMD:
11186 case IWM_MCC_UPDATE_CMD:
11187 case IWM_TIME_EVENT_CMD: {
11188 size_t pkt_len;
11189
11190 if (sc->sc_cmd_resp_pkt[idx] == NULL)
11191 break;
11192
11193 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
11194 sizeof(*pkt), BUS_DMASYNC_POSTREAD);
11195
11196 pkt_len = sizeof(pkt->len_n_flags) +
11197 iwm_rx_packet_len(pkt);
11198
11199 if ((pkt->hdr.flags & IWM_CMD_FAILED_MSK) ||
11200 pkt_len < sizeof(*pkt) ||
11201 pkt_len > sc->sc_cmd_resp_len[idx]) {
11202 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
11203 sc->sc_cmd_resp_len[idx]);
11204 sc->sc_cmd_resp_pkt[idx] = NULL;
11205 break;
11206 }
11207
11208 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
11209 pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
11210 memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
11211 break;
11212 }
11213
11214 /* ignore */
11215 case IWM_PHY_DB_CMD:
11216 break;
11217
11218 case IWM_INIT_COMPLETE_NOTIF:
11219 sc->sc_init_complete |= IWM_INIT_COMPLETE;
11220 wakeup(&sc->sc_init_complete);
11221 break;
11222
11223 case IWM_SCAN_OFFLOAD_COMPLETE: {
11224 struct iwm_periodic_scan_complete *notif;
11225 SYNC_RESP_STRUCT(notif, pkt);
11226 break;
11227 }
11228
11229 case IWM_SCAN_ITERATION_COMPLETE: {
11230 struct iwm_lmac_scan_complete_notif *notif;
11231 SYNC_RESP_STRUCT(notif, pkt);
11232 iwm_endscan(sc);
11233 break;
11234 }
11235
11236 case IWM_SCAN_COMPLETE_UMAC: {
11237 struct iwm_umac_scan_complete *notif;
11238 SYNC_RESP_STRUCT(notif, pkt);
11239 iwm_endscan(sc);
11240 break;
11241 }
11242
11243 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
11244 struct iwm_umac_scan_iter_complete_notif *notif;
11245 SYNC_RESP_STRUCT(notif, pkt);
11246 iwm_endscan(sc);
11247 break;
11248 }
11249
11250 case IWM_REPLY_ERROR: {
11251 struct iwm_error_resp *resp;
11252 SYNC_RESP_STRUCT(resp, pkt);
11253 printf("%s: firmware error 0x%x, cmd 0x%x\n",
11254 DEVNAME(sc), le32toh(resp->error_type),
11255 resp->cmd_id);
11256 break;
11257 }
11258
11259 case IWM_TIME_EVENT_NOTIFICATION: {
11260 struct iwm_time_event_notif *notif;
11261 uint32_t action;
11262 SYNC_RESP_STRUCT(notif, pkt);
11263
11264 if (sc->sc_time_event_uid != le32toh(notif->unique_id))
11265 break;
11266 action = le32toh(notif->action);
11267 if (action & IWM_TE_V2_NOTIF_HOST_EVENT_END)
11268 sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
11269 break;
11270 }
11271
11272 case IWM_WIDE_ID(IWM_SYSTEM_GROUP,
11273 IWM_FSEQ_VER_MISMATCH_NOTIFICATION):
11274 break;
11275
11276 /*
11277 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
11278 * messages. Just ignore them for now.
11279 */
11280 case IWM_DEBUG_LOG_MSG:
11281 break;
11282
11283 case IWM_MCAST_FILTER_CMD:
11284 break;
11285
11286 case IWM_SCD_QUEUE_CFG: {
11287 struct iwm_scd_txq_cfg_rsp *rsp;
11288 SYNC_RESP_STRUCT(rsp, pkt);
11289
11290 break;
11291 }
11292
11293 case IWM_WIDE_ID(IWM_DATA_PATH_GROUP, IWM_DQA_ENABLE_CMD):
11294 break;
11295
11296 case IWM_WIDE_ID(IWM_SYSTEM_GROUP, IWM_SOC_CONFIGURATION_CMD):
11297 break;
11298
11299 default:
11300 handled = 0;
11301 printf("%s: unhandled firmware response 0x%x/0x%x "
11302 "rx ring %d[%d]\n",
11303 DEVNAME(sc), code, pkt->len_n_flags,
11304 (qid & ~0x80), idx);
11305 break;
11306 }
11307
11308 /*
11309 * uCode sets bit 0x80 when it originates the notification,
11310 * i.e. when the notification is not a direct response to a
11311 * command sent by the driver.
11312 * For example, uCode issues IWM_REPLY_RX when it sends a
11313 * received frame to the driver.
11314 */
11315 if (handled && !(qid & (1 << 7))) {
11316 iwm_cmd_done(sc, qid, idx, code);
11317 }
11318
11319 offset += roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
11320 }
11321
11322 if (m0 && m0 != data->m)
11323 m_freem(m0);
11324}
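
iwm_rx_pkt() walks multiple packets packed into one 4 KB RX buffer, advancing by each packet's length rounded up to the 0x40-byte frame alignment. The walk in isolation (get_len() is a stand-in for sizeof(len_n_flags) + iwm_rx_packet_len(); this is a sketch, not driver code):

#include <stddef.h>
#include <stdint.h>

#define RBUF_SIZE 4096
#define FRAME_ALIGN 0x40
#define ROUNDUP(x, y) ((((x)+((y)-1))/(y))*(y))

static void
walk_rx_buffer(const uint8_t *buf, size_t minsz,
    size_t (*get_len)(const uint8_t *), void (*visit)(const uint8_t *))
{
	size_t offset = 0;

	while (offset + minsz < RBUF_SIZE) {
		size_t len = get_len(buf + offset);

		/* same bounds checks as the driver's loop */
		if (len < minsz || len > RBUF_SIZE - offset)
			break;
		visit(buf + offset);
		offset += ROUNDUP(len, FRAME_ALIGN);
	}
}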
11325
11326void
11327iwm_notif_intr(struct iwm_softc *sc)
11328{
11329 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
11330 uint32_t wreg;
11331 uint16_t hw;
11332 int count;
11333
11334 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
11335 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
11336
11337 if (sc->sc_mqrx_supported) {
11338 count = IWM_RX_MQ_RING_COUNT;
11339 wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
11340 } else {
11341 count = IWM_RX_RING_COUNT;
11342 wreg = IWM_FH_RSCSR_CHNL0_WPTR;
11343 }
11344
11345 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
11346 hw &= (count - 1);
11347 while (sc->rxq.cur != hw) {
11348 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
11349 iwm_rx_pkt(sc, data, &ml);
11350 ADVANCE_RXQ(sc);
11351 }
11352 if_input(&sc->sc_ic.ic_if, &ml);
11353
11354 /*
11355 * Tell the firmware what we have processed.
11356 * Seems like the hardware gets upset unless we align the write by 8??
11357 */
11358 hw = (hw == 0) ? count - 1 : hw - 1;
11359 IWM_WRITE(sc, wreg, hw & ~7);
11360}
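
The closing write-back above steps the hardware pointer to one slot behind the last processed entry and rounds down to a multiple of 8, matching the comment about 8-alignment. The same computation in isolation:

#include <assert.h>
#include <stdint.h>

/* compute the value written to the RX write-pointer register */
static uint16_t
rx_wptr(uint16_t hw, int count)
{
	hw = (hw == 0) ? count - 1 : hw - 1;
	return hw & ~7;
}

int
main(void)
{
	assert(rx_wptr(0, 256) == 248);	/* wraps to 255, aligns down to 248 */
	assert(rx_wptr(13, 256) == 8);	/* 12 aligned down to 8 */
	return 0;
}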
11361
11362int
11363iwm_intr(void *arg)
11364{
11365 struct iwm_softc *sc = arg;
11366 struct ieee80211com *ic = &sc->sc_ic;
11367 struct ifnet *ifp = IC2IFP(ic)(&(ic)->ic_ac.ac_if);
11368 int handled = 0;
11369 int rv = 0;
11370 uint32_t r1, r2;
11371
11372 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
11373
11374 if (sc->sc_flags & IWM_FLAG_USE_ICT) {
11375 uint32_t *ict = sc->ict_dma.vaddr;
11376 int tmp;
11377
11378 tmp = htole32(ict[sc->ict_cur]);
11379 if (!tmp)
11380 goto out_ena;
11381
11382 /*
11383 * ok, there was something. keep plowing until we have all.
11384 */
11385 r1 = r2 = 0;
11386 while (tmp) {
11387 r1 |= tmp;
11388 ict[sc->ict_cur] = 0;
11389 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
11390 tmp = htole32(ict[sc->ict_cur]);
11391 }
11392
11393 /* this is where the fun begins. don't ask */
11394 if (r1 == 0xffffffff)
11395 r1 = 0;
11396
11397 /*
11398 * Workaround for hardware bug where bits are falsely cleared
11399 * when using interrupt coalescing. Bit 15 should be set if
11400 * bits 18 and 19 are set.
11401 */
11402 if (r1 & 0xc0000)
11403 r1 |= 0x8000;
11404
11405 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
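This line re-expands the compressed 16-bit interrupt word read from the ICT table into normal IWM_CSR_INT bit positions: bits 0-7 stay in place and bits 8-15 move up to bits 24-31. For example, the 0x8000 injected by the coalescing workaround above becomes 0x8000 << 16 == 0x80000000, which is IWM_CSR_INT_BIT_FH_RX (1U << 31).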
11406 } else {
11407 r1 = IWM_READ(sc, IWM_CSR_INT);
11408 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
11409 }
11410 if (r1 == 0 && r2 == 0) {
11411 goto out_ena;
11412 }
11413 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
11414 goto out;
11415
11416 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
11417
11418 /* ignored */
11419 handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
11420
11421 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
11422 handled |= IWM_CSR_INT_BIT_RF_KILL;
11423 iwm_check_rfkill(sc);
11424 task_add(systq, &sc->init_task);
11425 rv = 1;
11426 goto out_ena;
11427 }
11428
11429 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
11430 if (ifp->if_flags & IFF_DEBUG) {
11431 iwm_nic_error(sc);
11432 iwm_dump_driver_status(sc);
11433 }
11434 printf("%s: fatal firmware error\n", DEVNAME(sc));
11435 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
11436 task_add(systq, &sc->init_task);
11437 rv = 1;
11438 goto out;
11439
11440 }
11441
11442 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
11443 handled |= IWM_CSR_INT_BIT_HW_ERR;
11444 printf("%s: hardware error, stopping device \n", DEVNAME(sc));
11445 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
11446 sc->sc_flags |= IWM_FLAG_HW_ERR;
11447 task_add(systq, &sc->init_task);
11448 }
11449 rv = 1;
11450 goto out;
11451 }
11452
11453 /* firmware chunk loaded */
11454 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
11455 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
11456 handled |= IWM_CSR_INT_BIT_FH_TX;
11457
11458 sc->sc_fw_chunk_done = 1;
11459 wakeup(&sc->sc_fw);
11460 }
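This completes a firmware-load handshake: the DMA upload path sleeps on &sc->sc_fw until the FH_TX interrupt sets sc_fw_chunk_done and wakes it. A sketch of what the waiting side looks like, assuming OpenBSD's tsleep_nsec(9) interface (the real loop lives in the firmware-loading code earlier in this file):

	/* Wait up to a second for the chunk-done interrupt. */
	while (!sc->sc_fw_chunk_done)
		if (tsleep_nsec(&sc->sc_fw, 0, "iwmfw", SEC_TO_NSEC(1)))
			break;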
11461
11462 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX |
11463 IWM_CSR_INT_BIT_RX_PERIODIC)) {
11464 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) {
11465 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
11466 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
11467 }
11468 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
11469 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
Value stored to 'handled' is never read
11470 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
11471 }
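This is the store the analyzer flags. handled is built up with |= throughout iwm_intr(), so every earlier store is read by a later one, but nothing reads handled after line 11469 before the function returns. A minimal sketch of one way to silence the report without changing behaviour (simply deleting the assignment would be equivalent):

	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		/* 'handled' is never read past this point, so just
		 * acknowledge the bit instead of accumulating it. */
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
	}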
11472
11473 /* Disable periodic interrupt; we use it as just a one-shot. */
11474 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
11475
11476 /*
11477 * Enable periodic interrupt in 8 msec only if we received
11478 * real RX interrupt (instead of just periodic int), to catch
11479 * any dangling Rx interrupt. If it was just the periodic
11480 * interrupt, there was no dangling Rx activity, and no need
11481 * to extend the periodic interrupt; one-shot is enough.
11482 */
11483 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX))
11484 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
11485 IWM_CSR_INT_PERIODIC_ENA);
11486
11487 iwm_notif_intr(sc);
11488 }
11489
11490 rv = 1;
11491
11492 out_ena:
11493 iwm_restore_interrupts(sc);
11494 out:
11495 return rv;
11496}
11497
11498int
11499iwm_intr_msix(void *arg)
11500{
11501 struct iwm_softc *sc = arg;
11502 struct ieee80211com *ic = &sc->sc_ic;
11503 struct ifnet *ifp = IC2IFP(ic);
11504 uint32_t inta_fh, inta_hw;
11505 int vector = 0;
11506
11507 inta_fh = IWM_READ(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD);
11508 inta_hw = IWM_READ(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD);
11509 IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
11510 IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
11511 inta_fh &= sc->sc_fh_mask;
11512 inta_hw &= sc->sc_hw_mask;
11513
11514 if (inta_fh & IWM_MSIX_FH_INT_CAUSES_Q0 ||
11515 inta_fh & IWM_MSIX_FH_INT_CAUSES_Q1) {
11516 iwm_notif_intr(sc);
11517 }
11518
11519 /* firmware chunk loaded */
11520 if (inta_fh & IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
11521 sc->sc_fw_chunk_done = 1;
11522 wakeup(&sc->sc_fw);
11523 }
11524
11525 if ((inta_fh & IWM_MSIX_FH_INT_CAUSES_FH_ERR) ||
11526 (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
11527 (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
11528 if (ifp->if_flags & IFF_DEBUG) {
11529 iwm_nic_error(sc);
11530 iwm_dump_driver_status(sc);
11531 }
11532 printf("%s: fatal firmware error\n", DEVNAME(sc));
11533 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
11534 task_add(systq, &sc->init_task);
11535 return 1;
11536 }
11537
11538 if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
11539 iwm_check_rfkill(sc);
11540 task_add(systq, &sc->init_task);
11541 }
11542
11543 if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
11544 printf("%s: hardware error, stopping device \n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
11545 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN0x100) == 0) {
11546 sc->sc_flags |= IWM_FLAG_HW_ERR0x80;
11547 task_add(systq, &sc->init_task);
11548 }
11549 return 1;
11550 }
11551
11552 /*
11553 * Before sending the interrupt the HW disables it to prevent
11554 * a nested interrupt. This is done by writing 1 to the corresponding
11555 * bit in the mask register. After handling the interrupt, it should be
11556 * re-enabled by clearing this bit. This register is defined as
11557 * write 1 clear (W1C) register, meaning that it's being clear
11558 * by writing 1 to the bit.
11559 */
11560 IWM_WRITE(sc, IWM_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
11561 return 1;
11562}
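A worked example of the W1C convention described in the comment above: vector is fixed at 0 here, so the write is 1 << 0 == 0x1, which clears bit 0 of the automask register and re-enables that MSI-X vector. With multiple vectors, each handler would clear only its own bit (a second vector's handler would write 1 << 1).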
11563
11564typedef void *iwm_match_t;
11565
11566static const struct pci_matchid iwm_devices[] = {
11567 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_1 },
11568 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_2 },
11569 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_1 },
11570 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_2 },
11571 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3168_1 },
11572 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
11573 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
11574 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_1 },
11575 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_2 },
11576 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_1 },
11577 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_2 },
11578 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8265_1 },
11579 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9260_1 },
11580 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_1 },
11581 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_2 },
11582 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_3 },
11583};
11584
11585int
11586 iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
11587 {
11588 return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
11589 nitems(iwm_devices));
11590}
11591
11592int
11593iwm_preinit(struct iwm_softc *sc)
11594{
11595 struct ieee80211com *ic = &sc->sc_ic;
11596 struct ifnet *ifp = IC2IFP(ic);
11597 int err;
11598
11599 err = iwm_prepare_card_hw(sc);
11600 if (err) {
11601 printf("%s: could not initialize hardware\n", DEVNAME(sc));
11602 return err;
11603 }
11604
11605 if (sc->attached) {
11606 /* Update MAC in case the upper layers changed it. */
11607 IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
11608 ((struct arpcom *)ifp)->ac_enaddr);
11609 return 0;
11610 }
11611
11612 err = iwm_start_hw(sc);
11613 if (err) {
11614 printf("%s: could not initialize hardware\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
11615 return err;
11616 }
11617
11618 err = iwm_run_init_mvm_ucode(sc, 1);
11619 iwm_stop_device(sc);
11620 if (err)
11621 return err;
11622
11623 /* Print version info and MAC address on first successful fw load. */
11624 sc->attached = 1;
11625 printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
11626 DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
11627 sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
11628
11629 if (sc->sc_nvm.sku_cap_11n_enable)
11630 iwm_setup_ht_rates(sc);
11631
11632 /* not all hardware can do 5GHz band */
11633 if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
11634 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
11635 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
11636
11637 /* Configure channel information obtained from firmware. */
11638 ieee80211_channel_init(ifp);
11639
11640 /* Configure MAC address. */
11641 err = if_setlladdr(ifp, ic->ic_myaddr);
11642 if (err)
11643 printf("%s: could not set MAC address (error %d)\n",
11644 DEVNAME(sc), err);
11645
11646 ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
11647
11648 return 0;
11649}
11650
11651void
11652iwm_attach_hook(struct device *self)
11653{
11654 struct iwm_softc *sc = (void *)self;
11655
11656 KASSERT(!cold);
11657
11658 iwm_preinit(sc);
11659}
11660
11661void
11662iwm_attach(struct device *parent, struct device *self, void *aux)
11663{
11664 struct iwm_softc *sc = (void *)self;
11665 struct pci_attach_args *pa = aux;
11666 pci_intr_handle_t ih;
11667 pcireg_t reg, memtype;
11668 struct ieee80211com *ic = &sc->sc_ic;
11669 struct ifnet *ifp = &ic->ic_if;
11670 const char *intrstr;
11671 int err;
11672 int txq_i, i, j;
11673
11674 sc->sc_pct = pa->pa_pc;
11675 sc->sc_pcitag = pa->pa_tag;
11676 sc->sc_dmat = pa->pa_dmat;
11677
11678 rw_init(&sc->ioctl_rwl, "iwmioctl");
11679
11680 err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
11681 PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
11682 if (err == 0) {
11683 printf("%s: PCIe capability structure not found!\n",
11684 DEVNAME(sc));
11685 return;
11686 }
11687
11688 /*
11689 * We disable the RETRY_TIMEOUT register (0x41) to keep
11690 * PCI Tx retries from interfering with C3 CPU state.
11691 */
11692 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
11693 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
11694
11695 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
11696 err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
11697 &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
11698 if (err) {
11699 printf("%s: can't map mem space\n", DEVNAME(sc));
11700 return;
11701 }
11702
11703 if (pci_intr_map_msix(pa, 0, &ih) == 0) {
11704 sc->sc_msix = 1;
11705 } else if (pci_intr_map_msi(pa, &ih)) {
11706 if (pci_intr_map(pa, &ih)) {
11707 printf("%s: can't map interrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
11708 return;
11709 }
11710 /* Hardware bug workaround. */
11711 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
11712 PCI_COMMAND_STATUS_REG);
11713 if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
11714 reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
11715 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
11716 PCI_COMMAND_STATUS_REG, reg);
11717 }
11718
11719 intrstr = pci_intr_string(sc->sc_pct, ih);
11720 if (sc->sc_msix)
11721 sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11722 iwm_intr_msix, sc, DEVNAME(sc));
11723 else
11724 sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11725 iwm_intr, sc, DEVNAME(sc));
11726
11727 if (sc->sc_ih == NULL) {
11728 printf("\n");
11729 printf("%s: can't establish interrupt", DEVNAME(sc));
11730 if (intrstr != NULL)
11731 printf(" at %s", intrstr);
11732 printf("\n");
11733 return;
11734 }
11735 printf(", %s\n", intrstr);
11736
11737 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
11738 switch (PCI_PRODUCT(pa->pa_id)) {
11739 case PCI_PRODUCT_INTEL_WL_3160_1:
11740 case PCI_PRODUCT_INTEL_WL_3160_2:
11741 sc->sc_fwname = "iwm-3160-17";
11742 sc->host_interrupt_operation_mode = 1;
11743 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11744 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11745 sc->sc_nvm_max_section_size = 16384;
11746 sc->nvm_type = IWM_NVM;
11747 break;
11748 case PCI_PRODUCT_INTEL_WL_3165_1:
11749 case PCI_PRODUCT_INTEL_WL_3165_2:
11750 sc->sc_fwname = "iwm-7265D-29";
11751 sc->host_interrupt_operation_mode = 0;
11752 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11753 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11754 sc->sc_nvm_max_section_size = 16384;
11755 sc->nvm_type = IWM_NVM;
11756 break;
11757 case PCI_PRODUCT_INTEL_WL_3168_1:
11758 sc->sc_fwname = "iwm-3168-29";
11759 sc->host_interrupt_operation_mode = 0;
11760 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11761 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11762 sc->sc_nvm_max_section_size = 16384;
11763 sc->nvm_type = IWM_NVM_SDP;
11764 break;
11765 case PCI_PRODUCT_INTEL_WL_7260_1:
11766 case PCI_PRODUCT_INTEL_WL_7260_2:
11767 sc->sc_fwname = "iwm-7260-17";
11768 sc->host_interrupt_operation_mode = 1;
11769 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11770 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11771 sc->sc_nvm_max_section_size = 16384;
11772 sc->nvm_type = IWM_NVM;
11773 break;
11774 case PCI_PRODUCT_INTEL_WL_7265_1:
11775 case PCI_PRODUCT_INTEL_WL_7265_2:
11776 sc->sc_fwname = "iwm-7265-17";
11777 sc->host_interrupt_operation_mode = 0;
11778 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11779 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11780 sc->sc_nvm_max_section_size = 16384;
11781 sc->nvm_type = IWM_NVM;
11782 break;
11783 case PCI_PRODUCT_INTEL_WL_8260_1:
11784 case PCI_PRODUCT_INTEL_WL_8260_2:
11785 sc->sc_fwname = "iwm-8000C-36";
11786 sc->host_interrupt_operation_mode = 0;
11787 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
11788 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11789 sc->sc_nvm_max_section_size = 32768;
11790 sc->nvm_type = IWM_NVM_EXT;
11791 break;
11792 case PCI_PRODUCT_INTEL_WL_8265_1:
11793 sc->sc_fwname = "iwm-8265-36";
11794 sc->host_interrupt_operation_mode = 0;
11795 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
11796 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11797 sc->sc_nvm_max_section_size = 32768;
11798 sc->nvm_type = IWM_NVM_EXT;
11799 break;
11800 case PCI_PRODUCT_INTEL_WL_9260_1:
11801 sc->sc_fwname = "iwm-9260-46";
11802 sc->host_interrupt_operation_mode = 0;
11803 sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
11804 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11805 sc->sc_nvm_max_section_size = 32768;
11806 sc->sc_mqrx_supported = 1;
11807 break;
11808 case PCI_PRODUCT_INTEL_WL_9560_1:
11809 case PCI_PRODUCT_INTEL_WL_9560_2:
11810 case PCI_PRODUCT_INTEL_WL_9560_3:
11811 sc->sc_fwname = "iwm-9000-46";
11812 sc->host_interrupt_operation_mode = 0;
11813 sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
11814 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11815 sc->sc_nvm_max_section_size = 32768;
11816 sc->sc_mqrx_supported = 1;
11817 sc->sc_integrated = 1;
11818 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_WL_9560_3) {
11819 sc->sc_xtal_latency = 670;
11820 sc->sc_extra_phy_config = IWM_FW_PHY_CFG_SHARED_CLK;
11821 } else
11822 sc->sc_xtal_latency = 650;
11823 break;
11824 default:
11825 printf("%s: unknown adapter type\n", DEVNAME(sc));
11826 return;
11827 }
11828
11829 /*
11830 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
11831 * changed, and now the revision step also includes bit 0-1 (no more
11832 * "dash" value). To keep hw_rev backwards compatible - we'll store it
11833 * in the old format.
11834 */
11835 if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
11836 uint32_t hw_step;
11837
11838 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
11839 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
11840
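A worked example of the re-encoding, given IWM_CSR_HW_REV_STEP(x) == ((x & 0xC) >> 2): the expression reduces to (hw_rev & 0xfff0) | ((hw_rev & 0x3) << 2), so a hw_rev of 0x0321 (step bits 0-1 == 1) becomes 0x0320 | (1 << 2) == 0x0324, with the step back in the legacy "dash" position (bits 2-3).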
11841 if (iwm_prepare_card_hw(sc) != 0) {
11842 printf("%s: could not initialize hardware\n",
11843 DEVNAME(sc));
11844 return;
11845 }
11846
11847 /*
11848 * In order to recognize C step the driver should read the
11849 * chip version id located at the AUX bus MISC address.
11850 */
11851 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
11852 IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
11853 DELAY(2);
11854
11855 err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
11856 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
11857 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
11858 25000);
11859 if (!err) {
11860 printf("%s: Failed to wake up the nic\n", DEVNAME(sc));
11861 return;
11862 }
11863
11864 if (iwm_nic_lock(sc)) {
11865 hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
11866 hw_step |= IWM_ENABLE_WFPM;
11867 iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
11868 hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
11869 hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
11870 if (hw_step == 0x3)
11871 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
11872 (IWM_SILICON_C_STEP << 2);
11873 iwm_nic_unlock(sc);
11874 } else {
11875 printf("%s: Failed to lock the nic\n", DEVNAME(sc));
11876 return;
11877 }
11878 }
11879
11880 /*
11881 * Allocate DMA memory for firmware transfers.
11882 * Must be aligned on a 16-byte boundary.
11883 */
11884 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
11885 sc->sc_fwdmasegsz, 16);
11886 if (err) {
11887 printf("%s: could not allocate memory for firmware\n",
11888 DEVNAME(sc));
11889 return;
11890 }
11891
11892 /* Allocate "Keep Warm" page, used internally by the card. */
11893 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
11894 if (err) {
11895 printf("%s: could not allocate keep warm page\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
11896 goto fail1;
11897 }
11898
11899 /* Allocate interrupt cause table (ICT).*/
11900 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
11901 IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
11902 if (err) {
11903 printf("%s: could not allocate ICT table\n", DEVNAME(sc));
11904 goto fail2;
11905 }
11906
11907 /* TX scheduler rings must be aligned on a 1KB boundary. */
11908 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
11909 nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
11910 if (err) {
11911 printf("%s: could not allocate TX scheduler rings\n",
11912 DEVNAME(sc));
11913 goto fail3;
11914 }
11915
11916 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
11917 err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
11918 if (err) {
11919 printf("%s: could not allocate TX ring %d\n",
11920 DEVNAME(sc), txq_i);
11921 goto fail4;
11922 }
11923 }
11924
11925 err = iwm_alloc_rx_ring(sc, &sc->rxq);
11926 if (err) {
11927 printf("%s: could not allocate RX ring\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
11928 goto fail4;
11929 }
11930
11931 sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET0x4, 0);
11932 if (sc->sc_nswq == NULL((void *)0))
11933 goto fail4;
11934
11935 /* Clear pending interrupts. */
11936 IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
11937
11938 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
11939 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
11940 ic->ic_state = IEEE80211_S_INIT;
11941
11942 /* Set device capabilities. */
11943 ic->ic_caps =
11944 IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
11945 IEEE80211_C_WEP | /* WEP */
11946 IEEE80211_C_RSN | /* WPA/RSN */
11947 IEEE80211_C_SCANALL | /* device scans all channels at once */
11948 IEEE80211_C_SCANALLBAND | /* device scans all bands at once */
11949 IEEE80211_C_MONITOR | /* monitor mode supported */
11950 IEEE80211_C_SHSLOT | /* short slot time supported */
11951 IEEE80211_C_SHPREAMBLE; /* short preamble supported */
11952
11953 ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
11954 ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
11955 ic->ic_htcaps |=
11956 (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
11957 ic->ic_htxcaps = 0;
11958 ic->ic_txbfcaps = 0;
11959 ic->ic_aselcaps = 0;
11960 ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
11961
11962 ic->ic_vhtcaps = IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895 |
11963 (IEEE80211_VHTCAP_MAX_AMPDU_LEN_64K <<
11964 IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT) |
11965 (IEEE80211_VHTCAP_CHAN_WIDTH_80 <<
11966 IEEE80211_VHTCAP_CHAN_WIDTH_SHIFT) | IEEE80211_VHTCAP_SGI80 |
11967 IEEE80211_VHTCAP_RX_ANT_PATTERN | IEEE80211_VHTCAP_TX_ANT_PATTERN;
11968
11969 ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
11970 ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
11971 ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
11972
11973 for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
11974 sc->sc_phyctxt[i].id = i;
11975 sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
11976 sc->sc_phyctxt[i].vht_chan_width =
11977 IEEE80211_VHTOP0_CHAN_WIDTH_HT;
11978 }
11979
11980 sc->sc_amrr.amrr_min_success_threshold = 1;
11981 sc->sc_amrr.amrr_max_success_threshold = 15;
11982
11983 /* IBSS channel undefined for now. */
11984 ic->ic_ibss_chan = &ic->ic_channels[1];
11985
11986 ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
11987
11988 ifp->if_softc = sc;
11989 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
11990 ifp->if_ioctl = iwm_ioctl;
11991 ifp->if_start = iwm_start;
11992 ifp->if_watchdog = iwm_watchdog;
11993 memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
11994
11995 if_attach(ifp);
11996 ieee80211_ifattach(ifp);
11997 ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
11998
11999#if NBPFILTER > 0
12000 iwm_radiotap_attach(sc);
12001#endif
12002 timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
12003 timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
12004 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
12005 struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
12006 rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
12007 rxba->sc = sc;
12008 timeout_set(&rxba->session_timer, iwm_rx_ba_session_expired,
12009 rxba);
12010 timeout_set(&rxba->reorder_buf.reorder_timer,
12011 iwm_reorder_timer_expired, &rxba->reorder_buf);
12012 for (j = 0; j < nitems(rxba->entries); j++)
12013 ml_init(&rxba->entries[j].frames);
12014 }
12015 task_set(&sc->init_task, iwm_init_task, sc);
12016 task_set(&sc->newstate_task, iwm_newstate_task, sc);
12017 task_set(&sc->ba_task, iwm_ba_task, sc);
12018 task_set(&sc->mac_ctxt_task, iwm_mac_ctxt_task, sc);
12019 task_set(&sc->phy_ctxt_task, iwm_phy_ctxt_task, sc);
12020 task_set(&sc->bgscan_done_task, iwm_bgscan_done_task, sc);
12021
12022 ic->ic_node_alloc = iwm_node_alloc;
12023 ic->ic_bgscan_start = iwm_bgscan;
12024 ic->ic_bgscan_done = iwm_bgscan_done;
12025 ic->ic_set_key = iwm_set_key;
12026 ic->ic_delete_key = iwm_delete_key;
12027
12028 /* Override 802.11 state transition machine. */
12029 sc->sc_newstate = ic->ic_newstate;
12030 ic->ic_newstate = iwm_newstate;
12031 ic->ic_updateprot = iwm_updateprot;
12032 ic->ic_updateslot = iwm_updateslot;
12033 ic->ic_updateedca = iwm_updateedca;
12034 ic->ic_updatechan = iwm_updatechan;
12035 ic->ic_updatedtim = iwm_updatedtim;
12036 ic->ic_ampdu_rx_start = iwm_ampdu_rx_start;
12037 ic->ic_ampdu_rx_stop = iwm_ampdu_rx_stop;
12038 ic->ic_ampdu_tx_start = iwm_ampdu_tx_start;
12039 ic->ic_ampdu_tx_stop = iwm_ampdu_tx_stop;
12040 /*
12041 * We cannot read the MAC address without loading the
12042 * firmware from disk. Postpone until mountroot is done.
12043 */
12044 config_mountroot(self, iwm_attach_hook);
12045
12046 return;
12047
12048fail4: while (--txq_i >= 0)
12049 iwm_free_tx_ring(sc, &sc->txq[txq_i]);
12050 iwm_free_rx_ring(sc, &sc->rxq);
12051 iwm_dma_contig_free(&sc->sched_dma);
12052fail3: if (sc->ict_dma.vaddr != NULL)
12053 iwm_dma_contig_free(&sc->ict_dma);
12054
12055fail2: iwm_dma_contig_free(&sc->kw_dma);
12056fail1: iwm_dma_contig_free(&sc->fw_dma);
12057 return;
12058}
12059
12060#if NBPFILTER > 0
12061void
12062iwm_radiotap_attach(struct iwm_softc *sc)
12063{
12064 bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
12065 sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
12066
12067 sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
12068 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
12069 sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
12070
12071 sc->sc_txtap_len = sizeof sc->sc_txtapu;
12072 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
12073 sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
12074}
12075#endif
12076
12077void
12078iwm_init_task(void *arg1)
12079{
12080 struct iwm_softc *sc = arg1;
12081 struct ifnet *ifp = &sc->sc_ic.ic_if;
12082 int s = splnet();
12083 int generation = sc->sc_generation;
12084 int fatal = (sc->sc_flags & (IWM_FLAG_HW_ERR | IWM_FLAG_RFKILL));
12085
12086 rw_enter_write(&sc->ioctl_rwl);
12087 if (generation != sc->sc_generation) {
12088 rw_exit(&sc->ioctl_rwl);
12089 splx(s);
12090 return;
12091 }
12092
12093 if (ifp->if_flags & IFF_RUNNING)
12094 iwm_stop(ifp);
12095 else
12096 sc->sc_flags &= ~IWM_FLAG_HW_ERR;
12097
12098 if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
12099 iwm_init(ifp);
12100
12101 rw_exit(&sc->ioctl_rwl);
12102 splx(s);
12103}
12104
12105void
12106iwm_resume(struct iwm_softc *sc)
12107{
12108 pcireg_t reg;
12109
12110 /*
12111 * We disable the RETRY_TIMEOUT register (0x41) to keep
12112 * PCI Tx retries from interfering with C3 CPU state.
12113 */
12114 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
12115 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
12116
12117 if (!sc->sc_msix) {
12118 /* Hardware bug workaround. */
12119 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
12120 PCI_COMMAND_STATUS_REG);
12121 if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
12122 reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
12123 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
12124 PCI_COMMAND_STATUS_REG, reg);
12125 }
12126
12127 iwm_disable_interrupts(sc);
12128}
12129
12130int
12131iwm_wakeup(struct iwm_softc *sc)
12132{
12133 struct ieee80211com *ic = &sc->sc_ic;
12134 struct ifnet *ifp = &sc->sc_ic.ic_if;
12135 int err;
12136
12137 err = iwm_start_hw(sc);
12138 if (err)
12139 return err;
12140
12141 err = iwm_init_hw(sc);
12142 if (err)
12143 return err;
12144
12145 refcnt_init(&sc->task_refs);
12146 ifq_clr_oactive(&ifp->if_snd);
12147 ifp->if_flags |= IFF_RUNNING;
12148
12149 if (ic->ic_opmode == IEEE80211_M_MONITOR)
12150 ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
12151 else
12152 ieee80211_begin_scan(ifp);
12153
12154 return 0;
12155}
12156
12157int
12158iwm_activate(struct device *self, int act)
12159{
12160 struct iwm_softc *sc = (struct iwm_softc *)self;
12161 struct ifnet *ifp = &sc->sc_ic.ic_if;
12162 int err = 0;
12163
12164 switch (act) {
12165 case DVACT_QUIESCE:
12166 if (ifp->if_flags & IFF_RUNNING) {
12167 rw_enter_write(&sc->ioctl_rwl);
12168 iwm_stop(ifp);
12169 rw_exit(&sc->ioctl_rwl);
12170 }
12171 break;
12172 case DVACT_RESUME:
12173 iwm_resume(sc);
12174 break;
12175 case DVACT_WAKEUP:
12176 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
12177 err = iwm_wakeup(sc);
12178 if (err)
12179 printf("%s: could not initialize hardware\n",
12180 DEVNAME(sc));
12181 }
12182 break;
12183 }
12184
12185 return 0;
12186}
12187
12188struct cfdriver iwm_cd = {
12189 NULL((void *)0), "iwm", DV_IFNET
12190};
12191
12192const struct cfattach iwm_ca = {
12193 sizeof(struct iwm_softc), iwm_match, iwm_attach,
12194 NULL, iwm_activate
12195};